From 7b9726dd649efa169cd86c3c0d1f0395e4655a38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Nov 2024 15:33:51 +0000 Subject: [PATCH 01/63] Bump github.com/databricks/databricks-sdk-go from 0.51.0 to 0.52.0 (#1931) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.51.0 to 0.52.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.52.0

Internal Changes

API Changes:

OpenAPI SHA: f2385add116e3716c8a90a0b68e204deb40f996c, Date: 2024-11-15

Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

[Release] Release v0.52.0

Internal Changes

API Changes:

OpenAPI SHA: f2385add116e3716c8a90a0b68e204deb40f996c, Date: 2024-11-15

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.51.0&new-version=0.52.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
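For reference, a maintainer could apply the same bump locally instead of waiting for this PR; a minimal sketch using the standard Go toolchain (run from the repository root, assuming the module's `go.mod` lives there):

```
# Update the dependency pin in go.mod/go.sum, then prune unused entries.
go get github.com/databricks/databricks-sdk-go@v0.52.0
go mod tidy
```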
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- bundle/schema/jsonschema.json | 8 +- cmd/account/budgets/budgets.go | 2 +- .../custom-app-integration.go | 5 +- .../workspace-assignment.go | 2 +- cmd/workspace/apps/apps.go | 8 +- cmd/workspace/apps/overrides.go | 59 ------------ cmd/workspace/credentials/credentials.go | 95 ++++++++++++++----- cmd/workspace/lakeview/lakeview.go | 10 +- .../notification-destinations.go | 5 +- cmd/workspace/repos/repos.go | 6 +- .../token-management/token-management.go | 6 +- go.mod | 2 +- go.sum | 4 +- 14 files changed, 105 insertions(+), 109 deletions(-) delete mode 100644 cmd/workspace/apps/overrides.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 5f4b508602..a2ba58aa56 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d25296d2f4aa7bd6195c816fdf82e0f960f775da \ No newline at end of file +f2385add116e3716c8a90a0b68e204deb40f996c \ No newline at end of file diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 703daafebd..cf003f4235 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -817,6 +817,9 @@ "metastore": { "$ref": "#/$defs/string" }, + "notification_destination": { + "$ref": "#/$defs/string" + }, "pipeline": { "$ref": "#/$defs/string" }, @@ -1079,6 +1082,9 @@ "pipelines_development": { "$ref": "#/$defs/bool" }, + "source_linked_deployment": { + "$ref": "#/$defs/bool" + }, "tags": { "$ref": "#/$defs/map/string" }, @@ -2824,7 +2830,7 @@ "anyOf": [ { "type": "object", - "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nExactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an error is thrown.", + "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", "properties": { "service_principal_name": { "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 87ed41d576..748dc6994d 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -194,7 +194,7 @@ func newGet() *cobra.Command { configuration are specified by ID. Arguments: - BUDGET_ID: The Databricks budget configuration ID.` + BUDGET_ID: The budget configuration ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index 9d16a44d45..1eec1018eb 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -195,7 +195,10 @@ func newGet() *cobra.Command { cmd.Short = `Get OAuth Custom App Integration.` cmd.Long = `Get OAuth Custom App Integration. - Gets the Custom OAuth App Integration for the given integration id.` + Gets the Custom OAuth App Integration for the given integration id. 
+ + Arguments: + INTEGRATION_ID: The OAuth app integration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index e09095d371..c5385c92a4 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -257,7 +257,7 @@ func newUpdate() *cobra.Command { workspace for the specified principal. Arguments: - WORKSPACE_ID: The workspace ID for the account. + WORKSPACE_ID: The workspace ID. PRINCIPAL_ID: The ID of the user, service principal, or group.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 514da697b2..a103ba7a87 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -942,14 +942,13 @@ func newUpdate() *cobra.Command { // TODO: complex arg: pending_deployment // TODO: array: resources - cmd.Use = "update NAME NAME" + cmd.Use = "update NAME" cmd.Short = `Update an app.` cmd.Long = `Update an app. Updates the app with the supplied name. Arguments: - NAME: The name of the app. NAME: The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It must be unique within the workspace.` @@ -963,7 +962,7 @@ func newUpdate() *cobra.Command { } return nil } - check := root.ExactArgs(2) + check := root.ExactArgs(1) return check(cmd, args) } @@ -985,9 +984,6 @@ func newUpdate() *cobra.Command { } } updateReq.Name = args[0] - if !cmd.Flags().Changed("json") { - updateReq.App.Name = args[1] - } response, err := w.Apps.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go deleted file mode 100644 index debd9f5a66..0000000000 --- a/cmd/workspace/apps/overrides.go +++ /dev/null @@ -1,59 +0,0 @@ -package apps - -import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/apps" - "github.com/spf13/cobra" -) - -// We override apps.Update command beccause currently genkit does not support -// a way to identify that path field (such as name) matches the field in the request body. -// As a result, genkit generates a command with 2 required same fields, update NAME NAME. -// This override should be removed when genkit supports this. -func updateOverride(cmd *cobra.Command, req *apps.UpdateAppRequest) { - cmd.Use = "update NAME" - cmd.Long = `Update an app. - - Updates the app with the supplied name. - - Arguments: - NAME: The name of the app. The name must contain only lowercase alphanumeric - characters and hyphens. 
It must be unique within the workspace.` - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - updateJson := cmd.Flag("json").Value.(*flags.JsonFlag) - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&req.App) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - - req.Name = args[0] - response, err := w.Apps.Update(ctx, *req) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } -} - -func init() { - updateOverrides = append(updateOverrides, updateOverride) -} diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go index 869df06285..44ee0cf310 100755 --- a/cmd/workspace/credentials/credentials.go +++ b/cmd/workspace/credentials/credentials.go @@ -3,6 +3,8 @@ package credentials import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -30,9 +32,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "catalog", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods @@ -72,21 +71,39 @@ func newCreateCredential() *cobra.Command { // TODO: complex arg: aws_iam_role // TODO: complex arg: azure_managed_identity + // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`) - cmd.Flags().StringVar(&createCredentialReq.Name, "name", createCredentialReq.Name, `The credential name.`) - cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE]`) + // TODO: complex arg: gcp_service_account_key + cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE, STORAGE]`) + cmd.Flags().BoolVar(&createCredentialReq.ReadOnly, "read-only", createCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`) cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`) - cmd.Use = "create-credential" + cmd.Use = "create-credential NAME" cmd.Short = `Create a credential.` cmd.Long = `Create a credential. - Creates a new credential.` + Creates a new credential. The type of credential to be created is determined + by the **purpose** field, which should be either **SERVICE** or **STORAGE**. + + The caller must be a metastore admin or have the metastore privilege + **CREATE_STORAGE_CREDENTIAL** for storage credentials, or + **CREATE_SERVICE_CREDENTIAL** for service credentials. + + Arguments: + NAME: The credential name. The name must be unique among storage and service + credentials within the metastore.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) return check(cmd, args) } @@ -107,6 +124,9 @@ func newCreateCredential() *cobra.Command { } } } + if !cmd.Flags().Changed("json") { + createCredentialReq.Name = args[0] + } response, err := w.Credentials.CreateCredential(ctx, createCredentialReq) if err != nil { @@ -143,14 +163,14 @@ func newDeleteCredential() *cobra.Command { // TODO: short flags - cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force deletion even if there are dependent services.`) + cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`) cmd.Use = "delete-credential NAME_ARG" cmd.Short = `Delete a credential.` cmd.Long = `Delete a credential. - Deletes a credential from the metastore. The caller must be an owner of the - credential. + Deletes a service or storage credential from the metastore. The caller must be + an owner of the credential. Arguments: NAME_ARG: Name of the credential.` @@ -207,20 +227,29 @@ func newGenerateTemporaryServiceCredential() *cobra.Command { cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: azure_options - cmd.Flags().StringVar(&generateTemporaryServiceCredentialReq.CredentialName, "credential-name", generateTemporaryServiceCredentialReq.CredentialName, `The name of the service credential used to generate a temporary credential.`) - cmd.Use = "generate-temporary-service-credential" + cmd.Use = "generate-temporary-service-credential CREDENTIAL_NAME" cmd.Short = `Generate a temporary service credential.` cmd.Long = `Generate a temporary service credential. Returns a set of temporary credentials generated using the specified service credential. The caller must be a metastore admin or have the metastore - privilege **ACCESS** on the service credential.` + privilege **ACCESS** on the service credential. + + Arguments: + CREDENTIAL_NAME: The name of the service credential used to generate a temporary credential` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'credential_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) return check(cmd, args) } @@ -241,6 +270,9 @@ func newGenerateTemporaryServiceCredential() *cobra.Command { } } } + if !cmd.Flags().Changed("json") { + generateTemporaryServiceCredentialReq.CredentialName = args[0] + } response, err := w.Credentials.GenerateTemporaryServiceCredential(ctx, generateTemporaryServiceCredentialReq) if err != nil { @@ -281,8 +313,9 @@ func newGetCredential() *cobra.Command { cmd.Short = `Get a credential.` cmd.Long = `Get a credential. - Gets a credential from the metastore. The caller must be a metastore admin, - the owner of the credential, or have any permission on the credential. + Gets a service or storage credential from the metastore. The caller must be a + metastore admin, the owner of the credential, or have any permission on the + credential. 
Arguments: NAME_ARG: Name of the credential.` @@ -338,7 +371,7 @@ func newListCredentials() *cobra.Command { cmd.Flags().IntVar(&listCredentialsReq.MaxResults, "max-results", listCredentialsReq.MaxResults, `Maximum number of credentials to return.`) cmd.Flags().StringVar(&listCredentialsReq.PageToken, "page-token", listCredentialsReq.PageToken, `Opaque token to retrieve the next page of results.`) - cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE]`) + cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE, STORAGE]`) cmd.Use = "list-credentials" cmd.Short = `List credentials.` @@ -399,18 +432,20 @@ func newUpdateCredential() *cobra.Command { // TODO: complex arg: aws_iam_role // TODO: complex arg: azure_managed_identity + // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`) - cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force update even if there are dependent services.`) + cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`) cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`) cmd.Flags().StringVar(&updateCredentialReq.Owner, "owner", updateCredentialReq.Owner, `Username of current owner of credential.`) + cmd.Flags().BoolVar(&updateCredentialReq.ReadOnly, "read-only", updateCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`) cmd.Flags().BoolVar(&updateCredentialReq.SkipValidation, "skip-validation", updateCredentialReq.SkipValidation, `Supply true to this argument to skip validation of the updated credential.`) cmd.Use = "update-credential NAME_ARG" cmd.Short = `Update a credential.` cmd.Long = `Update a credential. - Updates a credential on the metastore. + Updates a service or storage credential on the metastore. The caller must be the owner of the credential or a metastore admin or have the MANAGE permission. If the caller is a metastore admin, only the @@ -485,7 +520,10 @@ func newValidateCredential() *cobra.Command { // TODO: complex arg: aws_iam_role // TODO: complex arg: azure_managed_identity cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`) - cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE]`) + cmd.Flags().StringVar(&validateCredentialReq.ExternalLocationName, "external-location-name", validateCredentialReq.ExternalLocationName, `The name of an existing external location to validate.`) + cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. 
Supported values: [SERVICE, STORAGE]`) + cmd.Flags().BoolVar(&validateCredentialReq.ReadOnly, "read-only", validateCredentialReq.ReadOnly, `Whether the credential is only usable for read operations.`) + cmd.Flags().StringVar(&validateCredentialReq.Url, "url", validateCredentialReq.Url, `The external location url to validate.`) cmd.Use = "validate-credential" cmd.Short = `Validate a credential.` @@ -493,10 +531,19 @@ func newValidateCredential() *cobra.Command { Validates a credential. - Either the __credential_name__ or the cloud-specific credential must be - provided. + For service credentials (purpose is **SERVICE**), either the + __credential_name__ or the cloud-specific credential must be provided. + + For storage credentials (purpose is **STORAGE**), at least one of + __external_location_name__ and __url__ need to be provided. If only one of + them is provided, it will be used for validation. And if both are provided, + the __url__ will be used for validation, and __external_location_name__ will + be ignored when checking overlapping urls. Either the __credential_name__ or + the cloud-specific credential must be provided. - The caller must be a metastore admin or the credential owner.` + The caller must be a metastore admin or the credential owner or have the + required permission on the metastore and the credential (e.g., + **CREATE_EXTERNAL_LOCATION** when purpose is **STORAGE**).` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 239c72b6ef..35c3bdf4e6 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -503,7 +503,7 @@ func newGetPublished() *cobra.Command { Get the current published dashboard. Arguments: - DASHBOARD_ID: UUID identifying the dashboard to be published.` + DASHBOARD_ID: UUID identifying the published dashboard.` cmd.Annotations = make(map[string]string) @@ -737,7 +737,7 @@ func newListSchedules() *cobra.Command { cmd.Long = `List dashboard schedules. Arguments: - DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + DASHBOARD_ID: UUID identifying the dashboard to which the schedules belongs.` // This command is being previewed; hide from help output. cmd.Hidden = true @@ -795,8 +795,8 @@ func newListSubscriptions() *cobra.Command { cmd.Long = `List schedule subscriptions. Arguments: - DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. - SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + DASHBOARD_ID: UUID identifying the dashboard which the subscriptions belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscriptions belongs.` // This command is being previewed; hide from help output. cmd.Hidden = true @@ -1072,7 +1072,7 @@ func newUnpublish() *cobra.Command { Unpublish the dashboard. Arguments: - DASHBOARD_ID: UUID identifying the dashboard to be published.` + DASHBOARD_ID: UUID identifying the published dashboard.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/notification-destinations/notification-destinations.go b/cmd/workspace/notification-destinations/notification-destinations.go index 470765879d..b06652c71a 100755 --- a/cmd/workspace/notification-destinations/notification-destinations.go +++ b/cmd/workspace/notification-destinations/notification-destinations.go @@ -304,7 +304,10 @@ func newUpdate() *cobra.Command { cmd.Long = `Update a notification destination. Updates a notification destination. 
Requires workspace admin permissions. At - least one field is required in the request body.` + least one field is required in the request body. + + Arguments: + ID: UUID identifying notification destination.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 7dcb135384..7994726502 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -171,7 +171,7 @@ func newDelete() *cobra.Command { Deletes the specified repo. Arguments: - REPO_ID: ID of the Git folder (repo) object in the workspace.` + REPO_ID: The ID for the corresponding repo to delete.` cmd.Annotations = make(map[string]string) @@ -188,14 +188,14 @@ func newDelete() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "ID of the Git folder (repo) object in the workspace") + id, err := cmdio.Select(ctx, names, "The ID for the corresponding repo to delete") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have id of the git folder (repo) object in the workspace") + return fmt.Errorf("expected to have the id for the corresponding repo to delete") } _, err = fmt.Sscan(args[0], &deleteReq.RepoId) if err != nil { diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index c8d57fd6d3..fcc70c126a 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -169,7 +169,7 @@ func newDelete() *cobra.Command { Deletes a token, specified by its ID. Arguments: - TOKEN_ID: The ID of the token to get.` + TOKEN_ID: The ID of the token to revoke.` cmd.Annotations = make(map[string]string) @@ -186,14 +186,14 @@ func newDelete() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. 
Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "The ID of the token to get") + id, err := cmdio.Select(ctx, names, "The ID of the token to revoke") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have the id of the token to get") + return fmt.Errorf("expected to have the id of the token to revoke") } deleteReq.TokenId = args[0] diff --git a/go.mod b/go.mod index 353313131d..7141ed768a 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.2 require ( github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.51.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.52.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 312a4d9345..5d2c53a376 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M= -github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.52.0 h1:WKcj0F+pdx0gjI5xMicjYC4O43S2q5nyTpaGGMFmgHw= +github.com/databricks/databricks-sdk-go v0.52.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 00bd98f898b726a4f39d4b70fe7a76a25b15cf10 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Mon, 2 Dec 2024 10:49:32 +0100 Subject: [PATCH 02/63] Move loadGitDetails mutator to Initialize phase (#1944) This will require API call when run inside a workspace, which will require workspace client (we don't have one at the current point). We want to keep Load phase quick, since it's common across all commands. --- bundle/config/mutator/mutator.go | 1 - bundle/phases/initialize.go | 1 + bundle/tests/environment_git_test.go | 5 +++++ bundle/tests/git_test.go | 3 +++ 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index faf50ae6e3..5fd9f53e5b 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -26,7 +26,6 @@ func DefaultMutators() []bundle.Mutator { ComputeIdToClusterId(), InitializeVariables(), DefineDefaultTarget(), - LoadGitDetails(), pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad), // Note: This mutator must run before the target overrides are merged. 
diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 3d5ad5e8bd..3345872022 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -39,6 +39,7 @@ func Initialize() bundle.Mutator { mutator.MergePipelineClusters(), mutator.InitializeWorkspaceClient(), mutator.PopulateCurrentUser(), + mutator.LoadGitDetails(), mutator.DefineDefaultWorkspaceRoot(), mutator.ExpandWorkspaceRoot(), diff --git a/bundle/tests/environment_git_test.go b/bundle/tests/environment_git_test.go index ad4aec2e6f..d4695c78dd 100644 --- a/bundle/tests/environment_git_test.go +++ b/bundle/tests/environment_git_test.go @@ -1,15 +1,19 @@ package config_tests import ( + "context" "fmt" "strings" "testing" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" "github.com/stretchr/testify/assert" ) func TestGitAutoLoadWithEnvironment(t *testing.T) { b := load(t, "./environments_autoload_git") + bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.True(t, b.Config.Bundle.Git.Inferred) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) @@ -17,6 +21,7 @@ func TestGitAutoLoadWithEnvironment(t *testing.T) { func TestGitManuallySetBranchWithEnvironment(t *testing.T) { b := loadTarget(t, "./environments_autoload_git", "production") + bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go index 21eaaedd29..dec6c268a0 100644 --- a/bundle/tests/git_test.go +++ b/bundle/tests/git_test.go @@ -14,6 +14,7 @@ import ( func TestGitAutoLoad(t *testing.T) { b := load(t, "./autoload_git") + bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.True(t, b.Config.Bundle.Git.Inferred) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) @@ -21,6 +22,7 @@ func TestGitAutoLoad(t *testing.T) { func TestGitManuallySetBranch(t *testing.T) { b := loadTarget(t, "./autoload_git", "production") + bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") @@ -34,6 +36,7 @@ func TestGitBundleBranchValidation(t *testing.T) { }) b := load(t, "./git_branch_validation") + bundle.Apply(context.Background(), b, mutator.LoadGitDetails()) assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "feature-a", b.Config.Bundle.Git.Branch) assert.Equal(t, "feature-b", b.Config.Bundle.Git.ActualBranch) From e86a949d9908dea65117419d3e2d55e8858cb5c0 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 2 Dec 2024 15:59:29 +0530 Subject: [PATCH 03/63] Add the `bundle_uuid` helper function for templates (#1947) ## Changes This PR adds the `bundle_uuid` helper function that'll return a 
stable identifier for the bundle for the duration of the `bundle init` command. This is also the UUID that'll be set in the telemetry event sent during `databricks bundle init` and would be used to correlate revenue from bundle init with resource deployments. Template authors should add the uuid field to their `databricks.yml` file they generate: ``` bundle: # A stable identified for your DAB project. We use this UUID in the Databricks backend # to correlate and identify multiple deployments of the same DAB project. uuid: {{ bundle_uuid }} ``` ## Tests Unit test --- bundle/config/bundle.go | 4 ++++ libs/template/helpers.go | 10 ++++++++++ libs/template/helpers_test.go | 18 ++++++++++++++++++ .../testdata/bundle-uuid/library/.gitkeep | 0 .../testdata/bundle-uuid/template/bar.txt.tmpl | 1 + .../testdata/bundle-uuid/template/foo.txt.tmpl | 1 + 6 files changed, 34 insertions(+) create mode 100644 libs/template/testdata/bundle-uuid/library/.gitkeep create mode 100644 libs/template/testdata/bundle-uuid/template/bar.txt.tmpl create mode 100644 libs/template/testdata/bundle-uuid/template/foo.txt.tmpl diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index f533c4d184..af9e1f4b50 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -49,4 +49,8 @@ type Bundle struct { // Databricks CLI version constraints required to run the bundle. DatabricksCliVersion string `json:"databricks_cli_version,omitempty"` + + // A stable generated UUID for the bundle. This is normally serialized by + // Databricks first party template when a user runs bundle init. + Uuid string `json:"uuid,omitempty"` } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index f25cbee4f2..7f7acbd24a 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -35,6 +35,13 @@ var cachedUser *iam.User var cachedIsServicePrincipal *bool var cachedCatalog *string +// UUID that is stable for the duration of the template execution. This can be used +// to populate the `bundle.uuid` field in databricks.yml by template authors. +// +// It's automatically logged in our telemetry logs when `databricks bundle init` +// is run and can be used to attribute DBU revenue to bundle templates. +var bundleUuid = uuid.New().String() + func loadHelpers(ctx context.Context) template.FuncMap { w := root.WorkspaceClient(ctx) return template.FuncMap{ @@ -57,6 +64,9 @@ func loadHelpers(ctx context.Context) template.FuncMap { "uuid": func() string { return uuid.New().String() }, + "bundle_uuid": func() string { + return bundleUuid + }, // A key value pair. 
This is used with the map function to generate maps // to use inside a template "pair": func(k string, v any) pair { diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 9f5804c036..802a8f6d55 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -32,6 +32,24 @@ func TestTemplatePrintStringWithoutProcessing(t *testing.T) { assert.Equal(t, `{{ fail "abc" }}`, cleanContent) } +func TestTemplateBundleUuidFunction(t *testing.T) { + ctx := context.Background() + + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/bundle-uuid/template", "./testdata/bundle-uuid/library") + require.NoError(t, err) + + err = r.walk() + assert.NoError(t, err) + + assert.Len(t, r.files, 2) + c1 := strings.Trim(string(r.files[0].(*inMemoryFile).content), "\n\r") + assert.Equal(t, strings.Repeat(bundleUuid, 3), c1) + c2 := strings.Trim(string(r.files[1].(*inMemoryFile).content), "\n\r") + assert.Equal(t, strings.Repeat(bundleUuid, 5), c2) +} + func TestTemplateRegexpCompileFunction(t *testing.T) { ctx := context.Background() diff --git a/libs/template/testdata/bundle-uuid/library/.gitkeep b/libs/template/testdata/bundle-uuid/library/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libs/template/testdata/bundle-uuid/template/bar.txt.tmpl b/libs/template/testdata/bundle-uuid/template/bar.txt.tmpl new file mode 100644 index 0000000000..824412df23 --- /dev/null +++ b/libs/template/testdata/bundle-uuid/template/bar.txt.tmpl @@ -0,0 +1 @@ +{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}} diff --git a/libs/template/testdata/bundle-uuid/template/foo.txt.tmpl b/libs/template/testdata/bundle-uuid/template/foo.txt.tmpl new file mode 100644 index 0000000000..64150e42bc --- /dev/null +++ b/libs/template/testdata/bundle-uuid/template/foo.txt.tmpl @@ -0,0 +1 @@ +{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}}{{bundle_uuid}} From 94b221b8ba54c6be89fa19119c80eb98d1ae3e47 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 2 Dec 2024 15:04:08 +0100 Subject: [PATCH 04/63] Build snapshot releases for bugbash branches (#1948) ## Changes To be used with https://github.com/databricks/cli/tree/main/internal/bugbash. ## Tests n/a --- .github/workflows/release-snapshot.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 4392c248e8..4a7597dc08 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -5,6 +5,7 @@ on: branches: - "main" - "demo-*" + - "bugbash-*" # Confirm that snapshot builds work if this file is modified. 
pull_request: From f9d65f315fcded1dd6649b4dfeb297d0a5e9023d Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 2 Dec 2024 20:10:57 +0530 Subject: [PATCH 05/63] Add comment for why we test two files for `bundle_uuid` (#1949) ## Changes Addresses feedback from this thread https://github.com/databricks/cli/pull/1947#discussion_r1865692479 --- libs/template/helpers_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 802a8f6d55..6c476c658a 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -43,11 +43,16 @@ func TestTemplateBundleUuidFunction(t *testing.T) { err = r.walk() assert.NoError(t, err) + // We test the content of two generated files to ensure that the same UUID + // is used across all files generated by the template. Each file content is + // generated by a separate (*template.Template).Execute call, so testing different + // files ensures that the UUID is stable across all template execution calls. + copy := strings.Clone(bundleUuid) assert.Len(t, r.files, 2) c1 := strings.Trim(string(r.files[0].(*inMemoryFile).content), "\n\r") - assert.Equal(t, strings.Repeat(bundleUuid, 3), c1) + assert.Equal(t, strings.Repeat(copy, 3), c1) c2 := strings.Trim(string(r.files[1].(*inMemoryFile).content), "\n\r") - assert.Equal(t, strings.Repeat(bundleUuid, 5), c2) + assert.Equal(t, strings.Repeat(copy, 5), c2) } func TestTemplateRegexpCompileFunction(t *testing.T) { From 2847533e1e4ad7ee1a67621d37291b27c0f6176d Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 3 Dec 2024 02:48:07 +0530 Subject: [PATCH 06/63] Add DABs support for Unity Catalog volumes (#1762) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR adds support for UC volumes to DABs. ### Can I use a UC volume managed by DABs in `artifact_path`? Yes, but we require the volume to exist before being referenced in `artifact_path`. Otherwise you'll see an error that the volume does not exist. For this case, this PR also adds a warning if we detect that the UC volume is defined in the DAB itself, which informs the user to deploy the UC volume in a separate deployment first before using it in `artifact_path`. We cannot create the UC volume and then upload the artifacts to it in the same `bundle deploy` because `bundle deploy` always uploads the artifacts to `artifact_path` before materializing any resources defined in the bundle. Supporting this in a single deployment requires us to migrate away from our dependency on the Databricks Terraform provider to manage the CRUD lifecycle of DABs resources. ### Why do we not support `preset.name_prefix` for UC volumes? UC volumes will not have a `dev_shreyas_goenka` prefix added in `mode: development`. Configuring `presets.name_prefix` will be a no-op for UC volumes. We have decided not to support prefixing for UC resources. This is because: 1. UC provides its own namespace hierarchy that is independent of DABs. 2. Users can always manually use `${workspace.current_user.short_name}` to configure the prefixes manually. Customers often manually set up a UC hierarchy for dev and prod, including a schema or catalog per developer. Thus, it's often unnecessary for us to add prefixing in `mode: development` by default for UC resources. 
In retrospect, supporting prefixing for UC schemas and registered models was a mistake and will be removed in a future release of DABs. ## Tests Unit, integration test, and manually. ### Manual Testing cases: 1. UC volume does not exist: ``` ➜ bundle-playground git:(master) ✗ cli bundle deploy Error: failed to fetch metadata for the UC volume /Volumes/main/caps/my_volume that is configured in the artifact_path: Not Found ``` 2. UC Volume does not exist, but is defined in the DAB ``` ➜ bundle-playground git:(master) ✗ cli bundle deploy Error: failed to fetch metadata for the UC volume /Volumes/main/caps/managed_by_dab that is configured in the artifact_path: Not Found Warning: You might be using a UC volume in your artifact_path that is managed by this bundle but which has not been deployed yet. Please deploy the UC volume in a separate bundle deploy before using it in the artifact_path. at resources.volumes.bar in databricks.yml:24:7 ``` --------- Co-authored-by: Pieter Noordhuis --- bundle/artifacts/upload.go | 13 +- bundle/config/mutator/apply_presets_test.go | 32 +- .../mutator/process_target_mode_test.go | 36 ++- bundle/config/mutator/run_as_test.go | 2 + bundle/config/resources.go | 8 + bundle/config/resources/volume.go | 62 ++++ bundle/deploy/terraform/convert.go | 15 + bundle/deploy/terraform/convert_test.go | 56 ++++ bundle/deploy/terraform/interpolate.go | 2 + bundle/deploy/terraform/interpolate_test.go | 2 + .../deploy/terraform/tfdyn/convert_volume.go | 45 +++ .../terraform/tfdyn/convert_volume_test.go | 70 +++++ bundle/libraries/filer.go | 32 ++ bundle/libraries/filer_test.go | 63 ++++ bundle/libraries/filer_volume.go | 132 +++++++++ bundle/libraries/filer_volume_test.go | 275 ++++++++++++++++++ bundle/libraries/filer_workspace.go | 15 + bundle/libraries/filer_workspace_test.go | 27 ++ bundle/libraries/upload.go | 41 +-- bundle/libraries/upload_test.go | 7 + bundle/phases/deploy.go | 45 ++- bundle/phases/deploy_test.go | 12 +- internal/bundle/artifacts_test.go | 85 ++++++ .../databricks_template_schema.json | 16 + .../template/databricks.yml.tmpl | 14 + .../volume/databricks_template_schema.json | 8 + .../volume/template/databricks.yml.tmpl | 31 ++ .../bundle/bundles/volume/template/nb.sql | 2 + internal/bundle/deploy_test.go | 70 +++++ libs/dyn/dynvar/ref.go | 20 ++ libs/dyn/dynvar/ref_test.go | 31 ++ libs/filer/workspace_files_client.go | 16 +- .../workspace_files_extensions_client_test.go | 2 +- 33 files changed, 1186 insertions(+), 101 deletions(-) create mode 100644 bundle/config/resources/volume.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_volume.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_volume_test.go create mode 100644 bundle/libraries/filer.go create mode 100644 bundle/libraries/filer_test.go create mode 100644 bundle/libraries/filer_volume.go create mode 100644 bundle/libraries/filer_volume_test.go create mode 100644 bundle/libraries/filer_workspace.go create mode 100644 bundle/libraries/filer_workspace_test.go create mode 100644 internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json create mode 100644 internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/volume/databricks_template_schema.json create mode 100644 internal/bundle/bundles/volume/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/volume/template/nb.sql diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index 58c006dc18..c69939e8c7 100644 --- 
a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -21,18 +21,13 @@ func (m *cleanUp) Name() string { } func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - uploadPath, err := libraries.GetUploadBasePath(b) - if err != nil { - return diag.FromErr(err) - } - - client, err := libraries.GetFilerForLibraries(b.WorkspaceClient(), uploadPath) - if err != nil { - return diag.FromErr(err) + client, uploadPath, diags := libraries.GetFilerForLibraries(ctx, b) + if diags.HasError() { + return diags } // We intentionally ignore the error because it is not critical to the deployment - err = client.Delete(ctx, ".", filer.DeleteRecursively) + err := client.Delete(ctx, ".", filer.DeleteRecursively) if err != nil { log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err) } diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index 497ef051ad..91d5b62e5f 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -73,7 +73,7 @@ func TestApplyPresetsPrefix(t *testing.T) { } } -func TestApplyPresetsPrefixForUcSchema(t *testing.T) { +func TestApplyPresetsPrefixForSchema(t *testing.T) { tests := []struct { name string prefix string @@ -129,6 +129,36 @@ func TestApplyPresetsPrefixForUcSchema(t *testing.T) { } } +func TestApplyPresetsVolumesShouldNotBePrefixed(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Volumes: map[string]*resources.Volume{ + "volume1": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + Name: "volume1", + CatalogName: "catalog1", + SchemaName: "schema1", + }, + }, + }, + }, + Presets: config.Presets{ + NamePrefix: "[prefix]", + }, + }, + } + + ctx := context.Background() + diag := bundle.Apply(ctx, b, mutator.ApplyPresets()) + + if diag.HasError() { + t.Fatalf("unexpected error: %v", diag) + } + + require.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name) +} + func TestApplyPresetsTags(t *testing.T) { tests := []struct { name string diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index c5ea9adeae..14d524416e 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -4,7 +4,7 @@ import ( "context" "reflect" "runtime" - "strings" + "slices" "testing" "github.com/databricks/cli/bundle" @@ -131,6 +131,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle { Schemas: map[string]*resources.Schema{ "schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}}, }, + Volumes: map[string]*resources.Volume{ + "volume1": {CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{Name: "volume1"}}, + }, Clusters: map[string]*resources.Cluster{ "cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}}, }, @@ -311,6 +314,8 @@ func TestProcessTargetModeDefault(t *testing.T) { assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name) + assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name) assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName) 
} @@ -355,6 +360,8 @@ func TestProcessTargetModeProduction(t *testing.T) { assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name) + assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name) assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName) } @@ -388,10 +395,17 @@ func TestAllResourcesMocked(t *testing.T) { } } -// Make sure that we at least rename all resources -func TestAllResourcesRenamed(t *testing.T) { +// Make sure that we at rename all non UC resources +func TestAllNonUcResourcesAreRenamed(t *testing.T) { b := mockBundle(config.Development) + // UC resources should not have a prefix added to their name. Right now + // this list only contains the Volume resource since we have yet to remove + // prefixing support for UC schemas and registered models. + ucFields := []reflect.Type{ + reflect.TypeOf(&resources.Volume{}), + } + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) @@ -404,14 +418,14 @@ func TestAllResourcesRenamed(t *testing.T) { for _, key := range field.MapKeys() { resource := field.MapIndex(key) nameField := resource.Elem().FieldByName("Name") - if nameField.IsValid() && nameField.Kind() == reflect.String { - assert.True( - t, - strings.Contains(nameField.String(), "dev"), - "process_target_mode should rename '%s' in '%s'", - key, - resources.Type().Field(i).Name, - ) + if !nameField.IsValid() || nameField.Kind() != reflect.String { + continue + } + + if slices.Contains(ucFields, resource.Type()) { + assert.NotContains(t, nameField.String(), "dev", "process_target_mode should not rename '%s' in '%s'", key, resources.Type().Field(i).Name) + } else { + assert.Contains(t, nameField.String(), "dev", "process_target_mode should rename '%s' in '%s'", key, resources.Type().Field(i).Name) } } } diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go index acb6c3a43f..dbf4bf8060 100644 --- a/bundle/config/mutator/run_as_test.go +++ b/bundle/config/mutator/run_as_test.go @@ -42,6 +42,7 @@ func allResourceTypes(t *testing.T) []string { "quality_monitors", "registered_models", "schemas", + "volumes", }, resourceTypes, ) @@ -141,6 +142,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) { "registered_models", "experiments", "schemas", + "volumes", } base := config.Root{ diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 2886e35718..13cf0d4625 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -20,6 +20,7 @@ type Resources struct { RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"` QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"` Schemas map[string]*resources.Schema `json:"schemas,omitempty"` + Volumes map[string]*resources.Volume `json:"volumes,omitempty"` Clusters map[string]*resources.Cluster `json:"clusters,omitempty"` Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"` } @@ -85,6 +86,7 @@ func (r *Resources) AllResources() []ResourceGroup { collectResourceMap(descriptions["schemas"], r.Schemas), collectResourceMap(descriptions["clusters"], 
r.Clusters), collectResourceMap(descriptions["dashboards"], r.Dashboards), + collectResourceMap(descriptions["volumes"], r.Volumes), } } @@ -189,5 +191,11 @@ func SupportedResources() map[string]ResourceDescription { SingularTitle: "Dashboard", PluralTitle: "Dashboards", }, + "volumes": { + SingularName: "volume", + PluralName: "volumes", + SingularTitle: "Volume", + PluralTitle: "Volumes", + }, } } diff --git a/bundle/config/resources/volume.go b/bundle/config/resources/volume.go new file mode 100644 index 0000000000..cae2a34637 --- /dev/null +++ b/bundle/config/resources/volume.go @@ -0,0 +1,62 @@ +package resources + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/catalog" +) + +type Volume struct { + // List of grants to apply on this volume. + Grants []Grant `json:"grants,omitempty"` + + // Full name of the volume (catalog_name.schema_name.volume_name). This value is read from + // the terraform state after deployment succeeds. + ID string `json:"id,omitempty" bundle:"readonly"` + + *catalog.CreateVolumeRequestContent + + ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + URL string `json:"url,omitempty" bundle:"internal"` +} + +func (v *Volume) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, v) +} + +func (v Volume) MarshalJSON() ([]byte, error) { + return marshal.Marshal(v) +} + +func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + return false, fmt.Errorf("volume.Exists() is not supported") +} + +func (v *Volume) TerraformResourceName() string { + return "databricks_volume" +} + +func (v *Volume) InitializeURL(baseURL url.URL) { + if v.ID == "" { + return + } + baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/")) + v.URL = baseURL.String() +} + +func (v *Volume) GetURL() string { + return v.URL +} + +func (v *Volume) GetName() string { + return v.Name +} + +func (v *Volume) IsNil() bool { + return v.CreateVolumeRequestContent == nil +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 0ace7c66ee..b710c690f3 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -166,6 +166,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { } cur.ID = instance.Attributes.ID config.Resources.Schemas[resource.Name] = cur + case "databricks_volume": + if config.Resources.Volumes == nil { + config.Resources.Volumes = make(map[string]*resources.Volume) + } + cur := config.Resources.Volumes[resource.Name] + if cur == nil { + cur = &resources.Volume{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID + config.Resources.Volumes[resource.Name] = cur case "databricks_cluster": if config.Resources.Clusters == nil { config.Resources.Clusters = make(map[string]*resources.Cluster) @@ -235,6 +245,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { src.ModifiedStatus = resources.ModifiedStatusCreated } } + for _, src := range config.Resources.Volumes { + if src.ModifiedStatus == "" && src.ID == "" { + src.ModifiedStatus = resources.ModifiedStatusCreated + } + } for _, src := range config.Resources.Clusters { if src.ModifiedStatus == "" && src.ID == "" { src.ModifiedStatus = resources.ModifiedStatusCreated diff --git a/bundle/deploy/terraform/convert_test.go 
b/bundle/deploy/terraform/convert_test.go index 6ed34d4306..076d9b7a05 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -670,6 +670,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "1"}}, }, }, + { + Type: "databricks_volume", + Mode: "managed", + Name: "test_volume", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, { Type: "databricks_cluster", Mode: "managed", @@ -715,6 +723,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus) @@ -783,6 +794,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, }, + Volumes: map[string]*resources.Volume{ + "test_volume": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + Name: "test_volume", + }, + }, + }, Clusters: map[string]*resources.Cluster{ "test_cluster": { ClusterSpec: &compute.ClusterSpec{ @@ -829,6 +847,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { assert.Equal(t, "", config.Resources.Schemas["test_schema"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Volumes["test_volume"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus) @@ -937,6 +958,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, }, + Volumes: map[string]*resources.Volume{ + "test_volume": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + Name: "test_volume", + }, + }, + "test_volume_new": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + Name: "test_volume_new", + }, + }, + }, Clusters: map[string]*resources.Cluster{ "test_cluster": { ClusterSpec: &compute.ClusterSpec{ @@ -1093,6 +1126,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "2"}}, }, }, + { + Type: "databricks_volume", + Mode: "managed", + Name: "test_volume", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_volume", + Mode: "managed", + Name: "test_volume_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, { Type: "databricks_cluster", Mode: "managed", @@ -1186,6 +1235,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { assert.Equal(t, "", config.Resources.Schemas["test_schema_new"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema_new"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID) + assert.Equal(t, "", config.Resources.Volumes["test_volume"].ModifiedStatus) + assert.Equal(t, "2", 
config.Resources.Volumes["test_volume_old"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume_old"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Volumes["test_volume_new"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume_new"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID) assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ModifiedStatus) assert.Equal(t, "2", config.Resources.Clusters["test_cluster_old"].ID) diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index eb15c63ec7..9c2126aeca 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -58,6 +58,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...) case dyn.Key("schemas"): path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...) + case dyn.Key("volumes"): + path = dyn.NewPath(dyn.Key("databricks_volume")).Append(path[2:]...) case dyn.Key("clusters"): path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...) case dyn.Key("dashboards"): diff --git a/bundle/deploy/terraform/interpolate_test.go b/bundle/deploy/terraform/interpolate_test.go index b26ef928da..fc5c4d184c 100644 --- a/bundle/deploy/terraform/interpolate_test.go +++ b/bundle/deploy/terraform/interpolate_test.go @@ -31,6 +31,7 @@ func TestInterpolate(t *testing.T) { "other_model_serving": "${resources.model_serving_endpoints.other_model_serving.id}", "other_registered_model": "${resources.registered_models.other_registered_model.id}", "other_schema": "${resources.schemas.other_schema.id}", + "other_volume": "${resources.volumes.other_volume.id}", "other_cluster": "${resources.clusters.other_cluster.id}", "other_dashboard": "${resources.dashboards.other_dashboard.id}", }, @@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) { assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"]) assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"]) assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"]) + assert.Equal(t, "${databricks_volume.other_volume.id}", j.Tags["other_volume"]) assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"]) assert.Equal(t, "${databricks_dashboard.other_dashboard.id}", j.Tags["other_dashboard"]) diff --git a/bundle/deploy/terraform/tfdyn/convert_volume.go b/bundle/deploy/terraform/tfdyn/convert_volume.go new file mode 100644 index 0000000000..4211e1f9e1 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_volume.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertVolumeResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. 
+ vout, diags := convert.Normalize(schema.ResourceVolume{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "volume normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type volumeConverter struct{} + +func (volumeConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertVolumeResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.Volume[key] = vout.AsAny() + + // Configure grants for this resource. + if grants := convertGrantsResource(ctx, vin); grants != nil { + grants.Volume = fmt.Sprintf("${databricks_volume.%s.id}", key) + out.Grants["volume_"+key] = grants + } + + return nil +} + +func init() { + registerConverter("volumes", volumeConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_volume_test.go b/bundle/deploy/terraform/tfdyn/convert_volume_test.go new file mode 100644 index 0000000000..c897ae69a8 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_volume_test.go @@ -0,0 +1,70 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertVolume(t *testing.T) { + var src = resources.Volume{ + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + CatalogName: "catalog", + Comment: "comment", + Name: "name", + SchemaName: "schema", + StorageLocation: "s3://bucket/path", + VolumeType: "EXTERNAL", + }, + Grants: []resources.Grant{ + { + Privileges: []string{"READ_VOLUME"}, + Principal: "jack@gmail.com", + }, + { + Privileges: []string{"WRITE_VOLUME"}, + Principal: "jane@gmail.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = volumeConverter{}.Convert(ctx, "my_volume", vin, out) + require.NoError(t, err) + + // Assert equality on the volume + require.Equal(t, map[string]any{ + "catalog_name": "catalog", + "comment": "comment", + "name": "name", + "schema_name": "schema", + "storage_location": "s3://bucket/path", + "volume_type": "EXTERNAL", + }, out.Volume["my_volume"]) + + // Assert equality on the grants + assert.Equal(t, &schema.ResourceGrants{ + Volume: "${databricks_volume.my_volume.id}", + Grant: []schema.ResourceGrantsGrant{ + { + Privileges: []string{"READ_VOLUME"}, + Principal: "jack@gmail.com", + }, + { + Privileges: []string{"WRITE_VOLUME"}, + Principal: "jane@gmail.com", + }, + }, + }, out.Grants["volume_my_volume"]) +} diff --git a/bundle/libraries/filer.go b/bundle/libraries/filer.go new file mode 100644 index 0000000000..4448ed325b --- /dev/null +++ b/bundle/libraries/filer.go @@ -0,0 +1,32 @@ +package libraries + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/filer" +) + +// We upload artifacts to the workspace in a directory named ".internal" to have +// a well defined location for artifacts that have been uploaded by the DABs. +const InternalDirName = ".internal" + +// This function returns a filer for uploading artifacts to the configured location. +// Supported locations: +// 1. WSFS +// 2. 
UC volumes +func GetFilerForLibraries(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) { + artifactPath := b.Config.Workspace.ArtifactPath + if artifactPath == "" { + return nil, "", diag.Errorf("remote artifact path not configured") + } + + switch { + case IsVolumesPath(artifactPath): + return filerForVolume(ctx, b) + + default: + return filerForWorkspace(b) + } +} diff --git a/bundle/libraries/filer_test.go b/bundle/libraries/filer_test.go new file mode 100644 index 0000000000..88ba152fc1 --- /dev/null +++ b/bundle/libraries/filer_test.go @@ -0,0 +1,63 @@ +package libraries + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/filer" + sdkconfig "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestGetFilerForLibrariesValidWsfs(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/foo/bar/artifacts", + }, + }, + } + + client, uploadPath, diags := GetFilerForLibraries(context.Background(), b) + require.NoError(t, diags.Error()) + assert.Equal(t, "/foo/bar/artifacts/.internal", uploadPath) + + assert.IsType(t, &filer.WorkspaceFilesClient{}, client) +} + +func TestGetFilerForLibrariesValidUcVolume(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/main/my_schema/my_volume", + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{} + m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil) + b.SetWorkpaceClient(m.WorkspaceClient) + + client, uploadPath, diags := GetFilerForLibraries(context.Background(), b) + require.NoError(t, diags.Error()) + assert.Equal(t, "/Volumes/main/my_schema/my_volume/.internal", uploadPath) + + assert.IsType(t, &filer.FilesClient{}, client) +} + +func TestGetFilerForLibrariesRemotePathNotSet(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{}, + }, + } + + _, _, diags := GetFilerForLibraries(context.Background(), b) + require.EqualError(t, diags.Error(), "remote artifact path not configured") +} diff --git a/bundle/libraries/filer_volume.go b/bundle/libraries/filer_volume.go new file mode 100644 index 0000000000..aecf68db13 --- /dev/null +++ b/bundle/libraries/filer_volume.go @@ -0,0 +1,132 @@ +package libraries + +import ( + "context" + "errors" + "fmt" + "path" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/apierr" +) + +func extractVolumeFromPath(artifactPath string) (string, string, string, error) { + if !IsVolumesPath(artifactPath) { + return "", "", "", fmt.Errorf("expected artifact_path to start with /Volumes/, got %s", artifactPath) + } + + parts := strings.Split(artifactPath, "/") + volumeFormatErr := fmt.Errorf("expected UC volume path to be in the format /Volumes////..., got %s", artifactPath) + + // Incorrect format. 
+ if len(parts) < 5 { + return "", "", "", volumeFormatErr + } + + catalogName := parts[2] + schemaName := parts[3] + volumeName := parts[4] + + // Incorrect format. + if catalogName == "" || schemaName == "" || volumeName == "" { + return "", "", "", volumeFormatErr + } + + return catalogName, schemaName, volumeName, nil +} + +// This function returns a filer for ".internal" folder inside the directory configured +// at `workspace.artifact_path`. +// This function also checks if the UC volume exists in the workspace and then: +// 1. If the UC volume exists in the workspace: +// Returns a filer for the UC volume. +// 2. If the UC volume does not exist in the workspace but is (with high confidence) defined in +// the bundle configuration: +// Returns an error and a warning that instructs the user to deploy the +// UC volume before using it in the artifact path. +// 3. If the UC volume does not exist in the workspace and is not defined in the bundle configuration: +// Returns an error. +func filerForVolume(ctx context.Context, b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) { + artifactPath := b.Config.Workspace.ArtifactPath + w := b.WorkspaceClient() + + catalogName, schemaName, volumeName, err := extractVolumeFromPath(artifactPath) + if err != nil { + return nil, "", diag.Diagnostics{ + { + Severity: diag.Error, + Summary: err.Error(), + Locations: b.Config.GetLocations("workspace.artifact_path"), + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }, + } + } + + // Check if the UC volume exists in the workspace. + volumePath := fmt.Sprintf("/Volumes/%s/%s/%s", catalogName, schemaName, volumeName) + err = w.Files.GetDirectoryMetadataByDirectoryPath(ctx, volumePath) + + // If the volume exists already, directly return the filer for the path to + // upload the artifacts to. + if err == nil { + uploadPath := path.Join(artifactPath, InternalDirName) + f, err := filer.NewFilesClient(w, uploadPath) + return f, uploadPath, diag.FromErr(err) + } + + baseErr := diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("unable to determine if volume at %s exists: %s", volumePath, err), + Locations: b.Config.GetLocations("workspace.artifact_path"), + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + } + + if errors.Is(err, apierr.ErrNotFound) { + // Since the API returned a 404, the volume does not exist. + // Modify the error message to provide more context. + baseErr.Summary = fmt.Sprintf("volume %s does not exist: %s", volumePath, err) + + // If the volume is defined in the bundle, provide a more helpful error diagnostic, + // with more details and location information. + path, locations, ok := findVolumeInBundle(b, catalogName, schemaName, volumeName) + if !ok { + return nil, "", diag.Diagnostics{baseErr} + } + baseErr.Detail = `You are using a volume in your artifact_path that is managed by +this bundle but which has not been deployed yet. Please first deploy +the volume using 'bundle deploy' and then switch over to using it in +the artifact_path.` + baseErr.Paths = append(baseErr.Paths, path) + baseErr.Locations = append(baseErr.Locations, locations...) 
+ } + + return nil, "", diag.Diagnostics{baseErr} +} + +func findVolumeInBundle(b *bundle.Bundle, catalogName, schemaName, volumeName string) (dyn.Path, []dyn.Location, bool) { + volumes := b.Config.Resources.Volumes + for k, v := range volumes { + if v.CatalogName != catalogName || v.Name != volumeName { + continue + } + // UC schemas can be defined in the bundle itself, and thus might be interpolated + // at runtime via the ${resources.schemas.} syntax. Thus we match the volume + // definition if the schema name is the same as the one in the bundle, or if the + // schema name is interpolated. + // We only have to check for ${resources.schemas...} references because any + // other valid reference (like ${var.foo}) would have been interpolated by this point. + p, ok := dynvar.PureReferenceToPath(v.SchemaName) + isSchemaDefinedInBundle := ok && p.HasPrefix(dyn.Path{dyn.Key("resources"), dyn.Key("schemas")}) + if v.SchemaName != schemaName && !isSchemaDefinedInBundle { + continue + } + pathString := fmt.Sprintf("resources.volumes.%s", k) + return dyn.MustPathFromString(pathString), b.Config.GetLocations(pathString), true + } + return nil, nil, false +} diff --git a/bundle/libraries/filer_volume_test.go b/bundle/libraries/filer_volume_test.go new file mode 100644 index 0000000000..0d886824d9 --- /dev/null +++ b/bundle/libraries/filer_volume_test.go @@ -0,0 +1,275 @@ +package libraries + +import ( + "context" + "fmt" + "path" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/apierr" + sdkconfig "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFindVolumeInBundle(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Volumes: map[string]*resources.Volume{ + "foo": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + CatalogName: "main", + Name: "my_volume", + SchemaName: "my_schema", + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{ + { + File: "volume.yml", + Line: 1, + Column: 2, + }, + }) + + // volume is in DAB. + path, locations, ok := findVolumeInBundle(b, "main", "my_schema", "my_volume") + assert.True(t, ok) + assert.Equal(t, []dyn.Location{{ + File: "volume.yml", + Line: 1, + Column: 2, + }}, locations) + assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path) + + // wrong volume name + _, _, ok = findVolumeInBundle(b, "main", "my_schema", "doesnotexist") + assert.False(t, ok) + + // wrong schema name + _, _, ok = findVolumeInBundle(b, "main", "doesnotexist", "my_volume") + assert.False(t, ok) + + // wrong catalog name + _, _, ok = findVolumeInBundle(b, "doesnotexist", "my_schema", "my_volume") + assert.False(t, ok) + + // schema name is interpolated but does not have the right prefix. In this case + // we should not match the volume. 
+ b.Config.Resources.Volumes["foo"].SchemaName = "${foo.bar.baz}" + _, _, ok = findVolumeInBundle(b, "main", "my_schema", "my_volume") + assert.False(t, ok) + + // schema name is interpolated. + b.Config.Resources.Volumes["foo"].SchemaName = "${resources.schemas.my_schema.name}" + path, locations, ok = findVolumeInBundle(b, "main", "valuedoesnotmatter", "my_volume") + assert.True(t, ok) + assert.Equal(t, []dyn.Location{{ + File: "volume.yml", + Line: 1, + Column: 2, + }}, locations) + assert.Equal(t, dyn.MustPathFromString("resources.volumes.foo"), path) +} + +func TestFilerForVolumeForErrorFromAPI(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/main/my_schema/my_volume", + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{} + m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(fmt.Errorf("error from API")) + b.SetWorkpaceClient(m.WorkspaceClient) + + _, _, diags := filerForVolume(context.Background(), b) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "unable to determine if volume at /Volumes/main/my_schema/my_volume exists: error from API", + Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }}, diags) +} + +func TestFilerForVolumeWithVolumeNotFound(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/main/my_schema/doesnotexist", + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{} + m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/doesnotexist").Return(apierr.NotFound("some error message")) + b.SetWorkpaceClient(m.WorkspaceClient) + + _, _, diags := filerForVolume(context.Background(), b) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "volume /Volumes/main/my_schema/doesnotexist does not exist: some error message", + Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }}, diags) +} + +func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/main/my_schema/my_volume", + }, + Resources: config.Resources{ + Volumes: map[string]*resources.Volume{ + "foo": { + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + CatalogName: "main", + Name: "my_volume", + VolumeType: "MANAGED", + SchemaName: "my_schema", + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) + bundletest.SetLocation(b, "resources.volumes.foo", []dyn.Location{{File: "volume.yml", Line: 1, Column: 2}}) + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{} + m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(apierr.NotFound("error from API")) + b.SetWorkpaceClient(m.WorkspaceClient) + + _, _, 
diags := GetFilerForLibraries(context.Background(), b) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "volume /Volumes/main/my_schema/my_volume does not exist: error from API", + Locations: []dyn.Location{{"config.yml", 1, 2}, {"volume.yml", 1, 2}}, + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")}, + Detail: `You are using a volume in your artifact_path that is managed by +this bundle but which has not been deployed yet. Please first deploy +the volume using 'bundle deploy' and then switch over to using it in +the artifact_path.`, + }, + }, diags) +} + +func invalidVolumePaths() []string { + return []string{ + "/Volumes/", + "/Volumes/main", + "/Volumes/main/", + "/Volumes/main//", + "/Volumes/main//my_schema", + "/Volumes/main/my_schema", + "/Volumes/main/my_schema/", + "/Volumes/main/my_schema//", + "/Volumes//my_schema/my_volume", + } +} + +func TestFilerForVolumeWithInvalidVolumePaths(t *testing.T) { + for _, p := range invalidVolumePaths() { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: p, + }, + }, + } + + bundletest.SetLocation(b, "workspace.artifact_path", []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}) + + _, _, diags := GetFilerForLibraries(context.Background(), b) + require.Equal(t, diags, diag.Diagnostics{{ + Severity: diag.Error, + Summary: fmt.Sprintf("expected UC volume path to be in the format /Volumes////..., got %s", p), + Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, + Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, + }}) + } +} + +func TestFilerForVolumeWithInvalidPrefix(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volume/main/my_schema/my_volume", + }, + }, + } + + _, _, diags := filerForVolume(context.Background(), b) + require.EqualError(t, diags.Error(), "expected artifact_path to start with /Volumes/, got /Volume/main/my_schema/my_volume") +} + +func TestFilerForVolumeWithValidVolumePaths(t *testing.T) { + validPaths := []string{ + "/Volumes/main/my_schema/my_volume", + "/Volumes/main/my_schema/my_volume/", + "/Volumes/main/my_schema/my_volume/a/b/c", + "/Volumes/main/my_schema/my_volume/a/a/a", + } + + for _, p := range validPaths { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: p, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{} + m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/main/my_schema/my_volume").Return(nil) + b.SetWorkpaceClient(m.WorkspaceClient) + + client, uploadPath, diags := filerForVolume(context.Background(), b) + require.NoError(t, diags.Error()) + assert.Equal(t, path.Join(p, ".internal"), uploadPath) + assert.IsType(t, &filer.FilesClient{}, client) + } +} + +func TestExtractVolumeFromPath(t *testing.T) { + catalogName, schemaName, volumeName, err := extractVolumeFromPath("/Volumes/main/my_schema/my_volume") + require.NoError(t, err) + assert.Equal(t, "main", catalogName) + assert.Equal(t, "my_schema", schemaName) + assert.Equal(t, "my_volume", volumeName) + + for _, p := range invalidVolumePaths() { + _, _, _, err := extractVolumeFromPath(p) + assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes////..., got %s", p)) + } +} diff --git a/bundle/libraries/filer_workspace.go 
b/bundle/libraries/filer_workspace.go new file mode 100644 index 0000000000..8d54519ff0 --- /dev/null +++ b/bundle/libraries/filer_workspace.go @@ -0,0 +1,15 @@ +package libraries + +import ( + "path" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/filer" +) + +func filerForWorkspace(b *bundle.Bundle) (filer.Filer, string, diag.Diagnostics) { + uploadPath := path.Join(b.Config.Workspace.ArtifactPath, InternalDirName) + f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath) + return f, uploadPath, diag.FromErr(err) +} diff --git a/bundle/libraries/filer_workspace_test.go b/bundle/libraries/filer_workspace_test.go new file mode 100644 index 0000000000..f761592f9b --- /dev/null +++ b/bundle/libraries/filer_workspace_test.go @@ -0,0 +1,27 @@ +package libraries + +import ( + "path" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFilerForWorkspace(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Workspace/Users/shreyas.goenka@databricks.com/a/b/c", + }, + }, + } + + client, uploadPath, diags := filerForWorkspace(b) + require.NoError(t, diags.Error()) + assert.Equal(t, path.Join("/Workspace/Users/shreyas.goenka@databricks.com/a/b/c/.internal"), uploadPath) + assert.IsType(t, &filer.WorkspaceFilesClient{}, client) +} diff --git a/bundle/libraries/upload.go b/bundle/libraries/upload.go index 90a1a21fc4..4b6f43701b 100644 --- a/bundle/libraries/upload.go +++ b/bundle/libraries/upload.go @@ -16,8 +16,6 @@ import ( "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" - "github.com/databricks/databricks-sdk-go" - "golang.org/x/sync/errgroup" ) @@ -130,24 +128,17 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error } func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - uploadPath, err := GetUploadBasePath(b) - if err != nil { - return diag.FromErr(err) + client, uploadPath, diags := GetFilerForLibraries(ctx, b) + if diags.HasError() { + return diags } - // If the client is not initialized, initialize it - // We use client field in mutator to allow for mocking client in testing + // Only set the filer client if it's not already set. 
We use the client field + // in the mutator to mock the filer client in testing if u.client == nil { - filer, err := GetFilerForLibraries(b.WorkspaceClient(), uploadPath) - if err != nil { - return diag.FromErr(err) - } - - u.client = filer + u.client = client } - var diags diag.Diagnostics - libs, err := collectLocalLibraries(b) if err != nil { return diag.FromErr(err) @@ -197,17 +188,6 @@ func (u *upload) Name() string { return "libraries.Upload" } -func GetFilerForLibraries(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) { - if isVolumesPath(uploadPath) { - return filer.NewFilesClient(w, uploadPath) - } - return filer.NewWorkspaceFilesClient(w, uploadPath) -} - -func isVolumesPath(path string) bool { - return strings.HasPrefix(path, "/Volumes/") -} - // Function to upload file (a library, artifact and etc) to Workspace or UC volume func UploadFile(ctx context.Context, file string, client filer.Filer) error { filename := filepath.Base(file) @@ -227,12 +207,3 @@ func UploadFile(ctx context.Context, file string, client filer.Filer) error { log.Infof(ctx, "Upload succeeded") return nil } - -func GetUploadBasePath(b *bundle.Bundle) (string, error) { - artifactPath := b.Config.Workspace.ArtifactPath - if artifactPath == "" { - return "", fmt.Errorf("remote artifact path not configured") - } - - return path.Join(artifactPath, ".internal"), nil -} diff --git a/bundle/libraries/upload_test.go b/bundle/libraries/upload_test.go index 44b194c56a..493785bf50 100644 --- a/bundle/libraries/upload_test.go +++ b/bundle/libraries/upload_test.go @@ -11,6 +11,8 @@ import ( mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" + sdkconfig "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/mock" @@ -181,6 +183,11 @@ func TestArtifactUploadForVolumes(t *testing.T) { filer.CreateParentDirectories, ).Return(nil) + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{} + m.GetMockFilesAPI().EXPECT().GetDirectoryMetadataByDirectoryPath(mock.Anything, "/Volumes/foo/bar/artifacts").Return(nil) + b.SetWorkpaceClient(m.WorkspaceClient) + diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler))) require.NoError(t, diags.Error()) diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index e623c364f9..2dc9623bd0 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -23,10 +23,10 @@ import ( tfjson "github.com/hashicorp/terraform-json" ) -func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ string, actions tfjson.Actions) bool) []terraformlib.Action { +func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceType string) []terraformlib.Action { res := make([]terraformlib.Action, 0) for _, rc := range changes { - if !toInclude(rc.Type, rc.Change.Actions) { + if rc.Type != resourceType { continue } @@ -37,7 +37,7 @@ func parseTerraformActions(changes []*tfjson.ResourceChange, toInclude func(typ case rc.Change.Actions.Replace(): actionType = terraformlib.ActionTypeRecreate default: - // No use case for other action types yet. + // Filter other action types.. 
continue } @@ -63,30 +63,12 @@ func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) { return false, err } - schemaActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool { - // Filter in only UC schema resources. - if typ != "databricks_schema" { - return false - } - - // We only display prompts for destructive actions like deleting or - // recreating a schema. - return actions.Delete() || actions.Replace() - }) - - dltActions := parseTerraformActions(plan.ResourceChanges, func(typ string, actions tfjson.Actions) bool { - // Filter in only DLT pipeline resources. - if typ != "databricks_pipeline" { - return false - } - - // Recreating DLT pipeline leads to metadata loss and for a transient period - // the underling tables will be unavailable. - return actions.Replace() || actions.Delete() - }) + schemaActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_schema") + dltActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_pipeline") + volumeActions := filterDeleteOrRecreateActions(plan.ResourceChanges, "databricks_volume") // We don't need to display any prompts in this case. - if len(dltActions) == 0 && len(schemaActions) == 0 { + if len(schemaActions) == 0 && len(dltActions) == 0 && len(volumeActions) == 0 { return true, nil } @@ -111,6 +93,19 @@ properties such as the 'catalog' or 'storage' are changed:` } } + // One or more volumes is being recreated. + if len(volumeActions) != 0 { + msg := ` +This action will result in the deletion or recreation of the following volumes. +For managed volumes, the files stored in the volume are also deleted from your +cloud tenant within 30 days. For external volumes, the metadata about the volume +is removed from the catalog, but the underlying files are not deleted:` + cmdio.LogString(ctx, msg) + for _, action := range volumeActions { + cmdio.Log(ctx, action) + } + } + if b.AutoApprove { return true, nil } diff --git a/bundle/phases/deploy_test.go b/bundle/phases/deploy_test.go index e00370b380..2cef954044 100644 --- a/bundle/phases/deploy_test.go +++ b/bundle/phases/deploy_test.go @@ -40,17 +40,7 @@ func TestParseTerraformActions(t *testing.T) { }, } - res := parseTerraformActions(changes, func(typ string, actions tfjson.Actions) bool { - if typ != "databricks_pipeline" { - return false - } - - if actions.Delete() || actions.Replace() { - return true - } - - return false - }) + res := filterDeleteOrRecreateActions(changes, "databricks_pipeline") assert.Equal(t, []terraformlib.Action{ { diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 775327e18a..34d101e4f8 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -1,6 +1,7 @@ package bundle import ( + "fmt" "os" "path" "path/filepath" @@ -13,8 +14,11 @@ import ( "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -225,3 +229,84 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, ) } + +func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { + ctx, wt := 
acc.UcWorkspaceTest(t) + w := wt.W + + schemaName := internal.RandomName("schema-") + + _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Comment: "test schema", + Name: schemaName, + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = w.Schemas.DeleteByFullName(ctx, "main."+schemaName) + require.NoError(t, err) + }) + + bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + "unique_id": uuid.New().String(), + "schema_name": schemaName, + "volume_name": "doesnotexist", + }) + require.NoError(t, err) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found + at workspace.artifact_path + in databricks.yml:6:18 + +`, schemaName), stdout.String()) + assert.Equal(t, "", stderr.String()) +} + +func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { + ctx, wt := acc.UcWorkspaceTest(t) + w := wt.W + + schemaName := internal.RandomName("schema-") + + _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Comment: "test schema", + Name: schemaName, + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = w.Schemas.DeleteByFullName(ctx, "main."+schemaName) + require.NoError(t, err) + }) + + bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + "unique_id": uuid.New().String(), + "schema_name": schemaName, + "volume_name": "my_volume", + }) + require.NoError(t, err) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found + at workspace.artifact_path + resources.volumes.foo + in databricks.yml:6:18 + databricks.yml:11:7 + +You are using a volume in your artifact_path that is managed by +this bundle but which has not been deployed yet. Please first deploy +the volume using 'bundle deploy' and then switch over to using it in +the artifact_path. 
+ +`, schemaName), stdout.String()) + assert.Equal(t, "", stderr.String()) +} diff --git a/internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json b/internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json new file mode 100644 index 0000000000..7dd81ef2a1 --- /dev/null +++ b/internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json @@ -0,0 +1,16 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "schema_name": { + "type": "string", + "description": "schema name to use in the artifact_path" + }, + "volume_name": { + "type": "string", + "description": "volume name to use in the artifact_path" + } + } +} diff --git a/internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl b/internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl new file mode 100644 index 0000000000..2d87620e3a --- /dev/null +++ b/internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl @@ -0,0 +1,14 @@ +bundle: + name: artifact_path_with_volume + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + artifact_path: /Volumes/main/{{.schema_name}}/{{.volume_name}} + +resources: + volumes: + foo: + catalog_name: main + name: my_volume + schema_name: {{.schema_name}} + volume_type: MANAGED diff --git a/internal/bundle/bundles/volume/databricks_template_schema.json b/internal/bundle/bundles/volume/databricks_template_schema.json new file mode 100644 index 0000000000..d849dacf83 --- /dev/null +++ b/internal/bundle/bundles/volume/databricks_template_schema.json @@ -0,0 +1,8 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for the schema names" + } + } +} diff --git a/internal/bundle/bundles/volume/template/databricks.yml.tmpl b/internal/bundle/bundles/volume/template/databricks.yml.tmpl new file mode 100644 index 0000000000..d7f31439b8 --- /dev/null +++ b/internal/bundle/bundles/volume/template/databricks.yml.tmpl @@ -0,0 +1,31 @@ +bundle: + name: test-uc-volumes-{{.unique_id}} + +variables: + schema_name: + default: ${resources.schemas.schema1.name} + +resources: + schemas: + schema1: + name: schema1-{{.unique_id}} + catalog_name: main + comment: This schema was created from DABs + + schema2: + name: schema2-{{.unique_id}} + catalog_name: main + comment: This schema was created from DABs + + volumes: + foo: + catalog_name: main + name: my_volume + schema_name: ${var.schema_name} + volume_type: MANAGED + comment: This volume was created from DABs. 
+ + grants: + - principal: account users + privileges: + - WRITE_VOLUME diff --git a/internal/bundle/bundles/volume/template/nb.sql b/internal/bundle/bundles/volume/template/nb.sql new file mode 100644 index 0000000000..199ff50788 --- /dev/null +++ b/internal/bundle/bundles/volume/template/nb.sql @@ -0,0 +1,2 @@ +-- Databricks notebook source +select 1 diff --git a/internal/bundle/deploy_test.go b/internal/bundle/deploy_test.go index 8854358555..759e85de5d 100644 --- a/internal/bundle/deploy_test.go +++ b/internal/bundle/deploy_test.go @@ -243,3 +243,73 @@ func TestAccDeployBasicBundleLogs(t *testing.T) { }, "\n"), stderr) assert.Equal(t, "", stdout) } + +func TestAccDeployUcVolume(t *testing.T) { + ctx, wt := acc.UcWorkspaceTest(t) + w := wt.W + + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{ + "unique_id": uniqueId, + }) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + // Assert the volume is created successfully + catalogName := "main" + schemaName := "schema1-" + uniqueId + volumeName := "my_volume" + fullName := fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName) + volume, err := w.Volumes.ReadByName(ctx, fullName) + require.NoError(t, err) + require.Equal(t, volume.Name, volumeName) + require.Equal(t, catalogName, volume.CatalogName) + require.Equal(t, schemaName, volume.SchemaName) + + // Assert that the grants were successfully applied. + grants, err := w.Grants.GetBySecurableTypeAndFullName(ctx, catalog.SecurableTypeVolume, fullName) + require.NoError(t, err) + assert.Len(t, grants.PrivilegeAssignments, 1) + assert.Equal(t, "account users", grants.PrivilegeAssignments[0].Principal) + assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges) + + // Recreation of the volume without --auto-approve should fail since prompting is not possible + t.Setenv("TERM", "dumb") + t.Setenv("BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() + assert.Error(t, err) + assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following volumes. +For managed volumes, the files stored in the volume are also deleted from your +cloud tenant within 30 days. For external volumes, the metadata about the volume +is removed from the catalog, but the underlying files are not deleted: + recreate volume foo`) + assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") + + // Successfully recreate the volume with --auto-approve + t.Setenv("TERM", "dumb") + t.Setenv("BUNDLE_ROOT", bundleRoot) + _, _, err = internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() + assert.NoError(t, err) + + // Assert the volume is updated successfully + schemaName = "schema2-" + uniqueId + fullName = fmt.Sprintf("%s.%s.%s", catalogName, schemaName, volumeName) + volume, err = w.Volumes.ReadByName(ctx, fullName) + require.NoError(t, err) + require.Equal(t, volume.Name, volumeName) + require.Equal(t, catalogName, volume.CatalogName) + require.Equal(t, schemaName, volume.SchemaName) + + // assert that the grants were applied / retained on recreate. + grants, err = w.Grants.GetBySecurableTypeAndFullName(ctx, catalog.SecurableTypeVolume, fullName) + require.NoError(t, err) + assert.Len(t, grants.PrivilegeAssignments, 1) + assert.Equal(t, "account users", grants.PrivilegeAssignments[0].Principal) + assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges) +} diff --git a/libs/dyn/dynvar/ref.go b/libs/dyn/dynvar/ref.go index 338ac8ce6c..a28938823e 100644 --- a/libs/dyn/dynvar/ref.go +++ b/libs/dyn/dynvar/ref.go @@ -71,3 +71,23 @@ func (v ref) references() []string { func IsPureVariableReference(s string) bool { return len(s) > 0 && re.FindString(s) == s } + +// If s is a pure variable reference, this function returns the corresponding +// dyn.Path. Otherwise, it returns false. +func PureReferenceToPath(s string) (dyn.Path, bool) { + ref, ok := newRef(dyn.V(s)) + if !ok { + return nil, false + } + + if !ref.isPure() { + return nil, false + } + + p, err := dyn.NewPathFromString(ref.references()[0]) + if err != nil { + return nil, false + } + + return p, true +} diff --git a/libs/dyn/dynvar/ref_test.go b/libs/dyn/dynvar/ref_test.go index aff3643e02..4110732f89 100644 --- a/libs/dyn/dynvar/ref_test.go +++ b/libs/dyn/dynvar/ref_test.go @@ -51,3 +51,34 @@ func TestIsPureVariableReference(t *testing.T) { assert.False(t, IsPureVariableReference("prefix ${foo.bar}")) assert.True(t, IsPureVariableReference("${foo.bar}")) } + +func TestPureReferenceToPath(t *testing.T) { + for _, tc := range []struct { + in string + out string + ok bool + }{ + {"${foo.bar}", "foo.bar", true}, + {"${foo.bar.baz}", "foo.bar.baz", true}, + {"${foo.bar.baz[0]}", "foo.bar.baz[0]", true}, + {"${foo.bar.baz[0][1]}", "foo.bar.baz[0][1]", true}, + {"${foo.bar.baz[0][1].qux}", "foo.bar.baz[0][1].qux", true}, + + {"${foo.one}${foo.two}", "", false}, + {"prefix ${foo.bar}", "", false}, + {"${foo.bar} suffix", "", false}, + {"${foo.bar", "", false}, + {"foo.bar}", "", false}, + {"foo.bar", "", false}, + {"{foo.bar}", "", false}, + {"", "", false}, + } { + path, ok := PureReferenceToPath(tc.in) + if tc.ok { + assert.True(t, ok) + assert.Equal(t, dyn.MustPathFromString(tc.out), path) + } else { + assert.False(t, ok) + } + } +} diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index 4bb03aea52..9e0a7ce50e 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -114,7 +114,7 @@ type apiClient interface { // NOTE: This API is available for files under /Repos if a workspace has files-in-repos enabled. // It can access any workspace path if files-in-workspace is enabled. 
-type workspaceFilesClient struct { +type WorkspaceFilesClient struct { workspaceClient *databricks.WorkspaceClient apiClient apiClient @@ -128,7 +128,7 @@ func NewWorkspaceFilesClient(w *databricks.WorkspaceClient, root string) (Filer, return nil, err } - return &workspaceFilesClient{ + return &WorkspaceFilesClient{ workspaceClient: w, apiClient: apiClient, @@ -136,7 +136,7 @@ func NewWorkspaceFilesClient(w *databricks.WorkspaceClient, root string) (Filer, }, nil } -func (w *workspaceFilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { +func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { absPath, err := w.root.Join(name) if err != nil { return err @@ -214,7 +214,7 @@ func (w *workspaceFilesClient) Write(ctx context.Context, name string, reader io return err } -func (w *workspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { +func (w *WorkspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { absPath, err := w.root.Join(name) if err != nil { return nil, err @@ -238,7 +238,7 @@ func (w *workspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCl return w.workspaceClient.Workspace.Download(ctx, absPath) } -func (w *workspaceFilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { +func (w *WorkspaceFilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { absPath, err := w.root.Join(name) if err != nil { return err @@ -282,7 +282,7 @@ func (w *workspaceFilesClient) Delete(ctx context.Context, name string, mode ... return err } -func (w *workspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { +func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { absPath, err := w.root.Join(name) if err != nil { return nil, err @@ -315,7 +315,7 @@ func (w *workspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D return wsfsDirEntriesFromObjectInfos(objects), nil } -func (w *workspaceFilesClient) Mkdir(ctx context.Context, name string) error { +func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error { dirPath, err := w.root.Join(name) if err != nil { return err @@ -325,7 +325,7 @@ func (w *workspaceFilesClient) Mkdir(ctx context.Context, name string) error { }) } -func (w *workspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { +func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { absPath, err := w.root.Join(name) if err != nil { return nil, err diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go index 974a6a37b5..10c176b315 100644 --- a/libs/filer/workspace_files_extensions_client_test.go +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -174,7 +174,7 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { "return_export_info": "true", }, mock.AnythingOfType("*filer.wsfsFileInfo"), []func(*http.Request) error(nil)).Return(nil, statNotebook) - workspaceFilesClient := workspaceFilesClient{ + workspaceFilesClient := WorkspaceFilesClient{ workspaceClient: mockedWorkspaceClient.WorkspaceClient, apiClient: &mockedApiClient, root: NewWorkspaceRootPath("/dir"), From 0e088eb9f8c034aea93184ad25f752bb5ac0e58d Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Mon, 2 Dec 2024 23:41:38 +0100 Subject: [PATCH 07/63] Simplify 
load_git_details.go; remove unnecessary Abs() call (#1950) Suggested here https://github.com/databricks/cli/pull/1945#discussion_r1866088579 --- bundle/config/mutator/load_git_details.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 00e7f54d16..77558d9b50 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -55,13 +55,8 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn b.Config.Bundle.Git.OriginURL = remoteUrl } - // Compute relative path of the bundle root from the Git repo root. - absBundlePath, err := filepath.Abs(b.BundleRootPath) - if err != nil { - return diag.FromErr(err) - } // repo.Root() returns the absolute path of the repo - relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath) + relBundlePath, err := filepath.Rel(repo.Root(), b.BundleRoot.Native()) if err != nil { return diag.FromErr(err) } From 0da17f6ec6805778ac04ea8c6f5c5155926018c8 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:35:54 +0530 Subject: [PATCH 08/63] Add default value for `volume_type` for DABs (#1952) ## Changes The Unity Catalog volumes API requires a `volume_type` argument when creating volumes. In the context of DABs, it's unnecessary to require users to specify the volume type every time. We can default to "MANAGED" instead. This PR is similar to https://github.com/databricks/cli/pull/1743 which does the same for dashboards. ## Tests Unit test --- .../mutator/configure_volume_defaults.go | 44 +++++++++++ .../mutator/configure_volume_defaults_test.go | 75 +++++++++++++++++++ bundle/internal/schema/main.go | 19 +++++ .../testdata/fail/incorrect_volume_type.yml | 10 +++ .../internal/schema/testdata/pass/volume.yml | 9 +++ bundle/phases/initialize.go | 1 + bundle/schema/jsonschema.json | 72 ++++++++++++++++++ 7 files changed, 230 insertions(+) create mode 100644 bundle/config/mutator/configure_volume_defaults.go create mode 100644 bundle/config/mutator/configure_volume_defaults_test.go create mode 100644 bundle/internal/schema/testdata/fail/incorrect_volume_type.yml create mode 100644 bundle/internal/schema/testdata/pass/volume.yml diff --git a/bundle/config/mutator/configure_volume_defaults.go b/bundle/config/mutator/configure_volume_defaults.go new file mode 100644 index 0000000000..b2bea95d1c --- /dev/null +++ b/bundle/config/mutator/configure_volume_defaults.go @@ -0,0 +1,44 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type configureVolumeDefaults struct{} + +func ConfigureVolumeDefaults() bundle.Mutator { + return &configureVolumeDefaults{} +} + +func (m *configureVolumeDefaults) Name() string { + return "ConfigureVolumeDefaults" +} + +func (m *configureVolumeDefaults) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + var diags diag.Diagnostics + + pattern := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("volumes"), + dyn.AnyKey(), + ) + + // Configure defaults for all volumes. 
+ err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + var err error + v, err = setIfNotExists(v, dyn.NewPath(dyn.Key("volume_type")), dyn.V("MANAGED")) + if err != nil { + return dyn.InvalidValue, err + } + return v, nil + }) + }) + + diags = diags.Extend(diag.FromErr(err)) + return diags +} diff --git a/bundle/config/mutator/configure_volume_defaults_test.go b/bundle/config/mutator/configure_volume_defaults_test.go new file mode 100644 index 0000000000..b04006964f --- /dev/null +++ b/bundle/config/mutator/configure_volume_defaults_test.go @@ -0,0 +1,75 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConfigureVolumeDefaultsVolumeType(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Volumes: map[string]*resources.Volume{ + "v1": { + // Empty string is skipped. + // See below for how it is set. + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + VolumeType: "", + }, + }, + "v2": { + // Non-empty string is skipped. + CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ + VolumeType: "already-set", + }, + }, + "v3": { + // No volume type set. + }, + "v4": nil, + }, + }, + }, + } + + // We can't set an empty string in the typed configuration. + // Do it on the dyn.Value directly. + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.volumes.v1.volume_type", dyn.V("")) + }) + + diags := bundle.Apply(context.Background(), b, mutator.ConfigureVolumeDefaults()) + require.NoError(t, diags.Error()) + + var v dyn.Value + var err error + + // Set to empty string; unchanged. + v, err = dyn.Get(b.Config.Value(), "resources.volumes.v1.volume_type") + require.NoError(t, err) + assert.Equal(t, "", v.MustString()) + + // Set to non-empty string; unchanged. + v, err = dyn.Get(b.Config.Value(), "resources.volumes.v2.volume_type") + require.NoError(t, err) + assert.Equal(t, "already-set", v.MustString()) + + // Not set; set to default. + v, err = dyn.Get(b.Config.Value(), "resources.volumes.v3.volume_type") + require.NoError(t, err) + assert.Equal(t, "MANAGED", v.MustString()) + + // No valid volume; No change. + _, err = dyn.Get(b.Config.Value(), "resources.volumes.v4.volume_type") + assert.True(t, dyn.IsCannotTraverseNilError(err)) +} diff --git a/bundle/internal/schema/main.go b/bundle/internal/schema/main.go index ddeffe2fda..881ce3496c 100644 --- a/bundle/internal/schema/main.go +++ b/bundle/internal/schema/main.go @@ -93,6 +93,24 @@ func removeJobsFields(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { return s } +// While volume_type is required in the volume create API, DABs automatically sets +// it's value to "MANAGED" if it's not provided. Thus, we make it optional +// in the bundle schema. 
+func makeVolumeTypeOptional(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + if typ != reflect.TypeOf(resources.Volume{}) { + return s + } + + res := []string{} + for _, r := range s.Required { + if r != "volume_type" { + res = append(res, r) + } + } + s.Required = res + return s +} + func main() { if len(os.Args) != 2 { fmt.Println("Usage: go run main.go ") @@ -118,6 +136,7 @@ func main() { p.addDescriptions, p.addEnums, removeJobsFields, + makeVolumeTypeOptional, addInterpolationPatterns, }) if err != nil { diff --git a/bundle/internal/schema/testdata/fail/incorrect_volume_type.yml b/bundle/internal/schema/testdata/fail/incorrect_volume_type.yml new file mode 100644 index 0000000000..d540056d4a --- /dev/null +++ b/bundle/internal/schema/testdata/fail/incorrect_volume_type.yml @@ -0,0 +1,10 @@ +bundle: + name: volume with incorrect type + +resources: + volumes: + foo: + catalog_name: main + name: my_volume + schema_name: myschema + volume_type: incorrect_type diff --git a/bundle/internal/schema/testdata/pass/volume.yml b/bundle/internal/schema/testdata/pass/volume.yml new file mode 100644 index 0000000000..14684f36e5 --- /dev/null +++ b/bundle/internal/schema/testdata/pass/volume.yml @@ -0,0 +1,9 @@ +bundle: + name: a volume + +resources: + volumes: + foo: + catalog_name: main + name: my_volume + schema_name: myschema diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 3345872022..6fa0e5fede 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -68,6 +68,7 @@ func Initialize() bundle.Mutator { mutator.SetRunAs(), mutator.OverrideCompute(), mutator.ConfigureDashboardDefaults(), + mutator.ConfigureVolumeDefaults(), mutator.ProcessTargetMode(), mutator.ApplyPresets(), mutator.DefaultQueueing(), diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index cf003f4235..f791b8440e 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -791,6 +791,51 @@ } ] }, + "resources.Volume": { + "anyOf": [ + { + "type": "object", + "properties": { + "catalog_name": { + "description": "The name of the catalog where the schema and the volume are", + "$ref": "#/$defs/string" + }, + "comment": { + "description": "The comment attached to the volume", + "$ref": "#/$defs/string" + }, + "grants": { + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Grant" + }, + "name": { + "description": "The name of the volume", + "$ref": "#/$defs/string" + }, + "schema_name": { + "description": "The name of the schema where the volume is", + "$ref": "#/$defs/string" + }, + "storage_location": { + "description": "The storage location on the cloud", + "$ref": "#/$defs/string" + }, + "volume_type": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.VolumeType" + } + }, + "additionalProperties": false, + "required": [ + "catalog_name", + "name", + "schema_name" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "variable.Lookup": { "anyOf": [ { @@ -963,6 +1008,9 @@ }, "name": { "$ref": "#/$defs/string" + }, + "uuid": { + "$ref": "#/$defs/string" } }, "additionalProperties": false, @@ -1157,6 +1205,9 @@ }, "schemas": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema" + }, + "volumes": { + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Volume" } }, "additionalProperties": false @@ -1558,6 +1609,13 @@ } ] }, + "catalog.VolumeType": { + "type": 
"string", + "enum": [ + "EXTERNAL", + "MANAGED" + ] + }, "compute.Adlsgen2Info": { "anyOf": [ { @@ -5565,6 +5623,20 @@ } ] }, + "resources.Volume": { + "anyOf": [ + { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Volume" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "variable.TargetVariable": { "anyOf": [ { From 0a36681bef863b58a4d33c461efab5f502d422cc Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 4 Dec 2024 18:40:19 +0100 Subject: [PATCH 09/63] Add golangci-lint v1.62.2 (#1953) --- .github/workflows/push.yml | 17 +++++++++++++++-- .golangci.yaml | 19 +++++++++++++++++++ .vscode/settings.json | 4 ++++ Makefile | 12 +++++++++--- bundle/deploy/terraform/import.go | 2 +- internal/helpers.go | 2 +- 6 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 .golangci.yaml diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 8aea95494b..ebb3e75d4a 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -45,7 +45,6 @@ jobs: echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV echo "$(go env GOPATH)/bin" >> $GITHUB_PATH go install gotest.tools/gotestsum@latest - go install honnef.co/go/tools/cmd/staticcheck@latest - name: Pull external libraries run: | @@ -53,7 +52,7 @@ jobs: pip3 install wheel - name: Run tests - run: make test + run: make testonly - name: Publish test coverage uses: codecov/codecov-action@v4 @@ -90,6 +89,20 @@ jobs: # Exit with status code 1 if there are differences (i.e. unformatted files) git diff --exit-code + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: 1.23.2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.62.2 + args: --timeout=15m + validate-bundle-schema: runs-on: ubuntu-latest diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000000..ef25962065 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,19 @@ +linters: + disable-all: true + enable: + # errcheck and govet are part of default setup and should be included but give too many errors now + # once errors are fixed, they should be enabled here: + #- errcheck + - gosimple + #- govet + - ineffassign + - staticcheck + - unused + - gofmt +linters-settings: + gofmt: + rewrite-rules: + - pattern: 'a[b:len(a)]' + replacement: 'a[b:]' +issues: + exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ diff --git a/.vscode/settings.json b/.vscode/settings.json index 9697e221d9..853e84de83 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,6 +3,10 @@ "editor.insertSpaces": false, "editor.formatOnSave": true }, + "go.lintTool": "golangci-lint", + "go.lintFlags": [ + "--fast" + ], "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.trimFinalNewlines": true, diff --git a/Makefile b/Makefile index 243a911916..13787fdda1 100644 --- a/Makefile +++ b/Makefile @@ -7,10 +7,16 @@ fmt: @gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") lint: vendor - @echo "✓ Linting source code with https://staticcheck.io/ ..." - @staticcheck ./... + @echo "✓ Linting source code with https://golangci-lint.run/ ..." + @golangci-lint run ./... -test: lint +lintfix: vendor + @echo "✓ Linting source code with 'golangci-lint run --fix' ..." + @golangci-lint run --fix ./... 
+ +test: lint testonly + +testonly: @echo "✓ Running tests ..." @gotestsum --format pkgname-and-test-fails --no-summary=skipped --raw-command go test -v -json -short -coverprofile=coverage.txt ./... diff --git a/bundle/deploy/terraform/import.go b/bundle/deploy/terraform/import.go index dfe60a5814..0a1d1b9ce4 100644 --- a/bundle/deploy/terraform/import.go +++ b/bundle/deploy/terraform/import.go @@ -56,7 +56,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn buf := bytes.NewBuffer(nil) tf.SetStdout(buf) - //lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file + //nolint:staticcheck // SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress)) if err != nil { return diag.Errorf("terraform plan: %v", err) diff --git a/internal/helpers.go b/internal/helpers.go index 3e4b4e97c8..7cd2b578bf 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -250,7 +250,7 @@ func (t *cobraTestRunner) RunBackground() { // Reset context on command for the next test. // These commands are globals so we have to clean up to the best of our ability after each run. // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 - //lint:ignore SA1012 cobra sets the context and doesn't clear it + //nolint:staticcheck // cobra sets the context and doesn't clear it cli.SetContext(nil) // Make caller aware of error. From 0ad790e46863f947df5cdb0425d0aa217ae781e3 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 5 Dec 2024 11:13:13 +0100 Subject: [PATCH 10/63] Properly read Git metadata when running inside workspace (#1945) ## Changes Since there is no .git directory in Workspace file system, we need to make an API call to api/2.0/workspace/get-status?return_git_info=true to fetch git the root of the repo, current branch, commit and origin. Added new function FetchRepositoryInfo that either looks up and parses .git or calls remote API depending on env. Refactor Repository/View/FileSet to accept repository root rather than calculate it. This helps because: - Repository is currently created in multiple places and finding the repository root is becoming relatively expensive (API call needed). - Repository/FileSet/View do not have access to current Bundle which is where WorkplaceClient is stored. ## Tests - Tested manually by running "bundle validate --json" inside web terminal within Databricks env. - Added integration tests for the new API. 
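For reference, a minimal sketch of how the new helper is meant to be called (illustrative only — the path is a placeholder and the surrounding wiring is assumed, not part of this change):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/git"
	"github.com/databricks/databricks-sdk-go"
)

func main() {
	ctx := context.Background()
	// Assumes ambient workspace authentication; the path below is a placeholder.
	w := databricks.Must(databricks.NewWorkspaceClient())

	// FetchRepositoryInfo parses .git locally, or calls the workspace
	// get-status API when running on the Databricks runtime against a
	// /Workspace/... path.
	info, err := git.FetchRepositoryInfo(ctx, "/path/to/bundle/root", w)
	if err != nil {
		fmt.Println("failed to read git info:", err)
	}

	// All fields are empty strings when the path is not inside a Git repo or
	// Git folder; metadata read errors are logged as warnings, not returned.
	fmt.Println(info.WorktreeRoot, info.CurrentBranch, info.LatestCommit, info.OriginURL)
}
```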
--------- Co-authored-by: Andrew Nester Co-authored-by: Pieter Noordhuis --- bundle/bundle.go | 4 + bundle/bundle_read_only.go | 4 + bundle/config/mutator/load_git_details.go | 49 +++--- bundle/config/validate/files_to_sync_test.go | 1 + bundle/deploy/files/sync.go | 9 +- cmd/sync/sync.go | 32 +++- internal/git_fetch_test.go | 172 +++++++++++++++++++ libs/diag/diagnostic.go | 13 ++ libs/git/fileset.go | 10 +- libs/git/fileset_test.go | 24 ++- libs/git/info.go | 161 +++++++++++++++++ libs/git/repository.go | 14 +- libs/git/view.go | 8 +- libs/git/view_test.go | 30 ++-- libs/sync/snapshot_test.go | 12 +- libs/sync/sync.go | 11 +- libs/sync/sync_test.go | 6 +- 17 files changed, 471 insertions(+), 89 deletions(-) create mode 100644 internal/git_fetch_test.go create mode 100644 libs/git/info.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 46710538a8..76c87c24cd 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -48,6 +48,10 @@ type Bundle struct { // Exclusively use this field for filesystem operations. SyncRoot vfs.Path + // Path to the root of git worktree containing the bundle. + // https://git-scm.com/docs/git-worktree + WorktreeRoot vfs.Path + // Config contains the bundle configuration. // It is loaded from the bundle configuration files and mutators may update it. Config config.Root diff --git a/bundle/bundle_read_only.go b/bundle/bundle_read_only.go index ceab95c0b4..4bdd94e598 100644 --- a/bundle/bundle_read_only.go +++ b/bundle/bundle_read_only.go @@ -32,6 +32,10 @@ func (r ReadOnlyBundle) SyncRoot() vfs.Path { return r.b.SyncRoot } +func (r ReadOnlyBundle) WorktreeRoot() vfs.Path { + return r.b.WorktreeRoot +} + func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient { return r.b.WorkspaceClient() } diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 77558d9b50..82255552aa 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/git" - "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/vfs" ) type loadGitDetails struct{} @@ -21,45 +21,40 @@ func (m *loadGitDetails) Name() string { } func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - // Load relevant git repository - repo, err := git.NewRepository(b.BundleRoot) + var diags diag.Diagnostics + info, err := git.FetchRepositoryInfo(ctx, b.BundleRoot.Native(), b.WorkspaceClient()) if err != nil { - return diag.FromErr(err) + diags = append(diags, diag.WarningFromErr(err)...) 
} - // Read branch name of current checkout - branch, err := repo.CurrentBranch() - if err == nil { - b.Config.Bundle.Git.ActualBranch = branch - if b.Config.Bundle.Git.Branch == "" { - // Only load branch if there's no user defined value - b.Config.Bundle.Git.Inferred = true - b.Config.Bundle.Git.Branch = branch - } + if info.WorktreeRoot == "" { + b.WorktreeRoot = b.BundleRoot } else { - log.Warnf(ctx, "failed to load current branch: %s", err) + b.WorktreeRoot = vfs.MustNew(info.WorktreeRoot) + } + + b.Config.Bundle.Git.ActualBranch = info.CurrentBranch + if b.Config.Bundle.Git.Branch == "" { + // Only load branch if there's no user defined value + b.Config.Bundle.Git.Inferred = true + b.Config.Bundle.Git.Branch = info.CurrentBranch } // load commit hash if undefined if b.Config.Bundle.Git.Commit == "" { - commit, err := repo.LatestCommit() - if err != nil { - log.Warnf(ctx, "failed to load latest commit: %s", err) - } else { - b.Config.Bundle.Git.Commit = commit - } + b.Config.Bundle.Git.Commit = info.LatestCommit } + // load origin url if undefined if b.Config.Bundle.Git.OriginURL == "" { - remoteUrl := repo.OriginUrl() - b.Config.Bundle.Git.OriginURL = remoteUrl + b.Config.Bundle.Git.OriginURL = info.OriginURL } - // repo.Root() returns the absolute path of the repo - relBundlePath, err := filepath.Rel(repo.Root(), b.BundleRoot.Native()) + relBundlePath, err := filepath.Rel(b.WorktreeRoot.Native(), b.BundleRoot.Native()) if err != nil { - return diag.FromErr(err) + diags = append(diags, diag.FromErr(err)...) + } else { + b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath) } - b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath) - return nil + return diags } diff --git a/bundle/config/validate/files_to_sync_test.go b/bundle/config/validate/files_to_sync_test.go index 2a598fa723..30af9026d1 100644 --- a/bundle/config/validate/files_to_sync_test.go +++ b/bundle/config/validate/files_to_sync_test.go @@ -44,6 +44,7 @@ func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle { BundleRoot: vfs.MustNew(dir), SyncRootPath: dir, SyncRoot: vfs.MustNew(dir), + WorktreeRoot: vfs.MustNew(dir), Config: config.Root{ Bundle: config.Bundle{ Target: "default", diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index 347ed30793..e3abc5fefa 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -28,10 +28,11 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp } opts := &sync.SyncOptions{ - LocalRoot: rb.SyncRoot(), - Paths: rb.Config().Sync.Paths, - Include: includes, - Exclude: rb.Config().Sync.Exclude, + WorktreeRoot: rb.WorktreeRoot(), + LocalRoot: rb.SyncRoot(), + Paths: rb.Config().Sync.Paths, + Include: includes, + Exclude: rb.Config().Sync.Exclude, RemotePath: rb.Config().Workspace.FilePath, Host: rb.WorkspaceClient().Config.Host, diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 2092d9e330..6d722fb08a 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -12,6 +12,8 @@ import ( "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/libs/git" + "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" "github.com/databricks/cli/libs/vfs" "github.com/spf13/cobra" @@ -37,6 +39,7 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b * opts.Full = f.full opts.PollInterval = f.interval + opts.WorktreeRoot = b.WorktreeRoot return opts, nil } @@ 
-60,11 +63,30 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn } } + ctx := cmd.Context() + client := root.WorkspaceClient(ctx) + + localRoot := vfs.MustNew(args[0]) + info, err := git.FetchRepositoryInfo(ctx, localRoot.Native(), client) + + if err != nil { + log.Warnf(ctx, "Failed to read git info: %s", err) + } + + var worktreeRoot vfs.Path + + if info.WorktreeRoot == "" { + worktreeRoot = localRoot + } else { + worktreeRoot = vfs.MustNew(info.WorktreeRoot) + } + opts := sync.SyncOptions{ - LocalRoot: vfs.MustNew(args[0]), - Paths: []string{"."}, - Include: nil, - Exclude: nil, + WorktreeRoot: worktreeRoot, + LocalRoot: localRoot, + Paths: []string{"."}, + Include: nil, + Exclude: nil, RemotePath: args[1], Full: f.full, @@ -75,7 +97,7 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn // The sync code will automatically create this directory if it doesn't // exist and add it to the `.gitignore` file in the root. SnapshotBasePath: filepath.Join(args[0], ".databricks"), - WorkspaceClient: root.WorkspaceClient(cmd.Context()), + WorkspaceClient: client, OutputHandler: outputHandler, } diff --git a/internal/git_fetch_test.go b/internal/git_fetch_test.go new file mode 100644 index 0000000000..5dab6be762 --- /dev/null +++ b/internal/git_fetch_test.go @@ -0,0 +1,172 @@ +package internal + +import ( + "os" + "os/exec" + "path" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/git" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const examplesRepoUrl = "https://github.com/databricks/bundle-examples" +const examplesRepoProvider = "gitHub" + +func assertFullGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) { + assert.Equal(t, "main", info.CurrentBranch) + assert.NotEmpty(t, info.LatestCommit) + assert.Equal(t, examplesRepoUrl, info.OriginURL) + assert.Equal(t, expectedRoot, info.WorktreeRoot) +} + +func assertEmptyGitInfo(t *testing.T, info git.RepositoryInfo) { + assertSparseGitInfo(t, "", info) +} + +func assertSparseGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) { + assert.Equal(t, "", info.CurrentBranch) + assert.Equal(t, "", info.LatestCommit) + assert.Equal(t, "", info.OriginURL) + assert.Equal(t, expectedRoot, info.WorktreeRoot) +} + +func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + me, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + targetPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) + stdout, stderr := RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath) + t.Cleanup(func() { + RequireSuccessfulRun(t, "repos", "delete", targetPath) + }) + + assert.Empty(t, stderr.String()) + assert.NotEmpty(t, stdout.String()) + ctx = dbr.MockRuntime(ctx, true) + + for _, inputPath := range []string{ + path.Join(targetPath, "knowledge_base/dashboard_nyc_taxi"), + targetPath, + } { + t.Run(inputPath, func(t *testing.T) { + info, err := git.FetchRepositoryInfo(ctx, inputPath, wt.W) + assert.NoError(t, err) + assertFullGitInfo(t, targetPath, info) + }) + } +} + +func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + me, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + rootPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, 
"testing-nonrepo-")) + _, stderr := RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) + t.Cleanup(func() { + RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) + }) + + assert.Empty(t, stderr.String()) + ctx = dbr.MockRuntime(ctx, true) + + tests := []struct { + input string + msg string + }{ + { + input: path.Join(rootPath, "a/b/c"), + msg: "", + }, + { + input: rootPath, + msg: "", + }, + { + input: path.Join(rootPath, "/non-existent"), + msg: "doesn't exist", + }, + } + + for _, test := range tests { + t.Run(test.input+" <==> "+test.msg, func(t *testing.T) { + info, err := git.FetchRepositoryInfo(ctx, test.input, wt.W) + if test.msg == "" { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.msg) + } + assertEmptyGitInfo(t, info) + }) + } +} + +func TestAccFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + repo := cloneRepoLocally(t, examplesRepoUrl) + + for _, inputPath := range []string{ + filepath.Join(repo, "knowledge_base/dashboard_nyc_taxi"), + repo, + } { + t.Run(inputPath, func(t *testing.T) { + info, err := git.FetchRepositoryInfo(ctx, inputPath, wt.W) + assert.NoError(t, err) + assertFullGitInfo(t, repo, info) + }) + } +} + +func cloneRepoLocally(t *testing.T, repoUrl string) string { + tempDir := t.TempDir() + localRoot := filepath.Join(tempDir, "repo") + + cmd := exec.Command("git", "clone", "--depth=1", examplesRepoUrl, localRoot) + err := cmd.Run() + require.NoError(t, err) + return localRoot +} + +func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + tempDir := t.TempDir() + root := filepath.Join(tempDir, "repo") + require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0700)) + + tests := []string{ + filepath.Join(root, "a/b/c"), + root, + filepath.Join(root, "/non-existent"), + } + + for _, input := range tests { + t.Run(input, func(t *testing.T) { + info, err := git.FetchRepositoryInfo(ctx, input, wt.W) + assert.NoError(t, err) + assertEmptyGitInfo(t, info) + }) + } +} + +func TestAccFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + tempDir := t.TempDir() + root := filepath.Join(tempDir, "repo") + path := filepath.Join(root, "a/b/c") + require.NoError(t, os.MkdirAll(path, 0700)) + require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0000)) + + info, err := git.FetchRepositoryInfo(ctx, path, wt.W) + assert.NoError(t, err) + assertSparseGitInfo(t, root, info) +} diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go index 254ecbd7d3..a4f8c7b6b7 100644 --- a/libs/diag/diagnostic.go +++ b/libs/diag/diagnostic.go @@ -53,6 +53,19 @@ func FromErr(err error) Diagnostics { } } +// FromErr returns a new warning diagnostic from the specified error, if any. +func WarningFromErr(err error) Diagnostics { + if err == nil { + return nil + } + return []Diagnostic{ + { + Severity: Warning, + Summary: err.Error(), + }, + } +} + // Warningf creates a new warning diagnostic. func Warningf(format string, args ...any) Diagnostics { return []Diagnostic{ diff --git a/libs/git/fileset.go b/libs/git/fileset.go index bb1cd4692f..8391548c9c 100644 --- a/libs/git/fileset.go +++ b/libs/git/fileset.go @@ -13,10 +13,10 @@ type FileSet struct { view *View } -// NewFileSet returns [FileSet] for the Git repository located at `root`. 
-func NewFileSet(root vfs.Path, paths ...[]string) (*FileSet, error) { +// NewFileSet returns [FileSet] for the directory `root` which is contained within Git worktree located at `worktreeRoot`. +func NewFileSet(worktreeRoot, root vfs.Path, paths ...[]string) (*FileSet, error) { fs := fileset.New(root, paths...) - v, err := NewView(root) + v, err := NewView(worktreeRoot, root) if err != nil { return nil, err } @@ -27,6 +27,10 @@ func NewFileSet(root vfs.Path, paths ...[]string) (*FileSet, error) { }, nil } +func NewFileSetAtRoot(root vfs.Path, paths ...[]string) (*FileSet, error) { + return NewFileSet(root, root, paths...) +} + func (f *FileSet) IgnoreFile(file string) (bool, error) { return f.view.IgnoreFile(file) } diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index 37f3611d1f..f4fd931fd1 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/require" ) -func testFileSetAll(t *testing.T, root string) { - fileSet, err := NewFileSet(vfs.MustNew(root)) +func testFileSetAll(t *testing.T, worktreeRoot, root string) { + fileSet, err := NewFileSet(vfs.MustNew(worktreeRoot), vfs.MustNew(root)) require.NoError(t, err) files, err := fileSet.Files() require.NoError(t, err) @@ -24,18 +24,28 @@ func testFileSetAll(t *testing.T, root string) { } func TestFileSetListAllInRepo(t *testing.T) { - testFileSetAll(t, "./testdata") + testFileSetAll(t, "./testdata", "./testdata") +} + +func TestFileSetListAllInRepoDifferentRoot(t *testing.T) { + testFileSetAll(t, ".", "./testdata") } func TestFileSetListAllInTempDir(t *testing.T) { - testFileSetAll(t, copyTestdata(t, "./testdata")) + dir := copyTestdata(t, "./testdata") + testFileSetAll(t, dir, dir) +} + +func TestFileSetListAllInTempDirDifferentRoot(t *testing.T) { + dir := copyTestdata(t, "./testdata") + testFileSetAll(t, filepath.Dir(dir), dir) } func TestFileSetNonCleanRoot(t *testing.T) { // Test what happens if the root directory can be simplified. // Path simplification is done by most filepath functions. // This should yield the same result as above test. 
- fileSet, err := NewFileSet(vfs.MustNew("./testdata/../testdata")) + fileSet, err := NewFileSetAtRoot(vfs.MustNew("./testdata/../testdata")) require.NoError(t, err) files, err := fileSet.Files() require.NoError(t, err) @@ -44,7 +54,7 @@ func TestFileSetNonCleanRoot(t *testing.T) { func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() - fileSet, err := NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) fileSet.EnsureValidGitIgnoreExists() @@ -59,7 +69,7 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { projectDir := t.TempDir() gitIgnorePath := filepath.Join(projectDir, ".gitignore") - fileSet, err := NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644) require.NoError(t, err) diff --git a/libs/git/info.go b/libs/git/info.go new file mode 100644 index 0000000000..13c2981135 --- /dev/null +++ b/libs/git/info.go @@ -0,0 +1,161 @@ +package git + +import ( + "context" + "errors" + "io/fs" + "net/http" + "os" + "path" + "path/filepath" + "strings" + + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/client" +) + +type RepositoryInfo struct { + // Various metadata about the repo. Each could be "" if it could not be read. No error is returned for such case. + OriginURL string + LatestCommit string + CurrentBranch string + + // Absolute path to determined worktree root or "" if worktree root could not be determined. + WorktreeRoot string +} + +type gitInfo struct { + Branch string `json:"branch"` + HeadCommitID string `json:"head_commit_id"` + Path string `json:"path"` + URL string `json:"url"` +} + +type response struct { + GitInfo *gitInfo `json:"git_info,omitempty"` +} + +// Fetch repository information either by quering .git or by fetching it from API (for dabs-in-workspace case). +// - In case we could not find git repository, all string fields of RepositoryInfo will be "" and err will be nil. +// - If there were any errors when trying to determine git root (e.g. API call returned an error or there were permission issues +// reading the file system), all strings fields of RepositoryInfo will be "" and err will be non-nil. +// - If we could determine git worktree root but there were errors when reading metadata (origin, branch, commit), those errors +// will be logged as warnings, RepositoryInfo is guaranteed to have non-empty WorktreeRoot and other fields on best effort basis. +// - In successful case, all fields are set to proper git repository metadata. 
+func FetchRepositoryInfo(ctx context.Context, path string, w *databricks.WorkspaceClient) (RepositoryInfo, error) { + if strings.HasPrefix(path, "/Workspace/") && dbr.RunsOnRuntime(ctx) { + return fetchRepositoryInfoAPI(ctx, path, w) + } else { + return fetchRepositoryInfoDotGit(ctx, path) + } +} + +func fetchRepositoryInfoAPI(ctx context.Context, path string, w *databricks.WorkspaceClient) (RepositoryInfo, error) { + result := RepositoryInfo{} + + apiClient, err := client.New(w.Config) + if err != nil { + return result, err + } + + var response response + const apiEndpoint = "/api/2.0/workspace/get-status" + + err = apiClient.Do( + ctx, + http.MethodGet, + apiEndpoint, + nil, + map[string]string{ + "path": path, + "return_git_info": "true", + }, + &response, + ) + + if err != nil { + return result, err + } + + // Check if GitInfo is present and extract relevant fields + gi := response.GitInfo + if gi != nil { + fixedPath := ensureWorkspacePrefix(gi.Path) + result.OriginURL = gi.URL + result.LatestCommit = gi.HeadCommitID + result.CurrentBranch = gi.Branch + result.WorktreeRoot = fixedPath + } else { + log.Warnf(ctx, "Failed to load git info from %s", apiEndpoint) + } + + return result, nil +} + +func ensureWorkspacePrefix(p string) string { + if !strings.HasPrefix(p, "/Workspace/") { + return path.Join("/Workspace", p) + } + return p +} + +func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo, error) { + result := RepositoryInfo{} + + rootDir, err := findLeafInTree(path, GitDirectoryName) + if rootDir == "" { + return result, err + } + + result.WorktreeRoot = rootDir + + repo, err := NewRepository(vfs.MustNew(rootDir)) + if err != nil { + log.Warnf(ctx, "failed to read .git: %s", err) + + // return early since operations below won't work + return result, nil + } + + result.OriginURL = repo.OriginUrl() + + result.CurrentBranch, err = repo.CurrentBranch() + if err != nil { + log.Warnf(ctx, "failed to load current branch: %s", err) + } + + result.LatestCommit, err = repo.LatestCommit() + if err != nil { + log.Warnf(ctx, "failed to load latest commit: %s", err) + } + + return result, nil +} + +func findLeafInTree(p string, leafName string) (string, error) { + var err error + for i := 0; i < 10000; i++ { + _, err = os.Stat(filepath.Join(p, leafName)) + + if err == nil { + // Found [leafName] in p + return p, nil + } + + // ErrNotExist means we continue traversal up the tree. + if errors.Is(err, fs.ErrNotExist) { + parent := filepath.Dir(p) + if parent == p { + return "", nil + } + p = parent + continue + } + break + } + + return "", err +} diff --git a/libs/git/repository.go b/libs/git/repository.go index f0e9e1eb23..b94297ab98 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -1,9 +1,7 @@ package git import ( - "errors" "fmt" - "io/fs" "net/url" "path" "path/filepath" @@ -204,17 +202,7 @@ func (r *Repository) Ignore(relPath string) (bool, error) { return false, nil } -func NewRepository(path vfs.Path) (*Repository, error) { - rootDir, err := vfs.FindLeafInTree(path, GitDirectoryName) - if err != nil { - if !errors.Is(err, fs.ErrNotExist) { - return nil, err - } - // Cannot find `.git` directory. - // Treat the specified path as a potential repository root checkout. - rootDir = path - } - +func NewRepository(rootDir vfs.Path) (*Repository, error) { // Derive $GIT_DIR and $GIT_COMMON_DIR paths if this is a real repository. // If it isn't a real repository, they'll point to the (non-existent) `.git` directory. 
gitDir, gitCommonDir, err := resolveGitDirs(rootDir) diff --git a/libs/git/view.go b/libs/git/view.go index 2d2e39a60b..2eaba1f8b4 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -72,8 +72,8 @@ func (v *View) IgnoreDirectory(dir string) (bool, error) { return v.Ignore(dir + "/") } -func NewView(root vfs.Path) (*View, error) { - repo, err := NewRepository(root) +func NewView(worktreeRoot, root vfs.Path) (*View, error) { + repo, err := NewRepository(worktreeRoot) if err != nil { return nil, err } @@ -96,6 +96,10 @@ func NewView(root vfs.Path) (*View, error) { }, nil } +func NewViewAtRoot(root vfs.Path) (*View, error) { + return NewView(root, root) +} + func (v *View) EnsureValidGitIgnoreExists() error { ign, err := v.IgnoreDirectory(".databricks") if err != nil { diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 76fba34584..06f6f94194 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -90,19 +90,19 @@ func testViewAtRoot(t *testing.T, tv testView) { } func TestViewRootInBricksRepo(t *testing.T) { - v, err := NewView(vfs.MustNew("./testdata")) + v, err := NewViewAtRoot(vfs.MustNew("./testdata")) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } func TestViewRootInTempRepo(t *testing.T) { - v, err := NewView(vfs.MustNew(createFakeRepo(t, "testdata"))) + v, err := NewViewAtRoot(vfs.MustNew(createFakeRepo(t, "testdata"))) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } func TestViewRootInTempDir(t *testing.T) { - v, err := NewView(vfs.MustNew(copyTestdata(t, "testdata"))) + v, err := NewViewAtRoot(vfs.MustNew(copyTestdata(t, "testdata"))) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } @@ -125,20 +125,21 @@ func testViewAtA(t *testing.T, tv testView) { } func TestViewAInBricksRepo(t *testing.T) { - v, err := NewView(vfs.MustNew("./testdata/a")) + v, err := NewView(vfs.MustNew("."), vfs.MustNew("./testdata/a")) require.NoError(t, err) testViewAtA(t, testView{t, v}) } func TestViewAInTempRepo(t *testing.T) { - v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a"))) + repo := createFakeRepo(t, "testdata") + v, err := NewView(vfs.MustNew(repo), vfs.MustNew(filepath.Join(repo, "a"))) require.NoError(t, err) testViewAtA(t, testView{t, v}) } func TestViewAInTempDir(t *testing.T) { // Since this is not a fake repo it should not traverse up the tree. - v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a"))) + v, err := NewViewAtRoot(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a"))) require.NoError(t, err) tv := testView{t, v} @@ -175,20 +176,21 @@ func testViewAtAB(t *testing.T, tv testView) { } func TestViewABInBricksRepo(t *testing.T) { - v, err := NewView(vfs.MustNew("./testdata/a/b")) + v, err := NewView(vfs.MustNew("."), vfs.MustNew("./testdata/a/b")) require.NoError(t, err) testViewAtAB(t, testView{t, v}) } func TestViewABInTempRepo(t *testing.T) { - v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a", "b"))) + repo := createFakeRepo(t, "testdata") + v, err := NewView(vfs.MustNew(repo), vfs.MustNew(filepath.Join(repo, "a", "b"))) require.NoError(t, err) testViewAtAB(t, testView{t, v}) } func TestViewABInTempDir(t *testing.T) { // Since this is not a fake repo it should not traverse up the tree. 
- v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a", "b"))) + v, err := NewViewAtRoot(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a", "b"))) tv := testView{t, v} require.NoError(t, err) @@ -215,7 +217,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredAtRoot(t *testing.T) // Since root .gitignore already has .databricks, there should be no edits // to root .gitignore - v, err := NewView(vfs.MustNew(repoPath)) + v, err := NewViewAtRoot(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -235,7 +237,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredInSubdir(t *testing.T // Since root .gitignore already has .databricks, there should be no edits // to a/.gitignore - v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a"))) + v, err := NewView(vfs.MustNew(repoPath), vfs.MustNew(filepath.Join(repoPath, "a"))) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -253,7 +255,7 @@ func TestViewAddsGitignoreWithCacheDir(t *testing.T) { assert.NoError(t, err) // Since root .gitignore was deleted, new view adds .databricks to root .gitignore - v, err := NewView(vfs.MustNew(repoPath)) + v, err := NewViewAtRoot(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -271,7 +273,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) { require.NoError(t, err) // Since root .gitignore was deleted, new view adds .databricks to a/.gitignore - v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a"))) + v, err := NewView(vfs.MustNew(repoPath), vfs.MustNew(filepath.Join(repoPath, "a"))) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -288,7 +290,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) { func TestViewAlwaysIgnoresCacheDir(t *testing.T) { repoPath := createFakeRepo(t, "testdata") - v, err := NewView(vfs.MustNew(repoPath)) + v, err := NewViewAtRoot(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index b7830406de..eef526e583 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -30,7 +30,7 @@ func TestDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -94,7 +94,7 @@ func TestSymlinkDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -125,7 +125,7 @@ func TestFolderDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -170,7 +170,7 @@ func TestPythonNotebookDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -245,7 +245,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { // Create temp project dir projectDir := 
t.TempDir() - fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -282,7 +282,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) + fileSet, err := git.NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ diff --git a/libs/sync/sync.go b/libs/sync/sync.go index cc9c739446..6bd26f2241 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -19,10 +19,11 @@ import ( type OutputHandler func(context.Context, <-chan Event) type SyncOptions struct { - LocalRoot vfs.Path - Paths []string - Include []string - Exclude []string + WorktreeRoot vfs.Path + LocalRoot vfs.Path + Paths []string + Include []string + Exclude []string RemotePath string @@ -62,7 +63,7 @@ type Sync struct { // New initializes and returns a new [Sync] instance. func New(ctx context.Context, opts SyncOptions) (*Sync, error) { - fileSet, err := git.NewFileSet(opts.LocalRoot, opts.Paths) + fileSet, err := git.NewFileSet(opts.WorktreeRoot, opts.LocalRoot, opts.Paths) if err != nil { return nil, err } diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go index 2d800f4668..6168dc2172 100644 --- a/libs/sync/sync_test.go +++ b/libs/sync/sync_test.go @@ -37,7 +37,7 @@ func TestGetFileSet(t *testing.T) { dir := setupFiles(t) root := vfs.MustNew(dir) - fileSet, err := git.NewFileSet(root) + fileSet, err := git.NewFileSetAtRoot(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() @@ -103,7 +103,7 @@ func TestRecursiveExclude(t *testing.T) { dir := setupFiles(t) root := vfs.MustNew(dir) - fileSet, err := git.NewFileSet(root) + fileSet, err := git.NewFileSetAtRoot(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() @@ -133,7 +133,7 @@ func TestNegateExclude(t *testing.T) { dir := setupFiles(t) root := vfs.MustNew(dir) - fileSet, err := git.NewFileSet(root) + fileSet, err := git.NewFileSetAtRoot(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() From 647b09e6e299cf4698b2aab67f81a4b14eed4c42 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 5 Dec 2024 12:09:45 +0100 Subject: [PATCH 11/63] Upgrade TF provider to 1.59.0 (#1960) ## Changes Notable changes: * Fixes dashboard deployment if it was trashed out-of-band. * Removes client-side validation for single-node cluster configuration (also see #1546). Beware: for the same reason as in #1900, this excludes the changes for the quality monitor resource. ## Tests Integration tests pass. 
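As a hypothetical illustration of how the newly generated data source types are consumed — the lookup key and model name below are made up, and the JSON encoding step stands in for the real Terraform root generation:

```go
package main

import (
	"encoding/json"
	"fmt"

	// Internal package: this only compiles from within the CLI module itself.
	"github.com/databricks/cli/bundle/internal/tf/schema"
)

func main() {
	ds := schema.NewDataSources()

	// The generated maps are map[string]any, so the new structs can be
	// assigned directly; "this" and the model name are placeholders.
	ds.RegisteredModelVersions["this"] = schema.DataSourceRegisteredModelVersions{
		FullName: "main.default.my_model",
	}

	out, err := json.MarshalIndent(ds, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```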
--- bundle/internal/tf/codegen/schema/version.go | 2 +- .../data_source_aws_assume_role_policy.go | 1 + .../schema/data_source_aws_bucket_policy.go | 1 + .../data_source_aws_crossaccount_policy.go | 1 + ...ce_aws_unity_catalog_assume_role_policy.go | 1 + .../data_source_aws_unity_catalog_policy.go | 1 + ..._source_mws_network_connectivity_config.go | 51 +++++ ...source_mws_network_connectivity_configs.go | 9 + .../data_source_registered_model_versions.go | 52 +++++ .../schema/data_source_serving_endpoints.go | 178 ++++++++++++++++++ bundle/internal/tf/schema/data_sources.go | 8 + .../tf/schema/resource_permissions.go | 51 ++--- bundle/internal/tf/schema/root.go | 2 +- 13 files changed, 331 insertions(+), 27 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_mws_network_connectivity_config.go create mode 100644 bundle/internal/tf/schema/data_source_mws_network_connectivity_configs.go create mode 100644 bundle/internal/tf/schema/data_source_registered_model_versions.go create mode 100644 bundle/internal/tf/schema/data_source_serving_endpoints.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index cfbc46c08d..a778e0232f 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.58.0" +const ProviderVersion = "1.59.0" diff --git a/bundle/internal/tf/schema/data_source_aws_assume_role_policy.go b/bundle/internal/tf/schema/data_source_aws_assume_role_policy.go index 7c1cace31f..25fea09022 100644 --- a/bundle/internal/tf/schema/data_source_aws_assume_role_policy.go +++ b/bundle/internal/tf/schema/data_source_aws_assume_role_policy.go @@ -3,6 +3,7 @@ package schema type DataSourceAwsAssumeRolePolicy struct { + AwsPartition string `json:"aws_partition,omitempty"` DatabricksAccountId string `json:"databricks_account_id,omitempty"` ExternalId string `json:"external_id"` ForLogDelivery bool `json:"for_log_delivery,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_aws_bucket_policy.go b/bundle/internal/tf/schema/data_source_aws_bucket_policy.go index e1ce2f5049..d6b26c8cdf 100644 --- a/bundle/internal/tf/schema/data_source_aws_bucket_policy.go +++ b/bundle/internal/tf/schema/data_source_aws_bucket_policy.go @@ -3,6 +3,7 @@ package schema type DataSourceAwsBucketPolicy struct { + AwsPartition string `json:"aws_partition,omitempty"` Bucket string `json:"bucket"` DatabricksAccountId string `json:"databricks_account_id,omitempty"` DatabricksE2AccountId string `json:"databricks_e2_account_id,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go b/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go index d639c82a8c..9591940db9 100644 --- a/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go +++ b/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go @@ -4,6 +4,7 @@ package schema type DataSourceAwsCrossaccountPolicy struct { AwsAccountId string `json:"aws_account_id,omitempty"` + AwsPartition string `json:"aws_partition,omitempty"` Id string `json:"id,omitempty"` Json string `json:"json,omitempty"` PassRoles []string `json:"pass_roles,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go index 14d5c169dc..29d47f9239 100644 --- a/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go +++ 
b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go @@ -4,6 +4,7 @@ package schema type DataSourceAwsUnityCatalogAssumeRolePolicy struct { AwsAccountId string `json:"aws_account_id"` + AwsPartition string `json:"aws_partition,omitempty"` ExternalId string `json:"external_id"` Id string `json:"id,omitempty"` Json string `json:"json,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go index 2832bdf72a..6d6acc57dd 100644 --- a/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go +++ b/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go @@ -4,6 +4,7 @@ package schema type DataSourceAwsUnityCatalogPolicy struct { AwsAccountId string `json:"aws_account_id"` + AwsPartition string `json:"aws_partition,omitempty"` BucketName string `json:"bucket_name"` Id string `json:"id,omitempty"` Json string `json:"json,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_mws_network_connectivity_config.go b/bundle/internal/tf/schema/data_source_mws_network_connectivity_config.go new file mode 100644 index 0000000000..5d03bd4919 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_mws_network_connectivity_config.go @@ -0,0 +1,51 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule struct { + CidrBlocks []string `json:"cidr_blocks,omitempty"` +} + +type DataSourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule struct { + Subnets []string `json:"subnets,omitempty"` + TargetRegion string `json:"target_region,omitempty"` + TargetServices []string `json:"target_services,omitempty"` +} + +type DataSourceMwsNetworkConnectivityConfigEgressConfigDefaultRules struct { + AwsStableIpRule *DataSourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule `json:"aws_stable_ip_rule,omitempty"` + AzureServiceEndpointRule *DataSourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"` +} + +type DataSourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules struct { + ConnectionState string `json:"connection_state,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Deactivated bool `json:"deactivated,omitempty"` + DeactivatedAt int `json:"deactivated_at,omitempty"` + EndpointName string `json:"endpoint_name,omitempty"` + GroupId string `json:"group_id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + ResourceId string `json:"resource_id,omitempty"` + RuleId string `json:"rule_id,omitempty"` + UpdatedTime int `json:"updated_time,omitempty"` +} + +type DataSourceMwsNetworkConnectivityConfigEgressConfigTargetRules struct { + AzurePrivateEndpointRules []DataSourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules `json:"azure_private_endpoint_rules,omitempty"` +} + +type DataSourceMwsNetworkConnectivityConfigEgressConfig struct { + DefaultRules *DataSourceMwsNetworkConnectivityConfigEgressConfigDefaultRules `json:"default_rules,omitempty"` + TargetRules *DataSourceMwsNetworkConnectivityConfigEgressConfigTargetRules `json:"target_rules,omitempty"` +} + +type DataSourceMwsNetworkConnectivityConfig struct { + AccountId string `json:"account_id,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Id string 
`json:"id,omitempty"` + Name string `json:"name"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + Region string `json:"region,omitempty"` + UpdatedTime int `json:"updated_time,omitempty"` + EgressConfig *DataSourceMwsNetworkConnectivityConfigEgressConfig `json:"egress_config,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_mws_network_connectivity_configs.go b/bundle/internal/tf/schema/data_source_mws_network_connectivity_configs.go new file mode 100644 index 0000000000..721483a9ea --- /dev/null +++ b/bundle/internal/tf/schema/data_source_mws_network_connectivity_configs.go @@ -0,0 +1,9 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceMwsNetworkConnectivityConfigs struct { + Id string `json:"id,omitempty"` + Names []string `json:"names,omitempty"` + Region string `json:"region,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_registered_model_versions.go b/bundle/internal/tf/schema/data_source_registered_model_versions.go new file mode 100644 index 0000000000..f70e58f85f --- /dev/null +++ b/bundle/internal/tf/schema/data_source_registered_model_versions.go @@ -0,0 +1,52 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceRegisteredModelVersionsModelVersionsAliases struct { + AliasName string `json:"alias_name,omitempty"` + VersionNum int `json:"version_num,omitempty"` +} + +type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependenciesDependenciesFunction struct { + FunctionFullName string `json:"function_full_name"` +} + +type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependenciesDependenciesTable struct { + TableFullName string `json:"table_full_name"` +} + +type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependenciesDependencies struct { + Function []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependenciesDependenciesFunction `json:"function,omitempty"` + Table []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependenciesDependenciesTable `json:"table,omitempty"` +} + +type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies struct { + Dependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependenciesDependencies `json:"dependencies,omitempty"` +} + +type DataSourceRegisteredModelVersionsModelVersions struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + ModelName string `json:"model_name,omitempty"` + RunId string `json:"run_id,omitempty"` + RunWorkspaceId int `json:"run_workspace_id,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + Source string `json:"source,omitempty"` + Status string `json:"status,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Version int `json:"version,omitempty"` + Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` + ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` +} + +type DataSourceRegisteredModelVersions struct { + 
FullName string `json:"full_name"` + ModelVersions []DataSourceRegisteredModelVersionsModelVersions `json:"model_versions,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_serving_endpoints.go b/bundle/internal/tf/schema/data_source_serving_endpoints.go new file mode 100644 index 0000000000..028121b5ad --- /dev/null +++ b/bundle/internal/tf/schema/data_source_serving_endpoints.go @@ -0,0 +1,178 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii struct { + Behavior string `json:"behavior"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput struct { + InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Safety bool `json:"safety,omitempty"` + ValidTopics []string `json:"valid_topics,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { + Behavior string `json:"behavior"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutput struct { + InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Safety bool `json:"safety,omitempty"` + ValidTopics []string `json:"valid_topics,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayGuardrails struct { + Input []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput `json:"input,omitempty"` + Output []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutput `json:"output,omitempty"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayInferenceTableConfig struct { + CatalogName string `json:"catalog_name,omitempty"` + Enabled bool `json:"enabled,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + TableNamePrefix string `json:"table_name_prefix,omitempty"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayRateLimits struct { + Calls int `json:"calls"` + Key string `json:"key,omitempty"` + RenewalPeriod string `json:"renewal_period"` +} + +type DataSourceServingEndpointsEndpointsAiGatewayUsageTrackingConfig struct { + Enabled bool `json:"enabled,omitempty"` +} + +type DataSourceServingEndpointsEndpointsAiGateway struct { + Guardrails []DataSourceServingEndpointsEndpointsAiGatewayGuardrails `json:"guardrails,omitempty"` + InferenceTableConfig []DataSourceServingEndpointsEndpointsAiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"` + RateLimits []DataSourceServingEndpointsEndpointsAiGatewayRateLimits `json:"rate_limits,omitempty"` + UsageTrackingConfig []DataSourceServingEndpointsEndpointsAiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAi21LabsConfig struct { + Ai21LabsApiKey string `json:"ai21labs_api_key,omitempty"` + Ai21LabsApiKeyPlaintext string `json:"ai21labs_api_key_plaintext,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAmazonBedrockConfig struct { + AwsAccessKeyId string `json:"aws_access_key_id,omitempty"` + AwsAccessKeyIdPlaintext string `json:"aws_access_key_id_plaintext,omitempty"` + AwsRegion string `json:"aws_region"` + AwsSecretAccessKey string `json:"aws_secret_access_key,omitempty"` + AwsSecretAccessKeyPlaintext string `json:"aws_secret_access_key_plaintext,omitempty"` + BedrockProvider string `json:"bedrock_provider"` +} + +type 
DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAnthropicConfig struct { + AnthropicApiKey string `json:"anthropic_api_key,omitempty"` + AnthropicApiKeyPlaintext string `json:"anthropic_api_key_plaintext,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelCohereConfig struct { + CohereApiBase string `json:"cohere_api_base,omitempty"` + CohereApiKey string `json:"cohere_api_key,omitempty"` + CohereApiKeyPlaintext string `json:"cohere_api_key_plaintext,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelDatabricksModelServingConfig struct { + DatabricksApiToken string `json:"databricks_api_token,omitempty"` + DatabricksApiTokenPlaintext string `json:"databricks_api_token_plaintext,omitempty"` + DatabricksWorkspaceUrl string `json:"databricks_workspace_url"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig struct { + PrivateKey string `json:"private_key,omitempty"` + PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"` + ProjectId string `json:"project_id,omitempty"` + Region string `json:"region,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelOpenaiConfig struct { + MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"` + MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"` + MicrosoftEntraClientSecretPlaintext string `json:"microsoft_entra_client_secret_plaintext,omitempty"` + MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"` + OpenaiApiBase string `json:"openai_api_base,omitempty"` + OpenaiApiKey string `json:"openai_api_key,omitempty"` + OpenaiApiKeyPlaintext string `json:"openai_api_key_plaintext,omitempty"` + OpenaiApiType string `json:"openai_api_type,omitempty"` + OpenaiApiVersion string `json:"openai_api_version,omitempty"` + OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` + OpenaiOrganization string `json:"openai_organization,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmConfig struct { + PalmApiKey string `json:"palm_api_key,omitempty"` + PalmApiKeyPlaintext string `json:"palm_api_key_plaintext,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel struct { + Name string `json:"name"` + Provider string `json:"provider"` + Task string `json:"task"` + Ai21LabsConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAi21LabsConfig `json:"ai21labs_config,omitempty"` + AmazonBedrockConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` + AnthropicConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` + CohereConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` + DatabricksModelServingConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` + GoogleCloudVertexAiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"` + OpenaiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` + PalmConfig 
[]DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel struct { + Description string `json:"description,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Docs string `json:"docs,omitempty"` + Name string `json:"name,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedEntities struct { + EntityName string `json:"entity_name,omitempty"` + EntityVersion string `json:"entity_version,omitempty"` + Name string `json:"name,omitempty"` + ExternalModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel `json:"external_model,omitempty"` + FoundationModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel `json:"foundation_model,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfigServedModels struct { + ModelName string `json:"model_name,omitempty"` + ModelVersion string `json:"model_version,omitempty"` + Name string `json:"name,omitempty"` +} + +type DataSourceServingEndpointsEndpointsConfig struct { + ServedEntities []DataSourceServingEndpointsEndpointsConfigServedEntities `json:"served_entities,omitempty"` + ServedModels []DataSourceServingEndpointsEndpointsConfigServedModels `json:"served_models,omitempty"` +} + +type DataSourceServingEndpointsEndpointsState struct { + ConfigUpdate string `json:"config_update,omitempty"` + Ready string `json:"ready,omitempty"` +} + +type DataSourceServingEndpointsEndpointsTags struct { + Key string `json:"key"` + Value string `json:"value,omitempty"` +} + +type DataSourceServingEndpointsEndpoints struct { + CreationTimestamp int `json:"creation_timestamp,omitempty"` + Creator string `json:"creator,omitempty"` + Id string `json:"id,omitempty"` + LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` + Name string `json:"name,omitempty"` + Task string `json:"task,omitempty"` + AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` + Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` + State []DataSourceServingEndpointsEndpointsState `json:"state,omitempty"` + Tags []DataSourceServingEndpointsEndpointsTags `json:"tags,omitempty"` +} + +type DataSourceServingEndpoints struct { + Endpoints []DataSourceServingEndpointsEndpoints `json:"endpoints,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index e32609b0f3..3a59bf8c37 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -33,6 +33,8 @@ type DataSources struct { MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` MlflowModels map[string]any `json:"databricks_mlflow_models,omitempty"` MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsNetworkConnectivityConfig map[string]any `json:"databricks_mws_network_connectivity_config,omitempty"` + MwsNetworkConnectivityConfigs map[string]any `json:"databricks_mws_network_connectivity_configs,omitempty"` MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` NodeType map[string]any `json:"databricks_node_type,omitempty"` Notebook map[string]any `json:"databricks_notebook,omitempty"` @@ -40,10 +42,12 @@ type DataSources struct { NotificationDestinations map[string]any `json:"databricks_notification_destinations,omitempty"` Pipelines map[string]any `json:"databricks_pipelines,omitempty"` RegisteredModel 
map[string]any `json:"databricks_registered_model,omitempty"` + RegisteredModelVersions map[string]any `json:"databricks_registered_model_versions,omitempty"` Schema map[string]any `json:"databricks_schema,omitempty"` Schemas map[string]any `json:"databricks_schemas,omitempty"` ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` + ServingEndpoints map[string]any `json:"databricks_serving_endpoints,omitempty"` Share map[string]any `json:"databricks_share,omitempty"` Shares map[string]any `json:"databricks_shares,omitempty"` SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` @@ -92,6 +96,8 @@ func NewDataSources() *DataSources { MlflowModel: make(map[string]any), MlflowModels: make(map[string]any), MwsCredentials: make(map[string]any), + MwsNetworkConnectivityConfig: make(map[string]any), + MwsNetworkConnectivityConfigs: make(map[string]any), MwsWorkspaces: make(map[string]any), NodeType: make(map[string]any), Notebook: make(map[string]any), @@ -99,10 +105,12 @@ func NewDataSources() *DataSources { NotificationDestinations: make(map[string]any), Pipelines: make(map[string]any), RegisteredModel: make(map[string]any), + RegisteredModelVersions: make(map[string]any), Schema: make(map[string]any), Schemas: make(map[string]any), ServicePrincipal: make(map[string]any), ServicePrincipals: make(map[string]any), + ServingEndpoints: make(map[string]any), Share: make(map[string]any), Shares: make(map[string]any), SparkVersion: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_permissions.go b/bundle/internal/tf/schema/resource_permissions.go index 0c3b90ed38..a3d05e6f2c 100644 --- a/bundle/internal/tf/schema/resource_permissions.go +++ b/bundle/internal/tf/schema/resource_permissions.go @@ -10,29 +10,30 @@ type ResourcePermissionsAccessControl struct { } type ResourcePermissions struct { - Authorization string `json:"authorization,omitempty"` - ClusterId string `json:"cluster_id,omitempty"` - ClusterPolicyId string `json:"cluster_policy_id,omitempty"` - DashboardId string `json:"dashboard_id,omitempty"` - DirectoryId string `json:"directory_id,omitempty"` - DirectoryPath string `json:"directory_path,omitempty"` - ExperimentId string `json:"experiment_id,omitempty"` - Id string `json:"id,omitempty"` - InstancePoolId string `json:"instance_pool_id,omitempty"` - JobId string `json:"job_id,omitempty"` - NotebookId string `json:"notebook_id,omitempty"` - NotebookPath string `json:"notebook_path,omitempty"` - ObjectType string `json:"object_type,omitempty"` - PipelineId string `json:"pipeline_id,omitempty"` - RegisteredModelId string `json:"registered_model_id,omitempty"` - RepoId string `json:"repo_id,omitempty"` - RepoPath string `json:"repo_path,omitempty"` - ServingEndpointId string `json:"serving_endpoint_id,omitempty"` - SqlAlertId string `json:"sql_alert_id,omitempty"` - SqlDashboardId string `json:"sql_dashboard_id,omitempty"` - SqlEndpointId string `json:"sql_endpoint_id,omitempty"` - SqlQueryId string `json:"sql_query_id,omitempty"` - WorkspaceFileId string `json:"workspace_file_id,omitempty"` - WorkspaceFilePath string `json:"workspace_file_path,omitempty"` - AccessControl []ResourcePermissionsAccessControl `json:"access_control,omitempty"` + Authorization string `json:"authorization,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + ClusterPolicyId string `json:"cluster_policy_id,omitempty"` + DashboardId string 
`json:"dashboard_id,omitempty"` + DirectoryId string `json:"directory_id,omitempty"` + DirectoryPath string `json:"directory_path,omitempty"` + ExperimentId string `json:"experiment_id,omitempty"` + Id string `json:"id,omitempty"` + InstancePoolId string `json:"instance_pool_id,omitempty"` + JobId string `json:"job_id,omitempty"` + NotebookId string `json:"notebook_id,omitempty"` + NotebookPath string `json:"notebook_path,omitempty"` + ObjectType string `json:"object_type,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + RegisteredModelId string `json:"registered_model_id,omitempty"` + RepoId string `json:"repo_id,omitempty"` + RepoPath string `json:"repo_path,omitempty"` + ServingEndpointId string `json:"serving_endpoint_id,omitempty"` + SqlAlertId string `json:"sql_alert_id,omitempty"` + SqlDashboardId string `json:"sql_dashboard_id,omitempty"` + SqlEndpointId string `json:"sql_endpoint_id,omitempty"` + SqlQueryId string `json:"sql_query_id,omitempty"` + VectorSearchEndpointId string `json:"vector_search_endpoint_id,omitempty"` + WorkspaceFileId string `json:"workspace_file_id,omitempty"` + WorkspaceFilePath string `json:"workspace_file_path,omitempty"` + AccessControl []ResourcePermissionsAccessControl `json:"access_control,omitempty"` } diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 7ccb7a0f01..ebfbd7df7e 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.58.0" +const ProviderVersion = "1.59.0" func NewRoot() *Root { return &Root{ From 7ffe93e4d09bc5d075325a9984ca9ffadc0d4799 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 5 Dec 2024 15:39:26 +0100 Subject: [PATCH 12/63] [Release] Release v0.236.0 (#1966) **New features for Databricks Asset Bundles:** This release adds support for managing Unity Catalog volumes as part of your bundle configuration. Bundles: * Add DABs support for Unity Catalog volumes ([#1762](https://github.com/databricks/cli/pull/1762)). * Support lookup by name of notification destinations ([#1922](https://github.com/databricks/cli/pull/1922)). * Extend "notebook not found" error to warn about missing extension ([#1920](https://github.com/databricks/cli/pull/1920)). * Skip sync warning if no sync paths are defined ([#1926](https://github.com/databricks/cli/pull/1926)). * Add validation for single node clusters ([#1909](https://github.com/databricks/cli/pull/1909)). * Fix segfault in bundle summary command ([#1937](https://github.com/databricks/cli/pull/1937)). * Add the `bundle_uuid` helper function for templates ([#1947](https://github.com/databricks/cli/pull/1947)). * Add default value for `volume_type` for DABs ([#1952](https://github.com/databricks/cli/pull/1952)). * Properly read Git metadata when running inside workspace ([#1945](https://github.com/databricks/cli/pull/1945)). * Upgrade TF provider to 1.59.0 ([#1960](https://github.com/databricks/cli/pull/1960)). Internal: * Breakout variable lookup into separate files and tests ([#1921](https://github.com/databricks/cli/pull/1921)). * Add golangci-lint v1.62.2 ([#1953](https://github.com/databricks/cli/pull/1953)). Dependency updates: * Bump golang.org/x/term from 0.25.0 to 0.26.0 ([#1907](https://github.com/databricks/cli/pull/1907)). * Bump github.com/Masterminds/semver/v3 from 3.3.0 to 3.3.1 ([#1930](https://github.com/databricks/cli/pull/1930)). 
* Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 ([#1932](https://github.com/databricks/cli/pull/1932)). * Bump github.com/databricks/databricks-sdk-go from 0.51.0 to 0.52.0 ([#1931](https://github.com/databricks/cli/pull/1931)). --- CHANGELOG.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2645b2184..56207686a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Version changelog +## [Release] Release v0.236.0 + +**New features for Databricks Asset Bundles:** + +This release adds support for managing Unity Catalog volumes as part of your bundle configuration. + +Bundles: + * Add DABs support for Unity Catalog volumes ([#1762](https://github.com/databricks/cli/pull/1762)). + * Support lookup by name of notification destinations ([#1922](https://github.com/databricks/cli/pull/1922)). + * Extend "notebook not found" error to warn about missing extension ([#1920](https://github.com/databricks/cli/pull/1920)). + * Skip sync warning if no sync paths are defined ([#1926](https://github.com/databricks/cli/pull/1926)). + * Add validation for single node clusters ([#1909](https://github.com/databricks/cli/pull/1909)). + * Fix segfault in bundle summary command ([#1937](https://github.com/databricks/cli/pull/1937)). + * Add the `bundle_uuid` helper function for templates ([#1947](https://github.com/databricks/cli/pull/1947)). + * Add default value for `volume_type` for DABs ([#1952](https://github.com/databricks/cli/pull/1952)). + * Properly read Git metadata when running inside workspace ([#1945](https://github.com/databricks/cli/pull/1945)). + * Upgrade TF provider to 1.59.0 ([#1960](https://github.com/databricks/cli/pull/1960)). + +Internal: + * Breakout variable lookup into separate files and tests ([#1921](https://github.com/databricks/cli/pull/1921)). + * Add golangci-lint v1.62.2 ([#1953](https://github.com/databricks/cli/pull/1953)). + +Dependency updates: + * Bump golang.org/x/term from 0.25.0 to 0.26.0 ([#1907](https://github.com/databricks/cli/pull/1907)). + * Bump github.com/Masterminds/semver/v3 from 3.3.0 to 3.3.1 ([#1930](https://github.com/databricks/cli/pull/1930)). + * Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 ([#1932](https://github.com/databricks/cli/pull/1932)). + * Bump github.com/databricks/databricks-sdk-go from 0.51.0 to 0.52.0 ([#1931](https://github.com/databricks/cli/pull/1931)). ## [Release] Release v0.235.0 **Note:** the `bundle generate` command now uses the `..yml` From 6e754d4f341a0b45e02f638c8a3c4bc4bf5c2077 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 5 Dec 2024 16:37:24 +0100 Subject: [PATCH 13/63] Rewrite 'interface{} -> any' (#1959) ## Changes The `any` alias for `interface{}` has been around since Go 1.18. Now that we're using golangci-lint (#1953), we can lint on it. Existing commits can be updated with: ``` gofmt -w -r 'interface{} -> any' . 
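# For example, the rewrite turns `map[string]interface{}` into `map[string]any`,
# as in the bundle/config/root_test.go hunk included in this commit.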
``` ## Tests n/a --- .golangci.yaml | 2 + bundle/config/root_test.go | 6 +-- bundle/internal/tf/codegen/schema/generate.go | 8 ++-- bundle/internal/tf/schema/root.go | 6 +-- bundle/render/render_text_output.go | 4 +- bundle/run/output/task.go | 2 +- bundle/tests/complex_variables_test.go | 2 +- internal/helpers.go | 2 +- internal/mocks/libs/filer/mock_filer.go | 24 +++++----- libs/cmdgroup/command.go | 2 +- libs/cmdio/render.go | 4 +- libs/dyn/dynassert/assert.go | 44 +++++++++---------- libs/dyn/yamlloader/loader.go | 2 +- libs/errs/aggregate.go | 2 +- libs/jsonschema/from_type_test.go | 8 ++-- 15 files changed, 60 insertions(+), 58 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index ef25962065..28227006a4 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -15,5 +15,7 @@ linters-settings: rewrite-rules: - pattern: 'a[b:len(a)]' replacement: 'a[b:]' + - pattern: 'interface{}' + replacement: 'any' issues: exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 9e61235346..a77f961bd6 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -133,7 +133,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { "complex": { Type: variable.VariableTypeComplex, Description: "complex var", - Default: map[string]interface{}{ + Default: map[string]any{ "key": "value", }, }, @@ -148,7 +148,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { "complex": { Type: "wrong", Description: "wrong", - Default: map[string]interface{}{ + Default: map[string]any{ "key1": "value1", }, }, @@ -164,7 +164,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { assert.Equal(t, "foo2", root.Variables["foo2"].Default) assert.Equal(t, "foo2 var", root.Variables["foo2"].Description) - assert.Equal(t, map[string]interface{}{ + assert.Equal(t, map[string]any{ "key1": "value1", }, root.Variables["complex"].Default) assert.Equal(t, "complex var", root.Variables["complex"].Description) diff --git a/bundle/internal/tf/codegen/schema/generate.go b/bundle/internal/tf/codegen/schema/generate.go index de2d27225d..c0253c9b45 100644 --- a/bundle/internal/tf/codegen/schema/generate.go +++ b/bundle/internal/tf/codegen/schema/generate.go @@ -15,10 +15,10 @@ import ( ) func (s *Schema) writeTerraformBlock(_ context.Context) error { - var body = map[string]interface{}{ - "terraform": map[string]interface{}{ - "required_providers": map[string]interface{}{ - "databricks": map[string]interface{}{ + var body = map[string]any{ + "terraform": map[string]any{ + "required_providers": map[string]any{ + "databricks": map[string]any{ "source": "databricks/databricks", "version": ProviderVersion, }, diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index ebfbd7df7e..2cadb80905 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -25,9 +25,9 @@ const ProviderVersion = "1.59.0" func NewRoot() *Root { return &Root{ - Terraform: map[string]interface{}{ - "required_providers": map[string]interface{}{ - "databricks": map[string]interface{}{ + Terraform: map[string]any{ + "required_providers": map[string]any{ + "databricks": map[string]any{ "source": ProviderSource, "version": ProviderVersion, }, diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 2f7affbf3b..92dacb4488 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go 
@@ -23,10 +23,10 @@ var renderFuncMap = template.FuncMap{ "yellow": color.YellowString, "magenta": color.MagentaString, "cyan": color.CyanString, - "bold": func(format string, a ...interface{}) string { + "bold": func(format string, a ...any) string { return color.New(color.Bold).Sprintf(format, a...) }, - "italic": func(format string, a ...interface{}) string { + "italic": func(format string, a ...any) string { return color.New(color.Italic).Sprintf(format, a...) }, } diff --git a/bundle/run/output/task.go b/bundle/run/output/task.go index 1beac17f3d..402e4d66ab 100644 --- a/bundle/run/output/task.go +++ b/bundle/run/output/task.go @@ -15,7 +15,7 @@ type LogsOutput struct { LogsTruncated bool `json:"logs_truncated"` } -func structToString(val interface{}) (string, error) { +func structToString(val any) (string, error) { b, err := json.MarshalIndent(val, "", " ") if err != nil { return "", err diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go index 7a9a53a76c..e68823c330 100644 --- a/bundle/tests/complex_variables_test.go +++ b/bundle/tests/complex_variables_test.go @@ -104,5 +104,5 @@ func TestComplexVariablesOverrideWithFullSyntax(t *testing.T) { require.Empty(t, diags) complexvar := b.Config.Variables["complexvar"].Value - require.Equal(t, map[string]interface{}{"key1": "1", "key2": "2", "key3": "3"}, complexvar) + require.Equal(t, map[string]any{"key1": "1", "key2": "2", "key3": "3"}, complexvar) } diff --git a/internal/helpers.go b/internal/helpers.go index 7cd2b578bf..596f45537c 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -276,7 +276,7 @@ func (t *cobraTestRunner) Run() (bytes.Buffer, bytes.Buffer, error) { } // Like [require.Eventually] but errors if the underlying command has failed. -func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { +func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...any) { ch := make(chan bool, 1) timer := time.NewTimer(waitFor) diff --git a/internal/mocks/libs/filer/mock_filer.go b/internal/mocks/libs/filer/mock_filer.go index d0d58cbda1..c2e64cad48 100644 --- a/internal/mocks/libs/filer/mock_filer.go +++ b/internal/mocks/libs/filer/mock_filer.go @@ -28,11 +28,11 @@ func (_m *MockFiler) EXPECT() *MockFiler_Expecter { // Delete provides a mock function with given fields: ctx, path, mode func (_m *MockFiler) Delete(ctx context.Context, path string, mode ...filer.DeleteMode) error { - _va := make([]interface{}, len(mode)) + _va := make([]any, len(mode)) for _i := range mode { _va[_i] = mode[_i] } - var _ca []interface{} + var _ca []any _ca = append(_ca, ctx, path) _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
@@ -60,9 +60,9 @@ type MockFiler_Delete_Call struct { // - ctx context.Context // - path string // - mode ...filer.DeleteMode -func (_e *MockFiler_Expecter) Delete(ctx interface{}, path interface{}, mode ...interface{}) *MockFiler_Delete_Call { +func (_e *MockFiler_Expecter) Delete(ctx any, path any, mode ...any) *MockFiler_Delete_Call { return &MockFiler_Delete_Call{Call: _e.mock.On("Delete", - append([]interface{}{ctx, path}, mode...)...)} + append([]any{ctx, path}, mode...)...)} } func (_c *MockFiler_Delete_Call) Run(run func(ctx context.Context, path string, mode ...filer.DeleteMode)) *MockFiler_Delete_Call { @@ -114,7 +114,7 @@ type MockFiler_Mkdir_Call struct { // Mkdir is a helper method to define mock.On call // - ctx context.Context // - path string -func (_e *MockFiler_Expecter) Mkdir(ctx interface{}, path interface{}) *MockFiler_Mkdir_Call { +func (_e *MockFiler_Expecter) Mkdir(ctx any, path any) *MockFiler_Mkdir_Call { return &MockFiler_Mkdir_Call{Call: _e.mock.On("Mkdir", ctx, path)} } @@ -173,7 +173,7 @@ type MockFiler_Read_Call struct { // Read is a helper method to define mock.On call // - ctx context.Context // - path string -func (_e *MockFiler_Expecter) Read(ctx interface{}, path interface{}) *MockFiler_Read_Call { +func (_e *MockFiler_Expecter) Read(ctx any, path any) *MockFiler_Read_Call { return &MockFiler_Read_Call{Call: _e.mock.On("Read", ctx, path)} } @@ -232,7 +232,7 @@ type MockFiler_ReadDir_Call struct { // ReadDir is a helper method to define mock.On call // - ctx context.Context // - path string -func (_e *MockFiler_Expecter) ReadDir(ctx interface{}, path interface{}) *MockFiler_ReadDir_Call { +func (_e *MockFiler_Expecter) ReadDir(ctx any, path any) *MockFiler_ReadDir_Call { return &MockFiler_ReadDir_Call{Call: _e.mock.On("ReadDir", ctx, path)} } @@ -291,7 +291,7 @@ type MockFiler_Stat_Call struct { // Stat is a helper method to define mock.On call // - ctx context.Context // - name string -func (_e *MockFiler_Expecter) Stat(ctx interface{}, name interface{}) *MockFiler_Stat_Call { +func (_e *MockFiler_Expecter) Stat(ctx any, name any) *MockFiler_Stat_Call { return &MockFiler_Stat_Call{Call: _e.mock.On("Stat", ctx, name)} } @@ -314,11 +314,11 @@ func (_c *MockFiler_Stat_Call) RunAndReturn(run func(context.Context, string) (f // Write provides a mock function with given fields: ctx, path, reader, mode func (_m *MockFiler) Write(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error { - _va := make([]interface{}, len(mode)) + _va := make([]any, len(mode)) for _i := range mode { _va[_i] = mode[_i] } - var _ca []interface{} + var _ca []any _ca = append(_ca, ctx, path, reader) _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
@@ -347,9 +347,9 @@ type MockFiler_Write_Call struct { // - path string // - reader io.Reader // - mode ...filer.WriteMode -func (_e *MockFiler_Expecter) Write(ctx interface{}, path interface{}, reader interface{}, mode ...interface{}) *MockFiler_Write_Call { +func (_e *MockFiler_Expecter) Write(ctx any, path any, reader any, mode ...any) *MockFiler_Write_Call { return &MockFiler_Write_Call{Call: _e.mock.On("Write", - append([]interface{}{ctx, path, reader}, mode...)...)} + append([]any{ctx, path, reader}, mode...)...)} } func (_c *MockFiler_Write_Call) Run(run func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode)) *MockFiler_Write_Call { diff --git a/libs/cmdgroup/command.go b/libs/cmdgroup/command.go index a2a776935d..388dbe62b3 100644 --- a/libs/cmdgroup/command.go +++ b/libs/cmdgroup/command.go @@ -100,7 +100,7 @@ func trimRightSpace(s string) string { } // tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { +func tmpl(w io.Writer, text string, data any) error { t := template.New("top") t.Funcs(templateFuncs) template.Must(t.Parse(text)) diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index 72d95978ad..c68ddca0df 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -297,10 +297,10 @@ var renderFuncMap = template.FuncMap{ "yellow": color.YellowString, "magenta": color.MagentaString, "cyan": color.CyanString, - "bold": func(format string, a ...interface{}) string { + "bold": func(format string, a ...any) string { return color.New(color.Bold).Sprintf(format, a...) }, - "italic": func(format string, a ...interface{}) string { + "italic": func(format string, a ...any) string { return color.New(color.Italic).Sprintf(format, a...) }, "replace": strings.ReplaceAll, diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go index f667b08c72..ebdba1214a 100644 --- a/libs/dyn/dynassert/assert.go +++ b/libs/dyn/dynassert/assert.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" ) -func Equal(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { +func Equal(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { ev, eok := expected.(dyn.Value) av, aok := actual.(dyn.Value) if eok && aok && ev.IsValid() && av.IsValid() { @@ -32,86 +32,86 @@ func Equal(t assert.TestingT, expected interface{}, actual interface{}, msgAndAr return assert.Equal(t, expected, actual, msgAndArgs...) } -func EqualValues(t assert.TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { +func EqualValues(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { return assert.EqualValues(t, expected, actual, msgAndArgs...) } -func NotEqual(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { +func NotEqual(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { return assert.NotEqual(t, expected, actual, msgAndArgs...) } -func Len(t assert.TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { +func Len(t assert.TestingT, object any, length int, msgAndArgs ...any) bool { return assert.Len(t, object, length, msgAndArgs...) } -func Empty(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { +func Empty(t assert.TestingT, object any, msgAndArgs ...any) bool { return assert.Empty(t, object, msgAndArgs...) 
} -func Nil(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { +func Nil(t assert.TestingT, object any, msgAndArgs ...any) bool { return assert.Nil(t, object, msgAndArgs...) } -func NotNil(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { +func NotNil(t assert.TestingT, object any, msgAndArgs ...any) bool { return assert.NotNil(t, object, msgAndArgs...) } -func NoError(t assert.TestingT, err error, msgAndArgs ...interface{}) bool { +func NoError(t assert.TestingT, err error, msgAndArgs ...any) bool { return assert.NoError(t, err, msgAndArgs...) } -func Error(t assert.TestingT, err error, msgAndArgs ...interface{}) bool { +func Error(t assert.TestingT, err error, msgAndArgs ...any) bool { return assert.Error(t, err, msgAndArgs...) } -func EqualError(t assert.TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { +func EqualError(t assert.TestingT, theError error, errString string, msgAndArgs ...any) bool { return assert.EqualError(t, theError, errString, msgAndArgs...) } -func ErrorContains(t assert.TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { +func ErrorContains(t assert.TestingT, theError error, contains string, msgAndArgs ...any) bool { return assert.ErrorContains(t, theError, contains, msgAndArgs...) } -func ErrorIs(t assert.TestingT, theError, target error, msgAndArgs ...interface{}) bool { +func ErrorIs(t assert.TestingT, theError, target error, msgAndArgs ...any) bool { return assert.ErrorIs(t, theError, target, msgAndArgs...) } -func True(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool { +func True(t assert.TestingT, value bool, msgAndArgs ...any) bool { return assert.True(t, value, msgAndArgs...) } -func False(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool { +func False(t assert.TestingT, value bool, msgAndArgs ...any) bool { return assert.False(t, value, msgAndArgs...) } -func Contains(t assert.TestingT, list interface{}, element interface{}, msgAndArgs ...interface{}) bool { +func Contains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { return assert.Contains(t, list, element, msgAndArgs...) } -func NotContains(t assert.TestingT, list interface{}, element interface{}, msgAndArgs ...interface{}) bool { +func NotContains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { return assert.NotContains(t, list, element, msgAndArgs...) } -func ElementsMatch(t assert.TestingT, listA, listB interface{}, msgAndArgs ...interface{}) bool { +func ElementsMatch(t assert.TestingT, listA, listB any, msgAndArgs ...any) bool { return assert.ElementsMatch(t, listA, listB, msgAndArgs...) } -func Panics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { +func Panics(t assert.TestingT, f func(), msgAndArgs ...any) bool { return assert.Panics(t, f, msgAndArgs...) } -func PanicsWithValue(t assert.TestingT, expected interface{}, f func(), msgAndArgs ...interface{}) bool { +func PanicsWithValue(t assert.TestingT, expected any, f func(), msgAndArgs ...any) bool { return assert.PanicsWithValue(t, expected, f, msgAndArgs...) } -func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs ...interface{}) bool { +func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs ...any) bool { return assert.PanicsWithError(t, errString, f, msgAndArgs...) 
} -func NotPanics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { +func NotPanics(t assert.TestingT, f func(), msgAndArgs ...any) bool { return assert.NotPanics(t, f, msgAndArgs...) } -func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { +func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...any) bool { return assert.JSONEq(t, expected, actual, msgAndArgs...) } diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index b4aaf0a741..a77ee0744a 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -14,7 +14,7 @@ type loader struct { path string } -func errorf(loc dyn.Location, format string, args ...interface{}) error { +func errorf(loc dyn.Location, format string, args ...any) error { return fmt.Errorf("yaml (%s): %s", loc, fmt.Sprintf(format, args...)) } diff --git a/libs/errs/aggregate.go b/libs/errs/aggregate.go index b6bab0ef77..0883649486 100644 --- a/libs/errs/aggregate.go +++ b/libs/errs/aggregate.go @@ -59,7 +59,7 @@ func (ec errorChain) Unwrap() error { return ec[1:] } -func (ec errorChain) As(target interface{}) bool { +func (ec errorChain) As(target any) bool { return errors.As(ec[0], target) } diff --git a/libs/jsonschema/from_type_test.go b/libs/jsonschema/from_type_test.go index 174ffad889..0ddb1011a8 100644 --- a/libs/jsonschema/from_type_test.go +++ b/libs/jsonschema/from_type_test.go @@ -11,10 +11,10 @@ import ( func TestFromTypeBasic(t *testing.T) { type myStruct struct { - S string `json:"s"` - I *int `json:"i,omitempty"` - V interface{} `json:"v,omitempty"` - TriplePointer ***int `json:"triple_pointer,omitempty"` + S string `json:"s"` + I *int `json:"i,omitempty"` + V any `json:"v,omitempty"` + TriplePointer ***int `json:"triple_pointer,omitempty"` // These fields should be ignored in the resulting schema. NotAnnotated string From 62bc59a3a6f8cab1393028dae47785cdf5ba3bd9 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 5 Dec 2024 19:20:46 +0100 Subject: [PATCH 14/63] Fail filer integration test if error is nil (#1967) ## Changes I found a race where this error is nil and the subsequent assert panics on the error being nil. This change makes the test robust against this to fail immediately if the error is different from the one we expect. ## Tests n/a --- internal/filer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/filer_test.go b/internal/filer_test.go index a2760d9113..4e6a156711 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -457,7 +457,7 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { // Assert uploading a second time fails due to overwrite mode missing err = f.Write(ctx, tc.name, strings.NewReader(tc.content2)) - assert.ErrorIs(t, err, fs.ErrExist) + require.ErrorIs(t, err, fs.ErrExist) assert.Regexp(t, regexp.MustCompile(`file already exists: .*/`+tc.nameWithoutExt+`$`), err.Error()) // Try uploading the notebook again with overwrite flag. This time it should succeed. From 4c1042132bb3fa0becb4473c4f6bcf8abfc8d177 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 5 Dec 2024 20:11:49 +0100 Subject: [PATCH 15/63] Enable linter bodyclose (#1968) ## Changes Enable linter '[bodyclose](https://github.com/timakin/bodyclose)' and fix 2 cases in tests. ## Tests Existing tests. 
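For reference, a minimal sketch of the pattern this linter enforces (the package and `fetch` helper are illustrative only, not part of this change); the actual fix adds `defer resp.Body.Close()` to the tests below:

```go
package example

import (
	"io"
	"net/http"
)

// fetch sketches what bodyclose enforces: every *http.Response body
// must be closed so the underlying connection is released. Omitting
// the deferred Close below is what the linter reports; the tests in
// this commit add the same defer after their http.Get calls.
func fetch(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
```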
--- .golangci.yaml | 1 + libs/auth/oauth_test.go | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index 28227006a4..82e4d98484 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,6 +1,7 @@ linters: disable-all: true enable: + - bodyclose # errcheck and govet are part of default setup and should be included but give too many errors now # once errors are fixed, they should be enabled here: #- errcheck diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go index fdf0d04bfb..837ff4fee9 100644 --- a/libs/auth/oauth_test.go +++ b/libs/auth/oauth_test.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/databricks/databricks-sdk-go/qa" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/oauth2" ) @@ -182,7 +183,8 @@ func TestChallenge(t *testing.T) { state := <-browserOpened resp, err := http.Get(fmt.Sprintf("http://%s?code=__THIS__&state=%s", appRedirectAddr, state)) - assert.NoError(t, err) + require.NoError(t, err) + defer resp.Body.Close() assert.Equal(t, 200, resp.StatusCode) err = <-errc @@ -221,7 +223,8 @@ func TestChallengeFailed(t *testing.T) { resp, err := http.Get(fmt.Sprintf( "http://%s?error=access_denied&error_description=Policy%%20evaluation%%20failed%%20for%%20this%%20request", appRedirectAddr)) - assert.NoError(t, err) + require.NoError(t, err) + defer resp.Body.Close() assert.Equal(t, 400, resp.StatusCode) err = <-errc From 227a13556b32caf890e132fc3cb59efad710f821 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 9 Dec 2024 10:52:08 +0100 Subject: [PATCH 16/63] Add build rule for running integration tests (#1965) ## Changes Make it possible to change what/how we run our integration tests from within this repository. ## Tests Integration tests all pass when run with this command. --- Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 13787fdda1..bc1c42cfaa 100644 --- a/Makefile +++ b/Makefile @@ -36,5 +36,7 @@ vendor: @echo "✓ Filling vendor folder with library code ..." @go mod vendor -.PHONY: build vendor coverage test lint fmt +integration: + gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./internal/..." -- -run "TestAcc.*" -parallel 4 -timeout=2h +.PHONY: fmt lint lintfix test testonly coverage build snapshot vendor integration From e0d54f0bb6a017f231bda41a37583ef09030a1ff Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 9 Dec 2024 18:04:27 +0530 Subject: [PATCH 17/63] Do not run mlops-stacks integration test in parallel (#1982) ## Changes This test changes the cwd using the `testutil.Chdir` function. This causes flakiness with other integration tests, like `TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles`, which rely on the cwd being configured correctly to read test fixtures. The `t.Setenv` call in `testutil.Chdir` ensures that it is not run from a test whose upstream is executing in parallel. --- internal/init_test.go | 1 - internal/testutil/env.go | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/init_test.go b/internal/init_test.go index 25bfc19dac..839d8185d0 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -39,7 +39,6 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { // make changes that can break the MLOps Stacks DAB. 
In which case we should // skip this test until the MLOps Stacks DAB is updated to work again. func TestAccBundleInitOnMlopsStacks(t *testing.T) { - t.Parallel() env := testutil.GetCloud(t).String() tmpDir1 := t.TempDir() diff --git a/internal/testutil/env.go b/internal/testutil/env.go index e1973ba82a..7a7436f47a 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -51,6 +51,10 @@ func GetEnvOrSkipTest(t *testing.T, name string) string { // Changes into specified directory for the duration of the test. // Returns the current working directory. func Chdir(t *testing.T, dir string) string { + // Prevent parallel execution when changing the working directory. + // t.Setenv automatically fails if t.Parallel is set. + t.Setenv("DO_NOT_RUN_IN_PARALLEL", "true") + wd, err := os.Getwd() require.NoError(t, err) From 1b2be1b2cb4b7909df2a8ad4cb6a0f43e8fcf0c6 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Mon, 9 Dec 2024 13:56:41 +0100 Subject: [PATCH 18/63] Add error checking in tests and enable errcheck there (#1980) ## Changes Fix all errcheck-found issues in tests and test helpers. Mostly this done by adding require.NoError(t, err), sometimes panic() where t object is not available). Initial change is obtained with aider+claude, then manually reviewed and cleaned up. ## Tests Existing tests. --- .golangci.yaml | 8 ++-- bundle/config/mutator/initialize_urls_test.go | 6 ++- .../resolve_resource_references_test.go | 3 +- bundle/config/mutator/translate_paths_test.go | 3 +- bundle/config/resources_test.go | 6 ++- bundle/config/root_test.go | 4 +- bundle/config/workspace_test.go | 27 +++++++----- bundle/internal/bundletest/location.go | 5 ++- bundle/render/render_text_output_test.go | 3 +- bundle/run/job_test.go | 6 ++- cmd/auth/describe_test.go | 33 +++++++++------ cmd/bundle/generate/dashboard_test.go | 20 +++++---- cmd/bundle/generate/generate_test.go | 20 ++++----- cmd/labs/github/ref_test.go | 7 +++- cmd/labs/github/releases_test.go | 4 +- cmd/labs/github/repositories_test.go | 4 +- cmd/labs/project/installer_test.go | 41 +++++++++---------- cmd/root/bundle_test.go | 22 ++++++---- cmd/root/progress_logger_test.go | 18 ++++---- internal/bundle/bind_resource_test.go | 3 +- internal/bundle/deploy_test.go | 9 ++-- internal/bundle/deployment_state_test.go | 2 +- internal/bundle/environments_test.go | 3 +- internal/bundle/python_wheel_test.go | 3 +- internal/bundle/spark_jar_test.go | 3 +- internal/helpers.go | 11 +++-- internal/init_test.go | 3 +- internal/locker_test.go | 3 +- internal/tags_test.go | 6 ++- libs/cmdgroup/command_test.go | 3 +- libs/dyn/convert/normalize_test.go | 24 ++++++----- libs/dyn/jsonsaver/marshal_test.go | 13 +++--- libs/dyn/merge/override_test.go | 6 ++- libs/exec/exec_test.go | 7 +++- libs/flags/json_flag_test.go | 3 +- libs/git/fileset_test.go | 6 ++- libs/git/reference_test.go | 9 ++-- libs/git/repository_test.go | 3 +- libs/jsonschema/from_type_test.go | 3 +- libs/process/stub_test.go | 10 ++++- libs/python/interpreters_unix_test.go | 3 +- 41 files changed, 233 insertions(+), 143 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index 82e4d98484..602f126303 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -2,9 +2,7 @@ linters: disable-all: true enable: - bodyclose - # errcheck and govet are part of default setup and should be included but give too many errors now - # once errors are fixed, they should be enabled here: - #- errcheck + - errcheck - gosimple #- govet - ineffassign @@ -20,3 +18,7 @@ linters-settings: replacement: 'any' 
issues: exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ + exclude-rules: + - path-except: (_test\.go|internal) + linters: + - errcheck diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go index ec4e790c4b..f07a7deb38 100644 --- a/bundle/config/mutator/initialize_urls_test.go +++ b/bundle/config/mutator/initialize_urls_test.go @@ -110,7 +110,8 @@ func TestInitializeURLs(t *testing.T) { "dashboard1": "https://mycompany.databricks.com/dashboardsv3/01ef8d56871e1d50ae30ce7375e42478/published?o=123456", } - initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + err := initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/") + require.NoError(t, err) for _, group := range b.Config.Resources.AllResources() { for key, r := range group.Resources { @@ -133,7 +134,8 @@ func TestInitializeURLsWithoutOrgId(t *testing.T) { }, } - initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + err := initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/") + require.NoError(t, err) require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL) } diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index ee2f0e2ea9..624e337c7d 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -108,7 +108,8 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) - b.Config.Variables["my-cluster-id"].Set("random value") + err := b.Config.Variables["my-cluster-id"].Set("random value") + require.NoError(t, err) diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index bf6ba15d8b..8442ff6cf5 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -28,7 +28,8 @@ import ( func touchNotebookFile(t *testing.T, path string) { f, err := os.Create(path) require.NoError(t, err) - f.WriteString("# Databricks notebook source\n") + _, err = f.WriteString("# Databricks notebook source\n") + require.NoError(t, err) f.Close() } diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 9ae73b22a0..2d05acf3ee 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -49,7 +49,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { // Eg: resource.Job implements MarshalJSON v := reflect.Zero(vt.Elem()).Interface() assert.NotPanics(t, func() { - json.Marshal(v) + _, err := json.Marshal(v) + assert.NoError(t, err) }, "Resource %s does not have a custom marshaller", field.Name) // Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller @@ -58,7 +59,8 @@ func TestCustomMarshallerIsImplemented(t *testing.T) { // Eg: *resource.Job implements UnmarshalJSON v = reflect.New(vt.Elem()).Interface() assert.NotPanics(t, func() { - json.Unmarshal([]byte("{}"), v) + err := json.Unmarshal([]byte("{}"), v) + assert.NoError(t, err) }, "Resource %s does not have a custom unmarshaller", field.Name) } } diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 
a77f961bd6..fc1c148c32 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -100,7 +100,7 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) { }, }, } - root.initializeDynamicValue() + require.NoError(t, root.initializeDynamicValue()) require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } @@ -156,7 +156,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { }, }, } - root.initializeDynamicValue() + require.NoError(t, root.initializeDynamicValue()) require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, "bar", root.Variables["foo"].Default) assert.Equal(t, "foo var", root.Variables["foo"].Description) diff --git a/bundle/config/workspace_test.go b/bundle/config/workspace_test.go index 3ef9632530..384cc0a2c1 100644 --- a/bundle/config/workspace_test.go +++ b/bundle/config/workspace_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func setupWorkspaceTest(t *testing.T) string { @@ -42,11 +43,12 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "default", Host: "https://abc.cloud.databricks.com", Token: "123", }) + require.NoError(t, err) client, err := w.Client() assert.NoError(t, err) @@ -57,12 +59,13 @@ func TestWorkspaceResolveProfileFromHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "custom", Host: "https://abc.cloud.databricks.com", Token: "123", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) client, err := w.Client() @@ -90,12 +93,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "abc", Host: "https://abc.cloud.databricks.com", }) + require.NoError(t, err) - _, err := w.Client() + _, err = w.Client() assert.NoError(t, err) }) @@ -103,12 +107,13 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ Profile: "abc", Host: "https://def.cloud.databricks.com", }) + require.NoError(t, err) - _, err := w.Client() + _, err = w.Client() assert.ErrorContains(t, err, "config host mismatch") }) @@ -116,14 +121,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. 
- databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "abc", Host: "https://abc.cloud.databricks.com", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) - _, err := w.Client() + _, err = w.Client() assert.NoError(t, err) }) @@ -131,14 +137,15 @@ func TestWorkspaceVerifyProfileForHost(t *testing.T) { home := setupWorkspaceTest(t) // This works if there is a config file with a matching profile. - databrickscfg.SaveToProfile(context.Background(), &config.Config{ + err := databrickscfg.SaveToProfile(context.Background(), &config.Config{ ConfigFile: filepath.Join(home, "customcfg"), Profile: "abc", Host: "https://def.cloud.databricks.com", }) + require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", filepath.Join(home, "customcfg")) - _, err := w.Client() + _, err = w.Client() assert.ErrorContains(t, err, "config host mismatch") }) } diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go index 2ffd621bf5..5dcd9d78f4 100644 --- a/bundle/internal/bundletest/location.go +++ b/bundle/internal/bundletest/location.go @@ -10,7 +10,7 @@ import ( // with the path it is loaded from. func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) { start := dyn.MustPathFromString(prefix) - b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { // If the path has the given prefix, set the location. if p.HasPrefix(start) { @@ -27,4 +27,7 @@ func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) { return v, dyn.ErrSkip }) }) + if err != nil { + panic("Mutate() failed: " + err.Error()) + } } diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index 135d79daee..2b7f0e8254 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -489,7 +489,8 @@ func TestRenderSummaryTemplate_nilBundle(t *testing.T) { err := renderSummaryHeaderTemplate(writer, nil) require.NoError(t, err) - io.WriteString(writer, buildTrailer(nil)) + _, err = io.WriteString(writer, buildTrailer(nil)) + require.NoError(t, err) assert.Equal(t, "Validation OK!\n", writer.String()) } diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index 369c546aa6..2461f61bde 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -42,7 +42,8 @@ func TestConvertPythonParams(t *testing.T) { opts := &Options{ Job: JobOptions{}, } - runner.convertPythonParams(opts) + err := runner.convertPythonParams(opts) + require.NoError(t, err) require.NotContains(t, opts.Job.notebookParams, "__python_params") opts = &Options{ @@ -50,7 +51,8 @@ func TestConvertPythonParams(t *testing.T) { pythonParams: []string{"param1", "param2", "param3"}, }, } - runner.convertPythonParams(opts) + err = runner.convertPythonParams(opts) + require.NoError(t, err) require.Contains(t, opts.Job.notebookParams, "__python_params") require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`) } diff --git a/cmd/auth/describe_test.go b/cmd/auth/describe_test.go index d0260abc7b..7f5f900d4f 100644 --- a/cmd/auth/describe_test.go +++ b/cmd/auth/describe_test.go @@ -31,7 +31,8 @@ func TestGetWorkspaceAuthStatus(t *testing.T) { cmd.Flags().String("host", "", "") 
cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -39,14 +40,16 @@ func TestGetWorkspaceAuthStatus(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err := config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", }) + require.NoError(t, err) return cfg, false, nil }) require.NoError(t, err) @@ -81,7 +84,8 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -89,10 +93,11 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", @@ -128,7 +133,8 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -136,10 +142,11 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { } m.WorkspaceClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "host": "https://test.com", "token": "test-token", "auth_type": "azure-cli", @@ -171,7 +178,8 @@ func TestGetAccountAuthStatus(t *testing.T) { cmd.Flags().String("host", "", "") cmd.Flags().String("profile", "", "") - cmd.Flag("profile").Value.Set("my-profile") + err := cmd.Flag("profile").Value.Set("my-profile") + require.NoError(t, err) cmd.Flag("profile").Changed = true cfg := &config.Config{ @@ -179,13 +187,14 @@ func TestGetAccountAuthStatus(t *testing.T) { } m.AccountClient.Config = cfg t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") - config.ConfigAttributes.Configure(cfg) + err = config.ConfigAttributes.Configure(cfg) + require.NoError(t, err) wsApi := m.GetMockWorkspacesAPI() wsApi.EXPECT().List(mock.Anything).Return(nil, nil) status, err := getAuthStatus(cmd, []string{}, 
showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { - config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + err = config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ "account_id": "test-account-id", "username": "test-user", "host": "https://test.com", diff --git a/cmd/bundle/generate/dashboard_test.go b/cmd/bundle/generate/dashboard_test.go index 6741e6a39e..f1161950b6 100644 --- a/cmd/bundle/generate/dashboard_test.go +++ b/cmd/bundle/generate/dashboard_test.go @@ -67,9 +67,10 @@ func TestDashboard_ExistingID_Nominal(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-id").Value.Set("f00dcafe") + err := cmd.Flag("existing-id").Value.Set("f00dcafe") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.NoError(t, err) // Assert the contents of the generated configuration @@ -105,9 +106,10 @@ func TestDashboard_ExistingID_NotFound(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-id").Value.Set("f00dcafe") + err := cmd.Flag("existing-id").Value.Set("f00dcafe") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.Error(t, err) } @@ -137,9 +139,10 @@ func TestDashboard_ExistingPath_Nominal(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + err := cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.NoError(t, err) // Assert the contents of the generated configuration @@ -175,8 +178,9 @@ func TestDashboard_ExistingPath_NotFound(t *testing.T) { ctx := bundle.Context(context.Background(), b) cmd := NewGenerateDashboardCommand() cmd.SetContext(ctx) - cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + err := cmd.Flag("existing-path").Value.Set("/path/to/dashboard") + require.NoError(t, err) - err := cmd.RunE(cmd, []string{}) + err = cmd.RunE(cmd, []string{}) require.Error(t, err) } diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index bc1549e64b..33f1661586 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -78,13 +78,13 @@ func TestGeneratePipelineCommand(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/file.py", mock.Anything).Return(pyContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-pipeline-id").Value.Set("test-pipeline") + require.NoError(t, cmd.Flag("existing-pipeline-id").Value.Set("test-pipeline")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_pipeline", "") @@ -174,13 +174,13 @@ func TestGenerateJobCommand(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-job-id").Value.Set("1234") + require.NoError(t, 
cmd.Flag("existing-job-id").Value.Set("1234")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_job", "") @@ -279,13 +279,13 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) { workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) cmd.SetContext(bundle.Context(context.Background(), b)) - cmd.Flag("existing-job-id").Value.Set("1234") + require.NoError(t, cmd.Flag("existing-job-id").Value.Set("1234")) configDir := filepath.Join(root, "resources") - cmd.Flag("config-dir").Value.Set(configDir) + require.NoError(t, cmd.Flag("config-dir").Value.Set(configDir)) srcDir := filepath.Join(root, "src") - cmd.Flag("source-dir").Value.Set(srcDir) + require.NoError(t, cmd.Flag("source-dir").Value.Set(srcDir)) var key string cmd.Flags().StringVar(&key, "key", "test_job", "") @@ -295,7 +295,7 @@ func TestGenerateJobCommandOldFileRename(t *testing.T) { touchEmptyFile(t, oldFilename) // Having an existing files require --force flag to regenerate them - cmd.Flag("force").Value.Set("true") + require.NoError(t, cmd.Flag("force").Value.Set("true")) err := cmd.RunE(cmd, []string{}) require.NoError(t, err) diff --git a/cmd/labs/github/ref_test.go b/cmd/labs/github/ref_test.go index 2a9ffcc5bd..cc27d1e81c 100644 --- a/cmd/labs/github/ref_test.go +++ b/cmd/labs/github/ref_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFileFromRef(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/ucx/main/README.md" { - w.Write([]byte(`abc`)) + _, err := w.Write([]byte(`abc`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) @@ -31,7 +33,8 @@ func TestFileFromRef(t *testing.T) { func TestDownloadZipball(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/databrickslabs/ucx/zipball/main" { - w.Write([]byte(`abc`)) + _, err := w.Write([]byte(`abc`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/github/releases_test.go b/cmd/labs/github/releases_test.go index ea24a1e2ee..9c3d7a9598 100644 --- a/cmd/labs/github/releases_test.go +++ b/cmd/labs/github/releases_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLoadsReleasesForCLI(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/repos/databricks/cli/releases" { - w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + _, err := w.Write([]byte(`[{"tag_name": "v1.2.3"}, {"tag_name": "v1.2.2"}]`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/github/repositories_test.go b/cmd/labs/github/repositories_test.go index 4f2fef3e17..412b440bcf 100644 --- a/cmd/labs/github/repositories_test.go +++ b/cmd/labs/github/repositories_test.go @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestRepositories(t *testing.T) { server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/users/databrickslabs/repos" { - w.Write([]byte(`[{"name": "x"}]`)) + _, err := w.Write([]byte(`[{"name": "x"}]`)) + require.NoError(t, err) return } t.Logf("Requested: %s", r.URL.Path) diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 1e45fafe68..c62d546b76 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -117,10 +117,10 @@ func installerContext(t *testing.T, server *httptest.Server) context.Context { func respondWithJSON(t *testing.T, w http.ResponseWriter, v any) { raw, err := json.Marshal(v) - if err != nil { - require.NoError(t, err) - } - w.Write(raw) + require.NoError(t, err) + + _, err = w.Write(raw) + require.NoError(t, err) } type fileTree struct { @@ -167,19 +167,17 @@ func TestInstallerWorksForReleases(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/blueprint/v0.3.15/labs.yml" { raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") - if err != nil { - panic(err) - } - w.Write(raw) + require.NoError(t, err) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.3.15" { raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") - if err != nil { - panic(err) - } + require.NoError(t, err) w.Header().Add("Content-Type", "application/octet-stream") - w.Write(raw) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/api/2.1/clusters/get" { @@ -314,7 +312,10 @@ func TestInstallerWorksForDevelopment(t *testing.T) { defer server.Close() wd, _ := os.Getwd() - defer os.Chdir(wd) + defer func() { + err := os.Chdir(wd) + require.NoError(t, err) + }() devDir := copyTestdata(t, "testdata/installed-in-home/.databricks/labs/blueprint/lib") err := os.Chdir(devDir) @@ -373,19 +374,17 @@ func TestUpgraderWorksForReleases(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/databrickslabs/blueprint/v0.4.0/labs.yml" { raw, err := os.ReadFile("testdata/installed-in-home/.databricks/labs/blueprint/lib/labs.yml") - if err != nil { - panic(err) - } - w.Write(raw) + require.NoError(t, err) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/repos/databrickslabs/blueprint/zipball/v0.4.0" { raw, err := zipballFromFolder("testdata/installed-in-home/.databricks/labs/blueprint/lib") - if err != nil { - panic(err) - } + require.NoError(t, err) w.Header().Add("Content-Type", "application/octet-stream") - w.Write(raw) + _, err = w.Write(raw) + require.NoError(t, err) return } if r.URL.Path == "/api/2.1/clusters/get" { diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 3018842870..72e0bef556 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -99,10 +99,11 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("NOEXIST") + err := cmd.Flag("profile").Value.Set("NOEXIST") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://x.com") - _, err := b.InitializeWorkspaceClient() + _, err = b.InitializeWorkspaceClient() assert.ErrorContains(t, err, "has no NOEXIST profile configured") } @@ -110,10 +111,11 @@ func 
TestBundleConfigureWithMismatchedProfile(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://x.com") - _, err := b.InitializeWorkspaceClient() + _, err = b.InitializeWorkspaceClient() assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } @@ -121,7 +123,8 @@ func TestBundleConfigureWithCorrectProfile(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://a.com") client, err := b.InitializeWorkspaceClient() @@ -146,7 +149,8 @@ func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) { t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-1") + err := cmd.Flag("profile").Value.Set("PROFILE-1") + require.NoError(t, err) b := setupWithHost(t, cmd, "https://a.com") client, err := b.InitializeWorkspaceClient() @@ -174,7 +178,8 @@ func TestBundleConfigureProfileFlag(t *testing.T) { // The --profile flag takes precedence over the profile in the databricks.yml file cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-2") + err := cmd.Flag("profile").Value.Set("PROFILE-2") + require.NoError(t, err) b := setupWithProfile(t, cmd, "PROFILE-1") client, err := b.InitializeWorkspaceClient() @@ -205,7 +210,8 @@ func TestBundleConfigureProfileFlagAndEnvVariable(t *testing.T) { // The --profile flag takes precedence over the DATABRICKS_CONFIG_PROFILE environment variable t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) - cmd.Flag("profile").Value.Set("PROFILE-2") + err := cmd.Flag("profile").Value.Set("PROFILE-2") + require.NoError(t, err) b := setupWithProfile(t, cmd, "PROFILE-1") client, err := b.InitializeWorkspaceClient() diff --git a/cmd/root/progress_logger_test.go b/cmd/root/progress_logger_test.go index 9dceee8d58..42ba1bdc6c 100644 --- a/cmd/root/progress_logger_test.go +++ b/cmd/root/progress_logger_test.go @@ -33,27 +33,27 @@ func initializeProgressLoggerTest(t *testing.T) ( func TestInitializeErrorOnIncompatibleConfig(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("info") - logFile.Set("stderr") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("info")) + require.NoError(t, logFile.Set("stderr")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.ErrorContains(t, err, "inplace progress logging cannot be used when log-file is stderr") } func TestNoErrorOnDisabledLogLevel(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("disabled") - logFile.Set("stderr") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("disabled")) + require.NoError(t, logFile.Set("stderr")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.NoError(t, err) } func TestNoErrorOnNonStderrLogFile(t *testing.T) { plt, logLevel, logFile, progressFormat := initializeProgressLoggerTest(t) - logLevel.Set("info") - logFile.Set("stdout") - progressFormat.Set("inplace") + require.NoError(t, logLevel.Set("info")) + 
require.NoError(t, logFile.Set("stdout")) + require.NoError(t, progressFormat.Set("inplace")) _, err := plt.progressLoggerFlag.initializeContext(context.Background()) assert.NoError(t, err) } diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go index 8cc5da5364..bf80b36691 100644 --- a/internal/bundle/bind_resource_test.go +++ b/internal/bundle/bind_resource_test.go @@ -99,7 +99,8 @@ func TestAccAbortBind(t *testing.T) { jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) // Bind should fail because prompting is not possible. diff --git a/internal/bundle/deploy_test.go b/internal/bundle/deploy_test.go index 759e85de5d..f4c6a440c3 100644 --- a/internal/bundle/deploy_test.go +++ b/internal/bundle/deploy_test.go @@ -33,7 +33,8 @@ func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.Worksp require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) // Assert the schema is created @@ -190,7 +191,8 @@ func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) // Assert the pipeline is created @@ -258,7 +260,8 @@ func TestAccDeployUcVolume(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) // Assert the volume is created successfully diff --git a/internal/bundle/deployment_state_test.go b/internal/bundle/deployment_state_test.go index 25f36d4a26..0ab99aeb38 100644 --- a/internal/bundle/deployment_state_test.go +++ b/internal/bundle/deployment_state_test.go @@ -46,7 +46,7 @@ func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + require.NoError(t, destroyBundle(t, ctx, bundleRoot)) }) remoteRoot := getBundleRemoteRootPath(w, t, uniqueId) diff --git a/internal/bundle/environments_test.go b/internal/bundle/environments_test.go index 5cffe8857d..7c5b6db893 100644 --- a/internal/bundle/environments_test.go +++ b/internal/bundle/environments_test.go @@ -22,7 +22,8 @@ func TestAccPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) out, err := runResource(t, ctx, bundleRoot, "some_other_job") diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index 846f141772..44f6200c1b 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -29,7 +29,8 @@ func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) out, err := runResource(t, ctx, bundleRoot, "some_other_job") diff --git a/internal/bundle/spark_jar_test.go b/internal/bundle/spark_jar_test.go index 4b469617c5..cffcbb755a 100644 --- a/internal/bundle/spark_jar_test.go +++ b/internal/bundle/spark_jar_test.go @@ -31,7 +31,8 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion strin 
require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, ctx, bundleRoot) + err := destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) }) out, err := runResource(t, ctx, bundleRoot, "jar_job") diff --git a/internal/helpers.go b/internal/helpers.go index 596f45537c..a76ea92b5b 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -176,7 +176,10 @@ func (t *cobraTestRunner) SendText(text string) { if t.stdinW == nil { panic("no standard input configured") } - t.stdinW.Write([]byte(text + "\n")) + _, err := t.stdinW.Write([]byte(text + "\n")) + if err != nil { + panic("Failed to to write to t.stdinW") + } } func (t *cobraTestRunner) RunBackground() { @@ -496,9 +499,10 @@ func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { }) require.NoError(t, err) t.Cleanup(func() { - w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ + err := w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ FullName: schema.FullName, }) + require.NoError(t, err) }) // Create a volume @@ -510,9 +514,10 @@ func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { }) require.NoError(t, err) t.Cleanup(func() { - w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ + err := w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ Name: volume.FullName, }) + require.NoError(t, err) }) return path.Join("/Volumes", "main", schema.Name, volume.Name) diff --git a/internal/init_test.go b/internal/init_test.go index 839d8185d0..08a808f458 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -58,7 +58,8 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { } b, err := json.Marshal(initConfig) require.NoError(t, err) - os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + err = os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + require.NoError(t, err) // Run bundle init assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) diff --git a/internal/locker_test.go b/internal/locker_test.go index 3ae783d1bf..62fcfc69fd 100644 --- a/internal/locker_test.go +++ b/internal/locker_test.go @@ -133,7 +133,8 @@ func TestAccLock(t *testing.T) { // assert on active locker content var res map[string]string - json.Unmarshal(b, &res) + err = json.Unmarshal(b, &res) + require.NoError(t, err) assert.NoError(t, err) assert.Equal(t, "Khan", res["surname"]) assert.Equal(t, "Shah Rukh", res["name"]) diff --git a/internal/tags_test.go b/internal/tags_test.go index 2dd3759acb..54039223f1 100644 --- a/internal/tags_test.go +++ b/internal/tags_test.go @@ -47,7 +47,11 @@ func testTags(t *testing.T, tags map[string]string) error { if resp != nil { t.Cleanup(func() { - w.Jobs.DeleteByJobId(ctx, resp.JobId) + _ = w.Jobs.DeleteByJobId(ctx, resp.JobId) + // Cannot enable errchecking there, tests fail with: + // Error: Received unexpected error: + // Job 0 does not exist. 
+ // require.NoError(t, err) }) } diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go index f3e3fe6aba..2c248f09f9 100644 --- a/libs/cmdgroup/command_test.go +++ b/libs/cmdgroup/command_test.go @@ -42,7 +42,8 @@ func TestCommandFlagGrouping(t *testing.T) { buf := bytes.NewBuffer(nil) cmd.SetOutput(buf) - cmd.Usage() + err := cmd.Usage() + require.NoError(t, err) expected := `Usage: parent test [flags] diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index ab0a1cec15..449c09075c 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" ) func TestNormalizeStruct(t *testing.T) { @@ -20,8 +21,8 @@ func TestNormalizeStruct(t *testing.T) { "bar": dyn.V("baz"), }) - vout, err := Normalize(typ, vin) - assert.Empty(t, err) + vout, diags := Normalize(typ, vin) + assert.Empty(t, diags) assert.Equal(t, vin, vout) } @@ -37,14 +38,14 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { "bar": dyn.V(map[string]dyn.Value{"an": dyn.V("error")}), }) - vout, err := Normalize(typ, vin) - assert.Len(t, err, 1) + vout, diags := Normalize(typ, vin) + assert.Len(t, diags, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Warning, Summary: `expected string, found map`, Locations: []dyn.Location{{}}, Paths: []dyn.Path{dyn.NewPath(dyn.Key("bar"))}, - }, err[0]) + }, diags[0]) // Elements that encounter an error during normalization are dropped. assert.Equal(t, map[string]any{ @@ -60,17 +61,20 @@ func TestNormalizeStructUnknownField(t *testing.T) { var typ Tmp m := dyn.NewMapping() - m.Set(dyn.V("foo"), dyn.V("val-foo")) + err := m.Set(dyn.V("foo"), dyn.V("val-foo")) + require.NoError(t, err) + // Set the unknown field, with location information. - m.Set(dyn.NewValue("bar", []dyn.Location{ + err = m.Set(dyn.NewValue("bar", []dyn.Location{ {File: "hello.yaml", Line: 1, Column: 1}, {File: "world.yaml", Line: 2, Column: 2}, }), dyn.V("var-bar")) + require.NoError(t, err) vin := dyn.V(m) - vout, err := Normalize(typ, vin) - assert.Len(t, err, 1) + vout, diags := Normalize(typ, vin) + assert.Len(t, diags, 1) assert.Equal(t, diag.Diagnostic{ Severity: diag.Warning, Summary: `unknown field: bar`, @@ -80,7 +84,7 @@ func TestNormalizeStructUnknownField(t *testing.T) { {File: "world.yaml", Line: 2, Column: 2}, }, Paths: []dyn.Path{dyn.EmptyPath}, - }, err[0]) + }, diags[0]) // The field that can be mapped to the struct field is retained. 
assert.Equal(t, map[string]any{ diff --git a/libs/dyn/jsonsaver/marshal_test.go b/libs/dyn/jsonsaver/marshal_test.go index 0b6a342831..e8897ea49c 100644 --- a/libs/dyn/jsonsaver/marshal_test.go +++ b/libs/dyn/jsonsaver/marshal_test.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" ) func TestMarshal_String(t *testing.T) { @@ -44,8 +45,8 @@ func TestMarshal_Time(t *testing.T) { func TestMarshal_Map(t *testing.T) { m := dyn.NewMapping() - m.Set(dyn.V("key1"), dyn.V("value1")) - m.Set(dyn.V("key2"), dyn.V("value2")) + require.NoError(t, m.Set(dyn.V("key1"), dyn.V("value1"))) + require.NoError(t, m.Set(dyn.V("key2"), dyn.V("value2"))) b, err := Marshal(dyn.V(m)) if assert.NoError(t, err) { @@ -66,16 +67,16 @@ func TestMarshal_Sequence(t *testing.T) { func TestMarshal_Complex(t *testing.T) { map1 := dyn.NewMapping() - map1.Set(dyn.V("str1"), dyn.V("value1")) - map1.Set(dyn.V("str2"), dyn.V("value2")) + require.NoError(t, map1.Set(dyn.V("str1"), dyn.V("value1"))) + require.NoError(t, map1.Set(dyn.V("str2"), dyn.V("value2"))) seq1 := []dyn.Value{} seq1 = append(seq1, dyn.V("value1")) seq1 = append(seq1, dyn.V("value2")) root := dyn.NewMapping() - root.Set(dyn.V("map1"), dyn.V(map1)) - root.Set(dyn.V("seq1"), dyn.V(seq1)) + require.NoError(t, root.Set(dyn.V("map1"), dyn.V(map1))) + require.NoError(t, root.Set(dyn.V("seq1"), dyn.V(seq1))) // Marshal without indent. b, err := Marshal(dyn.V(root)) diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index 264c32e5e2..4af9377749 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -432,10 +432,12 @@ func TestOverride_PreserveMappingKeys(t *testing.T) { rightValueLocation := dyn.Location{File: "right.yml", Line: 3, Column: 1} left := dyn.NewMapping() - left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + err := left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + require.NoError(t, err) right := dyn.NewMapping() - right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + err = right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + require.NoError(t, err) state, visitor := createVisitor(visitorOpts{}) diff --git a/libs/exec/exec_test.go b/libs/exec/exec_test.go index ad54601d08..e75c158bd7 100644 --- a/libs/exec/exec_test.go +++ b/libs/exec/exec_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestExecutorWithSimpleInput(t *testing.T) { @@ -86,9 +87,11 @@ func testExecutorWithShell(t *testing.T, shell string) { tmpDir := t.TempDir() t.Setenv("PATH", tmpDir) if runtime.GOOS == "windows" { - os.Symlink(p, fmt.Sprintf("%s/%s.exe", tmpDir, shell)) + err = os.Symlink(p, fmt.Sprintf("%s/%s.exe", tmpDir, shell)) + require.NoError(t, err) } else { - os.Symlink(p, fmt.Sprintf("%s/%s", tmpDir, shell)) + err = os.Symlink(p, fmt.Sprintf("%s/%s", tmpDir, shell)) + require.NoError(t, err) } executor, err := NewCommandExecutor(".") diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 77530086a1..564d4f813c 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -62,7 +62,8 @@ func TestJsonFlagFile(t *testing.T) { { f, err := 
os.Create(path.Join(t.TempDir(), "file")) require.NoError(t, err) - f.Write(payload) + _, err = f.Write(payload) + require.NoError(t, err) f.Close() fpath = f.Name() } diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index f4fd931fd1..6d239edf59 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -56,7 +56,8 @@ func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() fileSet, err := NewFileSetAtRoot(vfs.MustNew(projectDir)) require.NoError(t, err) - fileSet.EnsureValidGitIgnoreExists() + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) gitIgnorePath := filepath.Join(projectDir, ".gitignore") assert.FileExists(t, gitIgnorePath) @@ -74,7 +75,8 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644) require.NoError(t, err) - fileSet.EnsureValidGitIgnoreExists() + err = fileSet.EnsureValidGitIgnoreExists() + require.NoError(t, err) b, err := os.ReadFile(gitIgnorePath) require.NoError(t, err) diff --git a/libs/git/reference_test.go b/libs/git/reference_test.go index 194d79333e..bfa0e50e5b 100644 --- a/libs/git/reference_test.go +++ b/libs/git/reference_test.go @@ -54,7 +54,8 @@ func TestReferenceLoadingForObjectID(t *testing.T) { f, err := os.Create(filepath.Join(tmp, "HEAD")) require.NoError(t, err) defer f.Close() - f.WriteString(strings.Repeat("e", 40) + "\r\n") + _, err = f.WriteString(strings.Repeat("e", 40) + "\r\n") + require.NoError(t, err) ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) @@ -67,7 +68,8 @@ func TestReferenceLoadingForReference(t *testing.T) { f, err := os.OpenFile(filepath.Join(tmp, "HEAD"), os.O_CREATE|os.O_WRONLY, os.ModePerm) require.NoError(t, err) defer f.Close() - f.WriteString("ref: refs/heads/foo\n") + _, err = f.WriteString("ref: refs/heads/foo\n") + require.NoError(t, err) ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) @@ -80,7 +82,8 @@ func TestReferenceLoadingFailsForInvalidContent(t *testing.T) { f, err := os.OpenFile(filepath.Join(tmp, "HEAD"), os.O_CREATE|os.O_WRONLY, os.ModePerm) require.NoError(t, err) defer f.Close() - f.WriteString("abc") + _, err = f.WriteString("abc") + require.NoError(t, err) _, err = LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.ErrorContains(t, err, "unknown format for git HEAD") diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 93d9a03dcc..474880e273 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -27,7 +27,7 @@ func newTestRepository(t *testing.T) *testRepository { require.NoError(t, err) defer f1.Close() - f1.WriteString( + _, err = f1.WriteString( `[core] repositoryformatversion = 0 filemode = true @@ -36,6 +36,7 @@ func newTestRepository(t *testing.T) *testRepository { ignorecase = true precomposeunicode = true `) + require.NoError(t, err) f2, err := os.Create(filepath.Join(tmp, ".git", "HEAD")) require.NoError(t, err) diff --git a/libs/jsonschema/from_type_test.go b/libs/jsonschema/from_type_test.go index 0ddb1011a8..cdfdcfd10e 100644 --- a/libs/jsonschema/from_type_test.go +++ b/libs/jsonschema/from_type_test.go @@ -403,7 +403,8 @@ func TestFromTypeError(t *testing.T) { // Maps with non-string keys should panic. 
type mapOfInts map[int]int assert.PanicsWithValue(t, "found map with non-string key: int", func() { - FromType(reflect.TypeOf(mapOfInts{}), nil) + _, err := FromType(reflect.TypeOf(mapOfInts{}), nil) + require.NoError(t, err) }) // Unsupported types should return an error. diff --git a/libs/process/stub_test.go b/libs/process/stub_test.go index 65f59f8172..81afa3a891 100644 --- a/libs/process/stub_test.go +++ b/libs/process/stub_test.go @@ -43,8 +43,14 @@ func TestStubCallback(t *testing.T) { ctx := context.Background() ctx, stub := process.WithStub(ctx) stub.WithCallback(func(cmd *exec.Cmd) error { - cmd.Stderr.Write([]byte("something...")) - cmd.Stdout.Write([]byte("else...")) + _, err := cmd.Stderr.Write([]byte("something...")) + if err != nil { + return err + } + _, err = cmd.Stdout.Write([]byte("else...")) + if err != nil { + return err + } return fmt.Errorf("yep") }) diff --git a/libs/python/interpreters_unix_test.go b/libs/python/interpreters_unix_test.go index e2b0a5a1cb..b5229c2ace 100644 --- a/libs/python/interpreters_unix_test.go +++ b/libs/python/interpreters_unix_test.go @@ -34,7 +34,8 @@ func TestFilteringInterpreters(t *testing.T) { rogueBin := filepath.Join(t.TempDir(), "rogue-bin") err := os.Mkdir(rogueBin, 0o777) assert.NoError(t, err) - os.Chmod(rogueBin, 0o777) + err = os.Chmod(rogueBin, 0o777) + assert.NoError(t, err) raw, err := os.ReadFile("testdata/world-writeable/python8.4") assert.NoError(t, err) From e9fa7b7c0e8ed7eeb6940d4cdf99d09ae2675aca Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 9 Dec 2024 15:25:06 +0100 Subject: [PATCH 19/63] Remove global variable from clusters integration test (#1972) ## Changes I saw this test fail on rerun because the global wasn't set. Fix by removing the global and using a different approach to acquire a valid cluster ID. ## Tests Integration tests. --- internal/clusters_test.go | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/internal/clusters_test.go b/internal/clusters_test.go index 6daddcce37..2c41ebe1dd 100644 --- a/internal/clusters_test.go +++ b/internal/clusters_test.go @@ -5,11 +5,13 @@ import ( "regexp" "testing" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -var clusterId string - func TestAccClustersList(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) @@ -21,13 +23,14 @@ func TestAccClustersList(t *testing.T) { assert.Equal(t, "", stderr.String()) idRegExp := regexp.MustCompile(`[0-9]{4}\-[0-9]{6}-[a-z0-9]{8}`) - clusterId = idRegExp.FindString(outStr) + clusterId := idRegExp.FindString(outStr) assert.NotEmpty(t, clusterId) } func TestAccClustersGet(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + clusterId := findValidClusterID(t) stdout, stderr := RequireSuccessfulRun(t, "clusters", "get", clusterId) outStr := stdout.String() assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId)) @@ -38,3 +41,22 @@ func TestClusterCreateErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "clusters", "create") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } + +// findValidClusterID lists clusters in the workspace to find a valid cluster ID. 
+func findValidClusterID(t *testing.T) string { + ctx, wt := acc.WorkspaceTest(t) + it := wt.W.Clusters.List(ctx, compute.ListClustersRequest{ + FilterBy: &compute.ListClustersFilterBy{ + ClusterSources: []compute.ClusterSource{ + compute.ClusterSourceApi, + compute.ClusterSourceUi, + }, + }, + }) + + clusterIDs, err := listing.ToSliceN(ctx, it, 1) + require.NoError(t, err) + require.Len(t, clusterIDs, 1) + + return clusterIDs[0].ClusterId +} From 3457c10c7f9afa7fda01cf55202b4dd527d45310 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 9 Dec 2024 16:19:19 +0100 Subject: [PATCH 20/63] Pin gotestsum version to v1.12.0 (#1981) Make this robust against inadvertent upgrades. --- .github/workflows/push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index ebb3e75d4a..a497a7d7b7 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -44,7 +44,7 @@ jobs: run: | echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - go install gotest.tools/gotestsum@latest + go install gotest.tools/gotestsum@v1.12.0 - name: Pull external libraries run: | From dc2cf3bc42f24c180dcaec9605e0b80eda8989d8 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 9 Dec 2024 16:26:41 +0100 Subject: [PATCH 21/63] Process top-level permissions for resources using dynamic config (#1964) ## Changes This PR ensures that when new resources are added they are handled by top-level permissions mutator, either by supporting or not supporting the resource type. ## Tests Added unit tests --- bundle/permissions/mutator.go | 126 ++++++++++++++--------------- bundle/permissions/mutator_test.go | 26 ++++++ bundle/permissions/utils.go | 2 +- 3 files changed, 86 insertions(+), 68 deletions(-) diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index bc1392d932..c4b9708571 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -7,13 +7,18 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" ) const CAN_MANAGE = "CAN_MANAGE" const CAN_VIEW = "CAN_VIEW" const CAN_RUN = "CAN_RUN" +var unsupportedResources = []string{"clusters", "volumes", "schemas", "quality_monitors", "registered_models"} + var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} var levelsMap = map[string](map[string]string){ "jobs": { @@ -26,11 +31,11 @@ var levelsMap = map[string](map[string]string){ CAN_VIEW: "CAN_VIEW", CAN_RUN: "CAN_RUN", }, - "mlflow_experiments": { + "experiments": { CAN_MANAGE: "CAN_MANAGE", CAN_VIEW: "CAN_READ", }, - "mlflow_models": { + "models": { CAN_MANAGE: "CAN_MANAGE", CAN_VIEW: "CAN_READ", }, @@ -57,11 +62,58 @@ func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Di return diag.FromErr(err) } - applyForJobs(ctx, b) - applyForPipelines(ctx, b) - applyForMlModels(ctx, b) - applyForMlExperiments(ctx, b) - applyForModelServiceEndpoints(ctx, b) + patterns := make(map[string]dyn.Pattern, 0) + for key := range levelsMap { + patterns[key] = dyn.NewPattern( + dyn.Key("resources"), + dyn.Key(key), + dyn.AnyKey(), + ) + } + + err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + for key, pattern := range patterns { + v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + var permissions []resources.Permission + 
pv, err := dyn.Get(v, "permissions") + + // If the permissions field is not found, we set to an empty array + if err != nil { + pv = dyn.V([]dyn.Value{}) + } + + err = convert.ToTyped(&permissions, pv) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to convert permissions: %w", err) + } + + permissions = append(permissions, convertPermissions( + ctx, + b.Config.Permissions, + permissions, + key, + levelsMap[key], + )...) + + pv, err = convert.FromTyped(permissions, dyn.NilValue) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to convert permissions: %w", err) + } + + return dyn.Set(v, "permissions", pv) + }) + + if err != nil { + return dyn.InvalidValue, err + } + } + + return v, nil + }) + + if err != nil { + return diag.FromErr(err) + } return nil } @@ -76,66 +128,6 @@ func validate(b *bundle.Bundle) error { return nil } -func applyForJobs(ctx context.Context, b *bundle.Bundle) { - for key, job := range b.Config.Resources.Jobs { - job.Permissions = append(job.Permissions, convert( - ctx, - b.Config.Permissions, - job.Permissions, - key, - levelsMap["jobs"], - )...) - } -} - -func applyForPipelines(ctx context.Context, b *bundle.Bundle) { - for key, pipeline := range b.Config.Resources.Pipelines { - pipeline.Permissions = append(pipeline.Permissions, convert( - ctx, - b.Config.Permissions, - pipeline.Permissions, - key, - levelsMap["pipelines"], - )...) - } -} - -func applyForMlExperiments(ctx context.Context, b *bundle.Bundle) { - for key, experiment := range b.Config.Resources.Experiments { - experiment.Permissions = append(experiment.Permissions, convert( - ctx, - b.Config.Permissions, - experiment.Permissions, - key, - levelsMap["mlflow_experiments"], - )...) - } -} - -func applyForMlModels(ctx context.Context, b *bundle.Bundle) { - for key, model := range b.Config.Resources.Models { - model.Permissions = append(model.Permissions, convert( - ctx, - b.Config.Permissions, - model.Permissions, - key, - levelsMap["mlflow_models"], - )...) - } -} - -func applyForModelServiceEndpoints(ctx context.Context, b *bundle.Bundle) { - for key, model := range b.Config.Resources.ModelServingEndpoints { - model.Permissions = append(model.Permissions, convert( - ctx, - b.Config.Permissions, - model.Permissions, - key, - levelsMap["model_serving_endpoints"], - )...) 
- } -} - func (m *bundlePermissions) Name() string { return "ApplyBundlePermissions" } diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index 1a177d9024..78703e90f0 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -2,12 +2,15 @@ package permissions import ( "context" + "fmt" + "slices" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -51,6 +54,10 @@ func TestApplyBundlePermissions(t *testing.T) { "endpoint_1": {}, "endpoint_2": {}, }, + Dashboards: map[string]*resources.Dashboard{ + "dashboard_1": {}, + "dashboard_2": {}, + }, }, }, } @@ -103,6 +110,10 @@ func TestApplyBundlePermissions(t *testing.T) { require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) require.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint_2"].Permissions, resources.Permission{Level: "CAN_QUERY", ServicePrincipalName: "TestServicePrincipal"}) + + require.Len(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, 2) + require.Contains(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) + require.Contains(t, b.Config.Resources.Dashboards["dashboard_1"].Permissions, resources.Permission{Level: "CAN_READ", GroupName: "TestGroup"}) } func TestWarningOnOverlapPermission(t *testing.T) { @@ -146,5 +157,20 @@ func TestWarningOnOverlapPermission(t *testing.T) { require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser2"}) require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.Jobs["job_2"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) +} + +func TestAllResourcesExplicitlyDefinedForPermissionsSupport(t *testing.T) { + r := config.Resources{} + for _, resource := range unsupportedResources { + _, ok := levelsMap[resource] + assert.False(t, ok, fmt.Sprintf("Resource %s is defined in both levelsMap and unsupportedResources", resource)) + } + + for _, resource := range r.AllResources() { + _, ok := levelsMap[resource.Description.PluralName] + if !slices.Contains(unsupportedResources, resource.Description.PluralName) && !ok { + assert.Fail(t, fmt.Sprintf("Resource %s is not explicitly defined in levelsMap or unsupportedResources", resource.Description.PluralName)) + } + } } diff --git a/bundle/permissions/utils.go b/bundle/permissions/utils.go index 9072cd2522..cf16ea9b28 100644 --- a/bundle/permissions/utils.go +++ b/bundle/permissions/utils.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -func convert( +func convertPermissions( ctx context.Context, bundlePermissions []resources.Permission, resourcePermissions []resources.Permission, From 48d7c08a46c4a760752d651e12b4344bb69f211d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 9 Dec 2024 21:29:11 +0100 Subject: [PATCH 22/63] Bump TF codegen dependencies to latest (#1961) ## Changes This updates the TF 
codegen dependencies to latest. ## Tests Ran codegen and confirmed it still works. See `bundle/internal/tf/codegen/README.md` for instructions. --- bundle/internal/tf/codegen/go.mod | 29 ++++++----- bundle/internal/tf/codegen/go.sum | 80 ++++++++++++++++++------------- 2 files changed, 62 insertions(+), 47 deletions(-) diff --git a/bundle/internal/tf/codegen/go.mod b/bundle/internal/tf/codegen/go.mod index 67ac4bbc7e..508ff6ffba 100644 --- a/bundle/internal/tf/codegen/go.mod +++ b/bundle/internal/tf/codegen/go.mod @@ -1,24 +1,27 @@ module github.com/databricks/cli/bundle/internal/tf/codegen -go 1.21 +go 1.23 + +toolchain go1.23.2 require ( - github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/hc-install v0.6.3 - github.com/hashicorp/terraform-exec v0.20.0 - github.com/hashicorp/terraform-json v0.21.0 + github.com/hashicorp/go-version v1.7.0 + github.com/hashicorp/hc-install v0.9.0 + github.com/hashicorp/terraform-exec v0.21.0 + github.com/hashicorp/terraform-json v0.23.0 github.com/iancoleman/strcase v0.3.0 - github.com/zclconf/go-cty v1.14.2 - golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + github.com/zclconf/go-cty v1.15.1 + golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d ) require ( - github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + golang.org/x/crypto v0.30.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect ) diff --git a/bundle/internal/tf/codegen/go.sum b/bundle/internal/tf/codegen/go.sum index 7a4023ba5e..df59b9d056 100644 --- a/bundle/internal/tf/codegen/go.sum +++ b/bundle/internal/tf/codegen/go.sum @@ -2,67 +2,79 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= +github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= -github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= -github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= -github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= -github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6eLhghE= +github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod 
h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= +github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= +github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= -github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod 
h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= +github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d h1:0olWaB5pg3+oychR51GUVCEsGkeCU/2JxjBgIo4f3M0= +golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= From f3c628e53794dd0fb917bf0408030e32c3101fcc Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 10 Dec 2024 11:02:44 +0100 Subject: [PATCH 23/63] Allow overriding compute for non-development mode targets (#1899) ## Changes Allow overriding compute for non-development targets. We previously had a restriction in place where `--cluster-id` was only allowed for targets that use `mode: development`. The intention was to prevent mistakes, but this was overly restrictive. ## Tests Updated unit tests. --- bundle/config/mutator/override_compute.go | 24 +++++++++++---- .../config/mutator/override_compute_test.go | 30 +++++++++++++------ 2 files changed, 40 insertions(+), 14 deletions(-) diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 5700cdf261..2e14b58245 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" ) @@ -38,18 +39,31 @@ func overrideJobCompute(j *resources.Job, compute string) { } func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - if b.Config.Bundle.Mode != config.Development { + var diags diag.Diagnostics + + if b.Config.Bundle.Mode == config.Production { if b.Config.Bundle.ClusterId != "" { - return diag.Errorf("cannot override compute for an target that does not use 'mode: development'") + // Overriding compute via a command-line flag for production works, but is not recommended. 
+ diags = diags.Extend(diag.Diagnostics{{ + Summary: "Setting a cluster override for a target that uses 'mode: production' is not recommended", + Detail: "It is recommended to always use the same compute for production target for consistency.", + }}) } - return nil } if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" { + // For historical reasons, we allow setting the cluster ID via the DATABRICKS_CLUSTER_ID + // when development mode is used. Sometimes, this is done by accident, so we log an info message. + if b.Config.Bundle.Mode == config.Development { + cmdio.LogString(ctx, "Setting a cluster override because DATABRICKS_CLUSTER_ID is set. It is recommended to use --cluster-id instead, which works in any target mode.") + } else { + // We don't allow using DATABRICKS_CLUSTER_ID in any other mode, it's too error-prone. + return diag.Warningf("The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'") + } b.Config.Bundle.ClusterId = v } if b.Config.Bundle.ClusterId == "" { - return nil + return diags } r := b.Config.Resources @@ -57,5 +71,5 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diag overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId) } - return nil + return diags } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 369447d7e2..950091ed70 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestOverrideDevelopment(t *testing.T) { +func TestOverrideComputeModeDevelopment(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "") b := &bundle.Bundle{ Config: config.Root{ @@ -62,10 +62,13 @@ func TestOverrideDevelopment(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) } -func TestOverrideDevelopmentEnv(t *testing.T) { +func TestOverrideComputeModeDefaultIgnoresVariable(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ + Bundle: config.Bundle{ + Mode: "", + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": {JobSettings: &jobs.JobSettings{ @@ -86,11 +89,12 @@ func TestOverrideDevelopmentEnv(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.NoError(t, diags.Error()) + require.Len(t, diags, 1) + assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary) assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } -func TestOverridePipelineTask(t *testing.T) { +func TestOverrideComputePipelineTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ @@ -115,7 +119,7 @@ func TestOverridePipelineTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } -func TestOverrideForEachTask(t *testing.T) { +func TestOverrideComputeForEachTask(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ @@ -140,10 +144,11 @@ func TestOverrideForEachTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task) } -func TestOverrideProduction(t *testing.T) { +func TestOverrideComputeModeProduction(t *testing.T) { b := &bundle.Bundle{ Config: 
config.Root{ Bundle: config.Bundle{ + Mode: config.Production, ClusterId: "newClusterID", }, Resources: config.Resources{ @@ -166,13 +171,18 @@ func TestOverrideProduction(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.True(t, diags.HasError()) + require.Len(t, diags, 1) + assert.Equal(t, "Setting a cluster override for a target that uses 'mode: production' is not recommended", diags[0].Summary) + assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } -func TestOverrideProductionEnv(t *testing.T) { +func TestOverrideComputeModeProductionIgnoresVariable(t *testing.T) { t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") b := &bundle.Bundle{ Config: config.Root{ + Bundle: config.Bundle{ + Mode: config.Production, + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": {JobSettings: &jobs.JobSettings{ @@ -193,5 +203,7 @@ func TestOverrideProductionEnv(t *testing.T) { m := mutator.OverrideCompute() diags := bundle.Apply(context.Background(), b, m) - require.NoError(t, diags.Error()) + require.Len(t, diags, 1) + assert.Equal(t, "The DATABRICKS_CLUSTER_ID variable is set but is ignored since the current target does not use 'mode: development'", diags[0].Summary) + assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } From 070dc6813f733eb8af27d980b8e50520374575d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:10:25 +0100 Subject: [PATCH 24/63] Bump golang.org/x/term from 0.26.0 to 0.27.0 (#1983) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.26.0 to 0.27.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.26.0&new-version=0.27.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7141ed768a..d24f97f428 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.9.0 - golang.org/x/term v0.26.0 + golang.org/x/term v0.27.0 golang.org/x/text v0.20.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -64,7 +64,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.24.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.27.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect diff --git a/go.sum b/go.sum index 5d2c53a376..a62dbdc6cb 100644 --- a/go.sum +++ b/go.sum @@ -212,10 +212,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= From ad1359c1eba6c74eb00bce714661d0c529e04594 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:14:20 +0100 Subject: [PATCH 25/63] Bump golang.org/x/sync from 0.9.0 to 0.10.0 (#1984) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.9.0 to 0.10.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/sync&package-manager=go_modules&previous-version=0.9.0&new-version=0.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d24f97f428..20a0e28953 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.24.0 - golang.org/x/sync v0.9.0 + golang.org/x/sync v0.10.0 golang.org/x/term v0.27.0 golang.org/x/text v0.20.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 diff --git a/go.sum b/go.sum index a62dbdc6cb..bf70ca509a 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 67f08ba924394157aa70519e3389d28667e2589d Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 11 Dec 2024 09:40:14 +0100 Subject: [PATCH 26/63] Avoid panic if Config.Workspace.CurrentUser.User is not set (#1993) ## Changes Extra check to avoid panic if /api/2.0/preview/scim/v2/Me returns `{}` ## Tests Existing tests. --- bundle/config/mutator/expand_workspace_root.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/config/mutator/expand_workspace_root.go b/bundle/config/mutator/expand_workspace_root.go index 3f0547de1a..a29d129b0f 100644 --- a/bundle/config/mutator/expand_workspace_root.go +++ b/bundle/config/mutator/expand_workspace_root.go @@ -28,7 +28,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag. } currentUser := b.Config.Workspace.CurrentUser - if currentUser == nil || currentUser.UserName == "" { + if currentUser == nil || currentUser.User == nil || currentUser.UserName == "" { return diag.Errorf("unable to expand workspace root: current user not set") } From 4236e7122f8d2fe66f5a95e5da49af8b8bdf2038 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 11 Dec 2024 09:44:22 +0100 Subject: [PATCH 27/63] Switch to `folders.FindDirWithLeaf` (#1963) ## Changes Remove two duplicate implementations of the same logic, switch everywhere to folders.FindDirWithLeaf. Add Abs() call to FindDirWithLeaf, it cannot really work on relative paths. ## Tests Existing tests. 
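For reference, a minimal usage sketch of the consolidated helper as the call sites below use it. The `main` wrapper and the printed messages are illustrative assumptions; the import path and the `os.ErrNotExist` handling mirror the hunks in `internal/bundle/helpers.go` and `load_git_details.go`, assuming `FindDirWithLeaf` surfaces `os.ErrNotExist` when no ancestor directory contains the leaf, as the updated tests expect.

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/databricks/cli/libs/folders"
)

func main() {
	// The starting directory may be relative; FindDirWithLeaf now resolves it
	// with filepath.Abs before walking up toward the filesystem root.
	gitRoot, err := folders.FindDirWithLeaf(".", ".git")
	if errors.Is(err, os.ErrNotExist) {
		// No ancestor directory contains a .git entry.
		fmt.Println("not inside a git repository")
		return
	}
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("repository root:", gitRoot)
}
```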
--- bundle/config/mutator/load_git_details.go | 6 +++- internal/bundle/helpers.go | 7 ++--- internal/git_fetch_test.go | 4 +-- libs/folders/folders.go | 4 +++ libs/git/info.go | 32 ++----------------- libs/vfs/leaf.go | 29 ----------------- libs/vfs/leaf_test.go | 38 ----------------------- 7 files changed, 16 insertions(+), 104 deletions(-) delete mode 100644 libs/vfs/leaf.go delete mode 100644 libs/vfs/leaf_test.go diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 82255552aa..5c263ac035 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -2,6 +2,8 @@ package mutator import ( "context" + "errors" + "os" "path/filepath" "github.com/databricks/cli/bundle" @@ -24,7 +26,9 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn var diags diag.Diagnostics info, err := git.FetchRepositoryInfo(ctx, b.BundleRoot.Native(), b.WorkspaceClient()) if err != nil { - diags = append(diags, diag.WarningFromErr(err)...) + if !errors.Is(err, os.ErrNotExist) { + diags = append(diags, diag.WarningFromErr(err)...) + } } if info.WorktreeRoot == "" { diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index dd9c841c9e..00a941c952 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -18,8 +18,8 @@ import ( "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/template" - "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) @@ -144,15 +144,14 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique } func blackBoxRun(t *testing.T, root string, args ...string) (stdout string, stderr string) { - cwd := vfs.MustNew(".") - gitRoot, err := vfs.FindLeafInTree(cwd, ".git") + gitRoot, err := folders.FindDirWithLeaf(".", ".git") require.NoError(t, err) t.Setenv("BUNDLE_ROOT", root) // Create the command cmd := exec.Command("go", append([]string{"run", "main.go"}, args...)...) - cmd.Dir = gitRoot.Native() + cmd.Dir = gitRoot // Create buffers to capture output var outBuffer, errBuffer bytes.Buffer diff --git a/internal/git_fetch_test.go b/internal/git_fetch_test.go index 5dab6be762..3da024a3a3 100644 --- a/internal/git_fetch_test.go +++ b/internal/git_fetch_test.go @@ -101,7 +101,7 @@ func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { assert.NoError(t, err) } else { assert.Error(t, err) - assert.Contains(t, err.Error(), test.msg) + assert.ErrorContains(t, err, test.msg) } assertEmptyGitInfo(t, info) }) @@ -151,7 +151,7 @@ func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { for _, input := range tests { t.Run(input, func(t *testing.T) { info, err := git.FetchRepositoryInfo(ctx, input, wt.W) - assert.NoError(t, err) + assert.ErrorIs(t, err, os.ErrNotExist) assertEmptyGitInfo(t, info) }) } diff --git a/libs/folders/folders.go b/libs/folders/folders.go index c83c711d3f..81bf7ed01d 100644 --- a/libs/folders/folders.go +++ b/libs/folders/folders.go @@ -9,6 +9,10 @@ import ( // FindDirWithLeaf returns the first directory that holds `leaf`, // traversing up to the root of the filesystem, starting at `dir`. 
func FindDirWithLeaf(dir string, leaf string) (string, error) { + dir, err := filepath.Abs(dir) + if err != nil { + return "", err + } for { _, err := os.Stat(filepath.Join(dir, leaf)) diff --git a/libs/git/info.go b/libs/git/info.go index 13c2981135..ce9c70a5c1 100644 --- a/libs/git/info.go +++ b/libs/git/info.go @@ -2,15 +2,12 @@ package git import ( "context" - "errors" - "io/fs" "net/http" - "os" "path" - "path/filepath" "strings" "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" @@ -105,7 +102,7 @@ func ensureWorkspacePrefix(p string) string { func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo, error) { result := RepositoryInfo{} - rootDir, err := findLeafInTree(path, GitDirectoryName) + rootDir, err := folders.FindDirWithLeaf(path, GitDirectoryName) if rootDir == "" { return result, err } @@ -134,28 +131,3 @@ func fetchRepositoryInfoDotGit(ctx context.Context, path string) (RepositoryInfo return result, nil } - -func findLeafInTree(p string, leafName string) (string, error) { - var err error - for i := 0; i < 10000; i++ { - _, err = os.Stat(filepath.Join(p, leafName)) - - if err == nil { - // Found [leafName] in p - return p, nil - } - - // ErrNotExist means we continue traversal up the tree. - if errors.Is(err, fs.ErrNotExist) { - parent := filepath.Dir(p) - if parent == p { - return "", nil - } - p = parent - continue - } - break - } - - return "", err -} diff --git a/libs/vfs/leaf.go b/libs/vfs/leaf.go deleted file mode 100644 index 8c11f9039a..0000000000 --- a/libs/vfs/leaf.go +++ /dev/null @@ -1,29 +0,0 @@ -package vfs - -import ( - "errors" - "io/fs" -) - -// FindLeafInTree returns the first path that holds `name`, -// traversing up to the root of the filesystem, starting at `p`. -func FindLeafInTree(p Path, name string) (Path, error) { - for p != nil { - _, err := fs.Stat(p, name) - - // No error means we found the leaf in p. - if err == nil { - return p, nil - } - - // ErrNotExist means we continue traversal up the tree. - if errors.Is(err, fs.ErrNotExist) { - p = p.Parent() - continue - } - - return nil, err - } - - return nil, fs.ErrNotExist -} diff --git a/libs/vfs/leaf_test.go b/libs/vfs/leaf_test.go deleted file mode 100644 index da9412ec02..0000000000 --- a/libs/vfs/leaf_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package vfs - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFindLeafInTree(t *testing.T) { - wd, err := os.Getwd() - require.NoError(t, err) - - root := filepath.Join(wd, "..", "..") - - // Find from working directory should work. - { - out, err := FindLeafInTree(MustNew(wd), ".git") - assert.NoError(t, err) - assert.Equal(t, root, out.Native()) - } - - // Find from project root itself should work. - { - out, err := FindLeafInTree(MustNew(root), ".git") - assert.NoError(t, err) - assert.Equal(t, root, out.Native()) - } - - // Find for something that doesn't exist should work. 
- { - out, err := FindLeafInTree(MustNew(root), "this-leaf-doesnt-exist-anywhere") - assert.ErrorIs(t, err, os.ErrNotExist) - assert.Equal(t, nil, out) - } -} From 8d5351c1c3d7befda4baae5d6adb99367aa50b3c Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 11 Dec 2024 13:26:00 +0100 Subject: [PATCH 28/63] Enable errcheck everywhere and fix or silent remaining issues (#1987) ## Changes Enable errcheck linter for the whole codebase. Fix remaining complaints: - If we can propagate error to caller, do that - If we writing to stdout, continue ignoring errors (to avoid crashing in "cli | head" case) - Add exception for cobra non-critical API such as MarkHidden/MarkDeprecated/RegisterFlagCompletionFunc. This keeps current code and behaviour, to be decided later if we want to change this. - Continue ignoring errors where that is desired behaviour (e.g. git.loadConfig). - Continue ignoring errors where panicking seems riskier than ignoring the error. - Annotate cases in libs/dyn with //nolint:errcheck - to be addressed later. Note, this PR is not meant to come up with the best strategy for each case, but to be a relative safe change to enable errcheck linter. ## Tests Existing tests. --- .golangci.yaml | 10 +++++---- bundle/config/mutator/initialize_urls.go | 5 ++++- .../mutator/resolve_resource_references.go | 3 +-- bundle/deploy/state_pull.go | 10 +++++++-- bundle/render/render_text_output.go | 10 +++++++-- cmd/auth/describe.go | 5 ++++- cmd/auth/env.go | 2 +- cmd/auth/token.go | 2 +- cmd/bundle/debug/terraform.go | 4 ++-- cmd/bundle/run.go | 10 +++++++-- cmd/bundle/summary.go | 2 +- cmd/bundle/validate.go | 2 +- cmd/root/io.go | 2 +- cmd/root/logger.go | 11 ++++++---- cmd/root/progress_logger.go | 2 +- libs/auth/callback.go | 4 +++- libs/cmdio/logger.go | 18 ++++++++-------- libs/cmdio/render.go | 4 +++- libs/dyn/convert/from_typed.go | 4 ++-- libs/dyn/convert/normalize.go | 6 +++--- libs/dyn/jsonloader/json.go | 2 +- libs/dyn/mapping.go | 4 ++-- libs/dyn/merge/merge.go | 4 ++-- libs/dyn/pattern.go | 2 +- libs/dyn/visit.go | 2 +- libs/dyn/visit_map.go | 2 +- libs/dyn/visit_set.go | 2 +- libs/dyn/walk.go | 2 +- libs/dyn/yamlloader/loader.go | 2 +- libs/env/loader.go | 4 +++- libs/git/config.go | 4 ++-- libs/process/stub.go | 21 ++++++++++++++----- libs/sync/output.go | 6 +++--- 33 files changed, 109 insertions(+), 64 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index 602f126303..6ab8bb2fef 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -16,9 +16,11 @@ linters-settings: replacement: 'a[b:]' - pattern: 'interface{}' replacement: 'any' + errcheck: + exclude-functions: + - (*github.com/spf13/cobra.Command).RegisterFlagCompletionFunc + - (*github.com/spf13/cobra.Command).MarkFlagRequired + - (*github.com/spf13/pflag.FlagSet).MarkDeprecated + - (*github.com/spf13/pflag.FlagSet).MarkHidden issues: exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ - exclude-rules: - - path-except: (_test\.go|internal) - linters: - - errcheck diff --git a/bundle/config/mutator/initialize_urls.go b/bundle/config/mutator/initialize_urls.go index 3193059121..4cf7ed72c0 100644 --- a/bundle/config/mutator/initialize_urls.go +++ b/bundle/config/mutator/initialize_urls.go @@ -32,7 +32,10 @@ func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn } orgId := strconv.FormatInt(workspaceId, 10) host := b.WorkspaceClient().Config.CanonicalHostName() - initializeForWorkspace(b, orgId, host) + err = initializeForWorkspace(b, orgId, host) 
+ if err != nil { + return diag.FromErr(err) + } return nil } diff --git a/bundle/config/mutator/resolve_resource_references.go b/bundle/config/mutator/resolve_resource_references.go index 89eaa346c6..bf902f9282 100644 --- a/bundle/config/mutator/resolve_resource_references.go +++ b/bundle/config/mutator/resolve_resource_references.go @@ -36,8 +36,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) return fmt.Errorf("failed to resolve %s, err: %w", v.Lookup, err) } - v.Set(id) - return nil + return v.Set(id) }) } diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 5e301a6f35..96f138e9d3 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -62,8 +62,14 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } // Truncating the file before writing - local.Truncate(0) - local.Seek(0, 0) + err = local.Truncate(0) + if err != nil { + return diag.FromErr(err) + } + _, err = local.Seek(0, 0) + if err != nil { + return diag.FromErr(err) + } // Write file to disk. log.Infof(ctx, "Writing remote deployment state file to local cache directory") diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 92dacb4488..bf7c9a53a9 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go @@ -171,10 +171,16 @@ func RenderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, if err != nil { return fmt.Errorf("failed to render summary: %w", err) } - io.WriteString(out, "\n") + _, err = io.WriteString(out, "\n") + if err != nil { + return err + } } trailer := buildTrailer(diags) - io.WriteString(out, trailer) + _, err = io.WriteString(out, trailer) + if err != nil { + return err + } } return nil diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go index 3a6e3d5d77..c5fa8d26bf 100644 --- a/cmd/auth/describe.go +++ b/cmd/auth/describe.go @@ -141,7 +141,10 @@ func render(ctx context.Context, cmd *cobra.Command, status *authStatus, templat if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, err = cmd.OutOrStdout().Write(buf) + if err != nil { + return err + } default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/auth/env.go b/cmd/auth/env.go index e72d153998..52b7cbbfdf 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -138,7 +138,7 @@ func newEnvCommand() *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(raw) + _, _ = cmd.OutOrStdout().Write(raw) return nil } diff --git a/cmd/auth/token.go b/cmd/auth/token.go index 3f9af43fa4..fbf8b68f6e 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -94,7 +94,7 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(raw) + _, _ = cmd.OutOrStdout().Write(raw) return nil } diff --git a/cmd/bundle/debug/terraform.go b/cmd/bundle/debug/terraform.go index 843ecac4ec..c7d49ebb2f 100644 --- a/cmd/bundle/debug/terraform.go +++ b/cmd/bundle/debug/terraform.go @@ -60,13 +60,13 @@ For more information about filesystem mirrors, see the Terraform documentation: } switch root.OutputType(cmd) { case flags.OutputText: - cmdio.Render(cmd.Context(), dependencies.Terraform) + _ = cmdio.Render(cmd.Context(), dependencies.Terraform) case flags.OutputJSON: buf, err := json.MarshalIndent(dependencies, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) default: return fmt.Errorf("unknown 
output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 7a92766d90..3bcebddd59 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -159,13 +159,19 @@ task or a Python wheel task, the second example applies. if err != nil { return err } - cmd.OutOrStdout().Write([]byte(resultString)) + _, err = cmd.OutOrStdout().Write([]byte(resultString)) + if err != nil { + return err + } case flags.OutputJSON: b, err := json.MarshalIndent(output, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(b) + _, err = cmd.OutOrStdout().Write(b) + if err != nil { + return err + } default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 8c34dd6122..7c669c8454 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -73,7 +73,7 @@ func newSummaryCommand() *cobra.Command { if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) default: return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 5331e7e7b1..3b50cc2580 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -20,7 +20,7 @@ func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnosti if err != nil { return err } - cmd.OutOrStdout().Write(buf) + _, _ = cmd.OutOrStdout().Write(buf) return diags.Error() } diff --git a/cmd/root/io.go b/cmd/root/io.go index b224bbb27a..96c5a9a266 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -21,7 +21,7 @@ func initOutputFlag(cmd *cobra.Command) *outputFlag { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. if v, ok := env.Lookup(cmd.Context(), envOutputFormat); ok { - f.output.Set(v) + f.output.Set(v) //nolint:errcheck } cmd.PersistentFlags().VarP(&f.output, "output", "o", "output type: text or json") diff --git a/cmd/root/logger.go b/cmd/root/logger.go index 48cb99a370..38e09b9c9f 100644 --- a/cmd/root/logger.go +++ b/cmd/root/logger.go @@ -45,7 +45,10 @@ func (f *logFlags) makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error func (f *logFlags) initializeContext(ctx context.Context) (context.Context, error) { if f.debug { - f.level.Set("debug") + err := f.level.Set("debug") + if err != nil { + return nil, err + } } opts := slog.HandlerOptions{} @@ -81,13 +84,13 @@ func initLogFlags(cmd *cobra.Command) *logFlags { // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. if v, ok := env.Lookup(cmd.Context(), envLogFile); ok { - f.file.Set(v) + f.file.Set(v) //nolint:errcheck } if v, ok := env.Lookup(cmd.Context(), envLogLevel); ok { - f.level.Set(v) + f.level.Set(v) //nolint:errcheck } if v, ok := env.Lookup(cmd.Context(), envLogFormat); ok { - f.output.Set(v) + f.output.Set(v) //nolint:errcheck } flags := cmd.PersistentFlags() diff --git a/cmd/root/progress_logger.go b/cmd/root/progress_logger.go index 7d6a1fa46d..1458de13a3 100644 --- a/cmd/root/progress_logger.go +++ b/cmd/root/progress_logger.go @@ -59,7 +59,7 @@ func initProgressLoggerFlag(cmd *cobra.Command, logFlags *logFlags) *progressLog // Configure defaults from environment, if applicable. // If the provided value is invalid it is ignored. 
if v, ok := env.Lookup(cmd.Context(), envProgressFormat); ok { - f.Set(v) + _ = f.Set(v) } flags := cmd.PersistentFlags() diff --git a/libs/auth/callback.go b/libs/auth/callback.go index 5a24006978..3893a5041a 100644 --- a/libs/auth/callback.go +++ b/libs/auth/callback.go @@ -53,7 +53,9 @@ func newCallback(ctx context.Context, a *PersistentAuth) (*callbackServer, error a: a, } cb.srv.Handler = cb - go cb.srv.Serve(cb.ln) + go func() { + _ = cb.srv.Serve(cb.ln) + }() return cb, nil } diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 45b1883ce8..5e89b3a34f 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -188,29 +188,29 @@ func (l *Logger) writeJson(event Event) { // we panic because there we cannot catch this in jobs.RunNowAndWait panic(err) } - l.Writer.Write([]byte(b)) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(b)) + _, _ = l.Writer.Write([]byte("\n")) } func (l *Logger) writeAppend(event Event) { - l.Writer.Write([]byte(event.String())) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(event.String())) + _, _ = l.Writer.Write([]byte("\n")) } func (l *Logger) writeInplace(event Event) { if l.isFirstEvent { // save cursor location - l.Writer.Write([]byte("\033[s")) + _, _ = l.Writer.Write([]byte("\033[s")) } // move cursor to saved location - l.Writer.Write([]byte("\033[u")) + _, _ = l.Writer.Write([]byte("\033[u")) // clear from cursor to end of screen - l.Writer.Write([]byte("\033[0J")) + _, _ = l.Writer.Write([]byte("\033[0J")) - l.Writer.Write([]byte(event.String())) - l.Writer.Write([]byte("\n")) + _, _ = l.Writer.Write([]byte(event.String())) + _, _ = l.Writer.Write([]byte("\n")) l.isFirstEvent = false } diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index c68ddca0df..1529274a3c 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -361,7 +361,9 @@ func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, h if err != nil { return err } - tw.Write([]byte("\n")) + if _, err := tw.Write([]byte("\n")); err != nil { + return err + } // Do not flush here. Instead, allow the first 100 resources to determine the initial spacing of the header columns. } t, err := base.Parse(tmpl) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index cd92ad0eb2..ed1b85a365 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -126,7 +126,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio // Either if the key was set in the reference or the field is not zero-valued, we include it. if ok || nv.Kind() != dyn.KindNil { - out.Set(refk, nv) + out.Set(refk, nv) // nolint:errcheck } } @@ -184,7 +184,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Every entry is represented, even if it is a nil. // Otherwise, a map with zero-valued structs would yield a nil as well. - out.Set(refk, nv) + out.Set(refk, nv) //nolint:errcheck } return dyn.V(out), nil diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 106add35df..31cd8b6e3f 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -116,7 +116,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen } } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } // Return the normalized value if missing fields are not included. 
@@ -162,7 +162,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen continue } if v.IsValid() { - out.Set(dyn.V(k), v) + out.Set(dyn.V(k), v) // nolint:errcheck } } @@ -201,7 +201,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r } } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } return dyn.NewValue(out, src.Locations()), diags diff --git a/libs/dyn/jsonloader/json.go b/libs/dyn/jsonloader/json.go index cbf539263a..3f2dc859fe 100644 --- a/libs/dyn/jsonloader/json.go +++ b/libs/dyn/jsonloader/json.go @@ -70,7 +70,7 @@ func decodeValue(decoder *json.Decoder, o *Offset) (dyn.Value, error) { return invalidValueWithLocation(decoder, o), err } - obj.Set(keyVal, val) + obj.Set(keyVal, val) //nolint:errcheck } // Consume the closing '}' if _, err := decoder.Token(); err != nil { diff --git a/libs/dyn/mapping.go b/libs/dyn/mapping.go index f9f2d2e97e..f5dbd18e6c 100644 --- a/libs/dyn/mapping.go +++ b/libs/dyn/mapping.go @@ -41,7 +41,7 @@ func newMappingWithSize(size int) Mapping { func newMappingFromGoMap(vin map[string]Value) Mapping { m := newMappingWithSize(len(vin)) for k, v := range vin { - m.Set(V(k), v) + m.Set(V(k), v) //nolint:errcheck } return m } @@ -144,6 +144,6 @@ func (m Mapping) Clone() Mapping { // Merge merges the key-value pairs from another Mapping into the current Mapping. func (m *Mapping) Merge(n Mapping) { for _, p := range n.pairs { - m.Set(p.Key, p.Value) + m.Set(p.Key, p.Value) //nolint:errcheck } } diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 29decd7793..a83bbc1527 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -88,10 +88,10 @@ func mergeMap(a, b dyn.Value) (dyn.Value, error) { if err != nil { return dyn.InvalidValue, err } - out.Set(pk, merged) + out.Set(pk, merged) //nolint:errcheck } else { // Otherwise, just set the value. - out.Set(pk, pv) + out.Set(pk, pv) //nolint:errcheck } } diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index aecdc3ca63..2d2e9cae7b 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -69,7 +69,7 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO return InvalidValue, err } - m.Set(pk, nv) + m.Set(pk, nv) //nolint:errcheck } return NewValue(m, v.Locations()), nil diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 38adec24ff..95515115ea 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -122,7 +122,7 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts // Return an updated map value. m = m.Clone() - m.Set(V(component.key), nv) + m.Set(V(component.key), nv) //nolint:errcheck return Value{ v: m, k: KindMap, diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index 3f0cded03b..db45260381 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -25,7 +25,7 @@ func Foreach(fn MapFunc) MapFunc { if err != nil { return InvalidValue, err } - m.Set(pk, nv) + m.Set(pk, nv) //nolint:errcheck } return NewValue(m, v.Locations()), nil case KindSequence: diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index b086fb8a91..9991d311fa 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -41,7 +41,7 @@ func SetByPath(v Value, p Path, nv Value) (Value, error) { // Return an updated map value. 
m = m.Clone() - m.Set(V(component.key), nv) + m.Set(V(component.key), nv) //nolint:errcheck return Value{ v: m, k: KindMap, diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index c51a11e22c..b3576e0881 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -45,7 +45,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro if err != nil { return InvalidValue, err } - out.Set(pk, nv) + out.Set(pk, nv) //nolint:errcheck } v.v = out case KindSequence: diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index a77ee0744a..965753ccd4 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -129,7 +129,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro return dyn.InvalidValue, err } - acc.Set(k, v) + acc.Set(k, v) //nolint:errcheck } if merge == nil { diff --git a/libs/env/loader.go b/libs/env/loader.go index f441ffa15e..74c54cee8f 100644 --- a/libs/env/loader.go +++ b/libs/env/loader.go @@ -43,7 +43,9 @@ func (le *configLoader) Configure(cfg *config.Config) error { if v == "" { continue } - a.Set(cfg, v) + if err := a.Set(cfg, v); err != nil { + return err + } } } return nil diff --git a/libs/git/config.go b/libs/git/config.go index fafd81bd67..f7ff057e1b 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -155,8 +155,8 @@ func globalGitConfig() (*config, error) { // > are missing or unreadable they will be ignored. // // We therefore ignore the error return value for the calls below. - config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") - config.loadFile(vfs.MustNew(config.home), ".gitconfig") + _ = config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") + _ = config.loadFile(vfs.MustNew(config.home), ".gitconfig") return config, nil } diff --git a/libs/process/stub.go b/libs/process/stub.go index 8472f65d5b..8ab6fd705f 100644 --- a/libs/process/stub.go +++ b/libs/process/stub.go @@ -148,13 +148,20 @@ func (s *processStub) run(cmd *exec.Cmd) error { if !re.MatchString(norm) { continue } + err := resp.err if resp.stdout != "" { - cmd.Stdout.Write([]byte(resp.stdout)) + _, err1 := cmd.Stdout.Write([]byte(resp.stdout)) + if err == nil { + err = err1 + } } if resp.stderr != "" { - cmd.Stderr.Write([]byte(resp.stderr)) + _, err1 := cmd.Stderr.Write([]byte(resp.stderr)) + if err == nil { + err = err1 + } } - return resp.err + return err } if s.callback != nil { return s.callback(cmd) @@ -163,8 +170,12 @@ func (s *processStub) run(cmd *exec.Cmd) error { if s.reponseStub == zeroStub { return fmt.Errorf("no default process stub") } + err := s.reponseStub.err if s.reponseStub.stdout != "" { - cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + _, err1 := cmd.Stdout.Write([]byte(s.reponseStub.stdout)) + if err == nil { + err = err1 + } } - return s.reponseStub.err + return err } diff --git a/libs/sync/output.go b/libs/sync/output.go index c01b25ef64..e6ac8c56c7 100644 --- a/libs/sync/output.go +++ b/libs/sync/output.go @@ -43,9 +43,9 @@ func TextOutput(ctx context.Context, ch <-chan Event, w io.Writer) { // Log only if something actually happened. // Sync events produce an empty string if nothing happened. 
if str := e.String(); str != "" { - bw.WriteString(str) - bw.WriteString("\n") - bw.Flush() + _, _ = bw.WriteString(str) + _, _ = bw.WriteString("\n") + _ = bw.Flush() } } } From e39e94b12f50bf593a649f3c288762a5d7b89f28 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 11 Dec 2024 13:53:57 +0100 Subject: [PATCH 29/63] Make 'make lint' apply --fix (#1995) Add 'make lintcheck' to lint without fixing. Fixing is what you usually want. No changes to github workflow since that does not call our Makefile. --- Makefile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index bc1c42cfaa..d10332c385 100644 --- a/Makefile +++ b/Makefile @@ -7,13 +7,13 @@ fmt: @gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") lint: vendor + @echo "✓ Linting source code with https://golangci-lint.run/ (with --fix)..." + @golangci-lint run --fix ./... + +lintcheck: vendor @echo "✓ Linting source code with https://golangci-lint.run/ ..." @golangci-lint run ./... -lintfix: vendor - @echo "✓ Linting source code with 'golangci-lint run --fix' ..." - @golangci-lint run --fix ./... - test: lint testonly testonly: @@ -39,4 +39,4 @@ vendor: integration: gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./internal/..." -- -run "TestAcc.*" -parallel 4 -timeout=2h -.PHONY: fmt lint lintfix test testonly coverage build snapshot vendor integration +.PHONY: fmt lint lintcheck test testonly coverage build snapshot vendor integration From aa0b6080a49de8f64e8f12a8b526442745e97ef0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 11 Dec 2024 15:00:43 +0100 Subject: [PATCH 30/63] Fixed TestAccBundleDeployUcSchema test (#1997) ## Changes It was failing because when schema.yml was removed, `catalog: main` option left set in the base pipeline definition in databricks.yml which lead to incorrect config (empty target schema) https://github.com/databricks/cli/pull/1413 Backend behaviour changed and DLT pipelines stopped to accept empty targets leading to the error which was ignored before. ## Tests ``` --- PASS: TestAccBundleDeployUcSchema (41.75s) PASS coverage: 33.3% of statements in ./... ok github.com/databricks/cli/internal/bundle 42.210s coverage: 33.3% of statements in ./... 
``` --- internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl | 1 - internal/bundle/bundles/uc_schema/template/schema.yml.tmpl | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl b/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl index 15076ac858..0cb8d4f61a 100644 --- a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl @@ -12,7 +12,6 @@ resources: - notebook: path: ./nb.sql development: true - catalog: main include: - "*.yml" diff --git a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl b/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl index 50067036e6..0fcf10453f 100644 --- a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl +++ b/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl @@ -11,3 +11,4 @@ targets: pipelines: foo: target: ${resources.schemas.bar.id} + catalog: main From 2ee7d56ae6061eb152e743ecbc742df394cd2d82 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 11 Dec 2024 15:57:31 +0100 Subject: [PATCH 31/63] Show an error when using a cluster override with 'mode: production' (#1994) ## Changes We should show a warning when using a cluster override with 'mode: production'. Right now, we inadvertently show an error for this state. This is a followup based on https://github.com/databricks/cli/pull/1899#discussion_r1877765148. --- bundle/config/mutator/override_compute.go | 5 +++-- bundle/config/mutator/override_compute_test.go | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 2e14b58245..f3a080ee80 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -45,8 +45,9 @@ func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diag if b.Config.Bundle.ClusterId != "" { // Overriding compute via a command-line flag for production works, but is not recommended. 
diags = diags.Extend(diag.Diagnostics{{ - Summary: "Setting a cluster override for a target that uses 'mode: production' is not recommended", - Detail: "It is recommended to always use the same compute for production target for consistency.", + Summary: "Setting a cluster override for a target that uses 'mode: production' is not recommended", + Detail: "It is recommended to always use the same compute for production target for consistency.", + Severity: diag.Warning, }}) } } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 950091ed70..1fdeb373c6 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" @@ -173,6 +174,7 @@ func TestOverrideComputeModeProduction(t *testing.T) { diags := bundle.Apply(context.Background(), b, m) require.Len(t, diags, 1) assert.Equal(t, "Setting a cluster override for a target that uses 'mode: production' is not recommended", diags[0].Summary) + assert.Equal(t, diag.Warning, diags[0].Severity) assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } From 592474880d7834a4be3bf2333d0e8cea1ddeb887 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 11 Dec 2024 17:42:03 +0100 Subject: [PATCH 32/63] Enable 'govet' linter; expand log/diag with non-f functions (#1996) ## Changes Fix all the govet-found issues and enable govet linter. This prompts adding non-formatting variants of logging functions (Errorf -> Error). ## Tests Existing tests. 
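As a hedged illustration of why the non-formatting variants matter (the message value here is an assumption; the `log.Info`/`log.Infof` signatures match the functions added below): passing an already-formatted string to `Infof` trips govet's printf check, and any literal `%` in the message may be mangled, while `Info` logs the string verbatim.

```go
package main

import (
	"context"

	"github.com/databricks/cli/libs/log"
)

func main() {
	ctx := context.Background()
	msg := "update progress: 100% complete" // contains a literal '%'

	// Before: govet flags a non-constant format string, and the '%' in msg
	// would be treated as a formatting verb.
	//   log.Infof(ctx, msg)

	// After: the non-formatting variant logs the string as-is.
	log.Info(ctx, msg)
}
```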
--- .golangci.yaml | 7 ++++- bundle/artifacts/whl/autodetect.go | 2 +- bundle/deferred_test.go | 2 +- bundle/deploy/terraform/plan.go | 3 +- bundle/libraries/filer_volume_test.go | 2 +- bundle/run/job.go | 4 +-- bundle/run/pipeline.go | 4 +-- cmd/labs/project/entrypoint.go | 3 -- cmd/root/auth_test.go | 3 +- libs/log/logger.go | 45 +++++++++++++++++++++++++++ libs/template/renderer.go | 2 +- 11 files changed, 62 insertions(+), 15 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index 6ab8bb2fef..7598646d35 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -4,12 +4,17 @@ linters: - bodyclose - errcheck - gosimple - #- govet + - govet - ineffassign - staticcheck - unused - gofmt linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + - shadow gofmt: rewrite-rules: - pattern: 'a[b:len(a)]' diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index 88dc742c12..3d17a698c0 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -42,7 +42,7 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic return nil } - log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath)) + log.Infof(ctx, "Found Python wheel project at %s", b.BundleRootPath) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { diff --git a/bundle/deferred_test.go b/bundle/deferred_test.go index 3abc4aa102..ea3df17c44 100644 --- a/bundle/deferred_test.go +++ b/bundle/deferred_test.go @@ -19,7 +19,7 @@ func (t *mutatorWithError) Name() string { func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ - return diag.Errorf(t.errorMsg) + return diag.Errorf(t.errorMsg) // nolint:govet } func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { diff --git a/bundle/deploy/terraform/plan.go b/bundle/deploy/terraform/plan.go index 72f0b49a89..7f7473efab 100644 --- a/bundle/deploy/terraform/plan.go +++ b/bundle/deploy/terraform/plan.go @@ -2,7 +2,6 @@ package terraform import ( "context" - "fmt" "path/filepath" "github.com/databricks/cli/bundle" @@ -57,7 +56,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { IsEmpty: !notEmpty, } - log.Debugf(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath)) + log.Debugf(ctx, "Planning complete and persisted at %s\n", planPath) return nil } diff --git a/bundle/libraries/filer_volume_test.go b/bundle/libraries/filer_volume_test.go index 0d886824d9..870dce97cc 100644 --- a/bundle/libraries/filer_volume_test.go +++ b/bundle/libraries/filer_volume_test.go @@ -173,7 +173,7 @@ func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) { { Severity: diag.Error, Summary: "volume /Volumes/main/my_schema/my_volume does not exist: error from API", - Locations: []dyn.Location{{"config.yml", 1, 2}, {"volume.yml", 1, 2}}, + Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}, {File: "volume.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path"), dyn.MustPathFromString("resources.volumes.foo")}, Detail: `You are using a volume in your artifact_path that is managed by this bundle but which has not been deployed yet. 
Please first deploy diff --git a/bundle/run/job.go b/bundle/run/job.go index 340af961cf..35a6ef4ffc 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -143,7 +143,7 @@ func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func progressLogger.Log(event) // log progress events in using the default logger - log.Infof(ctx, event.String()) + log.Info(ctx, event.String()) } } @@ -203,7 +203,7 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e logDebug(r) logProgress(r) }).GetWithTimeout(jobRunTimeout) - if err != nil && runId != nil { + if err != nil { r.logFailedTasks(ctx, *runId) } if err != nil { diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index ffe0128430..6db80a71ba 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -37,7 +37,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE } } if logString != "" { - log.Errorf(ctx, fmt.Sprintf("[%s] %s", event.EventType, logString)) + log.Errorf(ctx, "[%s] %s", event.EventType, logString) } } @@ -132,7 +132,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp } for _, event := range events { progressLogger.Log(&event) - log.Infof(ctx, event.String()) + log.Info(ctx, event.String()) } update, err := w.Pipelines.GetUpdateByPipelineIdAndUpdateId(ctx, pipelineID, updateID) diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go index 99edf83c8c..5210c85f95 100644 --- a/cmd/labs/project/entrypoint.go +++ b/cmd/labs/project/entrypoint.go @@ -190,9 +190,6 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C if isNoLoginConfig && !e.IsBundleAware { return nil, nil, ErrNoLoginConfig } - if !isNoLoginConfig && err != nil { - return nil, nil, fmt.Errorf("load: %w", err) - } if e.IsAccountLevel { log.Debugf(ctx, "Using account-level login profile: %s", lc.AccountProfile) cfg, err := e.envAwareConfigWithProfile(ctx, lc.AccountProfile) diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 9ba2a8fa99..92f028514e 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -15,7 +15,8 @@ import ( ) func TestEmptyHttpRequest(t *testing.T) { - ctx, _ := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() req := emptyHttpRequest(ctx) assert.Equal(t, req.Context(), ctx) } diff --git a/libs/log/logger.go b/libs/log/logger.go index 43a30e92bf..794a7e8655 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -31,6 +31,51 @@ func log(logger *slog.Logger, ctx context.Context, level slog.Level, msg string) _ = logger.Handler().Handle(ctx, r) } +// Trace logs a string using the context-local or global logger. +func Trace(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelTrace) { + return + } + log(logger, ctx, LevelTrace, msg) +} + +// Debug logs a string using the context-local or global logger. +func Debug(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelDebug) { + return + } + log(logger, ctx, LevelDebug, msg) +} + +// Info logs a string using the context-local or global logger. +func Info(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelInfo) { + return + } + log(logger, ctx, LevelInfo, msg) +} + +// Warn logs a string using the context-local or global logger. 
+func Warn(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelWarn) { + return + } + log(logger, ctx, LevelWarn, msg) +} + +// Error logs a string using the context-local or global logger. +func Error(ctx context.Context, msg string) { + logger := GetLogger(ctx) + if !logger.Enabled(ctx, LevelError) { + return + } + log(logger, ctx, LevelError, msg) +} + // Tracef logs a formatted string using the context-local or global logger. func Tracef(ctx context.Context, format string, v ...any) { logger := GetLogger(ctx) diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 0f30a67d0e..5030cd9df3 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -310,7 +310,7 @@ func (r *renderer) persistToDisk(ctx context.Context, out filer.Filer) error { if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } - if err != nil && !errors.Is(err, fs.ErrNotExist) { + if !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) } } From 2e018cfaec200a02ee2bd5b389e7da3c6f15f460 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 12 Dec 2024 10:28:42 +0100 Subject: [PATCH 33/63] Enable gofumpt and goimports in golangci-lint (#1999) ## Changes Enable gofumpt and goimports in golangci-lint and apply autofix. This makes 'make fmt' redundant, will be cleaned up in follow up diff. ## Tests Existing tests. --- .golangci.yaml | 7 ++ bundle/artifacts/all.go | 1 - bundle/artifacts/autodetect.go | 3 +- bundle/artifacts/expand_globs.go | 1 - bundle/artifacts/whl/autodetect.go | 3 +- bundle/bundle.go | 4 +- bundle/config/experimental.go | 6 +- bundle/config/generate/job.go | 6 +- bundle/config/loader/process_root_includes.go | 2 +- bundle/config/mutator/apply_presets_test.go | 1 - bundle/config/mutator/compute_id_compat.go | 1 - .../expand_pipeline_glob_paths_test.go | 2 +- bundle/config/mutator/initialize_urls.go | 5 +- bundle/config/mutator/override_compute.go | 2 +- .../config/mutator/paths/job_paths_visitor.go | 3 +- .../mutator/prepend_workspace_prefix.go | 2 - .../mutator/python/python_diagnostics_test.go | 1 - .../config/mutator/python/python_mutator.go | 18 ++--- .../mutator/python/python_mutator_test.go | 15 ++-- .../mutator/resolve_variable_references.go | 13 ++-- .../mutator/rewrite_workspace_prefix.go | 1 - .../mutator/rewrite_workspace_prefix_test.go | 1 - bundle/config/mutator/run_as.go | 5 +- bundle/config/mutator/set_variables.go | 1 - bundle/config/mutator/sync_infer_root.go | 2 +- bundle/config/mutator/translate_paths.go | 4 +- bundle/config/mutator/translate_paths_test.go | 2 +- bundle/config/mutator/verify_cli_version.go | 3 +- bundle/config/presets.go | 6 +- bundle/config/root_test.go | 2 - bundle/config/validate/files_to_sync.go | 3 +- bundle/config/validate/folder_permissions.go | 3 +- .../validate/job_cluster_key_defined.go | 3 +- .../config/validate/job_task_cluster_spec.go | 3 +- .../validate/single_node_cluster_test.go | 1 - bundle/config/validate/validate.go | 3 +- .../config/validate/validate_sync_patterns.go | 3 +- bundle/config/variable/lookup_test.go | 1 - bundle/config/variable/resolve_cluster.go | 1 - bundle/deferred.go | 2 +- bundle/deploy/state.go | 8 +- bundle/deploy/state_pull.go | 2 +- bundle/deploy/state_pull_test.go | 4 +- bundle/deploy/state_push_test.go | 2 +- bundle/deploy/state_update.go | 5 +- bundle/deploy/state_update_test.go | 2 +- .../check_dashboards_modified_remotely.go | 3 
+- .../terraform/check_running_resources.go | 3 +- bundle/deploy/terraform/convert_test.go | 74 +++++++++---------- bundle/deploy/terraform/init.go | 2 +- bundle/deploy/terraform/init_test.go | 6 +- bundle/deploy/terraform/interpolate.go | 3 +- bundle/deploy/terraform/pkg.go | 22 ++++-- bundle/deploy/terraform/pkg_test.go | 2 +- bundle/deploy/terraform/state_pull.go | 6 +- .../terraform/tfdyn/convert_cluster_test.go | 3 +- .../terraform/tfdyn/convert_dashboard.go | 2 +- .../terraform/tfdyn/convert_dashboard_test.go | 10 +-- .../tfdyn/convert_experiment_test.go | 2 +- .../terraform/tfdyn/convert_grants_test.go | 6 +- bundle/deploy/terraform/tfdyn/convert_job.go | 1 - .../terraform/tfdyn/convert_job_test.go | 2 +- .../convert_model_serving_endpoint_test.go | 2 +- .../terraform/tfdyn/convert_model_test.go | 2 +- .../tfdyn/convert_permissions_test.go | 6 +- .../terraform/tfdyn/convert_pipeline_test.go | 2 +- .../tfdyn/convert_quality_monitor_test.go | 2 +- .../tfdyn/convert_registered_model_test.go | 2 +- .../terraform/tfdyn/convert_schema_test.go | 2 +- .../terraform/tfdyn/convert_volume_test.go | 2 +- bundle/deploy/terraform/tfdyn/rename_keys.go | 3 +- bundle/deploy/terraform/unbind.go | 2 +- bundle/internal/schema/main.go | 5 +- bundle/libraries/expand_glob_references.go | 5 +- bundle/libraries/filer_volume_test.go | 6 +- bundle/libraries/upload.go | 3 - bundle/permissions/filter.go | 1 - bundle/permissions/filter_test.go | 1 - bundle/permissions/mutator.go | 73 +++++++++--------- bundle/permissions/validate.go | 3 +- .../permissions/workspace_path_permissions.go | 2 +- .../workspace_path_permissions_test.go | 1 - bundle/permissions/workspace_root.go | 3 +- bundle/phases/bind.go | 2 +- bundle/render/render_text_output.go | 2 +- bundle/render/render_text_output_test.go | 3 +- bundle/root_test.go | 2 +- bundle/run/job.go | 1 - bundle/run/job_args.go | 2 +- bundle/run/job_options.go | 2 +- bundle/run/output/task.go | 16 ++-- bundle/run/pipeline.go | 5 +- bundle/run/progress/pipeline.go | 2 +- bundle/tests/run_as_test.go | 2 - bundle/tests/variables_test.go | 2 +- bundle/trampoline/python_dbr_warning.go | 4 +- bundle/trampoline/python_wheel_test.go | 3 +- bundle/trampoline/trampoline.go | 2 +- bundle/trampoline/trampoline_test.go | 3 +- cmd/api/api.go | 2 +- cmd/auth/describe.go | 1 - cmd/auth/login.go | 6 +- cmd/bundle/generate/dashboard.go | 6 +- cmd/bundle/generate/generate_test.go | 2 +- cmd/bundle/generate/utils.go | 4 +- cmd/configure/configure_test.go | 4 +- cmd/labs/github/github.go | 12 ++- cmd/labs/localcache/jsonfile.go | 6 +- cmd/labs/project/entrypoint.go | 10 ++- cmd/labs/project/installer_test.go | 6 +- cmd/root/auth.go | 8 +- cmd/root/auth_test.go | 12 +-- cmd/root/bundle_test.go | 6 +- cmd/root/root.go | 3 +- cmd/root/user_agent_upstream.go | 12 ++- cmd/sync/sync.go | 1 - cmd/workspace/workspace/export_dir.go | 2 +- cmd/workspace/workspace/overrides.go | 3 +- internal/build/variables.go | 34 +++++---- internal/bundle/artifacts_test.go | 2 +- internal/bundle/clusters_test.go | 1 - internal/bundle/deploy_test.go | 1 - internal/bundle/deployment_state_test.go | 8 +- internal/bundle/generate_pipeline_test.go | 2 +- internal/bundle/helpers.go | 8 +- internal/bundle/python_wheel_test.go | 2 +- internal/bundle/spark_jar_test.go | 2 +- internal/dashboard_assumptions_test.go | 2 +- internal/filer_test.go | 5 +- internal/fs_rm_test.go | 1 - internal/git_fetch_test.go | 12 +-- internal/helpers.go | 5 +- internal/init_test.go | 8 +- internal/sync_test.go | 6 +- internal/testutil/copy.go | 
2 +- internal/testutil/file.go | 8 +- internal/workspace_test.go | 4 +- libs/cmdio/io.go | 2 +- libs/cmdio/logger.go | 3 +- libs/databrickscfg/cfgpickers/clusters.go | 6 +- libs/dyn/convert/from_typed_test.go | 70 +++++++++--------- libs/dyn/convert/struct_info.go | 4 +- libs/dyn/convert/struct_info_test.go | 4 +- libs/dyn/convert/to_typed_test.go | 20 ++--- libs/dyn/dynassert/assert.go | 10 +-- libs/dyn/dynassert/assert_test.go | 2 +- libs/dyn/mapping.go | 2 +- libs/dyn/merge/merge.go | 1 + libs/dyn/merge/merge_test.go | 1 - libs/dyn/merge/override.go | 18 ++--- libs/dyn/merge/override_test.go | 2 +- libs/dyn/value_test.go | 6 +- libs/dyn/visit_map_test.go | 8 +- libs/dyn/yamlloader/loader.go | 4 +- libs/dyn/yamlloader/yaml_spec_test.go | 3 +- libs/dyn/yamlsaver/saver.go | 2 +- libs/dyn/yamlsaver/saver_test.go | 44 +++++------ libs/exec/exec.go | 8 +- libs/exec/shell.go | 2 +- libs/filer/filer.go | 3 +- libs/filer/local_client.go | 7 +- libs/filer/slice_test.go | 3 +- .../workspace_files_extensions_client.go | 3 +- .../workspace_files_extensions_client_test.go | 5 +- libs/flags/json_flag_test.go | 2 +- libs/flags/log_file_flag.go | 2 +- libs/folders/folders.go | 2 +- libs/git/config_test.go | 4 +- libs/git/ignore_test.go | 2 +- libs/git/info.go | 1 - libs/git/reference.go | 12 ++- libs/git/repository_test.go | 2 +- libs/git/view.go | 2 +- libs/git/view_test.go | 4 +- libs/jsonschema/validate_type.go | 6 +- libs/locker/locker.go | 4 +- libs/log/context.go | 1 - libs/log/logger.go | 3 +- libs/log/sdk.go | 3 +- libs/notebook/detect.go | 2 +- libs/notebook/detect_jupyter_test.go | 6 +- libs/notebook/detect_test.go | 4 +- libs/python/detect_test.go | 6 +- libs/python/interpreters.go | 6 +- libs/python/interpreters_unix_test.go | 2 +- libs/sync/diff.go | 10 +-- libs/sync/event.go | 4 +- libs/sync/snapshot.go | 9 +-- libs/sync/snapshot_state.go | 1 - libs/sync/sync.go | 2 +- libs/tags/gcp_test.go | 1 - libs/template/file_test.go | 8 +- libs/template/helpers.go | 8 +- libs/template/helpers_test.go | 2 - libs/template/materialize.go | 8 +- libs/template/renderer_test.go | 28 ++++--- libs/textutil/textutil_test.go | 3 +- 197 files changed, 554 insertions(+), 565 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index 7598646d35..9e69e51463 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -9,6 +9,8 @@ linters: - staticcheck - unused - gofmt + - gofumpt + - goimports linters-settings: govet: enable-all: true @@ -27,5 +29,10 @@ linters-settings: - (*github.com/spf13/cobra.Command).MarkFlagRequired - (*github.com/spf13/pflag.FlagSet).MarkDeprecated - (*github.com/spf13/pflag.FlagSet).MarkHidden + gofumpt: + module-path: github.com/databricks/cli + extra-rules: true + #goimports: + # local-prefixes: github.com/databricks/cli issues: exclude-dirs-use-default: false # recommended by docs https://golangci-lint.run/usage/false-positives/ diff --git a/bundle/artifacts/all.go b/bundle/artifacts/all.go index 305193e2eb..768ccdfe3a 100644 --- a/bundle/artifacts/all.go +++ b/bundle/artifacts/all.go @@ -3,7 +3,6 @@ package artifacts import ( "context" "fmt" - "slices" "github.com/databricks/cli/bundle" diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go index 569a480f00..c8d2356160 100644 --- a/bundle/artifacts/autodetect.go +++ b/bundle/artifacts/autodetect.go @@ -13,8 +13,7 @@ func DetectPackages() bundle.Mutator { return &autodetect{} } -type autodetect struct { -} +type autodetect struct{} func (m *autodetect) Name() string { return "artifacts.DetectPackages" diff --git 
a/bundle/artifacts/expand_globs.go b/bundle/artifacts/expand_globs.go index cdf3d45900..7d44db0be9 100644 --- a/bundle/artifacts/expand_globs.go +++ b/bundle/artifacts/expand_globs.go @@ -96,7 +96,6 @@ func (m *expandGlobs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost // Set the expanded globs back into the configuration. return dyn.SetByPath(v, base, dyn.V(output)) }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index 3d17a698c0..202ea12bcc 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -15,8 +15,7 @@ import ( "github.com/databricks/cli/libs/log" ) -type detectPkg struct { -} +type detectPkg struct{} func DetectPackage() bundle.Mutator { return &detectPkg{} diff --git a/bundle/bundle.go b/bundle/bundle.go index 76c87c24cd..573bcef2f2 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -186,7 +186,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) // Make directory if it doesn't exist yet. dir := filepath.Join(parts...) - err := os.MkdirAll(dir, 0700) + err := os.MkdirAll(dir, 0o700) if err != nil { return "", err } @@ -203,7 +203,7 @@ func (b *Bundle) InternalDir(ctx context.Context) (string, error) { } dir := filepath.Join(cacheDir, internalFolder) - err = os.MkdirAll(dir, 0700) + err = os.MkdirAll(dir, 0o700) if err != nil { return dir, err } diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 061bbdae06..4c787168f8 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -47,8 +47,10 @@ type PyDABs struct { Import []string `json:"import,omitempty"` } -type Command string -type ScriptHook string +type ( + Command string + ScriptHook string +) // These hook names are subject to change and currently experimental const ( diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 6cd7c1b328..0cdcbf3ad4 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -6,8 +6,10 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -var jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"}) -var taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"}) +var ( + jobOrder = yamlsaver.NewOrder([]string{"name", "job_clusters", "compute", "tasks"}) + taskOrder = yamlsaver.NewOrder([]string{"task_key", "depends_on", "existing_cluster_id", "new_cluster", "job_cluster_key"}) +) func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { value := make(map[string]dyn.Value) diff --git a/bundle/config/loader/process_root_includes.go b/bundle/config/loader/process_root_includes.go index c14fb7ce15..c608a3de61 100644 --- a/bundle/config/loader/process_root_includes.go +++ b/bundle/config/loader/process_root_includes.go @@ -27,7 +27,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. var out []bundle.Mutator // Map with files we've already seen to avoid loading them twice. 
- var seen = map[string]bool{} + seen := map[string]bool{} for _, file := range config.FileNames { seen[file] = true diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index 91d5b62e5f..c26f203832 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -481,5 +481,4 @@ func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) }) } - } diff --git a/bundle/config/mutator/compute_id_compat.go b/bundle/config/mutator/compute_id_compat.go index 3afe02e9e8..8f1ff5868e 100644 --- a/bundle/config/mutator/compute_id_compat.go +++ b/bundle/config/mutator/compute_id_compat.go @@ -42,7 +42,6 @@ func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagn var diags diag.Diagnostics computeIdPath := p.Append(dyn.Key("compute_id")) computeId, err := dyn.GetByPath(v, computeIdPath) - // If the "compute_id" key is not set, we don't need to do anything. if err != nil { return v, nil diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index 9f70b74ae4..7cf3c9f3e3 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -17,7 +17,7 @@ import ( ) func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/bundle/config/mutator/initialize_urls.go b/bundle/config/mutator/initialize_urls.go index 4cf7ed72c0..35ff53d0b6 100644 --- a/bundle/config/mutator/initialize_urls.go +++ b/bundle/config/mutator/initialize_urls.go @@ -10,8 +10,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -type initializeURLs struct { -} +type initializeURLs struct{} // InitializeURLs makes sure the URL field of each resource is configured. // NOTE: since this depends on an extra API call, this mutator adds some extra @@ -39,7 +38,7 @@ func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn return nil } -func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error { +func initializeForWorkspace(b *bundle.Bundle, orgId, host string) error { baseURL, err := url.Parse(host) if err != nil { return err diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index f3a080ee80..3433034025 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -23,7 +23,7 @@ func (m *overrideCompute) Name() string { func overrideJobCompute(j *resources.Job, compute string) { for i := range j.Tasks { - var task = &j.Tasks[i] + task := &j.Tasks[i] if task.ForEachTask != nil { task = &task.ForEachTask.Task diff --git a/bundle/config/mutator/paths/job_paths_visitor.go b/bundle/config/mutator/paths/job_paths_visitor.go index 275a8fa536..1d713aaf55 100644 --- a/bundle/config/mutator/paths/job_paths_visitor.go +++ b/bundle/config/mutator/paths/job_paths_visitor.go @@ -95,7 +95,7 @@ func jobRewritePatterns() []jobRewritePattern { // VisitJobPaths visits all paths in job resources and applies a function to each path. 
func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) { var err error - var newValue = value + newValue := value for _, rewritePattern := range jobRewritePatterns() { newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { @@ -105,7 +105,6 @@ func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) { return fn(p, rewritePattern.kind, v) }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/config/mutator/prepend_workspace_prefix.go b/bundle/config/mutator/prepend_workspace_prefix.go index e0be2572d5..b093ec26a9 100644 --- a/bundle/config/mutator/prepend_workspace_prefix.go +++ b/bundle/config/mutator/prepend_workspace_prefix.go @@ -57,14 +57,12 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil }) - if err != nil { return dyn.InvalidValue, err } } return v, nil }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/python/python_diagnostics_test.go b/bundle/config/mutator/python/python_diagnostics_test.go index b73b0f73cd..fd6def8dad 100644 --- a/bundle/config/mutator/python/python_diagnostics_test.go +++ b/bundle/config/mutator/python/python_diagnostics_test.go @@ -30,7 +30,6 @@ type parsePythonDiagnosticsTest struct { } func TestParsePythonDiagnostics(t *testing.T) { - testCases := []parsePythonDiagnosticsTest{ { name: "short error with location", diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index da6c4d2102..69c1a5dd6b 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -9,12 +9,11 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/databricks/databricks-sdk-go/logger" "github.com/fatih/color" - "strings" - "github.com/databricks/cli/libs/python" "github.com/databricks/cli/bundle/env" @@ -94,11 +93,10 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' var mutateDiags diag.Diagnostics - var mutateDiagsHasError = errors.New("unexpected error") + mutateDiagsHasError := errors.New("unexpected error") err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath) - if err != nil { return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err) } @@ -141,7 +139,7 @@ func createCacheDir(ctx context.Context) (string, error) { // use 'default' as target name cacheDir := filepath.Join(tempDir, "default", "pydabs") - err := os.MkdirAll(cacheDir, 0700) + err := os.MkdirAll(cacheDir, 0o700) if err != nil { return "", err } @@ -152,7 +150,7 @@ func createCacheDir(ctx context.Context) (string, error) { return os.MkdirTemp("", "-pydabs") } -func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { +func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir, rootPath, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { inputPath := filepath.Join(cacheDir, "input.json") outputPath := filepath.Join(cacheDir, "output.json") diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") @@ -263,10 +261,10 @@ func writeInputFile(inputPath string, input dyn.Value) error { return 
fmt.Errorf("failed to marshal input: %w", err) } - return os.WriteFile(inputPath, rootConfigJson, 0600) + return os.WriteFile(inputPath, rootConfigJson, 0o600) } -func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) { +func loadOutputFile(rootPath, outputPath string) (dyn.Value, diag.Diagnostics) { outputFile, err := os.Open(outputPath) if err != nil { return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err)) @@ -381,7 +379,7 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return right, nil }, - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) }, } @@ -430,7 +428,7 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return right, nil }, - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { if !valuePath.HasPrefix(jobsPath) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) } diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 7a419d7991..0c6df98334 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -106,7 +106,6 @@ func TestPythonMutator_load(t *testing.T) { Column: 5, }, }, diags[0].Locations) - } func TestPythonMutator_load_disallowed(t *testing.T) { @@ -588,7 +587,7 @@ or activate the environment before running CLI commands: assert.Equal(t, expected, out) } -func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context { +func withProcessStub(t *testing.T, args []string, output, diagnostics string) context.Context { ctx := context.Background() ctx, stub := process.WithStub(ctx) @@ -611,10 +610,10 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str assert.NoError(t, err) if reflect.DeepEqual(actual.Args, args) { - err := os.WriteFile(outputPath, []byte(output), 0600) + err := os.WriteFile(outputPath, []byte(output), 0o600) require.NoError(t, err) - err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600) + err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0o600) require.NoError(t, err) return nil @@ -626,7 +625,7 @@ func withProcessStub(t *testing.T, args []string, output string, diagnostics str return ctx } -func loadYaml(name string, content string) *bundle.Bundle { +func loadYaml(name, content string) *bundle.Bundle { v, diag := config.LoadFromBytes(name, []byte(content)) if diag.Error() != nil { @@ -650,17 +649,17 @@ func withFakeVEnv(t *testing.T, venvPath string) { interpreterPath := interpreterPath(venvPath) - err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) + err = os.MkdirAll(filepath.Dir(interpreterPath), 0o755) if err != nil { panic(err) } - err = os.WriteFile(interpreterPath, []byte(""), 0755) + err = os.WriteFile(interpreterPath, []byte(""), 0o755) if err != nil { panic(err) } - err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755) + err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0o755) if err != nil { panic(err) } diff --git a/bundle/config/mutator/resolve_variable_references.go 
b/bundle/config/mutator/resolve_variable_references.go index 5e5b761090..8c207e375e 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -32,11 +32,12 @@ func ResolveVariableReferencesInLookup() bundle.Mutator { } func ResolveVariableReferencesInComplexVariables() bundle.Mutator { - return &resolveVariableReferences{prefixes: []string{ - "bundle", - "workspace", - "variables", - }, + return &resolveVariableReferences{ + prefixes: []string{ + "bundle", + "workspace", + "variables", + }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")), lookupFn: lookupForComplexVariables, skipFn: skipResolvingInNonComplexVariables, @@ -173,7 +174,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) return dyn.InvalidValue, dynvar.ErrSkipResolution }) }) - if err != nil { return dyn.InvalidValue, err } @@ -184,7 +184,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diags = diags.Extend(normaliseDiags) return root, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/config/mutator/rewrite_workspace_prefix.go b/bundle/config/mutator/rewrite_workspace_prefix.go index 8a39ee8a12..0ccb3314b9 100644 --- a/bundle/config/mutator/rewrite_workspace_prefix.go +++ b/bundle/config/mutator/rewrite_workspace_prefix.go @@ -63,7 +63,6 @@ func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di return v, nil }) }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/rewrite_workspace_prefix_test.go b/bundle/config/mutator/rewrite_workspace_prefix_test.go index d75ec89dbd..48973a4cf1 100644 --- a/bundle/config/mutator/rewrite_workspace_prefix_test.go +++ b/bundle/config/mutator/rewrite_workspace_prefix_test.go @@ -81,5 +81,4 @@ func TestNoWorkspacePrefixUsed(t *testing.T) { require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar) require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath) require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar) - } diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 0ca71e28e6..7ffd782c2b 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -12,8 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -type setRunAs struct { -} +type setRunAs struct{} // This mutator does two things: // @@ -30,7 +29,7 @@ func (m *setRunAs) Name() string { return "SetRunAs" } -func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics { +func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser, runAsUser string) diag.Diagnostics { return diag.Diagnostics{{ Summary: fmt.Sprintf("%s do not support a setting a run_as user that is different from the owner.\n"+ "Current identity: %s. 
Run as identity: %s.\n"+ diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 47ce2ad03a..9e9f2dcfef 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -65,7 +65,6 @@ func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, // We should have had a value to set for the variable at this point. return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) - } func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { diff --git a/bundle/config/mutator/sync_infer_root.go b/bundle/config/mutator/sync_infer_root.go index 512adcdbf6..160fcc908a 100644 --- a/bundle/config/mutator/sync_infer_root.go +++ b/bundle/config/mutator/sync_infer_root.go @@ -35,7 +35,7 @@ func (m *syncInferRoot) Name() string { // If the path does not exist, it returns an empty string. // // See "sync_infer_root_internal_test.go" for examples. -func (m *syncInferRoot) computeRoot(path string, root string) string { +func (m *syncInferRoot) computeRoot(path, root string) string { for !filepath.IsLocal(path) { // Break if we have reached the root of the filesystem. dir := filepath.Dir(root) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 5e016d8a1e..af0f941201 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -275,8 +275,8 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos } func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) { - var fallback = make(map[string]string) - var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) + fallback := make(map[string]string) + pattern := dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) // Previous behavior was to use a resource's location as the base path to resolve // relative paths in its definition. With the introduction of [dyn.Value] throughout, diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 8442ff6cf5..493abb8c5f 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -34,7 +34,7 @@ func touchNotebookFile(t *testing.T, path string) { } func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go index 279af44e6a..873e4f7806 100644 --- a/bundle/config/mutator/verify_cli_version.go +++ b/bundle/config/mutator/verify_cli_version.go @@ -15,8 +15,7 @@ func VerifyCliVersion() bundle.Mutator { return &verifyCliVersion{} } -type verifyCliVersion struct { -} +type verifyCliVersion struct{} func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // No constraints specified, skip the check. 
diff --git a/bundle/config/presets.go b/bundle/config/presets.go index 30f56c0f85..252c5b5f74 100644 --- a/bundle/config/presets.go +++ b/bundle/config/presets.go @@ -1,7 +1,9 @@ package config -const Paused = "PAUSED" -const Unpaused = "UNPAUSED" +const ( + Paused = "PAUSED" + Unpaused = "UNPAUSED" +) type Presets struct { // NamePrefix to prepend to all resource names. diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index fc1c148c32..42fae49d98 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -168,7 +168,6 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) { "key1": "value1", }, root.Variables["complex"].Default) assert.Equal(t, "complex var", root.Variables["complex"].Description) - } func TestIsFullVariableOverrideDef(t *testing.T) { @@ -252,5 +251,4 @@ func TestIsFullVariableOverrideDef(t *testing.T) { for i, tc := range testCases { assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i) } - } diff --git a/bundle/config/validate/files_to_sync.go b/bundle/config/validate/files_to_sync.go index a142784828..b4de067730 100644 --- a/bundle/config/validate/files_to_sync.go +++ b/bundle/config/validate/files_to_sync.go @@ -13,8 +13,7 @@ func FilesToSync() bundle.ReadOnlyMutator { return &filesToSync{} } -type filesToSync struct { -} +type filesToSync struct{} func (v *filesToSync) Name() string { return "validate:files_to_sync" diff --git a/bundle/config/validate/folder_permissions.go b/bundle/config/validate/folder_permissions.go index 505e82a1e5..aa89a05519 100644 --- a/bundle/config/validate/folder_permissions.go +++ b/bundle/config/validate/folder_permissions.go @@ -15,8 +15,7 @@ import ( "golang.org/x/sync/errgroup" ) -type folderPermissions struct { -} +type folderPermissions struct{} // Apply implements bundle.ReadOnlyMutator. 
func (f *folderPermissions) Apply(ctx context.Context, b bundle.ReadOnlyBundle) diag.Diagnostics { diff --git a/bundle/config/validate/job_cluster_key_defined.go b/bundle/config/validate/job_cluster_key_defined.go index 368c3edb13..c3a1ab3df3 100644 --- a/bundle/config/validate/job_cluster_key_defined.go +++ b/bundle/config/validate/job_cluster_key_defined.go @@ -13,8 +13,7 @@ func JobClusterKeyDefined() bundle.ReadOnlyMutator { return &jobClusterKeyDefined{} } -type jobClusterKeyDefined struct { -} +type jobClusterKeyDefined struct{} func (v *jobClusterKeyDefined) Name() string { return "validate:job_cluster_key_defined" diff --git a/bundle/config/validate/job_task_cluster_spec.go b/bundle/config/validate/job_task_cluster_spec.go index b80befcdff..5f532acfe6 100644 --- a/bundle/config/validate/job_task_cluster_spec.go +++ b/bundle/config/validate/job_task_cluster_spec.go @@ -17,8 +17,7 @@ func JobTaskClusterSpec() bundle.ReadOnlyMutator { return &jobTaskClusterSpec{} } -type jobTaskClusterSpec struct { -} +type jobTaskClusterSpec struct{} func (v *jobTaskClusterSpec) Name() string { return "validate:job_task_cluster_spec" diff --git a/bundle/config/validate/single_node_cluster_test.go b/bundle/config/validate/single_node_cluster_test.go index 18771cc00d..c3ead8ef63 100644 --- a/bundle/config/validate/single_node_cluster_test.go +++ b/bundle/config/validate/single_node_cluster_test.go @@ -175,7 +175,6 @@ func TestValidateSingleNodeClusterFailForJobClusters(t *testing.T) { Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")}, }, }, diags) - }) } } diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go index eb4c3c3cd2..131566fc90 100644 --- a/bundle/config/validate/validate.go +++ b/bundle/config/validate/validate.go @@ -8,8 +8,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -type validate struct { -} +type validate struct{} type location struct { path string diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index 52f06835c4..f5787a81d2 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -17,8 +17,7 @@ func ValidateSyncPatterns() bundle.ReadOnlyMutator { return &validateSyncPatterns{} } -type validateSyncPatterns struct { -} +type validateSyncPatterns struct{} func (v *validateSyncPatterns) Name() string { return "validate:validate_sync_patterns" diff --git a/bundle/config/variable/lookup_test.go b/bundle/config/variable/lookup_test.go index a847487512..bd54d89fcd 100644 --- a/bundle/config/variable/lookup_test.go +++ b/bundle/config/variable/lookup_test.go @@ -42,7 +42,6 @@ func TestLookup_Empty(t *testing.T) { // No string representation for an invalid lookup assert.Empty(t, lookup.String()) - } func TestLookup_Multiple(t *testing.T) { diff --git a/bundle/config/variable/resolve_cluster.go b/bundle/config/variable/resolve_cluster.go index 2d68b7fb7b..a8cf3fe7f4 100644 --- a/bundle/config/variable/resolve_cluster.go +++ b/bundle/config/variable/resolve_cluster.go @@ -20,7 +20,6 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi}, }, }) - if err != nil { return "", err } diff --git a/bundle/deferred.go b/bundle/deferred.go index 56c2bdca21..e7e0c2aeb0 100644 --- a/bundle/deferred.go +++ b/bundle/deferred.go @@ -15,7 +15,7 @@ func (d *DeferredMutator) Name() string { 
return "deferred" } -func Defer(mutator Mutator, finally Mutator) Mutator { +func Defer(mutator, finally Mutator) Mutator { return &DeferredMutator{ mutator: mutator, finally: finally, diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index 4f2bc4ee4c..a131ab9c3f 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -15,8 +15,10 @@ import ( "github.com/google/uuid" ) -const DeploymentStateFileName = "deployment.json" -const DeploymentStateVersion = 1 +const ( + DeploymentStateFileName = "deployment.json" + DeploymentStateVersion = 1 +) type File struct { LocalPath string `json:"local_path"` @@ -132,7 +134,7 @@ func (f Filelist) ToSlice(root vfs.Path) []fileset.File { return files } -func isLocalStateStale(local io.Reader, remote io.Reader) bool { +func isLocalStateStale(local, remote io.Reader) bool { localState, err := loadState(local) if err != nil { return true diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 96f138e9d3..8fffca073d 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -44,7 +44,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic return diag.FromErr(err) } - local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600) + local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 42701eb26f..36c49fb01c 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -99,7 +99,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { snapshotPath, err := sync.SnapshotPath(opts) require.NoError(t, err) - err = os.WriteFile(snapshotPath, []byte("snapshot"), 0644) + err = os.WriteFile(snapshotPath, []byte("snapshot"), 0o644) require.NoError(t, err) } @@ -110,7 +110,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { data, err := json.Marshal(opts.localState) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) } diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go index 038b753412..3562ec147e 100644 --- a/bundle/deploy/state_push_test.go +++ b/bundle/deploy/state_push_test.go @@ -74,7 +74,7 @@ func TestStatePush(t *testing.T) { data, err := json.Marshal(state) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) diags := bundle.Apply(ctx, b, s) diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 9ab1bacf13..5488d50ed3 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -17,8 +17,7 @@ import ( "github.com/google/uuid" ) -type stateUpdate struct { -} +type stateUpdate struct{} func (s *stateUpdate) Name() string { return "deploy:state-update" @@ -57,7 +56,7 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost return diag.FromErr(err) } // Write the state back to the file. 
- f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600) + f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o600) if err != nil { log.Infof(ctx, "Unable to open deployment state file: %s", err) return diag.FromErr(err) diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index 1f5010b521..e561f534ea 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -119,7 +119,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { data, err := json.Marshal(state) require.NoError(t, err) - err = os.WriteFile(statePath, data, 0644) + err = os.WriteFile(statePath, data, 0o644) require.NoError(t, err) diags := bundle.Apply(ctx, b, s) diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely.go b/bundle/deploy/terraform/check_dashboards_modified_remotely.go index c884bcb9b4..f263e8a7fe 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely.go @@ -42,8 +42,7 @@ func collectDashboardsFromState(ctx context.Context, b *bundle.Bundle) ([]dashbo return dashboards, nil } -type checkDashboardsModifiedRemotely struct { -} +type checkDashboardsModifiedRemotely struct{} func (l *checkDashboardsModifiedRemotely) Name() string { return "CheckDashboardsModifiedRemotely" diff --git a/bundle/deploy/terraform/check_running_resources.go b/bundle/deploy/terraform/check_running_resources.go index 737f773e56..5b3a704083 100644 --- a/bundle/deploy/terraform/check_running_resources.go +++ b/bundle/deploy/terraform/check_running_resources.go @@ -23,8 +23,7 @@ func (e ErrResourceIsRunning) Error() string { return fmt.Sprintf("%s %s is running", e.resourceType, e.resourceId) } -type checkRunningResources struct { -} +type checkRunningResources struct{} func (l *checkRunningResources) Name() string { return "check-running-resources" diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 076d9b7a05..61f26c0886 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -43,7 +43,7 @@ func convertToResourceStruct[T any](t *testing.T, resource *T, data any) { } func TestBundleToTerraformJob(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", JobClusters: []jobs.JobCluster{ @@ -71,7 +71,7 @@ func TestBundleToTerraformJob(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -93,7 +93,7 @@ func TestBundleToTerraformJob(t *testing.T) { } func TestBundleToTerraformJobPermissions(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -102,7 +102,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -121,7 +121,7 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { } func TestBundleToTerraformJobTaskLibraries(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", Tasks: []jobs.Task{ @@ -139,7 +139,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -158,7 
+158,7 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { } func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", Tasks: []jobs.Task{ @@ -182,7 +182,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job": &src, @@ -201,7 +201,7 @@ func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { } func TestBundleToTerraformPipeline(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ Name: "my pipeline", Libraries: []pipelines.PipelineLibrary{ @@ -239,7 +239,7 @@ func TestBundleToTerraformPipeline(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "my_pipeline": &src, @@ -262,7 +262,7 @@ func TestBundleToTerraformPipeline(t *testing.T) { } func TestBundleToTerraformPipelinePermissions(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -271,7 +271,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "my_pipeline": &src, @@ -290,7 +290,7 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { } func TestBundleToTerraformModel(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", Description: "description", @@ -307,7 +307,7 @@ func TestBundleToTerraformModel(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Models: map[string]*resources.MlflowModel{ "my_model": &src, @@ -330,7 +330,7 @@ func TestBundleToTerraformModel(t *testing.T) { } func TestBundleToTerraformModelPermissions(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", }, @@ -342,7 +342,7 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Models: map[string]*resources.MlflowModel{ "my_model": &src, @@ -361,13 +361,13 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { } func TestBundleToTerraformExperiment(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Experiments: map[string]*resources.MlflowExperiment{ "my_experiment": &src, @@ -384,7 +384,7 @@ func TestBundleToTerraformExperiment(t *testing.T) { } func TestBundleToTerraformExperimentPermissions(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, @@ -396,7 +396,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Experiments: map[string]*resources.MlflowExperiment{ "my_experiment": &src, @@ -415,7 +415,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { } func TestBundleToTerraformModelServing(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := 
resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", Config: serving.EndpointCoreConfigInput{ @@ -439,7 +439,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "my_model_serving_endpoint": &src, @@ -462,7 +462,7 @@ func TestBundleToTerraformModelServing(t *testing.T) { } func TestBundleToTerraformModelServingPermissions(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", @@ -492,7 +492,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ "my_model_serving_endpoint": &src, @@ -511,7 +511,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { } func TestBundleToTerraformRegisteredModel(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", @@ -520,7 +520,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ RegisteredModels: map[string]*resources.RegisteredModel{ "my_registered_model": &src, @@ -540,7 +540,7 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { } func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", @@ -554,7 +554,7 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { }, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ RegisteredModels: map[string]*resources.RegisteredModel{ "my_registered_model": &src, @@ -573,14 +573,14 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { } func TestBundleToTerraformDeletedResources(t *testing.T) { - var job1 = resources.Job{ + job1 := resources.Job{ JobSettings: &jobs.JobSettings{}, } - var job2 = resources.Job{ + job2 := resources.Job{ ModifiedStatus: resources.ModifiedStatusDeleted, JobSettings: &jobs.JobSettings{}, } - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my_job1": &job1, @@ -601,10 +601,10 @@ func TestBundleToTerraformDeletedResources(t *testing.T) { } func TestTerraformToBundleEmptyLocalResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{}, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: []stateResource{ { Type: "databricks_job", @@ -736,7 +736,7 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { } func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test_job": { @@ -817,7 +817,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: nil, } err := TerraformToBundle(&tfState, &config) @@ -860,7 +860,7 @@ func TestTerraformToBundleEmptyRemoteResources(t 
*testing.T) { } func TestTerraformToBundleModifiedResources(t *testing.T) { - var config = config.Root{ + config := config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test_job": { @@ -996,7 +996,7 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, } - var tfState = resourcesState{ + tfState := resourcesState{ Resources: []stateResource{ { Type: "databricks_job", diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 7d75ee8a81..366f0be8c0 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -145,7 +145,7 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { // This function is used for env vars set by the Databricks VSCode extension. The variables are intended to be used by the CLI // bundled with the Databricks VSCode extension, but users can use different CLI versions in the VSCode terminals, in which case we want to ignore // the variables if that CLI uses different versions of the dependencies. -func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) { +func getEnvVarWithMatchingVersion(ctx context.Context, envVarName, versionVarName, currentVersion string) (string, error) { envValue := env.Get(ctx, envVarName) versionValue := env.Get(ctx, versionVarName) diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index e3621c6c33..a1ffc5a1ab 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -400,7 +400,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) { require.Equal(t, tmpBinPath, b.Config.Bundle.Terraform.ExecPath) } -func createTempFile(t *testing.T, dest string, name string, executable bool) string { +func createTempFile(t *testing.T, dest, name string, executable bool) string { binPath := filepath.Join(dest, name) f, err := os.Create(binPath) require.NoError(t, err) @@ -409,7 +409,7 @@ func createTempFile(t *testing.T, dest string, name string, executable bool) str require.NoError(t, err) }() if executable { - err = f.Chmod(0777) + err = f.Chmod(0o777) require.NoError(t, err) } return binPath @@ -422,7 +422,7 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) { tmp := t.TempDir() file := testutil.Touch(t, tmp, "bar") - var tc = []struct { + tc := []struct { envValue string versionValue string currentVersion string diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 9c2126aeca..813e6bbb7a 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -10,8 +10,7 @@ import ( "github.com/databricks/cli/libs/dyn/dynvar" ) -type interpolateMutator struct { -} +type interpolateMutator struct{} func Interpolate() bundle.Mutator { return &interpolateMutator{} diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index cad7540240..bd636639fb 100644 --- a/bundle/deploy/terraform/pkg.go +++ b/bundle/deploy/terraform/pkg.go @@ -5,15 +5,19 @@ import ( "github.com/hashicorp/go-version" ) -const TerraformStateFileName = "terraform.tfstate" -const TerraformConfigFileName = "bundle.tf.json" +const ( + TerraformStateFileName = "terraform.tfstate" + TerraformConfigFileName = "bundle.tf.json" +) // Users can provide their own terraform binary and databricks terraform provider by setting the following environment variables. 
// This allows users to use the CLI in an air-gapped environments. See the `debug terraform` command. -const TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" -const TerraformVersionEnv = "DATABRICKS_TF_VERSION" -const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" -const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +const ( + TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" + TerraformVersionEnv = "DATABRICKS_TF_VERSION" + TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" + TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +) // Terraform CLI version to use and the corresponding checksums for it. The // checksums are used to verify the integrity of the downloaded binary. Please @@ -26,8 +30,10 @@ const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" // downloaded Terraform archive. var TerraformVersion = version.Must(version.NewVersion("1.5.5")) -const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" -const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" +const ( + checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" + checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" +) type Checksum struct { LinuxArm64 string `json:"linux_arm64"` diff --git a/bundle/deploy/terraform/pkg_test.go b/bundle/deploy/terraform/pkg_test.go index b8dcb9e089..08ec3de758 100644 --- a/bundle/deploy/terraform/pkg_test.go +++ b/bundle/deploy/terraform/pkg_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) { +func downloadAndChecksum(t *testing.T, url, expectedChecksum string) { resp, err := http.Get(url) require.NoError(t, err) defer resp.Body.Close() diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 9a5b910076..4e1e2b1c57 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -104,7 +104,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic localState, err := l.localState(ctx, b) if errors.Is(err, fs.ErrNotExist) { log.Infof(ctx, "Local state file does not exist. Using remote Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } if err != nil { @@ -114,14 +114,14 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic // If the lineage does not match, the Terraform state files do not correspond to the same deployment. if localState.Lineage != remoteState.Lineage { log.Infof(ctx, "Remote and local state lineages do not match. Using remote Terraform state. Invalidating local Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } // If the remote state is newer than the local state, we should use the remote state. if remoteState.Serial > localState.Serial { log.Infof(ctx, "Remote state is newer than local state. 
Using remote Terraform state.") - err := os.WriteFile(localStatePath, remoteContent, 0600) + err := os.WriteFile(localStatePath, remoteContent, 0o600) return diag.FromErr(err) } diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go index e6d2620c6a..330720a7cc 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertCluster(t *testing.T) { - var src = resources.Cluster{ + src := resources.Cluster{ ClusterSpec: &compute.ClusterSpec{ NumWorkers: 3, SparkVersion: "13.3.x-scala2.12", @@ -93,5 +93,4 @@ func TestConvertCluster(t *testing.T) { }, }, }, out.Permissions["cluster_my_cluster"]) - } diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard.go b/bundle/deploy/terraform/tfdyn/convert_dashboard.go index 3ba7e19a2d..3ec8b489f5 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard.go @@ -17,7 +17,7 @@ const ( ) // Marshal "serialized_dashboard" as JSON if it is set in the input but not in the output. -func marshalSerializedDashboard(vin dyn.Value, vout dyn.Value) (dyn.Value, error) { +func marshalSerializedDashboard(vin, vout dyn.Value) (dyn.Value, error) { // Skip if the "serialized_dashboard" field is already set. if v := vout.Get(serializedDashboardFieldName); v.IsValid() { return vout, nil diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go index 539ba21aab..6f5d365041 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertDashboard(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ Dashboard: &dashboards.Dashboard{ DisplayName: "my dashboard", WarehouseId: "f00dcafe", @@ -60,7 +60,7 @@ func TestConvertDashboard(t *testing.T) { } func TestConvertDashboardFilePath(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ FilePath: "some/path", } @@ -84,7 +84,7 @@ func TestConvertDashboardFilePath(t *testing.T) { } func TestConvertDashboardFilePathQuoted(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ FilePath: `C:\foo\bar\baz\dashboard.lvdash.json`, } @@ -108,7 +108,7 @@ func TestConvertDashboardFilePathQuoted(t *testing.T) { } func TestConvertDashboardSerializedDashboardString(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ SerializedDashboard: `{ "json": true }`, } @@ -127,7 +127,7 @@ func TestConvertDashboardSerializedDashboardString(t *testing.T) { } func TestConvertDashboardSerializedDashboardAny(t *testing.T) { - var src = resources.Dashboard{ + src := resources.Dashboard{ SerializedDashboard: map[string]any{ "pages": []map[string]any{ { diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go index 63add43681..3ef3963f26 100644 --- a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertExperiment(t *testing.T) { - var src = resources.MlflowExperiment{ + src := resources.MlflowExperiment{ Experiment: &ml.Experiment{ Name: "name", }, diff --git a/bundle/deploy/terraform/tfdyn/convert_grants_test.go b/bundle/deploy/terraform/tfdyn/convert_grants_test.go 
index a486bc36f3..0a263b493d 100644 --- a/bundle/deploy/terraform/tfdyn/convert_grants_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_grants_test.go @@ -13,7 +13,7 @@ import ( ) func TestConvertGrants(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: []resources.Grant{ { Privileges: []string{"EXECUTE", "FOO"}, @@ -45,7 +45,7 @@ func TestConvertGrants(t *testing.T) { } func TestConvertGrantsNil(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: nil, } @@ -58,7 +58,7 @@ func TestConvertGrantsNil(t *testing.T) { } func TestConvertGrantsEmpty(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ Grants: []resources.Grant{}, } diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index 8948e3bafd..bb2f8cd0fe 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -83,7 +83,6 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { "libraries": "library", }) }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/deploy/terraform/tfdyn/convert_job_test.go b/bundle/deploy/terraform/tfdyn/convert_job_test.go index 695b9ba249..c73e530d48 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_job_test.go @@ -15,7 +15,7 @@ import ( ) func TestConvertJob(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ JobSettings: &jobs.JobSettings{ Name: "my job", JobClusters: []jobs.JobCluster{ diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go index 63b75e9ab3..d46350bb71 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertModelServingEndpoint(t *testing.T) { - var src = resources.ModelServingEndpoint{ + src := resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", Config: serving.EndpointCoreConfigInput{ diff --git a/bundle/deploy/terraform/tfdyn/convert_model_test.go b/bundle/deploy/terraform/tfdyn/convert_model_test.go index 542caa878b..4c4e62c5b6 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertModel(t *testing.T) { - var src = resources.MlflowModel{ + src := resources.MlflowModel{ Model: &ml.Model{ Name: "name", Description: "description", diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go index ba389020f9..ba04384b57 100644 --- a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go @@ -13,7 +13,7 @@ import ( ) func TestConvertPermissions(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -59,7 +59,7 @@ func TestConvertPermissions(t *testing.T) { } func TestConvertPermissionsNil(t *testing.T) { - var src = resources.Job{ + src := resources.Job{ Permissions: nil, } @@ -72,7 +72,7 @@ func TestConvertPermissionsNil(t *testing.T) { } func TestConvertPermissionsEmpty(t *testing.T) { - var src = resources.Job{ + src := 
resources.Job{ Permissions: []resources.Permission{}, } diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go index 7010d463a8..0239bad188 100644 --- a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertPipeline(t *testing.T) { - var src = resources.Pipeline{ + src := resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ Name: "my pipeline", Libraries: []pipelines.PipelineLibrary{ diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go index f71abf43cc..16b30de71e 100644 --- a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertQualityMonitor(t *testing.T) { - var src = resources.QualityMonitor{ + src := resources.QualityMonitor{ TableName: "test_table_name", CreateMonitor: &catalog.CreateMonitor{ AssetsDir: "assets_dir", diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go index 77096e8d09..bf2a5ab640 100644 --- a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertRegisteredModel(t *testing.T) { - var src = resources.RegisteredModel{ + src := resources.RegisteredModel{ CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ Name: "name", CatalogName: "catalog", diff --git a/bundle/deploy/terraform/tfdyn/convert_schema_test.go b/bundle/deploy/terraform/tfdyn/convert_schema_test.go index 2efbf3e430..12822bb3ce 100644 --- a/bundle/deploy/terraform/tfdyn/convert_schema_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_schema_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertSchema(t *testing.T) { - var src = resources.Schema{ + src := resources.Schema{ CreateSchema: &catalog.CreateSchema{ Name: "name", CatalogName: "catalog", diff --git a/bundle/deploy/terraform/tfdyn/convert_volume_test.go b/bundle/deploy/terraform/tfdyn/convert_volume_test.go index c897ae69a8..09b69489e3 100644 --- a/bundle/deploy/terraform/tfdyn/convert_volume_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_volume_test.go @@ -14,7 +14,7 @@ import ( ) func TestConvertVolume(t *testing.T) { - var src = resources.Volume{ + src := resources.Volume{ CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{ CatalogName: "catalog", Comment: "comment", diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go index 650ffb8900..95904575f8 100644 --- a/bundle/deploy/terraform/tfdyn/rename_keys.go +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -11,7 +11,7 @@ import ( // definition uses the plural name. This function can convert between the two. func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { var err error - var acc = dyn.V(map[string]dyn.Value{}) + acc := dyn.V(map[string]dyn.Value{}) nv, err := dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { if len(p) == 0 { @@ -36,7 +36,6 @@ func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { // Pass through all other values. 
return v, dyn.ErrSkip }) - if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/deploy/terraform/unbind.go b/bundle/deploy/terraform/unbind.go index 49d65615ed..494cb7ef1f 100644 --- a/bundle/deploy/terraform/unbind.go +++ b/bundle/deploy/terraform/unbind.go @@ -37,6 +37,6 @@ func (*unbind) Name() string { return "terraform.Unbind" } -func Unbind(resourceType string, resourceKey string) bundle.Mutator { +func Unbind(resourceType, resourceKey string) bundle.Mutator { return &unbind{resourceType: resourceType, resourceKey: resourceKey} } diff --git a/bundle/internal/schema/main.go b/bundle/internal/schema/main.go index 881ce3496c..cc06f0bbee 100644 --- a/bundle/internal/schema/main.go +++ b/bundle/internal/schema/main.go @@ -48,7 +48,8 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema. { Type: jsonschema.StringType, Pattern: interpolationPattern("var"), - }}, + }, + }, } case jsonschema.IntegerType, jsonschema.NumberType, jsonschema.BooleanType: // primitives can have variable values, or references like ${bundle.xyz} @@ -149,7 +150,7 @@ func main() { } // Write the schema descriptions to the output file. - err = os.WriteFile(outputFile, b, 0644) + err = os.WriteFile(outputFile, b, 0o644) if err != nil { log.Fatal(err) } diff --git a/bundle/libraries/expand_glob_references.go b/bundle/libraries/expand_glob_references.go index c71615e0e2..bb19050452 100644 --- a/bundle/libraries/expand_glob_references.go +++ b/bundle/libraries/expand_glob_references.go @@ -11,8 +11,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -type expand struct { -} +type expand struct{} func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic { return diag.Diagnostic{ @@ -189,7 +188,6 @@ func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { diags = diags.Extend(d) return dyn.V(output), nil }) - if err != nil { return dyn.InvalidValue, err } @@ -197,7 +195,6 @@ func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return v, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/libraries/filer_volume_test.go b/bundle/libraries/filer_volume_test.go index 870dce97cc..7b2f5c5ba9 100644 --- a/bundle/libraries/filer_volume_test.go +++ b/bundle/libraries/filer_volume_test.go @@ -110,7 +110,8 @@ func TestFilerForVolumeForErrorFromAPI(t *testing.T) { Summary: "unable to determine if volume at /Volumes/main/my_schema/my_volume exists: error from API", Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}, diags) + }, + }, diags) } func TestFilerForVolumeWithVolumeNotFound(t *testing.T) { @@ -136,7 +137,8 @@ func TestFilerForVolumeWithVolumeNotFound(t *testing.T) { Summary: "volume /Volumes/main/my_schema/doesnotexist does not exist: some error message", Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}}, Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")}, - }}, diags) + }, + }, diags) } func TestFilerForVolumeNotFoundAndInBundle(t *testing.T) { diff --git a/bundle/libraries/upload.go b/bundle/libraries/upload.go index 4b6f43701b..a2162fb7b5 100644 --- a/bundle/libraries/upload.go +++ b/bundle/libraries/upload.go @@ -81,7 +81,6 @@ func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error return v, nil }) }) - if err != nil { return nil, err } @@ -119,7 +118,6 @@ func collectLocalLibraries(b *bundle.Bundle) 
(map[string][]configLocation, error return v, nil }) }) - if err != nil { return nil, err } @@ -175,7 +173,6 @@ func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return v, nil }) - if err != nil { diags = diags.Extend(diag.FromErr(err)) } diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go index 60264f6eae..6fa8d1374b 100644 --- a/bundle/permissions/filter.go +++ b/bundle/permissions/filter.go @@ -56,7 +56,6 @@ func filter(currentUser string) dyn.WalkValueFunc { } return v, nil - } } diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go index 121ce10dc9..e6e5a3799d 100644 --- a/bundle/permissions/filter_test.go +++ b/bundle/permissions/filter_test.go @@ -90,7 +90,6 @@ func testFixture(userName string) *bundle.Bundle { }, }, } - } func TestFilterCurrentUser(t *testing.T) { diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index c4b9708571..cd7cbf40c8 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -13,42 +13,46 @@ import ( "github.com/databricks/cli/libs/dyn/convert" ) -const CAN_MANAGE = "CAN_MANAGE" -const CAN_VIEW = "CAN_VIEW" -const CAN_RUN = "CAN_RUN" +const ( + CAN_MANAGE = "CAN_MANAGE" + CAN_VIEW = "CAN_VIEW" + CAN_RUN = "CAN_RUN" +) var unsupportedResources = []string{"clusters", "volumes", "schemas", "quality_monitors", "registered_models"} -var allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} -var levelsMap = map[string](map[string]string){ - "jobs": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_MANAGE_RUN", - }, - "pipelines": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_RUN", - }, - "experiments": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, - "models": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, - "model_serving_endpoints": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_VIEW", - CAN_RUN: "CAN_QUERY", - }, - "dashboards": { - CAN_MANAGE: "CAN_MANAGE", - CAN_VIEW: "CAN_READ", - }, -} +var ( + allowedLevels = []string{CAN_MANAGE, CAN_VIEW, CAN_RUN} + levelsMap = map[string](map[string]string){ + "jobs": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_MANAGE_RUN", + }, + "pipelines": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_RUN", + }, + "experiments": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "models": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + "model_serving_endpoints": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_VIEW", + CAN_RUN: "CAN_QUERY", + }, + "dashboards": { + CAN_MANAGE: "CAN_MANAGE", + CAN_VIEW: "CAN_READ", + }, + } +) type bundlePermissions struct{} @@ -76,7 +80,6 @@ func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Di v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { var permissions []resources.Permission pv, err := dyn.Get(v, "permissions") - // If the permissions field is not found, we set to an empty array if err != nil { pv = dyn.V([]dyn.Value{}) @@ -102,7 +105,6 @@ func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Di return dyn.Set(v, "permissions", pv) }) - if err != nil { return dyn.InvalidValue, err } @@ -110,7 +112,6 @@ func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Di return v, nil }) - if err != nil { return diag.FromErr(err) } diff --git a/bundle/permissions/validate.go b/bundle/permissions/validate.go index 
f1a18f4309..dee7326cfa 100644 --- a/bundle/permissions/validate.go +++ b/bundle/permissions/validate.go @@ -9,8 +9,7 @@ import ( "github.com/databricks/cli/libs/diag" ) -type validateSharedRootPermissions struct { -} +type validateSharedRootPermissions struct{} func ValidateSharedRootPermissions() bundle.Mutator { return &validateSharedRootPermissions{} diff --git a/bundle/permissions/workspace_path_permissions.go b/bundle/permissions/workspace_path_permissions.go index a3b4424c1c..225d2499e0 100644 --- a/bundle/permissions/workspace_path_permissions.go +++ b/bundle/permissions/workspace_path_permissions.go @@ -52,7 +52,7 @@ func (p WorkspacePathPermissions) Compare(perms []resources.Permission) diag.Dia } // containsAll checks if permA contains all permissions in permB. -func containsAll(permA []resources.Permission, permB []resources.Permission) (bool, []resources.Permission) { +func containsAll(permA, permB []resources.Permission) (bool, []resources.Permission) { missing := make([]resources.Permission, 0) for _, a := range permA { found := false diff --git a/bundle/permissions/workspace_path_permissions_test.go b/bundle/permissions/workspace_path_permissions_test.go index 0bb00474c8..eaefad906c 100644 --- a/bundle/permissions/workspace_path_permissions_test.go +++ b/bundle/permissions/workspace_path_permissions_test.go @@ -117,5 +117,4 @@ func TestWorkspacePathPermissionsCompare(t *testing.T) { diags := wp.Compare(tc.perms) require.Equal(t, tc.expected, diags) } - } diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index de4f3a7fe3..4ac0d38a5d 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -12,8 +12,7 @@ import ( "golang.org/x/sync/errgroup" ) -type workspaceRootPermissions struct { -} +type workspaceRootPermissions struct{} func ApplyWorkspaceRootPermissions() bundle.Mutator { return &workspaceRootPermissions{} diff --git a/bundle/phases/bind.go b/bundle/phases/bind.go index b2e92d6e2b..c62c48aea9 100644 --- a/bundle/phases/bind.go +++ b/bundle/phases/bind.go @@ -25,7 +25,7 @@ func Bind(opts *terraform.BindOptions) bundle.Mutator { ) } -func Unbind(resourceType string, resourceKey string) bundle.Mutator { +func Unbind(resourceType, resourceKey string) bundle.Mutator { return newPhase( "unbind", []bundle.Mutator{ diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index bf7c9a53a9..bacb85735a 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go @@ -110,7 +110,7 @@ func renderSummaryHeaderTemplate(out io.Writer, b *bundle.Bundle) error { return renderSummaryHeaderTemplate(out, &bundle.Bundle{}) } - var currentUser = &iam.User{} + currentUser := &iam.User{} if b.Config.Workspace.CurrentUser != nil { if b.Config.Workspace.CurrentUser.User != nil { diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index 2b7f0e8254..506756f708 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -376,7 +376,8 @@ func TestRenderDiagnostics(t *testing.T) { Locations: []dyn.Location{{ File: "foo.yaml", Line: 1, - Column: 2}}, + Column: 2, + }}, }, }, expected: "Error: failed to load xxx\n" + diff --git a/bundle/root_test.go b/bundle/root_test.go index 99bf58a00a..0752427109 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -71,7 +71,7 @@ func TestRootLookup(t *testing.T) { defer f.Close() // Create directory tree. 
- err = os.MkdirAll("./a/b/c", 0755) + err = os.MkdirAll("./a/b/c", 0o755) require.NoError(t, err) // It should find the project root from $PWD. diff --git a/bundle/run/job.go b/bundle/run/job.go index 35a6ef4ffc..b43db91842 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -289,7 +289,6 @@ func (r *jobRunner) Cancel(ctx context.Context) error { ActiveOnly: true, JobId: jobID, }) - if err != nil { return err } diff --git a/bundle/run/job_args.go b/bundle/run/job_args.go index 85cf96efbd..b1596bbb01 100644 --- a/bundle/run/job_args.go +++ b/bundle/run/job_args.go @@ -131,7 +131,7 @@ func (r *jobRunner) posArgsHandler() argsHandler { } // Handle task parameters otherwise. - var seen = make(map[jobTaskType]bool) + seen := make(map[jobTaskType]bool) for _, t := range job.Tasks { if t.NotebookTask != nil { seen[jobTaskTypeNotebook] = true diff --git a/bundle/run/job_options.go b/bundle/run/job_options.go index c359e79eb9..6a03dff953 100644 --- a/bundle/run/job_options.go +++ b/bundle/run/job_options.go @@ -80,7 +80,7 @@ func (o *JobOptions) validatePipelineParams() (*jobs.PipelineParams, error) { return nil, nil } - var defaultErr = fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=`") + defaultErr := fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=`") v, ok := o.pipelineParams["full_refresh"] if !ok { return nil, defaultErr diff --git a/bundle/run/output/task.go b/bundle/run/output/task.go index 402e4d66ab..1ef78a8c3a 100644 --- a/bundle/run/output/task.go +++ b/bundle/run/output/task.go @@ -7,13 +7,15 @@ import ( "github.com/databricks/databricks-sdk-go/service/jobs" ) -type NotebookOutput jobs.NotebookOutput -type DbtOutput jobs.DbtOutput -type SqlOutput jobs.SqlOutput -type LogsOutput struct { - Logs string `json:"logs"` - LogsTruncated bool `json:"logs_truncated"` -} +type ( + NotebookOutput jobs.NotebookOutput + DbtOutput jobs.DbtOutput + SqlOutput jobs.SqlOutput + LogsOutput struct { + Logs string `json:"logs"` + LogsTruncated bool `json:"logs_truncated"` + } +) func structToString(val any) (string, error) { b, err := json.MarshalIndent(val, "", " ") diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index 6db80a71ba..a0e7d1e1e2 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -41,7 +41,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE } } -func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, updateId string) error { +func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId, updateId string) error { w := r.bundle.WorkspaceClient() // Note: For a 100 percent correct and complete solution we should use the @@ -85,7 +85,7 @@ func (r *pipelineRunner) Name() string { } func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, error) { - var pipelineID = r.pipeline.ID + pipelineID := r.pipeline.ID // Include resource key in logger. 
ctx = log.NewContext(ctx, log.GetLogger(ctx).With("resource", r.Key())) @@ -173,7 +173,6 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error { wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{ PipelineId: r.pipeline.ID, }) - if err != nil { return err } diff --git a/bundle/run/progress/pipeline.go b/bundle/run/progress/pipeline.go index 4a256e76c4..b82dd7abd0 100644 --- a/bundle/run/progress/pipeline.go +++ b/bundle/run/progress/pipeline.go @@ -51,7 +51,7 @@ type UpdateTracker struct { w *databricks.WorkspaceClient } -func NewUpdateTracker(pipelineId string, updateId string, w *databricks.WorkspaceClient) *UpdateTracker { +func NewUpdateTracker(pipelineId, updateId string, w *databricks.WorkspaceClient) *UpdateTracker { return &UpdateTracker{ w: w, PipelineId: pipelineId, diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 920577146d..113a6140b0 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -93,7 +93,6 @@ func TestRunAsForAllowedWithTargetOverride(t *testing.T) { assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) - } func TestRunAsErrorForPipelines(t *testing.T) { @@ -220,7 +219,6 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { for _, tc := range tcases { t.Run(tc.name, func(t *testing.T) { - bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) b := load(t, bundlePath) diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 9451c5a042..37d488fad5 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -151,7 +151,7 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) { } func TestVariableTargetOverrides(t *testing.T) { - var tcases = []struct { + tcases := []struct { targetName string pipelineName string pipelineContinuous bool diff --git a/bundle/trampoline/python_dbr_warning.go b/bundle/trampoline/python_dbr_warning.go index cf3e9aeb3b..0318df7c9f 100644 --- a/bundle/trampoline/python_dbr_warning.go +++ b/bundle/trampoline/python_dbr_warning.go @@ -14,8 +14,7 @@ import ( "golang.org/x/mod/semver" ) -type wrapperWarning struct { -} +type wrapperWarning struct{} func WrapperWarning() bundle.Mutator { return &wrapperWarning{} @@ -62,7 +61,6 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { if task.ExistingClusterId != "" { version, err := getSparkVersionForCluster(ctx, b.WorkspaceClient(), task.ExistingClusterId) - // If there's error getting spark version for cluster, do not mark it as incompatible if err != nil { log.Warnf(ctx, "unable to get spark version for cluster %s, err: %s", task.ExistingClusterId, err.Error()) diff --git a/bundle/trampoline/python_wheel_test.go b/bundle/trampoline/python_wheel_test.go index 517be35e4c..d75a3eca35 100644 --- a/bundle/trampoline/python_wheel_test.go +++ b/bundle/trampoline/python_wheel_test.go @@ -127,7 +127,8 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { Tasks: []jobs.Task{ { TaskKey: "notebook_task", - NotebookTask: &jobs.NotebookTask{}}, + NotebookTask: &jobs.NotebookTask{}, + }, }, }, }, diff --git a/bundle/trampoline/trampoline.go b/bundle/trampoline/trampoline.go index 1dc1c4463f..600ce3d9c6 100644 --- 
a/bundle/trampoline/trampoline.go +++ b/bundle/trampoline/trampoline.go @@ -62,7 +62,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund notebookName := fmt.Sprintf("notebook_%s_%s", task.JobKey, task.Task.TaskKey) localNotebookPath := filepath.Join(internalDir, notebookName+".py") - err = os.MkdirAll(filepath.Dir(localNotebookPath), 0755) + err = os.MkdirAll(filepath.Dir(localNotebookPath), 0o755) if err != nil { return err } diff --git a/bundle/trampoline/trampoline_test.go b/bundle/trampoline/trampoline_test.go index 4682d8fa03..3c5d185707 100644 --- a/bundle/trampoline/trampoline_test.go +++ b/bundle/trampoline/trampoline_test.go @@ -52,7 +52,8 @@ func TestGenerateTrampoline(t *testing.T) { PythonWheelTask: &jobs.PythonWheelTask{ PackageName: "test", EntryPoint: "run", - }}, + }, + }, } b := &bundle.Bundle{ diff --git a/cmd/api/api.go b/cmd/api/api.go index d33939a521..c3a3eb0b60 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -39,7 +39,7 @@ func makeCommand(method string) *cobra.Command { Args: root.ExactArgs(1), Short: fmt.Sprintf("Perform %s request", method), RunE: func(cmd *cobra.Command, args []string) error { - var path = args[0] + path := args[0] var request any diags := payload.Unmarshal(&request) diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go index c5fa8d26bf..faaf64f8fd 100644 --- a/cmd/auth/describe.go +++ b/cmd/auth/describe.go @@ -59,7 +59,6 @@ func newDescribeCommand() *cobra.Command { isAccount, err := root.MustAnyClient(cmd, args) return root.ConfigUsed(cmd.Context()), isAccount, err }) - if err != nil { return err } diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 79b7954680..c986765994 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -29,8 +29,10 @@ func promptForProfile(ctx context.Context, defaultValue string) (string, error) return prompt.Run() } -const minimalDbConnectVersion = "13.1" -const defaultTimeout = 1 * time.Hour +const ( + minimalDbConnectVersion = "13.1" + defaultTimeout = 1 * time.Hour +) func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { defaultConfigPath := "~/.databrickscfg" diff --git a/cmd/bundle/generate/dashboard.go b/cmd/bundle/generate/dashboard.go index 4a538a2937..f196bbe629 100644 --- a/cmd/bundle/generate/dashboard.go +++ b/cmd/bundle/generate/dashboard.go @@ -158,7 +158,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, } // Make sure the output directory exists. - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(filename), 0o755); err != nil { return err } @@ -183,7 +183,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle, } fmt.Printf("Writing dashboard to %q\n", rel) - return os.WriteFile(filename, data, 0644) + return os.WriteFile(filename, data, 0o644) } func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error { @@ -210,7 +210,7 @@ func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, das } // Make sure the output directory exists. 
- if err := os.MkdirAll(d.resourceDir, 0755); err != nil { + if err := os.MkdirAll(d.resourceDir, 0o755); err != nil { return err } diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index 33f1661586..896b7de51c 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -217,7 +217,7 @@ func TestGenerateJobCommand(t *testing.T) { } func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/cmd/bundle/generate/utils.go b/cmd/bundle/generate/utils.go index 65f6924193..8e3764e352 100644 --- a/cmd/bundle/generate/utils.go +++ b/cmd/bundle/generate/utils.go @@ -87,7 +87,7 @@ func (n *downloader) markNotebookForDownload(ctx context.Context, notebookPath * } func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { - err := os.MkdirAll(n.sourceDir, 0755) + err := os.MkdirAll(n.sourceDir, 0o755) if err != nil { return err } @@ -134,7 +134,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error { return errs.Wait() } -func newDownloader(w *databricks.WorkspaceClient, sourceDir string, configDir string) *downloader { +func newDownloader(w *databricks.WorkspaceClient, sourceDir, configDir string) *downloader { return &downloader{ files: make(map[string]string), w: w, diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index a127fe57aa..e2f6c1e29b 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -31,7 +31,7 @@ func setup(t *testing.T) string { return tempHomeDir } -func getTempFileWithContent(t *testing.T, tempHomeDir string, content string) *os.File { +func getTempFileWithContent(t *testing.T, tempHomeDir, content string) *os.File { inp, err := os.CreateTemp(tempHomeDir, "input") assert.NoError(t, err) _, err = inp.WriteString(content) @@ -75,7 +75,7 @@ func TestDefaultConfigureNoInteractive(t *testing.T) { } func TestConfigFileFromEnvNoInteractive(t *testing.T) { - //TODO: Replace with similar test code from go SDK, once we start using it directly + // TODO: Replace with similar test code from go SDK, once we start using it directly ctx := context.Background() tempHomeDir := setup(t) defaultCfgPath := filepath.Join(tempHomeDir, ".databrickscfg") diff --git a/cmd/labs/github/github.go b/cmd/labs/github/github.go index 1dd9fae5eb..a67df10224 100644 --- a/cmd/labs/github/github.go +++ b/cmd/labs/github/github.go @@ -12,12 +12,16 @@ import ( "github.com/databricks/cli/libs/log" ) -const gitHubAPI = "https://api.github.com" -const gitHubUserContent = "https://raw.githubusercontent.com" +const ( + gitHubAPI = "https://api.github.com" + gitHubUserContent = "https://raw.githubusercontent.com" +) // Placeholders to use as unique keys in context.Context. 
-var apiOverride int -var userContentOverride int +var ( + apiOverride int + userContentOverride int +) func WithApiOverride(ctx context.Context, override string) context.Context { return context.WithValue(ctx, &apiOverride, override) diff --git a/cmd/labs/localcache/jsonfile.go b/cmd/labs/localcache/jsonfile.go index 495743a576..6540e4ac27 100644 --- a/cmd/labs/localcache/jsonfile.go +++ b/cmd/labs/localcache/jsonfile.go @@ -14,8 +14,10 @@ import ( "github.com/databricks/cli/libs/log" ) -const userRW = 0o600 -const ownerRWXworldRX = 0o755 +const ( + userRW = 0o600 + ownerRWXworldRX = 0o755 +) func NewLocalCache[T any](dir, name string, validity time.Duration) LocalCache[T] { return LocalCache[T]{ diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go index 5210c85f95..2bed49145f 100644 --- a/cmd/labs/project/entrypoint.go +++ b/cmd/labs/project/entrypoint.go @@ -30,10 +30,12 @@ type Entrypoint struct { IsBundleAware bool `yaml:"is_bundle_aware,omitempty"` } -var ErrNoLoginConfig = errors.New("no login configuration found") -var ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") -var ErrMissingWarehouseID = errors.New("missing a SQL warehouse") -var ErrNotInTTY = errors.New("not in an interactive terminal") +var ( + ErrNoLoginConfig = errors.New("no login configuration found") + ErrMissingClusterID = errors.New("missing a cluster compatible with Databricks Connect") + ErrMissingWarehouseID = errors.New("missing a SQL warehouse") + ErrNotInTTY = errors.New("not in an interactive terminal") +) func (e *Entrypoint) NeedsCluster() bool { if e.Installer == nil { diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index c62d546b76..63c984bcb6 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -29,8 +29,10 @@ import ( "github.com/stretchr/testify/require" ) -const ownerRWXworldRX = 0o755 -const ownerRW = 0o600 +const ( + ownerRWXworldRX = 0o755 + ownerRW = 0o600 +) func zipballFromFolder(src string) ([]byte, error) { var buf bytes.Buffer diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 107679105b..07ab483990 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -15,9 +15,11 @@ import ( ) // Placeholders to use as unique keys in context.Context. 
-var workspaceClient int -var accountClient int -var configUsed int +var ( + workspaceClient int + accountClient int + configUsed int +) type ErrNoWorkspaceProfiles struct { path string diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 92f028514e..7845987968 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -84,7 +84,7 @@ func TestAccountClientOrPrompt(t *testing.T) { account_id = 1112 token = foobar `), - 0755) + 0o755) require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", configFile) t.Setenv("PATH", "/nothing") @@ -150,7 +150,7 @@ func TestWorkspaceClientOrPrompt(t *testing.T) { host = https://adb-1112.12.azuredatabricks.net/ token = foobar `), - 0755) + 0o755) require.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", configFile) t.Setenv("PATH", "/nothing") @@ -204,7 +204,7 @@ func TestMustAccountClientWorksWithDatabricksCfg(t *testing.T) { account_id = 1111 token = foobar `), - 0755) + 0o755) require.NoError(t, err) cmd := New(context.Background()) @@ -251,7 +251,7 @@ func TestMustAnyClientCanCreateWorkspaceClient(t *testing.T) { host = https://adb-1111.11.azuredatabricks.net/ token = foobar `), - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) @@ -280,7 +280,7 @@ func TestMustAnyClientCanCreateAccountClient(t *testing.T) { account_id = 1111 token = foobar `), - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) @@ -304,7 +304,7 @@ func TestMustAnyClientWithEmptyDatabricksCfg(t *testing.T) { err := os.WriteFile( configFile, []byte(""), // empty file - 0755) + 0o755) require.NoError(t, err) ctx, tt := cmdio.SetupTest(context.Background()) diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 72e0bef556..1998b19e61 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -23,7 +23,7 @@ func setupDatabricksCfg(t *testing.T) { } cfg := []byte("[PROFILE-1]\nhost = https://a.com\ntoken = a\n[PROFILE-2]\nhost = https://a.com\ntoken = b\n") - err := os.WriteFile(filepath.Join(tempHomeDir, ".databrickscfg"), cfg, 0644) + err := os.WriteFile(filepath.Join(tempHomeDir, ".databrickscfg"), cfg, 0o644) assert.NoError(t, err) t.Setenv("DATABRICKS_CONFIG_FILE", "") @@ -48,7 +48,7 @@ func setupWithHost(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle workspace: host: %q `, host) - err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0o644) require.NoError(t, err) b, diags := MustConfigureBundle(cmd) @@ -66,7 +66,7 @@ func setupWithProfile(t *testing.T, cmd *cobra.Command, profile string) *bundle. 
workspace: profile: %q `, profile) - err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0o644) require.NoError(t, err) b, diags := MustConfigureBundle(cmd) diff --git a/cmd/root/root.go b/cmd/root/root.go index e6f66f126f..3b37d01769 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -4,11 +4,10 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "strings" - "log/slog" - "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/dbr" diff --git a/cmd/root/user_agent_upstream.go b/cmd/root/user_agent_upstream.go index f580b42630..a813e8ee74 100644 --- a/cmd/root/user_agent_upstream.go +++ b/cmd/root/user_agent_upstream.go @@ -8,12 +8,16 @@ import ( ) // Environment variables that caller can set to convey what is upstream to this CLI. -const upstreamEnvVar = "DATABRICKS_CLI_UPSTREAM" -const upstreamVersionEnvVar = "DATABRICKS_CLI_UPSTREAM_VERSION" +const ( + upstreamEnvVar = "DATABRICKS_CLI_UPSTREAM" + upstreamVersionEnvVar = "DATABRICKS_CLI_UPSTREAM_VERSION" +) // Keys in the user agent. -const upstreamKey = "upstream" -const upstreamVersionKey = "upstream-version" +const ( + upstreamKey = "upstream" + upstreamVersionKey = "upstream-version" +) func withUpstreamInUserAgent(ctx context.Context) context.Context { value := env.Get(ctx, upstreamEnvVar) diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 6d722fb08a..cd2167a198 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -68,7 +68,6 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn localRoot := vfs.MustNew(args[0]) info, err := git.FetchRepositoryInfo(ctx, localRoot.Native(), client) - if err != nil { log.Warnf(ctx, "Failed to read git info: %s", err) } diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index 0046f46ef9..febe4c3e10 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -39,7 +39,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. 
// create directory and return early if d.IsDir() { - return os.MkdirAll(targetPath, 0755) + return os.MkdirAll(targetPath, 0o755) } // Add extension to local file path if the file is a notebook diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index cfed0a6eec..216e9b5d81 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -52,7 +52,7 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest if err != nil { return err } - return os.WriteFile(filePath, b, 0755) + return os.WriteFile(filePath, b, 0o755) } } @@ -88,7 +88,6 @@ func importOverride(importCmd *cobra.Command, importReq *workspace.Import) { err := originalRunE(cmd, args) return wrapImportAPIErrors(err, importReq) } - } func init() { diff --git a/internal/build/variables.go b/internal/build/variables.go index 197dee9c30..80c4683aba 100644 --- a/internal/build/variables.go +++ b/internal/build/variables.go @@ -1,21 +1,27 @@ package build -var buildProjectName string = "cli" -var buildVersion string = "" +var ( + buildProjectName string = "cli" + buildVersion string = "" +) -var buildBranch string = "undefined" -var buildTag string = "undefined" -var buildShortCommit string = "00000000" -var buildFullCommit string = "0000000000000000000000000000000000000000" -var buildCommitTimestamp string = "0" -var buildSummary string = "v0.0.0" +var ( + buildBranch string = "undefined" + buildTag string = "undefined" + buildShortCommit string = "00000000" + buildFullCommit string = "0000000000000000000000000000000000000000" + buildCommitTimestamp string = "0" + buildSummary string = "v0.0.0" +) -var buildMajor string = "0" -var buildMinor string = "0" -var buildPatch string = "0" -var buildPrerelease string = "" -var buildIsSnapshot string = "false" -var buildTimestamp string = "0" +var ( + buildMajor string = "0" + buildMinor string = "0" + buildPatch string = "0" + buildPrerelease string = "" + buildIsSnapshot string = "false" + buildTimestamp string = "0" +) // This function is used to set the build version for testing purposes. func SetBuildVersion(version string) { diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 34d101e4f8..f5eec32e06 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -23,7 +23,7 @@ import ( ) func touchEmptyFile(t *testing.T, path string) { - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(path), 0o700) require.NoError(t, err) f, err := os.Create(path) require.NoError(t, err) diff --git a/internal/bundle/clusters_test.go b/internal/bundle/clusters_test.go index a961f3ea8f..b46bec97ec 100644 --- a/internal/bundle/clusters_test.go +++ b/internal/bundle/clusters_test.go @@ -39,7 +39,6 @@ func TestAccDeployBundleWithCluster(t *testing.T) { } else { require.Contains(t, []compute.State{compute.StateTerminated, compute.StateTerminating}, cluster.State) } - }) err = deployBundle(t, ctx, root) diff --git a/internal/bundle/deploy_test.go b/internal/bundle/deploy_test.go index f4c6a440c3..283b05f7ab 100644 --- a/internal/bundle/deploy_test.go +++ b/internal/bundle/deploy_test.go @@ -174,7 +174,6 @@ restore the defined STs and MVs through full refresh. Note that recreation is ne properties such as the 'catalog' or 'storage' are changed: delete pipeline bar`) assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") - } func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { diff --git a/internal/bundle/deployment_state_test.go b/internal/bundle/deployment_state_test.go index 0ab99aeb38..0facd9eb04 100644 --- a/internal/bundle/deployment_state_test.go +++ b/internal/bundle/deployment_state_test.go @@ -32,14 +32,14 @@ func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { t.Setenv("BUNDLE_ROOT", bundleRoot) // Add some test file to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) // Add notebook to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644) require.NoError(t, err) err = deployBundle(t, ctx, bundleRoot) @@ -79,7 +79,7 @@ func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { require.NoError(t, err) // Modify the content of another file - err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0644) + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644) require.NoError(t, err) err = deployBundle(t, ctx, bundleRoot) diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index 82467952d1..b4a53aaaab 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -59,7 +59,7 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { // Replace pipeline name generatedYaml = strings.ReplaceAll(generatedYaml, name, internal.RandomName("copy-generated-pipeline-")) - err = os.WriteFile(fileName, []byte(generatedYaml), 0644) + err = os.WriteFile(fileName, []byte(generatedYaml), 0o644) require.NoError(t, err) require.Contains(t, generatedYaml, "libraries:") diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 00a941c952..2d2e1098d4 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -59,7 +59,7 @@ func writeConfigFile(t *testing.T, config map[string]any) (string, error) { filepath := filepath.Join(dir, "config.json") t.Log("Configuration for template: ", string(bytes)) - err = os.WriteFile(filepath, bytes, 0644) + err = os.WriteFile(filepath, bytes, 0o644) return filepath, err } @@ -107,7 +107,7 @@ func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags return err } -func runResource(t *testing.T, ctx context.Context, path string, key string) (string, error) { +func runResource(t *testing.T, ctx context.Context, path, key string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) @@ -116,7 +116,7 @@ func runResource(t *testing.T, ctx context.Context, path string, key string) (st return stdout.String(), err } -func runResourceWithParams(t *testing.T, ctx context.Context, path string, key string, params ...string) (string, error) { +func runResourceWithParams(t 
*testing.T, ctx context.Context, path, key string, params ...string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) @@ -143,7 +143,7 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique return root } -func blackBoxRun(t *testing.T, root string, args ...string) (stdout string, stderr string) { +func blackBoxRun(t *testing.T, root string, args ...string) (stdout, stderr string) { gitRoot, err := folders.FindDirWithLeaf(".", ".git") require.NoError(t, err) diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index 44f6200c1b..108676bd80 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func runPythonWheelTest(t *testing.T, templateName string, sparkVersion string, pythonWheelWrapper bool) { +func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonWheelWrapper bool) { ctx, _ := acc.WorkspaceTest(t) nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) diff --git a/internal/bundle/spark_jar_test.go b/internal/bundle/spark_jar_test.go index cffcbb755a..a7a58a3663 100644 --- a/internal/bundle/spark_jar_test.go +++ b/internal/bundle/spark_jar_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion string, artifactPath string) { +func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, artifactPath string) { cloudEnv := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") nodeTypeId := internal.GetNodeTypeId(cloudEnv) tmpDir := t.TempDir() diff --git a/internal/dashboard_assumptions_test.go b/internal/dashboard_assumptions_test.go index 64294873d1..6d67937a8b 100644 --- a/internal/dashboard_assumptions_test.go +++ b/internal/dashboard_assumptions_test.go @@ -98,7 +98,7 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { assert.Fail(t, "unexpected insert operation") return right, nil }, - VisitUpdate: func(basePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(basePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { updatedFieldPaths = append(updatedFieldPaths, basePath.String()) return right, nil }, diff --git a/internal/filer_test.go b/internal/filer_test.go index 4e6a156711..5ba303dc73 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -22,7 +22,7 @@ type filerTest struct { filer.Filer } -func (f filerTest) assertContents(ctx context.Context, name string, contents string) { +func (f filerTest) assertContents(ctx context.Context, name, contents string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -39,7 +39,7 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } -func (f filerTest) assertContentsJupyter(ctx context.Context, name string, language string) { +func (f filerTest) assertContentsJupyter(ctx context.Context, name, language string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -468,7 +468,6 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected2) }) } - } func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { diff --git a/internal/fs_rm_test.go b/internal/fs_rm_test.go index e86f5713bb..d49813a340 100644 --- a/internal/fs_rm_test.go +++ 
b/internal/fs_rm_test.go @@ -118,7 +118,6 @@ func TestAccFsRmForNonExistentFile(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) }) } - } func TestAccFsRmDirRecursively(t *testing.T) { diff --git a/internal/git_fetch_test.go b/internal/git_fetch_test.go index 3da024a3a3..3b234ec147 100644 --- a/internal/git_fetch_test.go +++ b/internal/git_fetch_test.go @@ -14,8 +14,10 @@ import ( "github.com/stretchr/testify/require" ) -const examplesRepoUrl = "https://github.com/databricks/bundle-examples" -const examplesRepoProvider = "gitHub" +const ( + examplesRepoUrl = "https://github.com/databricks/bundle-examples" + examplesRepoProvider = "gitHub" +) func assertFullGitInfo(t *testing.T, expectedRoot string, info git.RepositoryInfo) { assert.Equal(t, "main", info.CurrentBranch) @@ -140,7 +142,7 @@ func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { tempDir := t.TempDir() root := filepath.Join(tempDir, "repo") - require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(root, "a/b/c"), 0o700)) tests := []string{ filepath.Join(root, "a/b/c"), @@ -163,8 +165,8 @@ func TestAccFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { tempDir := t.TempDir() root := filepath.Join(tempDir, "repo") path := filepath.Join(root, "a/b/c") - require.NoError(t, os.MkdirAll(path, 0700)) - require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0000)) + require.NoError(t, os.MkdirAll(path, 0o700)) + require.NoError(t, os.WriteFile(filepath.Join(root, ".git"), []byte(""), 0o000)) info, err := git.FetchRepositoryInfo(ctx, path, wt.W) assert.NoError(t, err) diff --git a/internal/helpers.go b/internal/helpers.go index a76ea92b5b..060736dc6f 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -279,7 +279,7 @@ func (t *cobraTestRunner) Run() (bytes.Buffer, bytes.Buffer, error) { } // Like [require.Eventually] but errors if the underlying command has failed. 
-func (c *cobraTestRunner) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...any) { +func (c *cobraTestRunner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) { ch := make(chan bool, 1) timer := time.NewTimer(waitFor) @@ -362,7 +362,7 @@ func readFile(t *testing.T, name string) string { return string(b) } -func writeFile(t *testing.T, name string, body string) string { +func writeFile(t *testing.T, name, body string) string { f, err := os.Create(filepath.Join(t.TempDir(), name)) require.NoError(t, err) _, err = f.WriteString(body) @@ -521,7 +521,6 @@ func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { }) return path.Join("/Volumes", "main", schema.Name, volume.Name) - } func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { diff --git a/internal/init_test.go b/internal/init_test.go index 08a808f458..36b789d91f 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -58,7 +58,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { } b, err := json.Marshal(initConfig) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + err = os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0o644) require.NoError(t, err) // Run bundle init @@ -151,11 +151,11 @@ func TestAccBundleInitHelpers(t *testing.T) { tmpDir := t.TempDir() tmpDir2 := t.TempDir() - err := os.Mkdir(filepath.Join(tmpDir, "template"), 0755) + err := os.Mkdir(filepath.Join(tmpDir, "template"), 0o755) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "template", "foo.txt.tmpl"), []byte(test.funcName), 0644) + err = os.WriteFile(filepath.Join(tmpDir, "template", "foo.txt.tmpl"), []byte(test.funcName), 0o644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "databricks_template_schema.json"), []byte("{}"), 0644) + err = os.WriteFile(filepath.Join(tmpDir, "databricks_template_schema.json"), []byte("{}"), 0o644) require.NoError(t, err) // Run bundle init. diff --git a/internal/sync_test.go b/internal/sync_test.go index 6f8b1827be..2518777fbd 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -145,7 +145,7 @@ func (a *syncTest) remoteDirContent(ctx context.Context, relativeDir string, exp } } -func (a *syncTest) remoteFileContent(ctx context.Context, relativePath string, expectedContent string) { +func (a *syncTest) remoteFileContent(ctx context.Context, relativePath, expectedContent string) { filePath := path.Join(a.remoteRoot, relativePath) // Remove leading "/" so we can use it in the URL. 
@@ -181,7 +181,7 @@ func (a *syncTest) touchFile(ctx context.Context, path string) { require.NoError(a.t, err) } -func (a *syncTest) objectType(ctx context.Context, relativePath string, expected string) { +func (a *syncTest) objectType(ctx context.Context, relativePath, expected string) { path := path.Join(a.remoteRoot, relativePath) a.c.Eventually(func() bool { @@ -193,7 +193,7 @@ func (a *syncTest) objectType(ctx context.Context, relativePath string, expected }, 30*time.Second, 5*time.Second) } -func (a *syncTest) language(ctx context.Context, relativePath string, expected string) { +func (a *syncTest) language(ctx context.Context, relativePath, expected string) { path := path.Join(a.remoteRoot, relativePath) a.c.Eventually(func() bool { diff --git a/internal/testutil/copy.go b/internal/testutil/copy.go index 21faece003..9fd8539925 100644 --- a/internal/testutil/copy.go +++ b/internal/testutil/copy.go @@ -22,7 +22,7 @@ func CopyDirectory(t *testing.T, src, dst string) { require.NoError(t, err) if d.IsDir() { - return os.MkdirAll(filepath.Join(dst, rel), 0755) + return os.MkdirAll(filepath.Join(dst, rel), 0o755) } // Copy the file to the temporary directory diff --git a/internal/testutil/file.go b/internal/testutil/file.go index ba2c3280e2..59c640fbeb 100644 --- a/internal/testutil/file.go +++ b/internal/testutil/file.go @@ -10,17 +10,17 @@ import ( func TouchNotebook(t *testing.T, elems ...string) string { path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) - err = os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + err = os.WriteFile(path, []byte("# Databricks notebook source"), 0o644) require.NoError(t, err) return path } func Touch(t *testing.T, elems ...string) string { path := filepath.Join(elems...) - err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) f, err := os.Create(path) @@ -33,7 +33,7 @@ func Touch(t *testing.T, elems ...string) string { func WriteFile(t *testing.T, content string, elems ...string) string { path := filepath.Join(elems...) 
- err := os.MkdirAll(filepath.Dir(path), 0755) + err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) f, err := os.Create(path) diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 4453616545..575c0cadca 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -71,14 +71,14 @@ func setupWorkspaceImportExportTest(t *testing.T) (context.Context, filer.Filer, return ctx, f, tmpdir } -func assertLocalFileContents(t *testing.T, path string, content string) { +func assertLocalFileContents(t *testing.T, path, content string) { require.FileExists(t, path) b, err := os.ReadFile(path) require.NoError(t, err) assert.Contains(t, string(b), content) } -func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, path string, content string) { +func assertFilerFileContents(t *testing.T, ctx context.Context, f filer.Filer, path, content string) { r, err := f.Read(ctx, path) require.NoError(t, err) b, err := io.ReadAll(r) diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index 75c0c4b875..f7987c2335 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -31,7 +31,7 @@ type cmdIO struct { err io.Writer } -func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, headerTemplate, template string) *cmdIO { +func NewIO(outputFormat flags.Output, in io.Reader, out, err io.Writer, headerTemplate, template string) *cmdIO { // The check below is similar to color.NoColor but uses the specified err writer. dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" if f, ok := err.(*os.File); ok && !dumb { diff --git a/libs/cmdio/logger.go b/libs/cmdio/logger.go index 5e89b3a34f..7bc95e9a51 100644 --- a/libs/cmdio/logger.go +++ b/libs/cmdio/logger.go @@ -151,7 +151,7 @@ func (l *Logger) AskSelect(question string, choices []string) (string, error) { return ans, nil } -func (l *Logger) Ask(question string, defaultVal string) (string, error) { +func (l *Logger) Ask(question, defaultVal string) (string, error) { if l.Mode == flags.ModeJson { return "", fmt.Errorf("question prompts are not supported in json mode") } @@ -234,5 +234,4 @@ func (l *Logger) Log(event Event) { // jobs.RunNowAndWait panic("unknown progress logger mode: " + l.Mode.String()) } - } diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go index cac1b08a7c..6ae7d99c63 100644 --- a/libs/databrickscfg/cfgpickers/clusters.go +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -18,8 +18,10 @@ import ( var minUcRuntime = canonicalVersion("v12.0") -var dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) -var dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) +var ( + dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) + dbrSnapshotVersionRegex = regexp.MustCompile(`^(\d+)\.x-snapshot.*`) +) func canonicalVersion(v string) string { return semver.Canonical("v" + strings.TrimPrefix(v, "v")) diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index 0cddff3be0..8a05bfb38a 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -325,7 +325,7 @@ func TestFromTypedMapNil(t *testing.T) { } func TestFromTypedMapEmpty(t *testing.T) { - var src = map[string]string{} + src := map[string]string{} ref := dyn.V(map[string]dyn.Value{ "foo": dyn.V("bar"), @@ -338,7 +338,7 @@ func TestFromTypedMapEmpty(t *testing.T) { } func TestFromTypedMapNonEmpty(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ 
"foo": "foo", "bar": "bar", } @@ -353,7 +353,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { } func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "bar", "bar": "qux", } @@ -372,7 +372,7 @@ func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { - var src = map[string]string{ + src := map[string]string{ "foo": "", } @@ -398,7 +398,7 @@ func TestFromTypedSliceNil(t *testing.T) { } func TestFromTypedSliceEmpty(t *testing.T) { - var src = []string{} + src := []string{} ref := dyn.V([]dyn.Value{ dyn.V("bar"), @@ -411,7 +411,7 @@ func TestFromTypedSliceEmpty(t *testing.T) { } func TestFromTypedSliceNonEmpty(t *testing.T) { - var src = []string{ + src := []string{ "foo", "bar", } @@ -426,7 +426,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { } func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { - var src = []string{ + src := []string{ "foo", "bar", } @@ -446,7 +446,7 @@ func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { func TestFromTypedStringEmpty(t *testing.T) { var src string - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -454,7 +454,7 @@ func TestFromTypedStringEmpty(t *testing.T) { func TestFromTypedStringEmptyOverwrite(t *testing.T) { var src string - var ref = dyn.V("old") + ref := dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(""), nv) @@ -462,7 +462,7 @@ func TestFromTypedStringEmptyOverwrite(t *testing.T) { func TestFromTypedStringNonEmpty(t *testing.T) { var src string = "new" - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("new"), nv) @@ -470,14 +470,14 @@ func TestFromTypedStringNonEmpty(t *testing.T) { func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { var src string = "new" - var ref = dyn.V("old") + ref := dyn.V("old") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("new"), nv) } func TestFromTypedStringRetainsLocations(t *testing.T) { - var ref = dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) // case: value has not been changed var src string = "foo" @@ -494,14 +494,14 @@ func TestFromTypedStringRetainsLocations(t *testing.T) { func TestFromTypedStringTypeError(t *testing.T) { var src string = "foo" - var ref = dyn.V(1234) + ref := dyn.V(1234) _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedBoolEmpty(t *testing.T) { var src bool - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -509,7 +509,7 @@ func TestFromTypedBoolEmpty(t *testing.T) { func TestFromTypedBoolEmptyOverwrite(t *testing.T) { var src bool - var ref = dyn.V(true) + ref := dyn.V(true) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(false), nv) @@ -517,7 +517,7 @@ func TestFromTypedBoolEmptyOverwrite(t *testing.T) { func TestFromTypedBoolNonEmpty(t *testing.T) { var src bool = true - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(true), nv) @@ -525,14 +525,14 @@ func TestFromTypedBoolNonEmpty(t *testing.T) { func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { var src bool = true - var ref = dyn.V(false) + ref := dyn.V(false) nv, err 
:= FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(true), nv) } func TestFromTypedBoolRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(true, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(true, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src bool = true @@ -549,7 +549,7 @@ func TestFromTypedBoolRetainsLocations(t *testing.T) { func TestFromTypedBoolVariableReference(t *testing.T) { var src bool = true - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -557,14 +557,14 @@ func TestFromTypedBoolVariableReference(t *testing.T) { func TestFromTypedBoolTypeError(t *testing.T) { var src bool = true - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedIntEmpty(t *testing.T) { var src int - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -572,7 +572,7 @@ func TestFromTypedIntEmpty(t *testing.T) { func TestFromTypedIntEmptyOverwrite(t *testing.T) { var src int - var ref = dyn.V(1234) + ref := dyn.V(1234) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(0)), nv) @@ -580,7 +580,7 @@ func TestFromTypedIntEmptyOverwrite(t *testing.T) { func TestFromTypedIntNonEmpty(t *testing.T) { var src int = 1234 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(1234)), nv) @@ -588,14 +588,14 @@ func TestFromTypedIntNonEmpty(t *testing.T) { func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { var src int = 1234 - var ref = dyn.V(1233) + ref := dyn.V(1233) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(int64(1234)), nv) } func TestFromTypedIntRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src int = 1234 @@ -612,7 +612,7 @@ func TestFromTypedIntRetainsLocations(t *testing.T) { func TestFromTypedIntVariableReference(t *testing.T) { var src int = 1234 - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -620,14 +620,14 @@ func TestFromTypedIntVariableReference(t *testing.T) { func TestFromTypedIntTypeError(t *testing.T) { var src int = 1234 - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } func TestFromTypedFloatEmpty(t *testing.T) { var src float64 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) @@ -635,7 +635,7 @@ func TestFromTypedFloatEmpty(t *testing.T) { func TestFromTypedFloatEmptyOverwrite(t *testing.T) { var src float64 - var ref = dyn.V(1.23) + ref := dyn.V(1.23) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(0.0), nv) @@ -643,7 +643,7 @@ func TestFromTypedFloatEmptyOverwrite(t *testing.T) { func TestFromTypedFloatNonEmpty(t *testing.T) { var src float64 = 1.23 - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(1.23), nv) @@ -651,7 +651,7 @@ func TestFromTypedFloatNonEmpty(t *testing.T) { func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { var src 
float64 = 1.23 - var ref = dyn.V(1.24) + ref := dyn.V(1.24) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V(1.23), nv) @@ -659,7 +659,7 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { func TestFromTypedFloatRetainsLocations(t *testing.T) { var src float64 - var ref = dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) + ref := dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) // case: value has not been changed src = 1.23 @@ -676,7 +676,7 @@ func TestFromTypedFloatRetainsLocations(t *testing.T) { func TestFromTypedFloatVariableReference(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V("${var.foo}") + ref := dyn.V("${var.foo}") nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.V("${var.foo}"), nv) @@ -684,7 +684,7 @@ func TestFromTypedFloatVariableReference(t *testing.T) { func TestFromTypedFloatTypeError(t *testing.T) { var src float64 = 1.23 - var ref = dyn.V("string") + ref := dyn.V("string") _, err := FromTyped(src, ref) require.Error(t, err) } @@ -727,7 +727,7 @@ func TestFromTypedAny(t *testing.T) { func TestFromTypedAnyNil(t *testing.T) { var src any = nil - var ref = dyn.NilValue + ref := dyn.NilValue nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index dc3ed4da40..f5fd29cb98 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -43,7 +43,7 @@ func getStructInfo(typ reflect.Type) structInfo { // buildStructInfo populates a new [structInfo] for the given type. func buildStructInfo(typ reflect.Type) structInfo { - var out = structInfo{ + out := structInfo{ Fields: make(map[string][]int), } @@ -102,7 +102,7 @@ func buildStructInfo(typ reflect.Type) structInfo { } func (s *structInfo) FieldValues(v reflect.Value) map[string]reflect.Value { - var out = make(map[string]reflect.Value) + out := make(map[string]reflect.Value) for k, index := range s.Fields { fv := v diff --git a/libs/dyn/convert/struct_info_test.go b/libs/dyn/convert/struct_info_test.go index 20348ff601..bc10db9daf 100644 --- a/libs/dyn/convert/struct_info_test.go +++ b/libs/dyn/convert/struct_info_test.go @@ -95,7 +95,7 @@ func TestStructInfoFieldValues(t *testing.T) { Bar string `json:"bar"` } - var src = Tmp{ + src := Tmp{ Foo: "foo", Bar: "bar", } @@ -121,7 +121,7 @@ func TestStructInfoFieldValuesAnonymousByValue(t *testing.T) { Foo } - var src = Tmp{ + src := Tmp{ Foo: Foo{ Foo: "foo", Bar: Bar{ diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 78221c2991..4a56dd4fc0 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -44,7 +44,7 @@ func TestToTypedStructOverwrite(t *testing.T) { Qux string `json:"-"` } - var out = Tmp{ + out := Tmp{ Foo: "baz", Bar: "qux", } @@ -66,7 +66,7 @@ func TestToTypedStructClearFields(t *testing.T) { } // Struct value with non-empty fields. 
- var out = Tmp{ + out := Tmp{ Foo: "baz", Bar: "qux", } @@ -137,7 +137,7 @@ func TestToTypedStructNil(t *testing.T) { Foo string `json:"foo"` } - var out = Tmp{} + out := Tmp{} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) @@ -148,7 +148,7 @@ func TestToTypedStructNilOverwrite(t *testing.T) { Foo string `json:"foo"` } - var out = Tmp{"bar"} + out := Tmp{"bar"} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Equal(t, Tmp{}, out) @@ -173,7 +173,7 @@ func TestToTypedStructWithValueField(t *testing.T) { } func TestToTypedMap(t *testing.T) { - var out = map[string]string{} + out := map[string]string{} v := dyn.V(map[string]dyn.Value{ "key": dyn.V("value"), @@ -186,7 +186,7 @@ func TestToTypedMap(t *testing.T) { } func TestToTypedMapOverwrite(t *testing.T) { - var out = map[string]string{ + out := map[string]string{ "foo": "bar", } @@ -214,14 +214,14 @@ func TestToTypedMapWithPointerElement(t *testing.T) { } func TestToTypedMapNil(t *testing.T) { - var out = map[string]string{} + out := map[string]string{} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) } func TestToTypedMapNilOverwrite(t *testing.T) { - var out = map[string]string{ + out := map[string]string{ "foo": "bar", } err := ToTyped(&out, dyn.NilValue) @@ -245,7 +245,7 @@ func TestToTypedSlice(t *testing.T) { } func TestToTypedSliceOverwrite(t *testing.T) { - var out = []string{"qux"} + out := []string{"qux"} v := dyn.V([]dyn.Value{ dyn.V("foo"), @@ -282,7 +282,7 @@ func TestToTypedSliceNil(t *testing.T) { } func TestToTypedSliceNilOverwrite(t *testing.T) { - var out = []string{"foo"} + out := []string{"foo"} err := ToTyped(&out, dyn.NilValue) require.NoError(t, err) assert.Nil(t, out) diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go index ebdba1214a..616a588ec4 100644 --- a/libs/dyn/dynassert/assert.go +++ b/libs/dyn/dynassert/assert.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" ) -func Equal(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { +func Equal(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { ev, eok := expected.(dyn.Value) av, aok := actual.(dyn.Value) if eok && aok && ev.IsValid() && av.IsValid() { @@ -36,7 +36,7 @@ func EqualValues(t assert.TestingT, expected, actual any, msgAndArgs ...any) boo return assert.EqualValues(t, expected, actual, msgAndArgs...) } -func NotEqual(t assert.TestingT, expected any, actual any, msgAndArgs ...any) bool { +func NotEqual(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool { return assert.NotEqual(t, expected, actual, msgAndArgs...) } @@ -84,11 +84,11 @@ func False(t assert.TestingT, value bool, msgAndArgs ...any) bool { return assert.False(t, value, msgAndArgs...) } -func Contains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { +func Contains(t assert.TestingT, list, element any, msgAndArgs ...any) bool { return assert.Contains(t, list, element, msgAndArgs...) } -func NotContains(t assert.TestingT, list any, element any, msgAndArgs ...any) bool { +func NotContains(t assert.TestingT, list, element any, msgAndArgs ...any) bool { return assert.NotContains(t, list, element, msgAndArgs...) } @@ -112,6 +112,6 @@ func NotPanics(t assert.TestingT, f func(), msgAndArgs ...any) bool { return assert.NotPanics(t, f, msgAndArgs...) 
} -func JSONEq(t assert.TestingT, expected string, actual string, msgAndArgs ...any) bool { +func JSONEq(t assert.TestingT, expected, actual string, msgAndArgs ...any) bool { return assert.JSONEq(t, expected, actual, msgAndArgs...) } diff --git a/libs/dyn/dynassert/assert_test.go b/libs/dyn/dynassert/assert_test.go index 43258bd205..c8c2d69609 100644 --- a/libs/dyn/dynassert/assert_test.go +++ b/libs/dyn/dynassert/assert_test.go @@ -13,7 +13,7 @@ import ( ) func TestThatThisTestPackageIsUsed(t *testing.T) { - var base = ".." + base := ".." var files []string err := fs.WalkDir(os.DirFS(base), ".", func(path string, d fs.DirEntry, err error) error { if d.IsDir() { diff --git a/libs/dyn/mapping.go b/libs/dyn/mapping.go index f5dbd18e6c..3c7c4e96ef 100644 --- a/libs/dyn/mapping.go +++ b/libs/dyn/mapping.go @@ -94,7 +94,7 @@ func (m *Mapping) GetByString(skey string) (Value, bool) { // If the key already exists, the value is updated. // If the key does not exist, a new key-value pair is added. // The key must be a string, otherwise an error is returned. -func (m *Mapping) Set(key Value, value Value) error { +func (m *Mapping) Set(key, value Value) error { skey, ok := key.AsString() if !ok { return fmt.Errorf("key must be a string, got %s", key.Kind()) diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index a83bbc1527..72d9a7d28a 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -111,6 +111,7 @@ func mergeSequence(a, b dyn.Value) (dyn.Value, error) { // Preserve the location of the first value. Accumulate the locations of the second value. return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } + func mergePrimitive(a, b dyn.Value) (dyn.Value, error) { // Merging primitive values means using the incoming value. return b.AppendLocationsFromValue(a), nil diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index 4a4bf9e6c0..bfe7720164 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -75,7 +75,6 @@ func TestMergeMaps(t *testing.T) { assert.Equal(t, l1, out.Get("foo").Location()) assert.Equal(t, l2, out.Get("qux").Location()) } - } func TestMergeMapsNil(t *testing.T) { diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go index 7a8667cd69..ca62c73050 100644 --- a/libs/dyn/merge/override.go +++ b/libs/dyn/merge/override.go @@ -23,7 +23,7 @@ import ( type OverrideVisitor struct { VisitDelete func(valuePath dyn.Path, left dyn.Value) error VisitInsert func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) - VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) + VisitUpdate func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) } var ErrOverrideUndoDelete = errors.New("undo delete operation") @@ -31,11 +31,11 @@ var ErrOverrideUndoDelete = errors.New("undo delete operation") // Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values // haven't changed. Preserving 'location' is important to preserve the original source of the value // for error reporting. 
-func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { +func Override(leftRoot, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { return override(dyn.EmptyPath, leftRoot, rightRoot, visitor) } -func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { +func override(basePath dyn.Path, left, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { if left.Kind() != right.Kind() { return visitor.VisitUpdate(basePath, left, right) } @@ -46,7 +46,6 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri switch left.Kind() { case dyn.KindMap: merged, err := overrideMapping(basePath, left.MustMap(), right.MustMap(), visitor) - if err != nil { return dyn.InvalidValue, err } @@ -57,7 +56,6 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri // some sequences are keyed, and we can detect which elements are added/removed/updated, // but we don't have this information merged, err := overrideSequence(basePath, left.MustSequence(), right.MustSequence(), visitor) - if err != nil { return dyn.InvalidValue, err } @@ -107,7 +105,7 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri return dyn.InvalidValue, fmt.Errorf("unexpected kind %s at %s", left.Kind(), basePath.String()) } -func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { +func overrideMapping(basePath dyn.Path, leftMapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { out := dyn.NewMapping() for _, leftPair := range leftMapping.Pairs() { @@ -136,14 +134,12 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy if leftPair, ok := leftMapping.GetPair(rightPair.Key); ok { path := basePath.Append(dyn.Key(rightPair.Key.MustString())) newValue, err := override(path, leftPair.Value, rightPair.Value, visitor) - if err != nil { return dyn.NewMapping(), err } // key was there before, so keep its location err = out.Set(leftPair.Key, newValue) - if err != nil { return dyn.NewMapping(), err } @@ -151,13 +147,11 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy path := basePath.Append(dyn.Key(rightPair.Key.MustString())) newValue, err := visitor.VisitInsert(path, rightPair.Value) - if err != nil { return dyn.NewMapping(), err } err = out.Set(rightPair.Key, newValue) - if err != nil { return dyn.NewMapping(), err } @@ -167,14 +161,13 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy return out, nil } -func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { +func overrideSequence(basePath dyn.Path, left, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { minLen := min(len(left), len(right)) var values []dyn.Value for i := 0; i < minLen; i++ { path := basePath.Append(dyn.Index(i)) merged, err := override(path, left[i], right[i], visitor) - if err != nil { return nil, err } @@ -186,7 +179,6 @@ func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, vi for i := minLen; i < len(right); i++ { path := basePath.Append(dyn.Index(i)) newValue, err := visitor.VisitInsert(path, right[i]) - if err != nil { return nil, err } diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index 4af9377749..ea161d27c5 100644 --- 
a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -484,7 +484,7 @@ func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { s := visitorState{} return &s, OverrideVisitor{ - VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + VisitUpdate: func(valuePath dyn.Path, left, right dyn.Value) (dyn.Value, error) { s.updated = append(s.updated, valuePath.String()) if opts.error != nil { diff --git a/libs/dyn/value_test.go b/libs/dyn/value_test.go index 6a0a27b8dc..86e65858ee 100644 --- a/libs/dyn/value_test.go +++ b/libs/dyn/value_test.go @@ -25,11 +25,11 @@ func TestValueAsMap(t *testing.T) { _, ok := zeroValue.AsMap() assert.False(t, ok) - var intValue = dyn.V(1) + intValue := dyn.V(1) _, ok = intValue.AsMap() assert.False(t, ok) - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key": dyn.NewValue( "value", @@ -46,6 +46,6 @@ func TestValueAsMap(t *testing.T) { func TestValueIsValid(t *testing.T) { var zeroValue dyn.Value assert.False(t, zeroValue.IsValid()) - var intValue = dyn.V(1) + intValue := dyn.V(1) assert.True(t, intValue.IsValid()) } diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 2cea0913be..d62327d6f5 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -71,7 +71,7 @@ func TestMapFuncOnMap(t *testing.T) { }, vbar.AsAny()) // Return error from map function. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -137,7 +137,7 @@ func TestMapFuncOnSequence(t *testing.T) { assert.Equal(t, []any{42, 45}, v1.AsAny()) // Return error from map function. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) @@ -211,7 +211,7 @@ func TestMapForeachOnMapError(t *testing.T) { }) // Check that an error from the map function propagates. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) @@ -255,7 +255,7 @@ func TestMapForeachOnSequenceError(t *testing.T) { }) // Check that an error from the map function propagates. - var ref = fmt.Errorf("error") + ref := fmt.Errorf("error") _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index 965753ccd4..fe58d6dfb4 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -137,8 +137,8 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro } // Build location for the merge node. - var mloc = d.location(merge) - var merr = errorf(mloc, "map merge requires map or sequence of maps as the value") + mloc := d.location(merge) + merr := errorf(mloc, "map merge requires map or sequence of maps as the value") // Flatten the merge node into a slice of nodes. // It can be either a single node or a sequence of nodes. 
diff --git a/libs/dyn/yamlloader/yaml_spec_test.go b/libs/dyn/yamlloader/yaml_spec_test.go index 2a5ae817fd..d9997f702c 100644 --- a/libs/dyn/yamlloader/yaml_spec_test.go +++ b/libs/dyn/yamlloader/yaml_spec_test.go @@ -777,7 +777,8 @@ func TestYAMLSpecExample_2_27(t *testing.T) { ), }, []dyn.Location{{File: file, Line: 22, Column: 3}}, - )}, + ), + }, []dyn.Location{{File: file, Line: 18, Column: 1}}, ), "tax": dyn.NewValue( diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index 0fd81d5341..7398e2594c 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -27,7 +27,7 @@ func NewSaverWithStyle(nodesWithStyle map[string]yaml.Style) *saver { } func (s *saver) SaveAsYAML(data any, filename string, force bool) error { - err := os.MkdirAll(filepath.Dir(filename), 0755) + err := os.MkdirAll(filepath.Dir(filename), 0o755) if err != nil { return err } diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index aa481c20b7..89bd5c31eb 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -11,7 +11,7 @@ import ( func TestMarshalNilValue(t *testing.T) { s := NewSaver() - var nilValue = dyn.NilValue + nilValue := dyn.NilValue v, err := s.toYamlNode(nilValue) assert.NoError(t, err) assert.Equal(t, "null", v.Value) @@ -19,7 +19,7 @@ func TestMarshalNilValue(t *testing.T) { func TestMarshalIntValue(t *testing.T) { s := NewSaver() - var intValue = dyn.V(1) + intValue := dyn.V(1) v, err := s.toYamlNode(intValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -28,7 +28,7 @@ func TestMarshalIntValue(t *testing.T) { func TestMarshalFloatValue(t *testing.T) { s := NewSaver() - var floatValue = dyn.V(1.0) + floatValue := dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -37,7 +37,7 @@ func TestMarshalFloatValue(t *testing.T) { func TestMarshalBoolValue(t *testing.T) { s := NewSaver() - var boolValue = dyn.V(true) + boolValue := dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -49,7 +49,7 @@ func TestMarshalTimeValue(t *testing.T) { require.NoError(t, err) s := NewSaver() - var timeValue = dyn.V(tm) + timeValue := dyn.V(tm) v, err := s.toYamlNode(timeValue) assert.NoError(t, err) assert.Equal(t, "1970-01-01", v.Value) @@ -58,7 +58,7 @@ func TestMarshalTimeValue(t *testing.T) { func TestMarshalSequenceValue(t *testing.T) { s := NewSaver() - var sequenceValue = dyn.NewValue( + sequenceValue := dyn.NewValue( []dyn.Value{ dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -74,7 +74,7 @@ func TestMarshalSequenceValue(t *testing.T) { func TestMarshalStringValue(t *testing.T) { s := NewSaver() - var stringValue = dyn.V("value") + stringValue := dyn.V("value") v, err := s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "value", v.Value) @@ -83,7 +83,7 @@ func TestMarshalStringValue(t *testing.T) { func TestMarshalMapValue(t *testing.T) { s := NewSaver() - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 3, Column: 2}}), "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -107,7 +107,7 @@ func TestMarshalMapValue(t *testing.T) { func TestMarshalNestedValues(t *testing.T) { s := NewSaver() - var mapValue = dyn.NewValue( + mapValue := dyn.NewValue( 
map[string]dyn.Value{ "key1": dyn.NewValue( map[string]dyn.Value{ @@ -129,14 +129,14 @@ func TestMarshalNestedValues(t *testing.T) { func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { s := NewSaver() - var hexValue = dyn.V(0x123) + hexValue := dyn.V(0x123) v, err := s.toYamlNode(hexValue) assert.NoError(t, err) assert.Equal(t, "291", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0x123") + stringValue := dyn.V("0x123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0x123", v.Value) @@ -146,14 +146,14 @@ func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { func TestMarshalBinaryValueIsQuoted(t *testing.T) { s := NewSaver() - var binaryValue = dyn.V(0b101) + binaryValue := dyn.V(0b101) v, err := s.toYamlNode(binaryValue) assert.NoError(t, err) assert.Equal(t, "5", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0b101") + stringValue := dyn.V("0b101") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0b101", v.Value) @@ -163,14 +163,14 @@ func TestMarshalBinaryValueIsQuoted(t *testing.T) { func TestMarshalOctalValueIsQuoted(t *testing.T) { s := NewSaver() - var octalValue = dyn.V(0123) + octalValue := dyn.V(0o123) v, err := s.toYamlNode(octalValue) assert.NoError(t, err) assert.Equal(t, "83", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("0123") + stringValue := dyn.V("0123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0123", v.Value) @@ -180,14 +180,14 @@ func TestMarshalOctalValueIsQuoted(t *testing.T) { func TestMarshalFloatValueIsQuoted(t *testing.T) { s := NewSaver() - var floatValue = dyn.V(1.0) + floatValue := dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("1.0") + stringValue := dyn.V("1.0") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "1.0", v.Value) @@ -197,14 +197,14 @@ func TestMarshalFloatValueIsQuoted(t *testing.T) { func TestMarshalBoolValueIsQuoted(t *testing.T) { s := NewSaver() - var boolValue = dyn.V(true) + boolValue := dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.V("true") + stringValue := dyn.V("true") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -217,7 +217,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { "styled": yaml.DoubleQuotedStyle, }) - var styledMap = dyn.NewValue( + styledMap := dyn.NewValue( map[string]dyn.Value{ "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -225,7 +225,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { []dyn.Location{{File: "file", Line: -2, Column: 2}}, ) - var unstyledMap = dyn.NewValue( + unstyledMap := dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 1, Column: 2}}), "key4": dyn.NewValue("value4", []dyn.Location{{File: "file", Line: 2, Column: 2}}), @@ -233,7 +233,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { []dyn.Location{{File: "file", Line: -1, 
Column: 2}}, ) - var val = dyn.NewValue( + val := dyn.NewValue( map[string]dyn.Value{ "styled": styledMap, "unstyled": unstyledMap, diff --git a/libs/exec/exec.go b/libs/exec/exec.go index 8e4633271a..466117e601 100644 --- a/libs/exec/exec.go +++ b/libs/exec/exec.go @@ -10,9 +10,11 @@ import ( type ExecutableType string -const BashExecutable ExecutableType = `bash` -const ShExecutable ExecutableType = `sh` -const CmdExecutable ExecutableType = `cmd` +const ( + BashExecutable ExecutableType = `bash` + ShExecutable ExecutableType = `sh` + CmdExecutable ExecutableType = `cmd` +) var finders map[ExecutableType](func() (shell, error)) = map[ExecutableType](func() (shell, error)){ BashExecutable: newBashShell, diff --git a/libs/exec/shell.go b/libs/exec/shell.go index f5d1768967..ee29eac8a0 100644 --- a/libs/exec/shell.go +++ b/libs/exec/shell.go @@ -36,7 +36,7 @@ func findShell() (shell, error) { return nil, errors.New("no shell found") } -func createTempScript(command string, extension string) (string, error) { +func createTempScript(command, extension string) (string, error) { file, err := os.CreateTemp(os.TempDir(), "cli-exec*"+extension) if err != nil { return "", err diff --git a/libs/filer/filer.go b/libs/filer/filer.go index b5be4c3c2f..83dc560cb0 100644 --- a/libs/filer/filer.go +++ b/libs/filer/filer.go @@ -103,8 +103,7 @@ func (err DirectoryNotEmptyError) Is(other error) bool { return other == fs.ErrInvalid } -type CannotDeleteRootError struct { -} +type CannotDeleteRootError struct{} func (err CannotDeleteRootError) Error() string { return "unable to delete filer root" diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 8b25345fc9..385aa69247 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -29,7 +29,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } // Retrieve permission mask from the [WriteMode], if present. - perm := fs.FileMode(0644) + perm := fs.FileMode(0o644) for _, m := range mode { bits := m & writeModePerm if bits != 0 { @@ -47,7 +47,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, f, err := os.OpenFile(absPath, flags, perm) if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. 
- err = os.MkdirAll(filepath.Dir(absPath), 0755) + err = os.MkdirAll(filepath.Dir(absPath), 0o755) if err != nil { return err } @@ -73,7 +73,6 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } return err - } func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { @@ -159,7 +158,7 @@ func (w *LocalClient) Mkdir(ctx context.Context, name string) error { return err } - return os.MkdirAll(dirPath, 0755) + return os.MkdirAll(dirPath, 0o755) } func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { diff --git a/libs/filer/slice_test.go b/libs/filer/slice_test.go index 21d7834838..2bdb3f7f5f 100644 --- a/libs/filer/slice_test.go +++ b/libs/filer/slice_test.go @@ -12,11 +12,10 @@ func TestSliceWithout(t *testing.T) { assert.Equal(t, []int{2, 3}, sliceWithout([]int{1, 2, 3}, 1)) assert.Equal(t, []int{1, 3}, sliceWithout([]int{1, 2, 3}, 2)) assert.Equal(t, []int{1, 2}, sliceWithout([]int{1, 2, 3}, 3)) - } func TestSliceWithoutReturnsClone(t *testing.T) { - var ints = []int{1, 2, 3} + ints := []int{1, 2, 3} assert.Equal(t, []int{2, 3}, sliceWithout(ints, 1)) assert.Equal(t, []int{1, 2, 3}, ints) } diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index 2a60520911..9ee2722e17 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -52,7 +52,8 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex notebook.ExtensionR, notebook.ExtensionScala, notebook.ExtensionSql, - notebook.ExtensionJupyter}, ext) { + notebook.ExtensionJupyter, + }, ext) { return nil, nil } diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go index 10c176b315..10a2bebf0a 100644 --- a/libs/filer/workspace_files_extensions_client_test.go +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -17,8 +17,9 @@ type mockApiClient struct { } func (m *mockApiClient) Do(ctx context.Context, method, path string, - headers map[string]string, request any, response any, - visitors ...func(*http.Request) error) error { + headers map[string]string, request, response any, + visitors ...func(*http.Request) error, +) error { args := m.Called(ctx, method, path, headers, request, response, visitors) // Set the http response from a value provided in the mock call. 
diff --git a/libs/flags/json_flag_test.go b/libs/flags/json_flag_test.go index 564d4f813c..b31324011a 100644 --- a/libs/flags/json_flag_test.go +++ b/libs/flags/json_flag_test.go @@ -57,7 +57,7 @@ func TestJsonFlagFile(t *testing.T) { var request any var fpath string - var payload = []byte(`{"foo": "bar"}`) + payload := []byte(`{"foo": "bar"}`) { f, err := os.Create(path.Join(t.TempDir(), "file")) diff --git a/libs/flags/log_file_flag.go b/libs/flags/log_file_flag.go index 9e60353f00..d2fe51d917 100644 --- a/libs/flags/log_file_flag.go +++ b/libs/flags/log_file_flag.go @@ -48,7 +48,7 @@ func (f *realLogFile) Writer() io.Writer { } func (f *realLogFile) Open() error { - file, err := os.OpenFile(f.s, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + file, err := os.OpenFile(f.s, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600) if err != nil { return err } diff --git a/libs/folders/folders.go b/libs/folders/folders.go index 81bf7ed01d..bbabc588ce 100644 --- a/libs/folders/folders.go +++ b/libs/folders/folders.go @@ -8,7 +8,7 @@ import ( // FindDirWithLeaf returns the first directory that holds `leaf`, // traversing up to the root of the filesystem, starting at `dir`. -func FindDirWithLeaf(dir string, leaf string) (string, error) { +func FindDirWithLeaf(dir, leaf string) (string, error) { dir, err := filepath.Abs(dir) if err != nil { return "", err diff --git a/libs/git/config_test.go b/libs/git/config_test.go index 3e6edf765e..73f3431c94 100644 --- a/libs/git/config_test.go +++ b/libs/git/config_test.go @@ -113,7 +113,7 @@ func (h *testCoreExcludesHelper) initialize(t *testing.T) { t.Setenv("XDG_CONFIG_HOME", h.xdgConfigHome) xdgConfigHomeGit := filepath.Join(h.xdgConfigHome, "git") - err := os.MkdirAll(xdgConfigHomeGit, 0755) + err := os.MkdirAll(xdgConfigHomeGit, 0o755) require.NoError(t, err) } @@ -124,7 +124,7 @@ func (h *testCoreExcludesHelper) coreExcludesFile() (string, error) { } func (h *testCoreExcludesHelper) writeConfig(path, contents string) { - err := os.WriteFile(path, []byte(contents), 0644) + err := os.WriteFile(path, []byte(contents), 0o644) require.NoError(h, err) } diff --git a/libs/git/ignore_test.go b/libs/git/ignore_test.go index 057c0cb2e5..9e2713608f 100644 --- a/libs/git/ignore_test.go +++ b/libs/git/ignore_test.go @@ -48,7 +48,7 @@ func TestIgnoreFileTaint(t *testing.T) { assert.False(t, ign) // Now create the .gitignore file. - err = os.WriteFile(gitIgnorePath, []byte("hello"), 0644) + err = os.WriteFile(gitIgnorePath, []byte("hello"), 0o644) require.NoError(t, err) // Verify that the match still doesn't happen (no spontaneous reload). diff --git a/libs/git/info.go b/libs/git/info.go index ce9c70a5c1..46e57be488 100644 --- a/libs/git/info.go +++ b/libs/git/info.go @@ -72,7 +72,6 @@ func fetchRepositoryInfoAPI(ctx context.Context, path string, w *databricks.Work }, &response, ) - if err != nil { return result, err } diff --git a/libs/git/reference.go b/libs/git/reference.go index 2165a9cdad..e1126d4f24 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -12,8 +12,10 @@ import ( type ReferenceType string -var ErrNotAReferencePointer = fmt.Errorf("HEAD does not point to another reference") -var ErrNotABranch = fmt.Errorf("HEAD is not a reference to a git branch") +var ( + ErrNotAReferencePointer = fmt.Errorf("HEAD does not point to another reference") + ErrNotABranch = fmt.Errorf("HEAD is not a reference to a git branch") +) const ( // pointer to a secondary reference file path containing sha-1 object ID. 
@@ -30,8 +32,10 @@ type Reference struct { Content string } -const ReferencePrefix = "ref: " -const HeadPathPrefix = "refs/heads/" +const ( + ReferencePrefix = "ref: " + HeadPathPrefix = "refs/heads/" +) // asserts if a string is a 40 character hexadecimal encoded string func isSHA1(s string) bool { diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 474880e273..857df65a91 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -63,7 +63,7 @@ func (testRepo *testRepository) checkoutCommit(commitId string) { require.NoError(testRepo.t, err) } -func (testRepo *testRepository) addBranch(name string, latestCommit string) { +func (testRepo *testRepository) addBranch(name, latestCommit string) { // create dir for branch head reference branchDir := filepath.Join(testRepo.r.Root(), ".git", "refs", "heads") err := os.MkdirAll(branchDir, os.ModePerm) diff --git a/libs/git/view.go b/libs/git/view.go index 2eaba1f8b4..db22dfc5d4 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -113,7 +113,7 @@ func (v *View) EnsureValidGitIgnoreExists() error { // Create .gitignore with .databricks entry gitIgnorePath := filepath.Join(v.repo.Root(), v.targetPath, ".gitignore") - file, err := os.OpenFile(gitIgnorePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) + file, err := os.OpenFile(gitIgnorePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o644) if err != nil { return err } diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 06f6f94194..96881fdeee 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -20,7 +20,7 @@ func copyTestdata(t *testing.T, name string) string { require.NoError(t, err) if d.IsDir() { - err := os.MkdirAll(filepath.Join(tempDir, path), 0755) + err := os.MkdirAll(filepath.Join(tempDir, path), 0o755) require.NoError(t, err) return nil } @@ -46,7 +46,7 @@ func createFakeRepo(t *testing.T, testdataName string) string { absPath := copyTestdata(t, testdataName) // Add .git directory to make it look like a Git repository. - err := os.Mkdir(filepath.Join(absPath, ".git"), 0755) + err := os.Mkdir(filepath.Join(absPath, ".git"), 0o755) require.NoError(t, err) return absPath } diff --git a/libs/jsonschema/validate_type.go b/libs/jsonschema/validate_type.go index 125d6b20b2..9f70498bae 100644 --- a/libs/jsonschema/validate_type.go +++ b/libs/jsonschema/validate_type.go @@ -39,9 +39,11 @@ func validateNumber(v any) error { } func validateInteger(v any) error { - if !slices.Contains([]reflect.Kind{reflect.Int, reflect.Int8, reflect.Int16, + if !slices.Contains([]reflect.Kind{ + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64}, + reflect.Uint32, reflect.Uint64, + }, reflect.TypeOf(v).Kind()) { return fmt.Errorf("expected type integer, but value is %#v", v) } diff --git a/libs/locker/locker.go b/libs/locker/locker.go index b0d65c42ee..eb59c9f748 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -140,7 +140,7 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { return err } - var modes = []filer.WriteMode{ + modes := []filer.WriteMode{ // Always create parent directory if it doesn't yet exist. 
filer.CreateParentDirectories, } @@ -196,7 +196,7 @@ func (locker *Locker) Unlock(ctx context.Context, opts ...UnlockOption) error { return nil } -func CreateLocker(user string, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { +func CreateLocker(user, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { filer, err := filer.NewWorkspaceFilesClient(w, targetDir) if err != nil { return nil, err diff --git a/libs/log/context.go b/libs/log/context.go index d9e31d1161..5e3e8ccb65 100644 --- a/libs/log/context.go +++ b/libs/log/context.go @@ -2,7 +2,6 @@ package log import ( "context" - "log/slog" ) diff --git a/libs/log/logger.go b/libs/log/logger.go index 794a7e8655..c1d307c899 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -3,10 +3,9 @@ package log import ( "context" "fmt" + "log/slog" "runtime" "time" - - "log/slog" ) // GetLogger returns either the logger configured on the context, diff --git a/libs/log/sdk.go b/libs/log/sdk.go index e1b1ffed46..086f80f504 100644 --- a/libs/log/sdk.go +++ b/libs/log/sdk.go @@ -3,11 +3,10 @@ package log import ( "context" "fmt" + "log/slog" "runtime" "time" - "log/slog" - sdk "github.com/databricks/databricks-sdk-go/logger" ) diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index cd8680bfa4..40c850945f 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -46,7 +46,7 @@ func (f file) close() error { func (f file) readHeader() (string, error) { // Scan header line with some padding. - var buf = make([]byte, headerLength) + buf := make([]byte, headerLength) n, err := f.f.Read([]byte(buf)) if err != nil && err != io.EOF { return "", err diff --git a/libs/notebook/detect_jupyter_test.go b/libs/notebook/detect_jupyter_test.go index 4ff2aeff62..af29a22149 100644 --- a/libs/notebook/detect_jupyter_test.go +++ b/libs/notebook/detect_jupyter_test.go @@ -41,7 +41,7 @@ func TestDetectJupyterInvalidJSON(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := make([]byte, 128) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. @@ -55,7 +55,7 @@ func TestDetectJupyterNoCells(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := []byte("{}") - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. @@ -69,7 +69,7 @@ func TestDetectJupyterOldVersion(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.ipynb") buf := []byte(`{ "cells": [], "metadata": {}, "nbformat": 3 }`) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index 786c7e394a..4ede7bf9bd 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -78,7 +78,7 @@ func TestDetectEmptyFile(t *testing.T) { // Create empty file. dir := t.TempDir() path := filepath.Join(dir, "file.py") - err := os.WriteFile(path, nil, 0644) + err := os.WriteFile(path, nil, 0o644) require.NoError(t, err) // No contents means not a notebook. 
@@ -92,7 +92,7 @@ func TestDetectFileWithLongHeader(t *testing.T) { dir := t.TempDir() path := filepath.Join(dir, "file.py") buf := make([]byte, 128*1024) - err := os.WriteFile(path, buf, 0644) + err := os.WriteFile(path, buf, 0o644) require.NoError(t, err) // Garbage contents means not a notebook. diff --git a/libs/python/detect_test.go b/libs/python/detect_test.go index 78c7067f7e..485aa1875c 100644 --- a/libs/python/detect_test.go +++ b/libs/python/detect_test.go @@ -14,13 +14,13 @@ func TestDetectVEnvExecutable(t *testing.T) { dir := t.TempDir() interpreterPath := interpreterPath(dir) - err := os.Mkdir(filepath.Dir(interpreterPath), 0755) + err := os.Mkdir(filepath.Dir(interpreterPath), 0o755) require.NoError(t, err) - err = os.WriteFile(interpreterPath, []byte(""), 0755) + err = os.WriteFile(interpreterPath, []byte(""), 0o755) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0755) + err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0o755) require.NoError(t, err) executable, err := DetectVEnvExecutable(dir) diff --git a/libs/python/interpreters.go b/libs/python/interpreters.go index 94f5074dec..6071309a81 100644 --- a/libs/python/interpreters.go +++ b/libs/python/interpreters.go @@ -18,8 +18,10 @@ import ( var ErrNoPythonInterpreters = errors.New("no python3 interpreters found") -const officialMswinPython = "(Python Official) https://python.org/downloads/windows" -const microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" +const ( + officialMswinPython = "(Python Official) https://python.org/downloads/windows" + microsoftStorePython = "(Microsoft Store) https://apps.microsoft.com/store/search?publisher=Python%20Software%20Foundation" +) const worldWriteable = 0o002 diff --git a/libs/python/interpreters_unix_test.go b/libs/python/interpreters_unix_test.go index b5229c2ace..8471644a13 100644 --- a/libs/python/interpreters_unix_test.go +++ b/libs/python/interpreters_unix_test.go @@ -41,7 +41,7 @@ func TestFilteringInterpreters(t *testing.T) { assert.NoError(t, err) injectedBinary := filepath.Join(rogueBin, "python8.4") - err = os.WriteFile(injectedBinary, raw, 00777) + err = os.WriteFile(injectedBinary, raw, 0o0777) assert.NoError(t, err) t.Setenv("PATH", "testdata/other-binaries-filtered:"+rogueBin) diff --git a/libs/sync/diff.go b/libs/sync/diff.go index e91f7277ee..d81a3ae65e 100644 --- a/libs/sync/diff.go +++ b/libs/sync/diff.go @@ -20,7 +20,7 @@ func (d diff) IsEmpty() bool { // Compute operations required to make files in WSFS reflect current local files. // Takes into account changes since the last sync iteration. -func computeDiff(after *SnapshotState, before *SnapshotState) diff { +func computeDiff(after, before *SnapshotState) diff { d := &diff{ delete: make([]string, 0), rmdir: make([]string, 0), @@ -35,7 +35,7 @@ func computeDiff(after *SnapshotState, before *SnapshotState) diff { } // Add operators for tracked files that no longer exist. -func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addRemovedFiles(after, before *SnapshotState) { for localName, remoteName := range before.LocalToRemoteNames { if _, ok := after.LocalToRemoteNames[localName]; !ok { d.delete = append(d.delete, remoteName) @@ -50,7 +50,7 @@ func (d *diff) addRemovedFiles(after *SnapshotState, before *SnapshotState) { // Cleanup previous remote files for files that had their remote targets change. 
For // example this is possible if you convert a normal python script to a notebook. -func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *SnapshotState) { +func (d *diff) addFilesWithRemoteNameChanged(after, before *SnapshotState) { for localName, beforeRemoteName := range before.LocalToRemoteNames { afterRemoteName, ok := after.LocalToRemoteNames[localName] if ok && afterRemoteName != beforeRemoteName { @@ -60,7 +60,7 @@ func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *Snaps } // Add operators for files that were not being tracked before. -func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addNewFiles(after, before *SnapshotState) { for localName := range after.LastModifiedTimes { if _, ok := before.LastModifiedTimes[localName]; !ok { d.put = append(d.put, localName) @@ -74,7 +74,7 @@ func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { } // Add operators for files which had their contents updated. -func (d *diff) addUpdatedFiles(after *SnapshotState, before *SnapshotState) { +func (d *diff) addUpdatedFiles(after, before *SnapshotState) { for localName, modTime := range after.LastModifiedTimes { prevModTime, ok := before.LastModifiedTimes[localName] if ok && modTime.After(prevModTime) { diff --git a/libs/sync/event.go b/libs/sync/event.go index 8e5c0efa2e..05821a477b 100644 --- a/libs/sync/event.go +++ b/libs/sync/event.go @@ -73,7 +73,7 @@ func (e *EventStart) String() string { return fmt.Sprintf("Action: %s", e.EventChanges.String()) } -func newEventStart(seq int, put []string, delete []string) Event { +func newEventStart(seq int, put, delete []string) Event { return &EventStart{ EventBase: newEventBase(seq, EventTypeStart), EventChanges: &EventChanges{Put: put, Delete: delete}, @@ -133,7 +133,7 @@ func (e *EventSyncComplete) String() string { return "Complete" } -func newEventComplete(seq int, put []string, delete []string) Event { +func newEventComplete(seq int, put, delete []string) Event { return &EventSyncComplete{ EventBase: newEventBase(seq, EventTypeComplete), EventChanges: &EventChanges{Put: put, Delete: delete}, diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index f2920d8c22..a596531b93 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -2,6 +2,8 @@ package sync import ( "context" + "crypto/md5" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -10,9 +12,6 @@ import ( "path/filepath" "time" - "crypto/md5" - "encoding/hex" - "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/log" ) @@ -91,7 +90,7 @@ func GetFileName(host, remotePath string) string { func SnapshotPath(opts *SyncOptions) (string, error) { snapshotDir := filepath.Join(opts.SnapshotBasePath, syncSnapshotDirName) if _, err := os.Stat(snapshotDir); errors.Is(err, fs.ErrNotExist) { - err = os.MkdirAll(snapshotDir, 0755) + err = os.MkdirAll(snapshotDir, 0o755) if err != nil { return "", fmt.Errorf("failed to create config directory: %s", err) } @@ -122,7 +121,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) Save(ctx context.Context) error { - f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) if err != nil { return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err) } diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go index 09bb5b63e6..d8660ee6ac 100644 --- 
a/libs/sync/snapshot_state.go +++ b/libs/sync/snapshot_state.go @@ -51,7 +51,6 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { // Compute the remote name the file will have in WSFS remoteName := f.Relative isNotebook, err := f.IsNotebook() - if err != nil { // Ignore this file if we're unable to determine the notebook type. // Trying to upload such a file to the workspace would fail anyway. diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 6bd26f2241..dc2c8992ad 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -117,7 +117,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { } var notifier EventNotifier - var outputWaitGroup = &stdsync.WaitGroup{} + outputWaitGroup := &stdsync.WaitGroup{} if opts.OutputHandler != nil { ch := make(chan Event, MaxRequestsInFlight) notifier = &ChannelNotifier{ch} diff --git a/libs/tags/gcp_test.go b/libs/tags/gcp_test.go index 89f4fd8e6b..7c960acbbf 100644 --- a/libs/tags/gcp_test.go +++ b/libs/tags/gcp_test.go @@ -38,7 +38,6 @@ func TestGcpNormalizeKey(t *testing.T) { assert.Equal(t, "test", gcpTag.NormalizeKey("test")) assert.Equal(t, "cafe", gcpTag.NormalizeKey("café 🍎?")) assert.Equal(t, "cafe_foo", gcpTag.NormalizeKey("__café_foo__")) - } func TestGcpNormalizeValue(t *testing.T) { diff --git a/libs/template/file_test.go b/libs/template/file_test.go index bd5f6d632e..ced38c2848 100644 --- a/libs/template/file_test.go +++ b/libs/template/file_test.go @@ -57,7 +57,7 @@ func TestTemplateInMemoryFilePersistToDisk(t *testing.T) { t.SkipNow() } ctx := context.Background() - testInMemoryFile(t, ctx, 0755) + testInMemoryFile(t, ctx, 0o755) } func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { @@ -67,7 +67,7 @@ func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. ctx := context.Background() - testInMemoryFile(t, ctx, 0666) + testInMemoryFile(t, ctx, 0o666) } func TestTemplateCopyFilePersistToDisk(t *testing.T) { @@ -75,7 +75,7 @@ func TestTemplateCopyFilePersistToDisk(t *testing.T) { t.SkipNow() } ctx := context.Background() - testCopyFile(t, ctx, 0644) + testCopyFile(t, ctx, 0o644) } func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { @@ -85,5 +85,5 @@ func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. ctx := context.Background() - testCopyFile(t, ctx, 0666) + testCopyFile(t, ctx, 0o666) } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 7f7acbd24a..4550e5fa2b 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -31,9 +31,11 @@ type pair struct { v any } -var cachedUser *iam.User -var cachedIsServicePrincipal *bool -var cachedCatalog *string +var ( + cachedUser *iam.User + cachedIsServicePrincipal *bool + cachedCatalog *string +) // UUID that is stable for the duration of the template execution. This can be used // to populate the `bundle.uuid` field in databricks.yml by template authors. 
diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 6c476c658a..a2e8269d28 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -158,7 +158,6 @@ func TestWorkspaceHost(t *testing.T) { assert.Len(t, r.files, 1) assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "https://myhost.com") assert.Contains(t, string(r.files[0].(*inMemoryFile).content), "i3.xlarge") - } func TestWorkspaceHostNotConfigured(t *testing.T) { @@ -178,5 +177,4 @@ func TestWorkspaceHostNotConfigured(t *testing.T) { err = r.walk() require.ErrorContains(t, err, "cannot determine target workspace") - } diff --git a/libs/template/materialize.go b/libs/template/materialize.go index ee30444a5e..86a6a8c37a 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -10,9 +10,11 @@ import ( "github.com/databricks/cli/libs/filer" ) -const libraryDirName = "library" -const templateDirName = "template" -const schemaFileName = "databricks_template_schema.json" +const ( + libraryDirName = "library" + templateDirName = "template" + schemaFileName = "databricks_template_schema.json" +) // This function materializes the input templates as a project, using user defined // configurations. diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index a4b9166dad..eeb3087327 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/require" ) -func assertFileContent(t *testing.T, path string, content string) { +func assertFileContent(t *testing.T, path, content string) { b, err := os.ReadFile(path) require.NoError(t, err) assert.Equal(t, content, string(b)) @@ -39,7 +39,7 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { assert.Equal(t, perm, info.Mode().Perm()) } -func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { +func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal, build bool, tempDir string) { ctx := context.Background() templateFS, err := fs.Sub(builtinTemplates, path.Join("templates", template)) @@ -200,8 +200,7 @@ func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { } func TestRendererExecuteTemplate(t *testing.T) { - templateText := - `"{{.count}} items are made of {{.Material}}". + templateText := `"{{.count}} items are made of {{.Material}}". {{if eq .Animal "sheep" }} Sheep wool is the best! 
{{else}} @@ -256,7 +255,6 @@ func TestRendererExecuteTemplateWithUnknownProperty(t *testing.T) { } func TestRendererIsSkipped(t *testing.T) { - skipPatterns := []string{"a*", "*yz", "def", "a/b/*"} // skipped paths @@ -319,22 +317,22 @@ func TestRendererPersistToDisk(t *testing.T) { skipPatterns: []string{"a/b/c", "mn*"}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a/b/c", content: nil, }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "mno", content: nil, }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a/b/d", content: []byte("123"), }, &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "mmnn", content: []byte("456"), }, @@ -350,9 +348,9 @@ func TestRendererPersistToDisk(t *testing.T) { assert.NoFileExists(t, filepath.Join(tmpDir, "mno")) assertFileContent(t, filepath.Join(tmpDir, "a", "b", "d"), "123") - assertFilePermissions(t, filepath.Join(tmpDir, "a", "b", "d"), 0444) + assertFilePermissions(t, filepath.Join(tmpDir, "a", "b", "d"), 0o444) assertFileContent(t, filepath.Join(tmpDir, "mmnn"), "456") - assertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), 0444) + assertFilePermissions(t, filepath.Join(tmpDir, "mmnn"), 0o444) } func TestRendererWalk(t *testing.T) { @@ -520,8 +518,8 @@ func TestRendererReadsPermissionsBits(t *testing.T) { } assert.Len(t, r.files, 2) - assert.Equal(t, getPermissions(r, "script.sh"), fs.FileMode(0755)) - assert.Equal(t, getPermissions(r, "not-a-script"), fs.FileMode(0644)) + assert.Equal(t, getPermissions(r, "script.sh"), fs.FileMode(0o755)) + assert.Equal(t, getPermissions(r, "not-a-script"), fs.FileMode(0o644)) } func TestRendererErrorOnConflictingFile(t *testing.T) { @@ -537,7 +535,7 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { skipPatterns: []string{}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a", content: []byte("123"), }, @@ -563,7 +561,7 @@ func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { skipPatterns: []string{"a"}, files: []file{ &inMemoryFile{ - perm: 0444, + perm: 0o444, relPath: "a", content: []byte("123"), }, diff --git a/libs/textutil/textutil_test.go b/libs/textutil/textutil_test.go index f6834a1ef3..b9268c98b8 100644 --- a/libs/textutil/textutil_test.go +++ b/libs/textutil/textutil_test.go @@ -50,7 +50,8 @@ func TestNormalizeString(t *testing.T) { { input: ".test//test..test", expected: "test_test_test", - }} + }, + } for _, c := range cases { assert.Equal(t, c.expected, NormalizeString(c.input)) From 7249b82bf7892ac98c93686ef1cd403ecea873f9 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 12 Dec 2024 10:54:00 +0100 Subject: [PATCH 34/63] Add .git-blame-ignore-revs with linter-related mass change commits (#2000) This file is automatically recognized by GitHub. For git to recognize it, run: `git config blame.ignoreRevsFile .git-blame-ignore-revs` ## Tests Run 'git blame' with and without this option.
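A rough way to reproduce that check locally, assuming a clone of this repository (the file path is taken from the compare links below; exact blame output will vary):

```sh
# One-time, per clone: point git blame at the ignore-revs file (same command as above).
git config blame.ignoreRevsFile .git-blame-ignore-revs

# With the setting, lines touched only by the mass linter commits keep their original authors.
git blame libs/git/reference_test.go

# Passing an empty file name clears the ignore list, so the linter commits reappear in the output.
git blame --ignore-revs-file="" libs/git/reference_test.go
```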
Compare view on Github https://github.com/databricks/cli/blame/2e018cf/libs/git/reference_test.go https://github.com/databricks/cli/blame/denis.bilenko/ignore-linter-commits/libs/git/reference_test.go --- .git-blame-ignore-revs | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..6304b3604c --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,8 @@ +# Enable gofumpt and goimports in golangci-lint (#1999) +2e018cfaec200a02ee2bd5b389e7da3c6f15f460 + +# Enable errcheck everywhere and fix or silent remaining issues (#1987) +8d5351c1c3d7befda4baae5d6adb99367aa50b3c + +# Add error checking in tests and enable errcheck there (#1980) +1b2be1b2cb4b7909df2a8ad4cb6a0f43e8fcf0c6 From a7e91a5b687c27e4ec16a08f7fa35a39d8e1ed8b Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 12 Dec 2024 11:06:34 +0100 Subject: [PATCH 35/63] Enable gofumpt in vscode (#2001) ## Tests Verified that with this setting, VSCode reformats code on Save according to gofumpt rules. --- .vscode/settings.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index 853e84de83..a7830d4ae9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,6 +7,10 @@ "go.lintFlags": [ "--fast" ], + "go.useLanguageServer": true, + "gopls": { + "formatting.gofumpt": true + }, "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.trimFinalNewlines": true, From 241fcfffb02100fe7d8410b28d62fd2045bbe794 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 12 Dec 2024 13:35:38 +0100 Subject: [PATCH 36/63] Consolidate helper functions to `internal/testutil` package (#2002) ## Changes This is one step (of many) toward moving the integration tests around. 
This change consolidates the following functions: * `ReadFile` / `WriteFile` * `GetEnvOrSkipTest` * `RandomName` ## Tests n/a --- bundle/config/validate/files_to_sync_test.go | 3 +- ...check_dashboards_modified_remotely_test.go | 4 +- internal/acc/workspace.go | 9 ++-- internal/alerts_test.go | 3 +- internal/api_test.go | 11 ++-- internal/auth_describe_test.go | 5 +- internal/bundle/artifacts_test.go | 5 +- internal/bundle/bind_resource_test.go | 7 +-- internal/bundle/dashboards_test.go | 3 +- internal/bundle/deployment_state_test.go | 3 +- internal/bundle/generate_job_test.go | 2 +- internal/bundle/generate_pipeline_test.go | 7 +-- internal/bundle/spark_jar_test.go | 6 +-- internal/bundle/validate_test.go | 5 +- internal/clusters_test.go | 5 +- internal/dashboard_assumptions_test.go | 3 +- internal/filer_test.go | 47 ++++++++-------- internal/fs_cat_test.go | 5 +- internal/fs_cp_test.go | 3 +- internal/fs_ls_test.go | 3 +- internal/git_clone_test.go | 7 +-- internal/git_fetch_test.go | 5 +- internal/helpers.go | 54 +++---------------- internal/init_test.go | 6 +-- internal/jobs_test.go | 3 +- internal/locker_test.go | 7 +-- internal/python/python_tasks_test.go | 15 +++--- internal/repos_test.go | 15 +++--- internal/secrets_test.go | 3 +- internal/storage_credentials_test.go | 3 +- internal/sync_test.go | 15 +++--- internal/tags_test.go | 2 +- internal/testutil/env.go | 9 ---- internal/testutil/file.go | 13 +++-- internal/{acc => testutil}/helpers.go | 2 +- internal/workspace_test.go | 5 +- 36 files changed, 145 insertions(+), 158 deletions(-) rename internal/{acc => testutil}/helpers.go (97%) diff --git a/bundle/config/validate/files_to_sync_test.go b/bundle/config/validate/files_to_sync_test.go index 30af9026d1..d6a1ed59ad 100644 --- a/bundle/config/validate/files_to_sync_test.go +++ b/bundle/config/validate/files_to_sync_test.go @@ -2,6 +2,7 @@ package validate import ( "context" + "path/filepath" "testing" "github.com/databricks/cli/bundle" @@ -81,7 +82,7 @@ func TestFilesToSync_EverythingIgnored(t *testing.T) { b := setupBundleForFilesToSyncTest(t) // Ignore all files. - testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore") + testutil.WriteFile(t, filepath.Join(b.BundleRootPath, ".gitignore"), "*\n.*\n") ctx := context.Background() rb := bundle.ReadOnly(b) diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go index 25aee125f8..1bed3b1be1 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go @@ -139,7 +139,7 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle require.NoError(t, err) // Write fake state file. 
- testutil.WriteFile(t, ` + testutil.WriteFile(t, filepath.Join(tfDir, TerraformStateFileName), ` { "version": 4, "terraform_version": "1.5.5", @@ -187,5 +187,5 @@ func writeFakeDashboardState(t *testing.T, ctx context.Context, b *bundle.Bundle } ] } - `, filepath.Join(tfDir, TerraformStateFileName)) + `) } diff --git a/internal/acc/workspace.go b/internal/acc/workspace.go index 69ab0e715d..64010f42bf 100644 --- a/internal/acc/workspace.go +++ b/internal/acc/workspace.go @@ -6,6 +6,7 @@ import ( "os" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/compute" @@ -26,7 +27,7 @@ type WorkspaceT struct { func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { loadDebugEnvIfRunFromIDE(t, "workspace") - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -46,7 +47,7 @@ func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { loadDebugEnvIfRunFromIDE(t, "workspace") - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) if os.Getenv("TEST_METASTORE_ID") == "" { t.Skipf("Skipping on non-UC workspaces") @@ -70,7 +71,7 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } func (t *WorkspaceT) TestClusterID() string { - clusterID := GetEnvOrSkipTest(t.T, "TEST_BRICKS_CLUSTER_ID") + clusterID := testutil.GetEnvOrSkipTest(t.T, "TEST_BRICKS_CLUSTER_ID") err := t.W.Clusters.EnsureClusterIsRunning(t.ctx, clusterID) require.NoError(t, err) return clusterID @@ -103,7 +104,7 @@ func (t *WorkspaceT) TemporaryWorkspaceDir(name ...string) string { me, err := t.W.CurrentUser.Me(ctx) require.NoError(t, err) - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName(name...)) + basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName(name...)) t.Logf("Creating %s", basePath) err = t.W.Workspace.MkdirsByPath(ctx, basePath) diff --git a/internal/alerts_test.go b/internal/alerts_test.go index 6d75440741..ebb7a2095b 100644 --- a/internal/alerts_test.go +++ b/internal/alerts_test.go @@ -3,11 +3,12 @@ package internal import ( "testing" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" ) func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := RequireErrorRun(t, "alerts-legacy", "create") assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) diff --git a/internal/api_test.go b/internal/api_test.go index f3e8b71719..fbbc5b0729 100644 --- a/internal/api_test.go +++ b/internal/api_test.go @@ -4,16 +4,18 @@ import ( "encoding/json" "fmt" "path" + "path/filepath" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" _ "github.com/databricks/cli/cmd/api" + "github.com/databricks/cli/internal/testutil" ) func TestAccApiGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, _ := RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") @@ -28,14 +30,15 @@ func TestAccApiGet(t *testing.T) { } func TestAccApiPost(t *testing.T) { - env := GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, 
"CLOUD_ENV") t.Log(env) if env == "gcp" { t.Skip("DBFS REST API is disabled on gcp") } - dbfsPath := path.Join("/tmp/databricks/integration", RandomName("api-post")) - requestPath := writeFile(t, "body.json", fmt.Sprintf(`{ + dbfsPath := path.Join("/tmp/databricks/integration", testutil.RandomName("api-post")) + requestPath := filepath.Join(t.TempDir(), "body.json") + testutil.WriteFile(t, requestPath, fmt.Sprintf(`{ "path": "%s" }`, dbfsPath)) diff --git a/internal/auth_describe_test.go b/internal/auth_describe_test.go index 90b5d68019..dfbc95a1fd 100644 --- a/internal/auth_describe_test.go +++ b/internal/auth_describe_test.go @@ -5,12 +5,13 @@ import ( "fmt" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) func TestAuthDescribeSuccess(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, _ := RequireSuccessfulRun(t, "auth", "describe") outStr := stdout.String() @@ -31,7 +32,7 @@ func TestAuthDescribeSuccess(t *testing.T) { } func TestAuthDescribeFailure(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, _ := RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") outStr := stdout.String() diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index f5eec32e06..d0773df05a 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -234,7 +235,7 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W - schemaName := internal.RandomName("schema-") + schemaName := testutil.RandomName("schema-") _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", @@ -271,7 +272,7 @@ func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W - schemaName := internal.RandomName("schema-") + schemaName := testutil.RandomName("schema-") _, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go index bf80b36691..c6bafdfc21 100644 --- a/internal/bundle/bind_resource_test.go +++ b/internal/bundle/bind_resource_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -16,7 +17,7 @@ import ( ) func TestAccBindJobToExistingJob(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) ctx, wt := acc.WorkspaceTest(t) @@ -81,7 +82,7 @@ func TestAccBindJobToExistingJob(t *testing.T) { } func TestAccAbortBind(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) ctx, wt := acc.WorkspaceTest(t) @@ -130,7 +131,7 @@ func TestAccAbortBind(t *testing.T) { } func 
TestAccGenerateAndBind(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) ctx, wt := acc.WorkspaceTest(t) diff --git a/internal/bundle/dashboards_test.go b/internal/bundle/dashboards_test.go index 3c2e27c629..82ed419b4b 100644 --- a/internal/bundle/dashboards_test.go +++ b/internal/bundle/dashboards_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/google/uuid" @@ -15,7 +16,7 @@ import ( func TestAccDashboards(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - warehouseID := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") uniqueID := uuid.New().String() root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{ "unique_id": uniqueID, diff --git a/internal/bundle/deployment_state_test.go b/internal/bundle/deployment_state_test.go index 0facd9eb04..5e85740095 100644 --- a/internal/bundle/deployment_state_test.go +++ b/internal/bundle/deployment_state_test.go @@ -9,12 +9,13 @@ import ( "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/require" ) func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) ctx, wt := acc.WorkspaceTest(t) diff --git a/internal/bundle/generate_job_test.go b/internal/bundle/generate_job_test.go index 847a7a14e5..7fd265fd43 100644 --- a/internal/bundle/generate_job_test.go +++ b/internal/bundle/generate_job_test.go @@ -95,7 +95,7 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { require.NoError(t, err) resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ - Name: internal.RandomName("generated-job-"), + Name: testutil.RandomName("generated-job-"), Tasks: []jobs.Task{ { TaskKey: "test", diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index b4a53aaaab..589cb2a6c2 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -58,7 +59,7 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { generatedYaml := string(data) // Replace pipeline name - generatedYaml = strings.ReplaceAll(generatedYaml, name, internal.RandomName("copy-generated-pipeline-")) + generatedYaml = strings.ReplaceAll(generatedYaml, name, testutil.RandomName("copy-generated-pipeline-")) err = os.WriteFile(fileName, []byte(generatedYaml), 0o644) require.NoError(t, err) @@ -94,10 +95,10 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") nodeTypeId := internal.GetNodeTypeId(env) - name := 
internal.RandomName("generated-pipeline-") + name := testutil.RandomName("generated-pipeline-") resp, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ Name: name, Libraries: []pipelines.PipelineLibrary{ diff --git a/internal/bundle/spark_jar_test.go b/internal/bundle/spark_jar_test.go index a7a58a3663..cc67b30f2a 100644 --- a/internal/bundle/spark_jar_test.go +++ b/internal/bundle/spark_jar_test.go @@ -13,7 +13,7 @@ import ( ) func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, artifactPath string) { - cloudEnv := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + cloudEnv := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") nodeTypeId := internal.GetNodeTypeId(cloudEnv) tmpDir := t.TempDir() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") @@ -54,7 +54,7 @@ func runSparkJarTestFromWorkspace(t *testing.T, sparkVersion string) { } func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: @@ -78,7 +78,7 @@ func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { } func TestAccSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: diff --git a/internal/bundle/validate_test.go b/internal/bundle/validate_test.go index 18da89e4ce..0942275bc5 100644 --- a/internal/bundle/validate_test.go +++ b/internal/bundle/validate_test.go @@ -3,6 +3,7 @@ package bundle import ( "context" "encoding/json" + "path/filepath" "testing" "github.com/databricks/cli/internal/testutil" @@ -16,7 +17,7 @@ func TestAccBundleValidate(t *testing.T) { testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") tmpDir := t.TempDir() - testutil.WriteFile(t, + testutil.WriteFile(t, filepath.Join(tmpDir, "databricks.yml"), ` bundle: name: "foobar" @@ -33,7 +34,7 @@ resources: inner_loop: name: inner loop -`, tmpDir, "databricks.yml") +`) ctx := context.Background() stdout, err := validateBundle(t, ctx, tmpDir) diff --git a/internal/clusters_test.go b/internal/clusters_test.go index 2c41ebe1dd..9828d240c2 100644 --- a/internal/clusters_test.go +++ b/internal/clusters_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" @@ -13,7 +14,7 @@ import ( ) func TestAccClustersList(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, stderr := RequireSuccessfulRun(t, "clusters", "list") outStr := stdout.String() @@ -28,7 +29,7 @@ func TestAccClustersList(t *testing.T) { } func TestAccClustersGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) clusterId := findValidClusterID(t) stdout, stderr := RequireSuccessfulRun(t, "clusters", "get", clusterId) diff --git a/internal/dashboard_assumptions_test.go b/internal/dashboard_assumptions_test.go index 6d67937a8b..1ad137b7f2 100644 --- a/internal/dashboard_assumptions_test.go +++ b/internal/dashboard_assumptions_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dyn" 
"github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/cli/libs/dyn/merge" @@ -25,7 +26,7 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { dashboardName := "New Dashboard" dashboardPayload := []byte(`{"pages":[{"name":"2506f97a","displayName":"New Page"}]}`) - warehouseId := acc.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") + warehouseId := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") diff --git a/internal/filer_test.go b/internal/filer_test.go index 5ba303dc73..1ec148ed31 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -12,6 +12,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -410,33 +411,33 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { { name: "pythonJupyterNb.ipynb", nameWithoutExt: "pythonJupyterNb", - content1: readFile(t, "testdata/notebooks/py1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"), expected1: "# Databricks notebook source\nprint(1)", - content2: readFile(t, "testdata/notebooks/py2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/py2.ipynb"), expected2: "# Databricks notebook source\nprint(2)", }, { name: "rJupyterNb.ipynb", nameWithoutExt: "rJupyterNb", - content1: readFile(t, "testdata/notebooks/r1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/r1.ipynb"), expected1: "# Databricks notebook source\nprint(1)", - content2: readFile(t, "testdata/notebooks/r2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/r2.ipynb"), expected2: "# Databricks notebook source\nprint(2)", }, { name: "scalaJupyterNb.ipynb", nameWithoutExt: "scalaJupyterNb", - content1: readFile(t, "testdata/notebooks/scala1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb"), expected1: "// Databricks notebook source\nprintln(1)", - content2: readFile(t, "testdata/notebooks/scala2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/scala2.ipynb"), expected2: "// Databricks notebook source\nprintln(2)", }, { name: "sqlJupyterNotebook.ipynb", nameWithoutExt: "sqlJupyterNotebook", - content1: readFile(t, "testdata/notebooks/sql1.ipynb"), + content1: testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb"), expected1: "-- Databricks notebook source\nselect 1", - content2: readFile(t, "testdata/notebooks/sql2.ipynb"), + content2: testutil.ReadFile(t, "testdata/notebooks/sql2.ipynb"), expected2: "-- Databricks notebook source\nselect 2", }, } @@ -483,13 +484,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { {"foo.r", "print('foo')"}, {"foo.scala", "println('foo')"}, {"foo.sql", "SELECT 'foo'"}, - {"py1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, + {"py1.ipynb", testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")}, {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, - {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, + {"r1.ipynb", testutil.ReadFile(t, "testdata/notebooks/r1.ipynb")}, {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, - {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, + {"scala1.ipynb", testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb")}, {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, - {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, + {"sql1.ipynb", testutil.ReadFile(t, 
"testdata/notebooks/sql1.ipynb")}, {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, } @@ -554,10 +555,10 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { }{ {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, {"bar.py", "print('foo')"}, - {"p1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, - {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, - {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, - {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, + {"p1.ipynb", testutil.ReadFile(t, "testdata/notebooks/py1.ipynb")}, + {"r1.ipynb", testutil.ReadFile(t, "testdata/notebooks/r1.ipynb")}, + {"scala1.ipynb", testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb")}, + {"sql1.ipynb", testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb")}, {"pretender", "not a notebook"}, {"dir/file.txt", "file content"}, {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, @@ -729,7 +730,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Reading foo should fail. Even though the WSFS name for the notebook is foo @@ -748,7 +749,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Stating foo should fail. Even though the WSFS name for the notebook is foo @@ -767,7 +768,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) wf, _ := setupWsfsExtensionsFiler(t) // Create a notebook - err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"))) require.NoError(t, err) // Deleting foo should fail. 
Even though the WSFS name for the notebook is foo @@ -849,25 +850,25 @@ func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { language: "python", sourceName: "foo.py", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/py1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/py1.ipynb"), }, { language: "r", sourceName: "foo.r", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/r1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/r1.ipynb"), }, { language: "scala", sourceName: "foo.scala", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/scala1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/scala1.ipynb"), }, { language: "sql", sourceName: "foo.sql", jupyterName: "foo.ipynb", - jupyterContent: readFile(t, "testdata/notebooks/sql1.ipynb"), + jupyterContent: testutil.ReadFile(t, "testdata/notebooks/sql1.ipynb"), }, } { t.Run("jupyter_"+tc.language, func(t *testing.T) { diff --git a/internal/fs_cat_test.go b/internal/fs_cat_test.go index 6292aef189..c625d885b9 100644 --- a/internal/fs_cat_test.go +++ b/internal/fs_cat_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" @@ -70,14 +71,14 @@ func TestAccFsCatOnNonExistentFile(t *testing.T) { } func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() diff --git a/internal/fs_cp_test.go b/internal/fs_cp_test.go index b69735bc00..ca9d47e301 100644 --- a/internal/fs_cp_test.go +++ b/internal/fs_cp_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -355,7 +356,7 @@ func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { } func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) diff --git a/internal/fs_ls_test.go b/internal/fs_ls_test.go index 994a4a4255..0e8459412b 100644 --- a/internal/fs_ls_test.go +++ b/internal/fs_ls_test.go @@ -10,6 +10,7 @@ import ( "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -166,7 +167,7 @@ func TestAccFsLsForNonexistingDir(t *testing.T) { func TestAccFsLsWithoutScheme(t *testing.T) { t.Parallel() - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) diff --git a/internal/git_clone_test.go b/internal/git_clone_test.go index 73c3db1058..d5e6762dde 100644 --- 
a/internal/git_clone_test.go +++ b/internal/git_clone_test.go @@ -6,12 +6,13 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" ) func TestAccGitClone(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() ctx := context.Background() @@ -33,7 +34,7 @@ func TestAccGitClone(t *testing.T) { } func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() ctx := context.Background() @@ -54,7 +55,7 @@ func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { } func TestAccGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() diff --git a/internal/git_fetch_test.go b/internal/git_fetch_test.go index 3b234ec147..797bc2e348 100644 --- a/internal/git_fetch_test.go +++ b/internal/git_fetch_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" @@ -42,7 +43,7 @@ func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { me, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) - targetPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) + targetPath := testutil.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) stdout, stderr := RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath) t.Cleanup(func() { RequireSuccessfulRun(t, "repos", "delete", targetPath) @@ -69,7 +70,7 @@ func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { me, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) - rootPath := acc.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-")) + rootPath := testutil.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-")) _, stderr := RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) t.Cleanup(func() { RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) diff --git a/internal/helpers.go b/internal/helpers.go index 060736dc6f..1fb16106c4 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "math/rand" "net/http" "os" "path" @@ -21,6 +20,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/cmd" @@ -41,30 +41,6 @@ import ( _ "github.com/databricks/cli/cmd/workspace" ) -const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -// GetEnvOrSkipTest proceeds with test only with that env variable -func GetEnvOrSkipTest(t *testing.T, name string) string { - value := os.Getenv(name) - if value == "" { - t.Skipf("Environment variable %s is missing", name) - } - return value -} - -// RandomName gives random name with optional prefix. e.g. 
qa.RandomName("tf-") -func RandomName(prefix ...string) string { - randLen := 12 - b := make([]byte, randLen) - for i := range b { - b[i] = charset[rand.Intn(randLen)] - } - if len(prefix) > 0 { - return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b) - } - return string(b) -} - // Helper for running the root command in the background. // It ensures that the background goroutine terminates upon // test completion through cancelling the command context. @@ -355,22 +331,6 @@ func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer, return stdout, stderr, err } -func readFile(t *testing.T, name string) string { - b, err := os.ReadFile(name) - require.NoError(t, err) - - return string(b) -} - -func writeFile(t *testing.T, name, body string) string { - f, err := os.Create(filepath.Join(t.TempDir(), name)) - require.NoError(t, err) - _, err = f.WriteString(body) - require.NoError(t, err) - f.Close() - return f.Name() -} - func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { tasks := make([]jobs.SubmitTask, 0) for i := 0; i < len(versions); i++ { @@ -443,7 +403,7 @@ func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("integration-test-wsfs-")) + basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName("integration-test-wsfs-")) t.Logf("Creating %s", basePath) err = w.Workspace.MkdirsByPath(ctx, basePath) @@ -467,7 +427,7 @@ func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { ctx := context.Background() - path := fmt.Sprintf("/tmp/%s", RandomName("integration-test-dbfs-")) + path := fmt.Sprintf("/tmp/%s", testutil.RandomName("integration-test-dbfs-")) t.Logf("Creating DBFS folder:%s", path) err := w.Dbfs.MkdirsByPath(ctx, path) @@ -495,7 +455,7 @@ func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { // Create a schema schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ CatalogName: "main", - Name: RandomName("test-schema-"), + Name: testutil.RandomName("test-schema-"), }) require.NoError(t, err) t.Cleanup(func() { @@ -528,7 +488,7 @@ func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("integration-test-repo-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("integration-test-repo-")) t.Logf("Creating repo:%s", repoPath) repoInfo, err := w.Repos.Create(ctx, workspace.CreateRepoRequest{ @@ -563,7 +523,7 @@ func GetNodeTypeId(env string) string { } func setupLocalFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmp := t.TempDir() f, err := filer.NewLocalClient(tmp) @@ -610,7 +570,7 @@ func setupDbfsFiler(t *testing.T) (filer.Filer, string) { } func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) if os.Getenv("TEST_METASTORE_ID") == "" { t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") diff --git a/internal/init_test.go b/internal/init_test.go index 36b789d91f..e219e265cf 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -19,7 
+19,7 @@ import ( ) func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() _, _, err := RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) @@ -47,7 +47,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) - projectName := RandomName("project_name_") + projectName := testutil.RandomName("project_name_") // Create a config file with the project name and root dir initConfig := map[string]string{ @@ -101,7 +101,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { } func TestAccBundleInitHelpers(t *testing.T) { - env := GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) w, err := databricks.NewWorkspaceClient(&databricks.Config{}) diff --git a/internal/jobs_test.go b/internal/jobs_test.go index 8513168c82..e9132cea01 100644 --- a/internal/jobs_test.go +++ b/internal/jobs_test.go @@ -6,13 +6,14 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAccCreateJob(t *testing.T) { acc.WorkspaceTest(t) - env := GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") if env != "azure" { t.Skipf("Not running test on cloud %s", env) } diff --git a/internal/locker_test.go b/internal/locker_test.go index 62fcfc69fd..3047273a06 100644 --- a/internal/locker_test.go +++ b/internal/locker_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" lockpkg "github.com/databricks/cli/libs/locker" "github.com/databricks/databricks-sdk-go" @@ -28,7 +29,7 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr me, err := wsc.CurrentUser.Me(ctx) assert.NoError(t, err) - remoteProjectRoot := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName(projectNamePrefix)) + remoteProjectRoot := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName(projectNamePrefix)) repoInfo, err := wsc.Repos.Create(ctx, workspace.CreateRepoRequest{ Path: remoteProjectRoot, Url: EmptyRepoUrl, @@ -44,7 +45,7 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr } func TestAccLock(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.TODO() wsc, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -164,7 +165,7 @@ func TestAccLock(t *testing.T) { } func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) diff --git a/internal/python/python_tasks_test.go b/internal/python/python_tasks_test.go index fde9b37f66..b407973196 100644 --- a/internal/python/python_tasks_test.go +++ b/internal/python/python_tasks_test.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/cli/bundle/run/output" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -75,8 +76,8 @@ var sparkVersions = []string{ func 
TestAccRunPythonTaskWorkspace(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") unsupportedSparkVersionsForWheel := []string{ "11.3.x-scala2.12", @@ -96,8 +97,8 @@ func TestAccRunPythonTaskWorkspace(t *testing.T) { func TestAccRunPythonTaskDBFS(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") runPythonTasks(t, prepareDBFSFiles(t), testOpts{ name: "Python tasks from DBFS", @@ -109,8 +110,8 @@ func TestAccRunPythonTaskDBFS(t *testing.T) { func TestAccRunPythonTaskRepo(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly - internal.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") runPythonTasks(t, prepareRepoFiles(t), testOpts{ name: "Python tasks from Repo", @@ -121,7 +122,7 @@ func TestAccRunPythonTaskRepo(t *testing.T) { } func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) w := tw.w diff --git a/internal/repos_test.go b/internal/repos_test.go index 1ad0e87757..b681ff7e50 100644 --- a/internal/repos_test.go +++ b/internal/repos_test.go @@ -6,6 +6,7 @@ import ( "strconv" "testing" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -16,7 +17,7 @@ import ( func synthesizeTemporaryRepoPath(t *testing.T, w *databricks.WorkspaceClient, ctx context.Context) string { me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("empty-repo-integration-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("empty-repo-integration-")) // Cleanup if repo was created at specified path. 
t.Cleanup(func() { @@ -44,7 +45,7 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex } func TestAccReposCreateWithProvider(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() @@ -61,7 +62,7 @@ func TestAccReposCreateWithProvider(t *testing.T) { } func TestAccReposCreateWithoutProvider(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() @@ -78,7 +79,7 @@ func TestAccReposCreateWithoutProvider(t *testing.T) { } func TestAccReposGet(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() @@ -107,7 +108,7 @@ func TestAccReposGet(t *testing.T) { } func TestAccReposUpdate(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() @@ -128,7 +129,7 @@ func TestAccReposUpdate(t *testing.T) { } func TestAccReposDeleteByID(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() @@ -147,7 +148,7 @@ func TestAccReposDeleteByID(t *testing.T) { } func TestAccReposDeleteByPath(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w, err := databricks.NewWorkspaceClient() diff --git a/internal/secrets_test.go b/internal/secrets_test.go index 59e5d61509..bcc61b711b 100644 --- a/internal/secrets_test.go +++ b/internal/secrets_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,7 +19,7 @@ func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { } func temporarySecretScope(ctx context.Context, t *acc.WorkspaceT) string { - scope := acc.RandomName("cli-acc-") + scope := testutil.RandomName("cli-acc-") err := t.W.Secrets.CreateScope(ctx, workspace.CreateScope{ Scope: scope, }) diff --git a/internal/storage_credentials_test.go b/internal/storage_credentials_test.go index 07c21861fe..e937a9e7c4 100644 --- a/internal/storage_credentials_test.go +++ b/internal/storage_credentials_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -11,7 +12,7 @@ func TestAccStorageCredentialsListRendersResponse(t *testing.T) { _, _ = acc.WorkspaceTest(t) // Check if metastore is assigned for the workspace, otherwise test will fail - t.Log(GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) + t.Log(testutil.GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) stdout, stderr := RequireSuccessfulRun(t, "storage-credentials", "list") assert.NotEmpty(t, stdout) diff --git a/internal/sync_test.go b/internal/sync_test.go index 2518777fbd..3699ce376c 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -16,6 +16,7 @@ import ( "time" _ "github.com/databricks/cli/cmd/sync" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" 
"github.com/databricks/cli/libs/sync" "github.com/databricks/cli/libs/testfile" @@ -36,7 +37,7 @@ var ( func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Context) (localRoot, remoteRoot string) { me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("empty-repo-sync-integration-")) + repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("empty-repo-sync-integration-")) repoInfo, err := wsc.Repos.Create(ctx, workspace.CreateRepoRequest{ Path: repoPath, @@ -71,7 +72,7 @@ type syncTest struct { } func setupSyncTest(t *testing.T, args ...string) *syncTest { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) w := databricks.Must(databricks.NewWorkspaceClient()) localRoot := t.TempDir() @@ -499,7 +500,7 @@ func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { } func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) wsc := databricks.Must(databricks.NewWorkspaceClient()) ctx := context.Background() @@ -508,7 +509,7 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { require.NoError(t, err) // Hypothetical repo path doesn't exist. - nonExistingRepoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, RandomName("doesnt-exist-")) + nonExistingRepoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("doesnt-exist-")) err = sync.EnsureRemotePathIsUsable(ctx, wsc, nonExistingRepoPath, nil) assert.ErrorContains(t, err, " does not exist; please create it first") @@ -519,7 +520,7 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { } func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) wsc := databricks.Must(databricks.NewWorkspaceClient()) ctx := context.Background() @@ -541,14 +542,14 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { } func TestAccSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) wsc := databricks.Must(databricks.NewWorkspaceClient()) ctx := context.Background() me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) - remotePath := fmt.Sprintf("/Users/%s/%s", me.UserName, RandomName("ensure-path-exists-test-")) + remotePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName("ensure-path-exists-test-")) err = sync.EnsureRemotePathIsUsable(ctx, wsc, remotePath, me) assert.NoError(t, err) diff --git a/internal/tags_test.go b/internal/tags_test.go index 54039223f1..3db8527857 100644 --- a/internal/tags_test.go +++ b/internal/tags_test.go @@ -28,7 +28,7 @@ func testTags(t *testing.T, tags map[string]string) error { ctx := context.Background() resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ - Name: RandomName("test-tags-"), + Name: testutil.RandomName("test-tags-"), Tasks: []jobs.Task{ { TaskKey: "test", diff --git a/internal/testutil/env.go b/internal/testutil/env.go index 7a7436f47a..090302d469 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -39,15 +39,6 @@ func CleanupEnvironment(t *testing.T) { } } -// GetEnvOrSkipTest proceeds with test only with that env variable -func GetEnvOrSkipTest(t *testing.T, name string) string { - value := os.Getenv(name) - if value == "" { - 
t.Skipf("Environment variable %s is missing", name) - } - return value -} - // Changes into specified directory for the duration of the test. // Returns the current working directory. func Chdir(t *testing.T, dir string) string { diff --git a/internal/testutil/file.go b/internal/testutil/file.go index 59c640fbeb..ee3074514c 100644 --- a/internal/testutil/file.go +++ b/internal/testutil/file.go @@ -31,8 +31,8 @@ func Touch(t *testing.T, elems ...string) string { return path } -func WriteFile(t *testing.T, content string, elems ...string) string { - path := filepath.Join(elems...) +// WriteFile writes content to a file. +func WriteFile(t *testing.T, path, content string) { err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) @@ -44,5 +44,12 @@ func WriteFile(t *testing.T, content string, elems ...string) string { err = f.Close() require.NoError(t, err) - return path +} + +// ReadFile reads a file and returns its content as a string. +func ReadFile(t require.TestingT, path string) string { + b, err := os.ReadFile(path) + require.NoError(t, err) + + return string(b) } diff --git a/internal/acc/helpers.go b/internal/testutil/helpers.go similarity index 97% rename from internal/acc/helpers.go rename to internal/testutil/helpers.go index f980013468..646edc6c59 100644 --- a/internal/acc/helpers.go +++ b/internal/testutil/helpers.go @@ -1,4 +1,4 @@ -package acc +package testutil import ( "fmt" diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 575c0cadca..e159fc1dcf 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -20,7 +21,7 @@ import ( ) func TestAccWorkspaceList(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, stderr := RequireSuccessfulRun(t, "workspace", "list", "/") outStr := stdout.String() @@ -42,7 +43,7 @@ func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { } func TestAccWorkpaceExportPrintsContents(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() w := databricks.Must(databricks.NewWorkspaceClient()) From cabdabf31ec592915d46ac58cfe0190a202a9366 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 15:36:00 +0100 Subject: [PATCH 37/63] Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 (#1985) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.52.0 to 0.53.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.53.0

Bug Fixes

  • Update Changelog file (#1091).

Internal Changes

  • Update to latest OpenAPI spec (#1098).

Note: This release contains breaking changes; please see the API changes below for more details.

API Changes:

OpenAPI SHA: 7016dcbf2e011459416cf408ce21143bcc4b3a25, Date: 2024-12-05

Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

[Release] Release v0.53.0

Bug Fixes

  • Update Changelog file (#1091).

Internal Changes

  • Update to latest OpenAPI spec (#1098).

Note: This release contains breaking changes; please see the API changes below for more details.

API Changes:

OpenAPI SHA: 7016dcbf2e011459416cf408ce21143bcc4b3a25, Date: 2024-12-05

Commits

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
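The ignore condition above was presumably created with an `@dependabot ignore` comment rather than committed configuration. For reference only, a roughly equivalent rule could also be declared in a `.github/dependabot.yml` file; the sketch below is illustrative, is not part of this repository, and its file path, ecosystem, and schedule values are assumptions.

```yaml
# Hypothetical .github/dependabot.yml sketch (not this repository's actual config)
# showing how an equivalent ignore rule could be declared for Go modules.
version: 2
updates:
  - package-ecosystem: "gomod"   # Go modules, matching go.mod at the repo root (assumed)
    directory: "/"
    schedule:
      interval: "weekly"         # assumed update cadence
    ignore:
      - dependency-name: "github.com/databricks/databricks-sdk-go"
        versions: [">= 0.28.a, < 0.29"]   # the ignore condition listed in the table above
```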
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.52.0&new-version=0.53.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .gitattributes | 3 + bundle/schema/embed_test.go | 10 - bundle/schema/jsonschema.json | 25 +- .../aibi-dashboard-embedding-access-policy.go | 57 +++ ...bi-dashboard-embedding-approved-domains.go | 57 +++ .../clean-room-assets/clean-room-assets.go | 419 ++++++++++++++++ .../clean-room-task-runs.go | 97 ++++ cmd/workspace/clean-rooms/clean-rooms.go | 450 ++++++++++++++++++ cmd/workspace/cmd.go | 6 + cmd/workspace/credentials/credentials.go | 6 +- cmd/workspace/groups.go | 4 + cmd/workspace/lakeview/lakeview.go | 27 -- go.mod | 2 +- go.sum | 4 +- 15 files changed, 1114 insertions(+), 55 deletions(-) create mode 100755 cmd/workspace/clean-room-assets/clean-room-assets.go create mode 100755 cmd/workspace/clean-room-task-runs/clean-room-task-runs.go create mode 100755 cmd/workspace/clean-rooms/clean-rooms.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a2ba58aa56..68cd2f4be8 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f2385add116e3716c8a90a0b68e204deb40f996c \ No newline at end of file +7016dcbf2e011459416cf408ce21143bcc4b3a25 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 2755c02d72..3bc3c73c32 100755 --- a/.gitattributes +++ b/.gitattributes @@ -37,6 +37,9 @@ cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true +cmd/workspace/clean-room-assets/clean-room-assets.go linguist-generated=true +cmd/workspace/clean-room-task-runs/clean-room-task-runs.go linguist-generated=true +cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true diff --git a/bundle/schema/embed_test.go b/bundle/schema/embed_test.go index e4b45baa55..ff2e7651b4 100644 --- a/bundle/schema/embed_test.go +++ b/bundle/schema/embed_test.go @@ -58,16 +58,6 @@ func TestJsonSchema(t *testing.T) { assert.NotEmpty(t, pipeline.AnyOf[0].Properties[field].Description) } - // Assert enum values are loaded - schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY") - assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY") - providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider") assert.Contains(t, providers.Enum, "gitHub") assert.Contains(t, providers.Enum, "bitbucketCloud") diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index f791b8440e..e813e44063 100644 --- a/bundle/schema/jsonschema.json +++ 
b/bundle/schema/jsonschema.json @@ -2888,7 +2888,7 @@ "anyOf": [ { "type": "object", - "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", + "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", "properties": { "service_principal_name": { "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", @@ -4436,16 +4436,7 @@ "properties": { "days_of_week": { "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek", - "enum": [ - "MONDAY", - "TUESDAY", - "WEDNESDAY", - "THURSDAY", - "FRIDAY", - "SATURDAY", - "SUNDAY" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" }, "start_hour": { "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", @@ -4468,7 +4459,17 @@ ] }, "pipelines.RestartWindowDaysOfWeek": { - "type": "string" + "type": "string", + "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] }, "pipelines.SchemaSpec": { "anyOf": [ diff --git a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go index b1adf6103b..3f905e5212 100755 --- a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go +++ b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go @@ -26,6 +26,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newUpdate()) @@ -37,6 +38,62 @@ func New() *cobra.Command { return cmd } +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteAibiDashboardEmbeddingAccessPolicySettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the AI/BI dashboard embedding access policy.` + cmd.Long = `Delete the AI/BI dashboard embedding access policy. 
+ + Delete the AI/BI dashboard embedding access policy, reverting back to the + default.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go index 4811974607..69db665047 100755 --- a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go +++ b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go @@ -26,6 +26,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newUpdate()) @@ -37,6 +38,62 @@ func New() *cobra.Command { return cmd } +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete AI/BI dashboard embedding approved domains.` + cmd.Long = `Delete AI/BI dashboard embedding approved domains. + + Delete the list of domains approved to host embedded AI/BI dashboards, + reverting back to the default empty list.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. 
diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go new file mode 100755 index 0000000000..872f0ecefb --- /dev/null +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -0,0 +1,419 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_assets + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-assets", + Short: `Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room.`, + Long: `Clean room assets are data and code objects — Tables, volumes, and notebooks + that are shared with the clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomAssetRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomAssetRequest + createReq.Asset = &cleanrooms.CleanRoomAsset{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&createReq.Asset.AssetType, "asset-type", `The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + // TODO: complex arg: foreign_table + // TODO: complex arg: foreign_table_local_details + cmd.Flags().StringVar(&createReq.Asset.Name, "name", createReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) + // TODO: complex arg: notebook + // TODO: complex arg: table + // TODO: complex arg: table_local_details + // TODO: complex arg: view + // TODO: complex arg: view_local_details + // TODO: complex arg: volume_local_details + + cmd.Use = "create CLEAN_ROOM_NAME" + cmd.Short = `Create an asset.` + cmd.Long = `Create an asset. + + Create a clean room asset —share an asset like a notebook or table into the + clean room. For each UC asset that is added through this method, the clean + room owner must also have enough privilege on the asset to consume it. The + privilege must be maintained indefinitely for the clean room to be able to + access the asset. Typically, you should use a group as the clean room owner. 
+ + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Asset) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createReq.CleanRoomName = args[0] + + response, err := w.CleanRoomAssets.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomAssetRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomAssetRequest + + // TODO: short flags + + cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Short = `Delete an asset.` + cmd.Long = `Delete an asset. + + Delete a clean room asset - unshare/remove the asset from the clean room + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + CleanRoomAsset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &deleteReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + deleteReq.AssetFullName = args[2] + + err = w.CleanRoomAssets.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomAssetRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomAssetRequest + + // TODO: short flags + + cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Short = `Get an asset.` + cmd.Long = `Get an asset. + + Get the details of a clean room asset by its type and full name. 
+ + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + CleanRoomAsset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &getReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + getReq.AssetFullName = args[2] + + response, err := w.CleanRoomAssets.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomAssetsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomAssetsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List assets.` + cmd.Long = `List assets. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomAssets.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomAssetRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomAssetRequest + updateReq.Asset = &cleanrooms.CleanRoomAsset{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&updateReq.Asset.AssetType, "asset-type", `The type of the asset. 
Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + // TODO: complex arg: foreign_table + // TODO: complex arg: foreign_table_local_details + cmd.Flags().StringVar(&updateReq.Asset.Name, "name", updateReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) + // TODO: complex arg: notebook + // TODO: complex arg: table + // TODO: complex arg: table_local_details + // TODO: complex arg: view + // TODO: complex arg: view_local_details + // TODO: complex arg: volume_local_details + + cmd.Use = "update CLEAN_ROOM_NAME ASSET_TYPE NAME" + cmd.Short = `Update an asset.` + cmd.Long = `Update an asset. + + Update a clean room asset. For example, updating the content of a notebook; + changing the shared partitions of a table; etc. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: The type of the asset. + NAME: A fully qualified name that uniquely identifies the asset within the clean + room. This is also the name displayed in the clean room UI. + + For UC securable assets (tables, volumes, etc.), the format is + *shared_catalog*.*shared_schema*.*asset_name* + + For notebooks, the name is the notebook file name.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Asset) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &updateReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + updateReq.Name = args[2] + + response, err := w.CleanRoomAssets.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRoomAssets diff --git a/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go b/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go new file mode 100755 index 0000000000..b41e380cc1 --- /dev/null +++ b/cmd/workspace/clean-room-task-runs/clean-room-task-runs.go @@ -0,0 +1,97 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_task_runs + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-task-runs", + Short: `Clean room task runs are the executions of notebooks in a clean room.`, + Long: `Clean room task runs are the executions of notebooks in a clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomNotebookTaskRunsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomNotebookTaskRunsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.NotebookName, "notebook-name", listReq.NotebookName, `Notebook name.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The maximum number of task runs to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List notebook task runs.` + cmd.Long = `List notebook task runs. + + List all the historical notebook task runs in a clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomTaskRuns.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service CleanRoomTaskRuns diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go new file mode 100755 index 0000000000..053e41e8af --- /dev/null +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -0,0 +1,450 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_rooms + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-rooms", + Short: `A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data.`, + Long: `A clean room uses Delta Sharing and serverless compute to provide a secure and + privacy-protecting environment where multiple parties can work together on + sensitive enterprise data without direct access to each other’s data.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateOutputCatalog()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomRequest + createReq.CleanRoom = &cleanrooms.CleanRoom{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.CleanRoom.Comment, "comment", createReq.CleanRoom.Comment, ``) + cmd.Flags().StringVar(&createReq.CleanRoom.Name, "name", createReq.CleanRoom.Name, `The name of the clean room.`) + // TODO: complex arg: output_catalog + cmd.Flags().StringVar(&createReq.CleanRoom.Owner, "owner", createReq.CleanRoom.Owner, `This is Databricks username of the owner of the local clean room securable for permission management.`) + // TODO: complex arg: remote_detailed_info + + cmd.Use = "create" + cmd.Short = `Create a clean room.` + cmd.Long = `Create a clean room. + + Create a new clean room with the specified collaborators. This method is + asynchronous; the returned name field inside the clean_room field can be used + to poll the clean room status, using the :method:cleanrooms/get method. When + this method returns, the cluster will be in a PROVISIONING state. The cluster + will be usable once it enters an ACTIVE state. + + The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** + privilege on the metastore.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.CleanRoom) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.CleanRooms.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start create-output-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOutputCatalogOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomOutputCatalogRequest, +) + +func newCreateOutputCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var createOutputCatalogReq cleanrooms.CreateCleanRoomOutputCatalogRequest + createOutputCatalogReq.OutputCatalog = &cleanrooms.CleanRoomOutputCatalog{} + var createOutputCatalogJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createOutputCatalogJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createOutputCatalogReq.OutputCatalog.CatalogName, "catalog-name", createOutputCatalogReq.OutputCatalog.CatalogName, `The name of the output catalog in UC.`) + + cmd.Use = "create-output-catalog CLEAN_ROOM_NAME" + cmd.Short = `Create an output catalog.` + cmd.Long = `Create an output catalog. + + Create the output catalog of the clean room. + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createOutputCatalogJson.Unmarshal(&createOutputCatalogReq.OutputCatalog) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createOutputCatalogReq.CleanRoomName = args[0] + + response, err := w.CleanRooms.CreateOutputCatalog(ctx, createOutputCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOutputCatalogOverrides { + fn(cmd, &createOutputCatalogReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomRequest + + // TODO: short flags + + cmd.Use = "delete NAME" + cmd.Short = `Delete a clean room.` + cmd.Long = `Delete a clean room. + + Delete a clean room. After deletion, the clean room will be removed from the + metastore. If the other collaborators have not deleted the clean room, they + will still have the clean room in their metastore, but it will be in a DELETED + state and no operations other than deletion can be performed on it. 
+ + Arguments: + NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Name = args[0] + + err = w.CleanRooms.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomRequest + + // TODO: short flags + + cmd.Use = "get NAME" + cmd.Short = `Get a clean room.` + cmd.Long = `Get a clean room. + + Get the details of a clean room given its name.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Name = args[0] + + response, err := w.CleanRooms.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Maximum number of clean rooms to return (i.e., the page length).`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list" + cmd.Short = `List clean rooms.` + cmd.Long = `List clean rooms. + + Get a list of all clean rooms of the metastore. Only clean rooms the caller + has access to are returned.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.CleanRooms.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: clean_room + + cmd.Use = "update NAME" + cmd.Short = `Update a clean room.` + cmd.Long = `Update a clean room. + + Update a clean room. The caller must be the owner of the clean room, have + **MODIFY_CLEAN_ROOM** privilege, or be metastore admin. + + When the caller is a metastore admin, only the __owner__ field can be updated. + + Arguments: + NAME: Name of the clean room.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.Name = args[0] + + response, err := w.CleanRooms.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRooms diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 9cb3cca9e4..f07d0cf761 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -8,6 +8,9 @@ import ( apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" + clean_room_assets "github.com/databricks/cli/cmd/workspace/clean-room-assets" + clean_room_task_runs "github.com/databricks/cli/cmd/workspace/clean-room-task-runs" + clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" @@ -98,6 +101,9 @@ func All() []*cobra.Command { out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) + out = append(out, clean_room_assets.New()) + out = append(out, clean_room_task_runs.New()) + out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go index 44ee0cf310..672a3aeece 100755 --- a/cmd/workspace/credentials/credentials.go +++ b/cmd/workspace/credentials/credentials.go @@ -27,7 +27,7 @@ func New() *cobra.Command { To create credentials, you must be a Databricks account admin or have the CREATE SERVICE CREDENTIAL privilege. The user who creates the credential can - delegate ownership to another user or group to manage permissions on it`, + delegate ownership to another user or group to manage permissions on it.`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -73,7 +73,7 @@ func newCreateCredential() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`) - // TODO: complex arg: gcp_service_account_key + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. 
Supported values: [SERVICE, STORAGE]`) cmd.Flags().BoolVar(&createCredentialReq.ReadOnly, "read-only", createCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`) cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`) @@ -227,6 +227,7 @@ func newGenerateTemporaryServiceCredential() *cobra.Command { cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: azure_options + // TODO: complex arg: gcp_options cmd.Use = "generate-temporary-service-credential CREDENTIAL_NAME" cmd.Short = `Generate a temporary service credential.` @@ -434,6 +435,7 @@ func newUpdateCredential() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`) + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`) cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 98e474d33c..8827682fa6 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -72,5 +72,9 @@ func Groups() []cobra.Group { ID: "apps", Title: "Apps", }, + { + ID: "cleanrooms", + Title: "Clean Rooms", + }, } } diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 35c3bdf4e6..f190380622 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -160,9 +160,6 @@ func newCreateSchedule() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -242,9 +239,6 @@ func newCreateSubscription() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -322,9 +316,6 @@ func newDeleteSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -384,9 +375,6 @@ func newDeleteSubscription() *cobra.Command { SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. SUBSCRIPTION_ID: UUID identifying the subscription.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -562,9 +550,6 @@ func newGetSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -624,9 +609,6 @@ func newGetSubscription() *cobra.Command { SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. SUBSCRIPTION_ID: UUID identifying the subscription.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -739,9 +721,6 @@ func newListSchedules() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to which the schedules belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -798,9 +777,6 @@ func newListSubscriptions() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard which the subscriptions belongs. SCHEDULE_ID: UUID identifying the schedule which the subscriptions belongs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -1215,9 +1191,6 @@ func newUpdateSchedule() *cobra.Command { DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. SCHEDULE_ID: UUID identifying the schedule.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/go.mod b/go.mod index 20a0e28953..6ad5a8cdf1 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.2 require ( github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.52.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.53.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index bf70ca509a..d8fd63d00d 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.52.0 h1:WKcj0F+pdx0gjI5xMicjYC4O43S2q5nyTpaGGMFmgHw= -github.com/databricks/databricks-sdk-go v0.52.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.53.0 h1:rZMXaTC3HNKZt+m4C4I/dY3EdZj+kl/sVd/Kdq55Qfo= +github.com/databricks/databricks-sdk-go v0.53.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From dd3b7ec4508f9816ab11dea1512691cdbd25388a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 12 Dec 2024 15:42:15 +0100 Subject: [PATCH 38/63] Define and use `testutil.TestingT` interface (#2003) ## Changes Using an interface instead of a concrete type means we can pass `*testing.T` directly or any wrapper type that implements a superset of this interface. It prepares for more broad use of `acc.WorkspaceT`, which enhances the testing object with helper functions for using a Databricks workspace. This eliminates the need to dereference a `*testing.T` field on a wrapper type. 
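To illustrate the pattern (a condensed, runnable sketch, not part of the diff; the real interface is the one added in `internal/testutil/interface.go`): helpers declare the small interface as their parameter type, and any wrapper that embeds it satisfies it automatically, so both `*testing.T` and `acc.WorkspaceT` can be passed without dereferencing a field.

```go
// Condensed sketch of the pattern; only a subset of TestingT is shown here.
package main

import "fmt"

type TestingT interface {
	Logf(format string, args ...any)
	Skipf(format string, args ...any)
}

// A helper that previously took *testing.T now takes the interface.
func GetEnvOrSkipTest(t TestingT, name string) string {
	t.Logf("looking up %s", name)
	return name
}

// A wrapper (cf. acc.WorkspaceT) embeds the interface and therefore
// satisfies it, so callers pass it to the same helpers directly.
type WorkspaceT struct {
	TestingT
}

// fakeT stands in for *testing.T in this self-contained sketch.
type fakeT struct{}

func (fakeT) Logf(format string, args ...any)  { fmt.Printf(format+"\n", args...) }
func (fakeT) Skipf(format string, args ...any) { fmt.Printf(format+"\n", args...) }

func main() {
	wt := &WorkspaceT{TestingT: fakeT{}}
	fmt.Println(GetEnvOrSkipTest(wt, "CLOUD_ENV"))
}
```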
## Tests n/a --- internal/acc/debug.go | 5 ++-- internal/acc/workspace.go | 13 ++++----- internal/bundle/clusters_test.go | 2 +- internal/bundle/deploy_to_shared_test.go | 8 ++--- internal/bundle/helpers.go | 30 +++++++++---------- internal/bundle/python_wheel_test.go | 2 +- internal/filer_test.go | 6 ++-- internal/fs_cp_test.go | 4 +-- internal/fs_ls_test.go | 2 +- internal/helpers.go | 37 ++++++++++++------------ internal/secrets_test.go | 4 +-- internal/testutil/cloud.go | 10 ++----- internal/testutil/copy.go | 3 +- internal/testutil/env.go | 5 ++-- internal/testutil/file.go | 9 +++--- internal/testutil/helpers.go | 3 +- internal/testutil/interface.go | 27 +++++++++++++++++ internal/testutil/jdk.go | 3 +- internal/testutil/requirement.go | 8 ++--- internal/testutil/testutil_test.go | 36 +++++++++++++++++++++++ 20 files changed, 134 insertions(+), 83 deletions(-) create mode 100644 internal/testutil/interface.go create mode 100644 internal/testutil/testutil_test.go diff --git a/internal/acc/debug.go b/internal/acc/debug.go index 1166311324..b4939881ee 100644 --- a/internal/acc/debug.go +++ b/internal/acc/debug.go @@ -6,7 +6,8 @@ import ( "path" "path/filepath" "strings" - "testing" + + "github.com/databricks/cli/internal/testutil" ) // Detects if test is run from "debug test" feature in VS Code. @@ -16,7 +17,7 @@ func isInDebug() bool { } // Loads debug environment from ~/.databricks/debug-env.json. -func loadDebugEnvIfRunFromIDE(t *testing.T, key string) { +func loadDebugEnvIfRunFromIDE(t testutil.TestingT, key string) { if !isInDebug() { return } diff --git a/internal/acc/workspace.go b/internal/acc/workspace.go index 64010f42bf..d640325b5f 100644 --- a/internal/acc/workspace.go +++ b/internal/acc/workspace.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "testing" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" @@ -15,7 +14,7 @@ import ( ) type WorkspaceT struct { - *testing.T + testutil.TestingT W *databricks.WorkspaceClient @@ -24,7 +23,7 @@ type WorkspaceT struct { exec *compute.CommandExecutorV2 } -func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { +func WorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) { loadDebugEnvIfRunFromIDE(t, "workspace") t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) @@ -33,7 +32,7 @@ func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { require.NoError(t, err) wt := &WorkspaceT{ - T: t, + TestingT: t, W: w, @@ -44,7 +43,7 @@ func WorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } // Run the workspace test only on UC workspaces. 
-func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { +func UcWorkspaceTest(t testutil.TestingT) (context.Context, *WorkspaceT) { loadDebugEnvIfRunFromIDE(t, "workspace") t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) @@ -60,7 +59,7 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { require.NoError(t, err) wt := &WorkspaceT{ - T: t, + TestingT: t, W: w, @@ -71,7 +70,7 @@ func UcWorkspaceTest(t *testing.T) (context.Context, *WorkspaceT) { } func (t *WorkspaceT) TestClusterID() string { - clusterID := testutil.GetEnvOrSkipTest(t.T, "TEST_BRICKS_CLUSTER_ID") + clusterID := testutil.GetEnvOrSkipTest(t, "TEST_BRICKS_CLUSTER_ID") err := t.W.Clusters.EnsureClusterIsRunning(t.ctx, clusterID) require.NoError(t, err) return clusterID diff --git a/internal/bundle/clusters_test.go b/internal/bundle/clusters_test.go index b46bec97ec..8849ff3bff 100644 --- a/internal/bundle/clusters_test.go +++ b/internal/bundle/clusters_test.go @@ -16,7 +16,7 @@ import ( func TestAccDeployBundleWithCluster(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - if testutil.IsAWSCloud(wt.T) { + if testutil.IsAWSCloud(wt) { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } diff --git a/internal/bundle/deploy_to_shared_test.go b/internal/bundle/deploy_to_shared_test.go index 568c1fb560..6d9209d369 100644 --- a/internal/bundle/deploy_to_shared_test.go +++ b/internal/bundle/deploy_to_shared_test.go @@ -29,10 +29,10 @@ func TestAccDeployBasicToSharedWorkspacePath(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(wt.T, ctx, bundleRoot) - require.NoError(wt.T, err) + err = destroyBundle(wt, ctx, bundleRoot) + require.NoError(wt, err) }) - err = deployBundle(wt.T, ctx, bundleRoot) - require.NoError(wt.T, err) + err = deployBundle(wt, ctx, bundleRoot) + require.NoError(wt, err) } diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 2d2e1098d4..d06a2a72d8 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -9,11 +9,11 @@ import ( "os/exec" "path/filepath" "strings" - "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" @@ -26,12 +26,12 @@ import ( const defaultSparkVersion = "13.3.x-snapshot-scala2.12" -func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { +func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) (string, error) { bundleRoot := t.TempDir() return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot) } -func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { +func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { templateRoot := filepath.Join("bundles", templateName) configFilePath, err := writeConfigFile(t, config) @@ -49,7 +49,7 @@ func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateN return bundleRoot, err } -func writeConfigFile(t *testing.T, config map[string]any) (string, error) { +func writeConfigFile(t testutil.TestingT, config map[string]any) (string, error) { bytes, err := 
json.Marshal(config) if err != nil { return "", err @@ -63,34 +63,34 @@ func writeConfigFile(t *testing.T, config map[string]any) (string, error) { return filepath, err } -func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) { +func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") stdout, _, err := c.Run() return stdout.Bytes(), err } -func mustValidateBundle(t *testing.T, ctx context.Context, path string) []byte { +func mustValidateBundle(t testutil.TestingT, ctx context.Context, path string) []byte { data, err := validateBundle(t, ctx, path) require.NoError(t, err) return data } -func unmarshalConfig(t *testing.T, data []byte) *bundle.Bundle { +func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle { bundle := &bundle.Bundle{} err := json.Unmarshal(data, &bundle.Config) require.NoError(t, err) return bundle } -func deployBundle(t *testing.T, ctx context.Context, path string) error { +func deployBundle(t testutil.TestingT, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") _, _, err := c.Run() return err } -func deployBundleWithArgs(t *testing.T, ctx context.Context, path string, args ...string) (string, string, error) { +func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args = append([]string{"bundle", "deploy"}, args...) c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) @@ -98,7 +98,7 @@ func deployBundleWithArgs(t *testing.T, ctx context.Context, path string, args . return stdout.String(), stderr.String(), err } -func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error { +func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} args = append(args, flags...) 
@@ -107,7 +107,7 @@ func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags return err } -func runResource(t *testing.T, ctx context.Context, path, key string) (string, error) { +func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) @@ -116,7 +116,7 @@ func runResource(t *testing.T, ctx context.Context, path, key string) (string, e return stdout.String(), err } -func runResourceWithParams(t *testing.T, ctx context.Context, path, key string, params ...string) (string, error) { +func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key string, params ...string) (string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) @@ -128,14 +128,14 @@ func runResourceWithParams(t *testing.T, ctx context.Context, path, key string, return stdout.String(), err } -func destroyBundle(t *testing.T, ctx context.Context, path string) error { +func destroyBundle(t testutil.TestingT, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() return err } -func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, uniqueId string) string { +func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string { // Compute root path for the bundle deployment me, err := w.CurrentUser.Me(context.Background()) require.NoError(t, err) @@ -143,7 +143,7 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique return root } -func blackBoxRun(t *testing.T, root string, args ...string) (stdout, stderr string) { +func blackBoxRun(t testutil.TestingT, root string, args ...string) (stdout, stderr string) { gitRoot, err := folders.FindDirWithLeaf(".", ".git") require.NoError(t, err) diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index 108676bd80..009417937e 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -57,7 +57,7 @@ func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { _, wt := acc.WorkspaceTest(t) - if testutil.IsAWSCloud(wt.T) { + if testutil.IsAWSCloud(wt) { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } diff --git a/internal/filer_test.go b/internal/filer_test.go index 1ec148ed31..4b7a0b53eb 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -122,7 +122,7 @@ func TestAccFilerRecursiveDelete(t *testing.T) { for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -233,7 +233,7 @@ func TestAccFilerReadWrite(t *testing.T) { for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, @@ -342,7 +342,7 @@ func TestAccFilerReadDir(t *testing.T) { for _, testCase := range []struct { name string - f func(t *testing.T) (filer.Filer, string) + f func(t testutil.TestingT) (filer.Filer, string) }{ {"local", setupLocalFiler}, {"workspace files", setupWsfsFiler}, diff --git 
a/internal/fs_cp_test.go b/internal/fs_cp_test.go index ca9d47e301..41c2a44b9a 100644 --- a/internal/fs_cp_test.go +++ b/internal/fs_cp_test.go @@ -62,8 +62,8 @@ func assertTargetDir(t *testing.T, ctx context.Context, f filer.Filer) { type cpTest struct { name string - setupSource func(*testing.T) (filer.Filer, string) - setupTarget func(*testing.T) (filer.Filer, string) + setupSource func(testutil.TestingT) (filer.Filer, string) + setupTarget func(testutil.TestingT) (filer.Filer, string) } func copyTests() []cpTest { diff --git a/internal/fs_ls_test.go b/internal/fs_ls_test.go index 0e8459412b..999d9afea7 100644 --- a/internal/fs_ls_test.go +++ b/internal/fs_ls_test.go @@ -18,7 +18,7 @@ import ( type fsTest struct { name string - setupFiler func(t *testing.T) (filer.Filer, string) + setupFiler func(t testutil.TestingT) (filer.Filer, string) } var fsTests = []fsTest{ diff --git a/internal/helpers.go b/internal/helpers.go index 1fb16106c4..b67ea889a8 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -15,7 +15,6 @@ import ( "reflect" "strings" "sync" - "testing" "time" "github.com/databricks/cli/cmd/root" @@ -45,7 +44,7 @@ import ( // It ensures that the background goroutine terminates upon // test completion through cancelling the command context. type cobraTestRunner struct { - *testing.T + testutil.TestingT args []string stdout bytes.Buffer @@ -128,7 +127,7 @@ func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) } func (t *cobraTestRunner) WaitForOutput(text string, timeout time.Duration) { - require.Eventually(t.T, func() bool { + require.Eventually(t, func() bool { currentStdout := t.stdout.String() currentErrout := t.stderr.String() return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) @@ -300,23 +299,25 @@ func (t *cobraTestRunner) RunAndParseJSON(v any) { require.NoError(t, err) } -func NewCobraTestRunner(t *testing.T, args ...string) *cobraTestRunner { +func NewCobraTestRunner(t testutil.TestingT, args ...string) *cobraTestRunner { return &cobraTestRunner{ - T: t, + TestingT: t, + ctx: context.Background(), args: args, } } -func NewCobraTestRunnerWithContext(t *testing.T, ctx context.Context, args ...string) *cobraTestRunner { +func NewCobraTestRunnerWithContext(t testutil.TestingT, ctx context.Context, args ...string) *cobraTestRunner { return &cobraTestRunner{ - T: t, + TestingT: t, + ctx: ctx, args: args, } } -func RequireSuccessfulRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer) { +func RequireSuccessfulRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer) { t.Logf("run args: [%s]", strings.Join(args, ", ")) c := NewCobraTestRunner(t, args...) stdout, stderr, err := c.Run() @@ -324,7 +325,7 @@ func RequireSuccessfulRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buf return stdout, stderr } -func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer, error) { +func RequireErrorRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer, error) { c := NewCobraTestRunner(t, args...) 
stdout, stderr, err := c.Run() require.Error(t, err) @@ -398,7 +399,7 @@ func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) return tasks } -func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { +func TemporaryWorkspaceDir(t testutil.TestingT, w *databricks.WorkspaceClient) string { ctx := context.Background() me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) @@ -425,7 +426,7 @@ func TemporaryWorkspaceDir(t *testing.T, w *databricks.WorkspaceClient) string { return basePath } -func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { +func TemporaryDbfsDir(t testutil.TestingT, w *databricks.WorkspaceClient) string { ctx := context.Background() path := fmt.Sprintf("/tmp/%s", testutil.RandomName("integration-test-dbfs-")) @@ -449,7 +450,7 @@ func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { } // Create a new UC volume in a catalog called "main" in the workspace. -func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { +func TemporaryUcVolume(t testutil.TestingT, w *databricks.WorkspaceClient) string { ctx := context.Background() // Create a schema @@ -483,7 +484,7 @@ func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { return path.Join("/Volumes", "main", schema.Name, volume.Name) } -func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { +func TemporaryRepo(t testutil.TestingT, w *databricks.WorkspaceClient) string { ctx := context.Background() me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) @@ -522,7 +523,7 @@ func GetNodeTypeId(env string) string { return "Standard_DS4_v2" } -func setupLocalFiler(t *testing.T) (filer.Filer, string) { +func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmp := t.TempDir() @@ -532,7 +533,7 @@ func setupLocalFiler(t *testing.T) (filer.Filer, string) { return f, path.Join(filepath.ToSlash(tmp)) } -func setupWsfsFiler(t *testing.T) (filer.Filer, string) { +func setupWsfsFiler(t testutil.TestingT) (filer.Filer, string) { ctx, wt := acc.WorkspaceTest(t) tmpdir := TemporaryWorkspaceDir(t, wt.W) @@ -549,7 +550,7 @@ func setupWsfsFiler(t *testing.T) (filer.Filer, string) { return f, tmpdir } -func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { +func setupWsfsExtensionsFiler(t testutil.TestingT) (filer.Filer, string) { _, wt := acc.WorkspaceTest(t) tmpdir := TemporaryWorkspaceDir(t, wt.W) @@ -559,7 +560,7 @@ func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { return f, tmpdir } -func setupDbfsFiler(t *testing.T) (filer.Filer, string) { +func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { _, wt := acc.WorkspaceTest(t) tmpDir := TemporaryDbfsDir(t, wt.W) @@ -569,7 +570,7 @@ func setupDbfsFiler(t *testing.T) (filer.Filer, string) { return f, path.Join("dbfs:/", tmpDir) } -func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { +func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) if os.Getenv("TEST_METASTORE_ID") == "" { diff --git a/internal/secrets_test.go b/internal/secrets_test.go index bcc61b711b..d780654bd4 100644 --- a/internal/secrets_test.go +++ b/internal/secrets_test.go @@ -68,7 +68,7 @@ func TestAccSecretsPutSecretStringValue(tt *testing.T) { key := "test-key" value := "test-value\nwith-newlines\n" - stdout, stderr := RequireSuccessfulRun(t.T, "secrets", "put-secret", scope, key, "--string-value", value) + 
stdout, stderr := RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--string-value", value) assert.Empty(t, stdout) assert.Empty(t, stderr) @@ -82,7 +82,7 @@ func TestAccSecretsPutSecretBytesValue(tt *testing.T) { key := "test-key" value := []byte{0x00, 0x01, 0x02, 0x03} - stdout, stderr := RequireSuccessfulRun(t.T, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) + stdout, stderr := RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) assert.Empty(t, stdout) assert.Empty(t, stderr) diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index ba5b75ecff..849ac817c8 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -1,9 +1,5 @@ package testutil -import ( - "testing" -) - type Cloud int const ( @@ -13,7 +9,7 @@ const ( ) // Implement [Requirement]. -func (c Cloud) Verify(t *testing.T) { +func (c Cloud) Verify(t TestingT) { if c != GetCloud(t) { t.Skipf("Skipping %s-specific test", c) } @@ -32,7 +28,7 @@ func (c Cloud) String() string { } } -func GetCloud(t *testing.T) Cloud { +func GetCloud(t TestingT) Cloud { env := GetEnvOrSkipTest(t, "CLOUD_ENV") switch env { case "aws": @@ -50,6 +46,6 @@ func GetCloud(t *testing.T) Cloud { return -1 } -func IsAWSCloud(t *testing.T) bool { +func IsAWSCloud(t TestingT) bool { return GetCloud(t) == AWS } diff --git a/internal/testutil/copy.go b/internal/testutil/copy.go index 9fd8539925..a521da3e3d 100644 --- a/internal/testutil/copy.go +++ b/internal/testutil/copy.go @@ -5,14 +5,13 @@ import ( "io/fs" "os" "path/filepath" - "testing" "github.com/stretchr/testify/require" ) // CopyDirectory copies the contents of a directory to another directory. // The destination directory is created if it does not exist. -func CopyDirectory(t *testing.T, src, dst string) { +func CopyDirectory(t TestingT, src, dst string) { err := filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error { if err != nil { return err diff --git a/internal/testutil/env.go b/internal/testutil/env.go index 090302d469..10557c4e63 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -5,7 +5,6 @@ import ( "path/filepath" "runtime" "strings" - "testing" "github.com/stretchr/testify/require" ) @@ -13,7 +12,7 @@ import ( // CleanupEnvironment sets up a pristine environment containing only $PATH and $HOME. // The original environment is restored upon test completion. // Note: use of this function is incompatible with parallel execution. -func CleanupEnvironment(t *testing.T) { +func CleanupEnvironment(t TestingT) { // Restore environment when test finishes. environ := os.Environ() t.Cleanup(func() { @@ -41,7 +40,7 @@ func CleanupEnvironment(t *testing.T) { // Changes into specified directory for the duration of the test. // Returns the current working directory. -func Chdir(t *testing.T, dir string) string { +func Chdir(t TestingT, dir string) string { // Prevent parallel execution when changing the working directory. // t.Setenv automatically fails if t.Parallel is set. t.Setenv("DO_NOT_RUN_IN_PARALLEL", "true") diff --git a/internal/testutil/file.go b/internal/testutil/file.go index ee3074514c..538a3c20ad 100644 --- a/internal/testutil/file.go +++ b/internal/testutil/file.go @@ -3,12 +3,11 @@ package testutil import ( "os" "path/filepath" - "testing" "github.com/stretchr/testify/require" ) -func TouchNotebook(t *testing.T, elems ...string) string { +func TouchNotebook(t TestingT, elems ...string) string { path := filepath.Join(elems...) 
err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) @@ -18,7 +17,7 @@ func TouchNotebook(t *testing.T, elems ...string) string { return path } -func Touch(t *testing.T, elems ...string) string { +func Touch(t TestingT, elems ...string) string { path := filepath.Join(elems...) err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) @@ -32,7 +31,7 @@ func Touch(t *testing.T, elems ...string) string { } // WriteFile writes content to a file. -func WriteFile(t *testing.T, path, content string) { +func WriteFile(t TestingT, path, content string) { err := os.MkdirAll(filepath.Dir(path), 0o755) require.NoError(t, err) @@ -47,7 +46,7 @@ func WriteFile(t *testing.T, path, content string) { } // ReadFile reads a file and returns its content as a string. -func ReadFile(t require.TestingT, path string) string { +func ReadFile(t TestingT, path string) string { b, err := os.ReadFile(path) require.NoError(t, err) diff --git a/internal/testutil/helpers.go b/internal/testutil/helpers.go index 646edc6c59..69ed7595b7 100644 --- a/internal/testutil/helpers.go +++ b/internal/testutil/helpers.go @@ -5,11 +5,10 @@ import ( "math/rand" "os" "strings" - "testing" ) // GetEnvOrSkipTest proceeds with test only with that env variable. -func GetEnvOrSkipTest(t *testing.T, name string) string { +func GetEnvOrSkipTest(t TestingT, name string) string { value := os.Getenv(name) if value == "" { t.Skipf("Environment variable %s is missing", name) diff --git a/internal/testutil/interface.go b/internal/testutil/interface.go new file mode 100644 index 0000000000..2c3004800d --- /dev/null +++ b/internal/testutil/interface.go @@ -0,0 +1,27 @@ +package testutil + +// TestingT is an interface wrapper around *testing.T that provides the methods +// that are used by the test package to convey information about test failures. +// +// We use an interface so we can wrap *testing.T and provide additional functionality. +type TestingT interface { + Log(args ...any) + Logf(format string, args ...any) + + Error(args ...any) + Errorf(format string, args ...any) + + Fatal(args ...any) + Fatalf(format string, args ...any) + + Skip(args ...any) + Skipf(format string, args ...any) + + FailNow() + + Cleanup(func()) + + Setenv(key, value string) + + TempDir() string +} diff --git a/internal/testutil/jdk.go b/internal/testutil/jdk.go index 05bd7d6d68..60fa439dbd 100644 --- a/internal/testutil/jdk.go +++ b/internal/testutil/jdk.go @@ -5,12 +5,11 @@ import ( "context" "os/exec" "strings" - "testing" "github.com/stretchr/testify/require" ) -func RequireJDK(t *testing.T, ctx context.Context, version string) { +func RequireJDK(t TestingT, ctx context.Context, version string) { var stderr bytes.Buffer cmd := exec.Command("javac", "-version") diff --git a/internal/testutil/requirement.go b/internal/testutil/requirement.go index 53855e0b52..e182b75180 100644 --- a/internal/testutil/requirement.go +++ b/internal/testutil/requirement.go @@ -1,18 +1,14 @@ package testutil -import ( - "testing" -) - // Requirement is the interface for test requirements. type Requirement interface { - Verify(t *testing.T) + Verify(t TestingT) } // Require should be called at the beginning of a test to ensure that all // requirements are met before running the test. // If any requirement is not met, the test will be skipped. 
-func Require(t *testing.T, requirements ...Requirement) { +func Require(t TestingT, requirements ...Requirement) { for _, r := range requirements { r.Verify(t) } diff --git a/internal/testutil/testutil_test.go b/internal/testutil/testutil_test.go new file mode 100644 index 0000000000..d41374d559 --- /dev/null +++ b/internal/testutil/testutil_test.go @@ -0,0 +1,36 @@ +package testutil_test + +import ( + "go/parser" + "go/token" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestNoTestingImport checks that no file in the package imports the testing package. +// All exported functions must use the TestingT interface instead of *testing.T. +func TestNoTestingImport(t *testing.T) { + // Parse the package + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, ".", nil, parser.AllErrors) + require.NoError(t, err) + + // Iterate through the files in the package + for _, pkg := range pkgs { + for _, file := range pkg.Files { + // Skip test files + if strings.HasSuffix(fset.Position(file.Pos()).Filename, "_test.go") { + continue + } + // Check the imports of each file + for _, imp := range file.Imports { + if imp.Path.Value == `"testing"` { + assert.Fail(t, "File imports the testing package", "File %s imports the testing package", fset.Position(file.Pos()).Filename) + } + } + } + } +} From e472b5d88850b46f3cff38f215e774ee653e53ad Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 12 Dec 2024 17:48:51 +0100 Subject: [PATCH 39/63] Move the CLI test runner to `internal/testcli` package (#2004) ## Changes The CLI test runner instantiates a new CLI "instance" through `cmd.New()` and runs it with specified arguments. This is as close as we get to running the real CLI **in-process**. This runner was located in the `internal` package next to other helpers. This change moves it to its own dedicated package. Note: this runner transitively imports pretty much the entire repository, which is why we intentionally keep it _separate_ from `testutil`. 
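
For illustration, a minimal sketch of how a test can drive the relocated runner in-process. The `version` subcommand, the test name, and the assertions are only examples; the `testcli.NewRunner` / `Run` calls mirror the call sites updated in this change, where `Run` returns captured stdout/stderr buffers and an error:

```go
package internal_test

import (
	"testing"

	"github.com/databricks/cli/internal/testcli"
	"github.com/stretchr/testify/require"
)

// Sketch: run the CLI in-process via the testcli runner and assert on the
// captured output buffers returned by Run. The "version" argument and the
// assertions below are illustrative only.
func TestVersionCommandInProcess(t *testing.T) {
	r := testcli.NewRunner(t, "version")
	stdout, stderr, err := r.Run()
	require.NoError(t, err)
	require.Empty(t, stderr.String())
	require.NotEmpty(t, stdout.String())
}
```

Tests that need a preconfigured context (for example, environment overrides via `libs/env`) use `testcli.NewRunnerWithContext(t, ctx, ...)` instead, as the updated `cmd/labs` and `internal/bundle` tests below show.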
## Tests n/a --- bundle/tests/suggest_target_test.go | 24 +- cmd/labs/installed_test.go | 4 +- cmd/labs/list_test.go | 4 +- cmd/labs/project/command_test.go | 10 +- cmd/labs/project/installer_test.go | 8 +- internal/alerts_test.go | 3 +- internal/api_test.go | 7 +- internal/auth_describe_test.go | 5 +- internal/bundle/artifacts_test.go | 5 +- internal/bundle/bind_resource_test.go | 11 +- internal/bundle/deploy_test.go | 11 +- internal/bundle/generate_job_test.go | 3 +- internal/bundle/generate_pipeline_test.go | 3 +- internal/bundle/helpers.go | 16 +- internal/clusters_test.go | 7 +- internal/completer_test.go | 3 +- internal/fs_cat_test.go | 11 +- internal/fs_cp_test.go | 27 +- internal/fs_ls_test.go | 13 +- internal/fs_mkdir_test.go | 11 +- internal/fs_rm_test.go | 11 +- internal/git_fetch_test.go | 9 +- internal/helpers.go | 308 --------------------- internal/init_test.go | 15 +- internal/jobs_test.go | 5 +- internal/repos_test.go | 21 +- internal/secrets_test.go | 7 +- internal/storage_credentials_test.go | 3 +- internal/sync_test.go | 8 +- internal/testcli/README.md | 7 + internal/testcli/runner.go | 315 ++++++++++++++++++++++ internal/unknown_command_test.go | 3 +- internal/version_test.go | 9 +- internal/workspace_test.go | 55 ++-- 34 files changed, 500 insertions(+), 462 deletions(-) create mode 100644 internal/testcli/README.md create mode 100644 internal/testcli/runner.go diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go index 8fb1304090..02905d779c 100644 --- a/bundle/tests/suggest_target_test.go +++ b/bundle/tests/suggest_target_test.go @@ -1,22 +1,22 @@ package config_tests import ( - "path/filepath" + "context" "testing" - "github.com/databricks/cli/cmd/root" - assert "github.com/databricks/cli/libs/dyn/dynassert" - - "github.com/databricks/cli/internal" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/require" ) func TestSuggestTargetIfWrongPassed(t *testing.T) { - t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) - stdoutBytes, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") - stdout := stdoutBytes.String() + b := load(t, "target_overrides/workspace") - assert.Error(t, root.ErrAlreadyPrinted, err) - assert.Contains(t, stdout, "Available targets:") - assert.Contains(t, stdout, "development") - assert.Contains(t, stdout, "staging") + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SelectTarget("incorrect")) + err := diags.Error() + require.Error(t, err) + require.Contains(t, err.Error(), "Available targets:") + require.Contains(t, err.Error(), "development") + require.Contains(t, err.Error(), "staging") } diff --git a/cmd/labs/installed_test.go b/cmd/labs/installed_test.go index 00692f7960..0418e5a09b 100644 --- a/cmd/labs/installed_test.go +++ b/cmd/labs/installed_test.go @@ -4,14 +4,14 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" ) func TestListsInstalledProjects(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "installed") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "installed") r.RunAndExpectOutput(` Name Description Version blueprint Blueprint Project v0.3.15 diff --git a/cmd/labs/list_test.go b/cmd/labs/list_test.go index 925b984ab8..eec942d414 100644 
--- a/cmd/labs/list_test.go +++ b/cmd/labs/list_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/require" ) @@ -12,7 +12,7 @@ import ( func TestListingWorks(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "list") + c := testcli.NewRunnerWithContext(t, ctx, "labs", "list") stdout, _, err := c.Run() require.NoError(t, err) require.Contains(t, stdout.String(), "ucx") diff --git a/cmd/labs/project/command_test.go b/cmd/labs/project/command_test.go index 20021879fe..f6326b2d1b 100644 --- a/cmd/labs/project/command_test.go +++ b/cmd/labs/project/command_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/python" "github.com/databricks/databricks-sdk-go" @@ -30,7 +30,7 @@ func devEnvContext(t *testing.T) context.Context { func TestRunningBlueprintEcho(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "echo") var out echoOut r.RunAndParseJSON(&out) assert.Equal(t, "echo", out.Command) @@ -41,14 +41,14 @@ func TestRunningBlueprintEcho(t *testing.T) { func TestRunningBlueprintEchoProfileWrongOverride(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") _, _, err := r.Run() assert.ErrorIs(t, err, databricks.ErrNotAccountClient) } func TestRunningCommand(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "foo") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "foo") r.WithStdin() defer r.CloseStdin() @@ -60,7 +60,7 @@ func TestRunningCommand(t *testing.T) { func TestRenderingTable(t *testing.T) { ctx := devEnvContext(t) - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "blueprint", "table") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "table") r.RunAndExpectOutput(` Key Value First Second diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 63c984bcb6..367daccb33 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -19,7 +19,7 @@ import ( "github.com/databricks/cli/cmd/labs/github" "github.com/databricks/cli/cmd/labs/project" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/process" "github.com/databricks/cli/libs/python" @@ -236,7 +236,7 @@ func TestInstallerWorksForReleases(t *testing.T) { // │ │ │ └── site-packages // │ │ │ ├── ... 
// │ │ │ ├── distutils-precedence.pth - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", "blueprint", "--debug") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "install", "blueprint", "--debug") r.RunAndExpectOutput("setting up important infrastructure") } @@ -356,7 +356,7 @@ account_id = abc // └── databrickslabs-blueprint-releases.json // `databricks labs install .` means "verify this installer i'm developing does work" - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "install", ".") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "install", ".") r.WithStdin() defer r.CloseStdin() @@ -426,7 +426,7 @@ func TestUpgraderWorksForReleases(t *testing.T) { ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "installer-cluster") ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "installer-warehouse") - r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") + r := testcli.NewRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") r.RunAndExpectOutput("setting up important infrastructure") // Check if the stub was called with the 'python -m pip install' command diff --git a/internal/alerts_test.go b/internal/alerts_test.go index ebb7a2095b..d7be16d8bb 100644 --- a/internal/alerts_test.go +++ b/internal/alerts_test.go @@ -3,6 +3,7 @@ package internal import ( "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -10,6 +11,6 @@ import ( func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "alerts-legacy", "create") + _, _, err := testcli.RequireErrorRun(t, "alerts-legacy", "create") assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) } diff --git a/internal/api_test.go b/internal/api_test.go index fbbc5b0729..1c7312ddbd 100644 --- a/internal/api_test.go +++ b/internal/api_test.go @@ -11,13 +11,14 @@ import ( "github.com/stretchr/testify/require" _ "github.com/databricks/cli/cmd/api" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" ) func TestAccApiGet(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, _ := RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") + stdout, _ := testcli.RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") // Deserialize SCIM API response. 
var out map[string]any @@ -44,11 +45,11 @@ func TestAccApiPost(t *testing.T) { // Post to mkdir { - RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") + testcli.RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") } // Post to delete { - RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") + testcli.RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") } } diff --git a/internal/auth_describe_test.go b/internal/auth_describe_test.go index dfbc95a1fd..e0b335c592 100644 --- a/internal/auth_describe_test.go +++ b/internal/auth_describe_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( func TestAuthDescribeSuccess(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, _ := RequireSuccessfulRun(t, "auth", "describe") + stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe") outStr := stdout.String() w, err := databricks.NewWorkspaceClient(&databricks.Config{}) @@ -34,7 +35,7 @@ func TestAuthDescribeSuccess(t *testing.T) { func TestAuthDescribeFailure(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, _ := RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") + stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") outStr := stdout.String() require.NotEmpty(t, outStr) diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index d0773df05a..8d4b0ae10a 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" @@ -257,7 +258,7 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { require.NoError(t, err) t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + stdout, stderr, err := testcli.RequireErrorRun(t, "bundle", "deploy") assert.Error(t, err) assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found @@ -294,7 +295,7 @@ func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { require.NoError(t, err) t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.RequireErrorRun(t, "bundle", "deploy") + stdout, stderr, err := testcli.RequireErrorRun(t, "bundle", "deploy") assert.Error(t, err) assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go index c6bafdfc21..0d54587833 100644 --- a/internal/bundle/bind_resource_test.go +++ b/internal/bundle/bind_resource_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -39,7 +40,7 @@ func 
TestAccBindJobToExistingJob(t *testing.T) { }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") + c := testcli.NewRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) @@ -61,7 +62,7 @@ func TestAccBindJobToExistingJob(t *testing.T) { require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") - c = internal.NewCobraTestRunner(t, "bundle", "deployment", "unbind", "foo") + c = testcli.NewRunner(t, "bundle", "deployment", "unbind", "foo") _, _, err = c.Run() require.NoError(t, err) @@ -107,7 +108,7 @@ func TestAccAbortBind(t *testing.T) { // Bind should fail because prompting is not possible. t.Setenv("BUNDLE_ROOT", bundleRoot) t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + c := testcli.NewRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) // Expect error suggesting to use --auto-approve _, _, err = c.Run() @@ -157,7 +158,7 @@ func TestAccGenerateAndBind(t *testing.T) { }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "generate", "job", "--key", "test_job_key", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), @@ -173,7 +174,7 @@ func TestAccGenerateAndBind(t *testing.T) { require.Len(t, matches, 1) - c = internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + c = testcli.NewRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) diff --git a/internal/bundle/deploy_test.go b/internal/bundle/deploy_test.go index 283b05f7ab..244cfecc8a 100644 --- a/internal/bundle/deploy_test.go +++ b/internal/bundle/deploy_test.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" @@ -120,7 +121,7 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { // Redeploy the bundle t.Setenv("BUNDLE_ROOT", bundleRoot) t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -164,7 +165,7 @@ func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { // Redeploy the bundle. Expect it to fail because deleting the pipeline requires --auto-approve. t.Setenv("BUNDLE_ROOT", bundleRoot) t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -203,7 +204,7 @@ func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { // Redeploy the bundle, pointing the DLT pipeline to a different UC catalog. 
t.Setenv("BUNDLE_ROOT", bundleRoot) t.Setenv("TERM", "dumb") - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -284,7 +285,7 @@ func TestAccDeployUcVolume(t *testing.T) { // Recreation of the volume without --auto-approve should fail since prompting is not possible t.Setenv("TERM", "dumb") t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() + stdout, stderr, err := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() assert.Error(t, err) assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following volumes. For managed volumes, the files stored in the volume are also deleted from your @@ -296,7 +297,7 @@ is removed from the catalog, but the underlying files are not deleted: // Successfully recreate the volume with --auto-approve t.Setenv("TERM", "dumb") t.Setenv("BUNDLE_ROOT", bundleRoot) - _, _, err = internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() + _, _, err = testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() assert.NoError(t, err) // Assert the volume is updated successfully diff --git a/internal/bundle/generate_job_test.go b/internal/bundle/generate_job_test.go index 7fd265fd43..9e2f927aab 100644 --- a/internal/bundle/generate_job_test.go +++ b/internal/bundle/generate_job_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" @@ -36,7 +37,7 @@ func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "generate", "job", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index 589cb2a6c2..194f6d3096 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" @@ -35,7 +36,7 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", "--existing-pipeline-id", fmt.Sprint(pipelineId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, 
"src")) diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index d06a2a72d8..0512de8732 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -12,7 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" @@ -65,7 +65,7 @@ func writeConfigFile(t testutil.TestingT, config map[string]any) (string, error) func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") stdout, _, err := c.Run() return stdout.Bytes(), err } @@ -85,7 +85,7 @@ func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle { func deployBundle(t testutil.TestingT, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") _, _, err := c.Run() return err } @@ -93,7 +93,7 @@ func deployBundle(t testutil.TestingT, ctx context.Context, path string) error { func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args = append([]string{"bundle", "deploy"}, args...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunnerWithContext(t, ctx, args...) stdout, stderr, err := c.Run() return stdout.String(), stderr.String(), err } @@ -102,7 +102,7 @@ func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} args = append(args, flags...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunnerWithContext(t, ctx, args...) _, _, err := c.Run() return err } @@ -111,7 +111,7 @@ func runResource(t testutil.TestingT, ctx context.Context, path, key string) (st ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "run", key) + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "run", key) stdout, _, err := c.Run() return stdout.String(), err } @@ -123,14 +123,14 @@ func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key s args := make([]string, 0) args = append(args, "bundle", "run", key) args = append(args, params...) - c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + c := testcli.NewRunnerWithContext(t, ctx, args...) 
stdout, _, err := c.Run() return stdout.String(), err } func destroyBundle(t testutil.TestingT, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") + c := testcli.NewRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() return err } diff --git a/internal/clusters_test.go b/internal/clusters_test.go index 9828d240c2..cc1b2aa07c 100644 --- a/internal/clusters_test.go +++ b/internal/clusters_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/compute" @@ -16,7 +17,7 @@ import ( func TestAccClustersList(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, stderr := RequireSuccessfulRun(t, "clusters", "list") + stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "list") outStr := stdout.String() assert.Contains(t, outStr, "ID") assert.Contains(t, outStr, "Name") @@ -32,14 +33,14 @@ func TestAccClustersGet(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) clusterId := findValidClusterID(t) - stdout, stderr := RequireSuccessfulRun(t, "clusters", "get", clusterId) + stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "get", clusterId) outStr := stdout.String() assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId)) assert.Equal(t, "", stderr.String()) } func TestClusterCreateErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "clusters", "create") + _, _, err := testcli.RequireErrorRun(t, "clusters", "create") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } diff --git a/internal/completer_test.go b/internal/completer_test.go index b2c9368862..52a1f6c08b 100644 --- a/internal/completer_test.go +++ b/internal/completer_test.go @@ -7,6 +7,7 @@ import ( "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,7 +22,7 @@ func TestAccFsCompletion(t *testing.T) { f, tmpDir := setupDbfsFiler(t) setupCompletionFile(t, f) - stdout, _ := RequireSuccessfulRun(t, "__complete", "fs", "ls", tmpDir+"/") + stdout, _ := testcli.RequireSuccessfulRun(t, "__complete", "fs", "ls", tmpDir+"/") expectedOutput := fmt.Sprintf("%s/dir1/\n:2\n", tmpDir) assert.Equal(t, expectedOutput, stdout.String()) } diff --git a/internal/fs_cat_test.go b/internal/fs_cat_test.go index c625d885b9..1d9a0dbe18 100644 --- a/internal/fs_cat_test.go +++ b/internal/fs_cat_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" @@ -27,7 +28,7 @@ func TestAccFsCat(t *testing.T) { err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) require.NoError(t, err) - stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "abcd", stdout.String()) }) @@ -47,7 +48,7 @@ func TestAccFsCatOnADir(t *testing.T) { err := 
f.Mkdir(context.Background(), "dir1") require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) + _, _, err = testcli.RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) assert.ErrorAs(t, err, &filer.NotAFile{}) }) } @@ -64,7 +65,7 @@ func TestAccFsCatOnNonExistentFile(t *testing.T) { _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) + _, _, err := testcli.RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } @@ -73,7 +74,7 @@ func TestAccFsCatOnNonExistentFile(t *testing.T) { func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") + _, _, err := testcli.RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } @@ -92,6 +93,6 @@ func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { err = f.Write(ctx, "hello.txt", strings.NewReader("abc")) require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") + _, _, err = testcli.RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") assert.ErrorContains(t, err, "json output not supported") } diff --git a/internal/fs_cp_test.go b/internal/fs_cp_test.go index 41c2a44b9a..16318d5c15 100644 --- a/internal/fs_cp_test.go +++ b/internal/fs_cp_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" @@ -134,7 +135,7 @@ func TestAccFsCpDir(t *testing.T) { targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") assertTargetDir(t, context.Background(), targetFiler) }) @@ -154,7 +155,7 @@ func TestAccFsCpFileToFile(t *testing.T) { targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) assertTargetFile(t, context.Background(), targetFiler, "bar.txt") }) @@ -174,7 +175,7 @@ func TestAccFsCpFileToDir(t *testing.T) { targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) assertTargetFile(t, context.Background(), targetFiler, "foo.txt") }) @@ -193,7 +194,7 @@ func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { windowsPath := filepath.Join(filepath.FromSlash(sourceDir), "foo.txt") - RequireSuccessfulRun(t, "fs", "cp", windowsPath, targetDir) + testcli.RequireSuccessfulRun(t, "fs", "cp", windowsPath, targetDir) assertTargetFile(t, ctx, targetFiler, "foo.txt") } @@ -214,7 +215,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) 
require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") assertFileContent(t, context.Background(), targetFiler, "query.sql", "SELECT 1") assertFileContent(t, context.Background(), targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") @@ -239,7 +240,7 @@ func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) + testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") }) } @@ -262,7 +263,7 @@ func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/dontoverwrite.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) + testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/dontoverwrite.txt", "this should not be overwritten") }) } @@ -285,7 +286,7 @@ func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") + testcli.RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") assertTargetDir(t, context.Background(), targetFiler) }) } @@ -308,7 +309,7 @@ func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/overwritten.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") + testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/overwritten.txt", "hello, world\n") }) } @@ -331,7 +332,7 @@ func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") + testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "hello, world\n") }) } @@ -348,7 +349,7 @@ func 
TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) + _, _, err := testcli.RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) r := regexp.MustCompile("source path .* is a directory. Please specify the --recursive flag") assert.Regexp(t, r, err.Error()) }) @@ -358,7 +359,7 @@ func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") + _, _, err := testcli.RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) } @@ -379,7 +380,7 @@ func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { err := targetFiler.Write(context.Background(), "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) require.NoError(t, err) - _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") + _, _, err = testcli.RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") assert.Error(t, err) }) } diff --git a/internal/fs_ls_test.go b/internal/fs_ls_test.go index 999d9afea7..d56fc8bb00 100644 --- a/internal/fs_ls_test.go +++ b/internal/fs_ls_test.go @@ -10,6 +10,7 @@ import ( "testing" _ "github.com/databricks/cli/cmd/fs" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" @@ -51,7 +52,7 @@ func TestAccFsLs(t *testing.T) { f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -84,7 +85,7 @@ func TestAccFsLsWithAbsolutePaths(t *testing.T) { f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -116,7 +117,7 @@ func TestAccFsLsOnFile(t *testing.T) { f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) assert.ErrorAs(t, err, &filer.NotADirectory{}) }) @@ -134,7 +135,7 @@ func TestAccFsLsOnEmptyDir(t *testing.T) { _, tmpDir := tc.setupFiler(t) - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any err := json.Unmarshal(stdout.Bytes(), &parsedStdout) @@ -157,7 +158,7 @@ func TestAccFsLsForNonexistingDir(t *testing.T) { _, tmpDir := tc.setupFiler(t) - _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, "fs", "ls", 
path.Join(tmpDir, "nonexistent"), "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error()) }) @@ -169,6 +170,6 @@ func TestAccFsLsWithoutScheme(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") + _, _, err := testcli.RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go index 9191f61434..197f5d4b5d 100644 --- a/internal/fs_mkdir_test.go +++ b/internal/fs_mkdir_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -24,7 +25,7 @@ func TestAccFsMkdir(t *testing.T) { f, tmpDir := tc.setupFiler(t) // create directory "a" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -49,7 +50,7 @@ func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { f, tmpDir := tc.setupFiler(t) // create directory "a/b/c" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -90,7 +91,7 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { require.NoError(t, err) // assert run is successful without any errors - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) }) @@ -110,7 +111,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) // Different cloud providers or cloud configurations return different errors. 
regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$|(^|: )"The specified path already exists.".*$`) @@ -127,7 +128,7 @@ func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{}) }) diff --git a/internal/fs_rm_test.go b/internal/fs_rm_test.go index d49813a340..1af779fd27 100644 --- a/internal/fs_rm_test.go +++ b/internal/fs_rm_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -31,7 +32,7 @@ func TestAccFsRmFile(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -61,7 +62,7 @@ func TestAccFsRmEmptyDir(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -95,7 +96,7 @@ func TestAccFsRmNonEmptyDirectory(t *testing.T) { assert.NoError(t, err) // Run rm command - _, _, err = RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a")) + _, _, err = testcli.RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a")) assert.ErrorIs(t, err, fs.ErrInvalid) assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) }) @@ -114,7 +115,7 @@ func TestAccFsRmForNonExistentFile(t *testing.T) { _, tmpDir := tc.setupFiler(t) // Expect error if file does not exist - _, _, err := RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) + _, _, err := testcli.RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } @@ -144,7 +145,7 @@ func TestAccFsRmDirRecursively(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") + stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) diff --git a/internal/git_fetch_test.go b/internal/git_fetch_test.go index 797bc2e348..e692970c77 100644 --- a/internal/git_fetch_test.go +++ b/internal/git_fetch_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/git" @@ -44,9 +45,9 @@ func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { require.NoError(t, err) targetPath := testutil.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) - stdout, stderr := RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath) + stdout, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", 
examplesRepoUrl, examplesRepoProvider, "--path", targetPath) t.Cleanup(func() { - RequireSuccessfulRun(t, "repos", "delete", targetPath) + testcli.RequireSuccessfulRun(t, "repos", "delete", targetPath) }) assert.Empty(t, stderr.String()) @@ -71,9 +72,9 @@ func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { require.NoError(t, err) rootPath := testutil.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-")) - _, stderr := RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) + _, stderr := testcli.RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) t.Cleanup(func() { - RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) + testcli.RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) }) assert.Empty(t, stderr.String()) diff --git a/internal/helpers.go b/internal/helpers.go index b67ea889a8..f6cb199ea0 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -1,30 +1,18 @@ package internal import ( - "bufio" - "bytes" "context" - "encoding/json" "errors" "fmt" - "io" "net/http" "os" "path" "path/filepath" - "reflect" "strings" - "sync" - "time" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/cli/cmd" - _ "github.com/databricks/cli/cmd/version" - "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" @@ -33,305 +21,9 @@ import ( "github.com/databricks/databricks-sdk-go/service/files" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/workspace" - "github.com/spf13/cobra" - "github.com/spf13/pflag" "github.com/stretchr/testify/require" - - _ "github.com/databricks/cli/cmd/workspace" ) -// Helper for running the root command in the background. -// It ensures that the background goroutine terminates upon -// test completion through cancelling the command context. -type cobraTestRunner struct { - testutil.TestingT - - args []string - stdout bytes.Buffer - stderr bytes.Buffer - stdinR *io.PipeReader - stdinW *io.PipeWriter - - ctx context.Context - - // Line-by-line output. - // Background goroutines populate these channels by reading from stdout/stderr pipes. - stdoutLines <-chan string - stderrLines <-chan string - - errch <-chan error -} - -func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { - ch := make(chan string, 30000) - wg.Add(1) - go func() { - defer close(ch) - defer wg.Done() - scanner := bufio.NewScanner(r) - for scanner.Scan() { - // We expect to be able to always send these lines into the channel. - // If we can't, it means the channel is full and likely there is a problem - // in either the test or the code under test. - select { - case <-ctx.Done(): - return - case ch <- scanner.Text(): - continue - default: - panic("line buffer is full") - } - } - }() - return ch -} - -func (t *cobraTestRunner) registerFlagCleanup(c *cobra.Command) { - // Find target command that will be run. 
Example: if the command run is `databricks fs cp`, - // target command corresponds to `cp` - targetCmd, _, err := c.Find(t.args) - if err != nil && strings.HasPrefix(err.Error(), "unknown command") { - // even if command is unknown, we can proceed - require.NotNil(t, targetCmd) - } else { - require.NoError(t, err) - } - - // Force initialization of default flags. - // These are initialized by cobra at execution time and would otherwise - // not be cleaned up by the cleanup function below. - targetCmd.InitDefaultHelpFlag() - targetCmd.InitDefaultVersionFlag() - - // Restore flag values to their original value on test completion. - targetCmd.Flags().VisitAll(func(f *pflag.Flag) { - v := reflect.ValueOf(f.Value) - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - // Store copy of the current flag value. - reset := reflect.New(v.Type()).Elem() - reset.Set(v) - t.Cleanup(func() { - v.Set(reset) - }) - }) -} - -// Like [cobraTestRunner.Eventually], but more specific -func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) { - t.Eventually(func() bool { - currentStdout := t.stdout.String() - return strings.Contains(currentStdout, text) - }, timeout, 50*time.Millisecond) -} - -func (t *cobraTestRunner) WaitForOutput(text string, timeout time.Duration) { - require.Eventually(t, func() bool { - currentStdout := t.stdout.String() - currentErrout := t.stderr.String() - return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) - }, timeout, 50*time.Millisecond) -} - -func (t *cobraTestRunner) WithStdin() { - reader, writer := io.Pipe() - t.stdinR = reader - t.stdinW = writer -} - -func (t *cobraTestRunner) CloseStdin() { - if t.stdinW == nil { - panic("no standard input configured") - } - t.stdinW.Close() -} - -func (t *cobraTestRunner) SendText(text string) { - if t.stdinW == nil { - panic("no standard input configured") - } - _, err := t.stdinW.Write([]byte(text + "\n")) - if err != nil { - panic("Failed to to write to t.stdinW") - } -} - -func (t *cobraTestRunner) RunBackground() { - var stdoutR, stderrR io.Reader - var stdoutW, stderrW io.WriteCloser - stdoutR, stdoutW = io.Pipe() - stderrR, stderrW = io.Pipe() - ctx := cmdio.NewContext(t.ctx, &cmdio.Logger{ - Mode: flags.ModeAppend, - Reader: bufio.Reader{}, - Writer: stderrW, - }) - - cli := cmd.New(ctx) - cli.SetOut(stdoutW) - cli.SetErr(stderrW) - cli.SetArgs(t.args) - if t.stdinW != nil { - cli.SetIn(t.stdinR) - } - - // Register cleanup function to restore flags to their original values - // once test has been executed. This is needed because flag values reside - // in a global singleton data-structure, and thus subsequent tests might - // otherwise interfere with each other - t.registerFlagCleanup(cli) - - errch := make(chan error) - ctx, cancel := context.WithCancel(ctx) - - // Tee stdout/stderr to buffers. - stdoutR = io.TeeReader(stdoutR, &t.stdout) - stderrR = io.TeeReader(stderrR, &t.stderr) - - // Consume stdout/stderr line-by-line. - var wg sync.WaitGroup - t.stdoutLines = consumeLines(ctx, &wg, stdoutR) - t.stderrLines = consumeLines(ctx, &wg, stderrR) - - // Run command in background. - go func() { - err := root.Execute(ctx, cli) - if err != nil { - t.Logf("Error running command: %s", err) - } - - // Close pipes to signal EOF. - stdoutW.Close() - stderrW.Close() - - // Wait for the [consumeLines] routines to finish now that - // the pipes they're reading from have closed. - wg.Wait() - - if t.stdout.Len() > 0 { - // Make a copy of the buffer such that it remains "unread". 
- scanner := bufio.NewScanner(bytes.NewBuffer(t.stdout.Bytes())) - for scanner.Scan() { - t.Logf("[databricks stdout]: %s", scanner.Text()) - } - } - - if t.stderr.Len() > 0 { - // Make a copy of the buffer such that it remains "unread". - scanner := bufio.NewScanner(bytes.NewBuffer(t.stderr.Bytes())) - for scanner.Scan() { - t.Logf("[databricks stderr]: %s", scanner.Text()) - } - } - - // Reset context on command for the next test. - // These commands are globals so we have to clean up to the best of our ability after each run. - // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 - //nolint:staticcheck // cobra sets the context and doesn't clear it - cli.SetContext(nil) - - // Make caller aware of error. - errch <- err - close(errch) - }() - - // Ensure command terminates upon test completion (success or failure). - t.Cleanup(func() { - // Signal termination of command. - cancel() - // Wait for goroutine to finish. - <-errch - }) - - t.errch = errch -} - -func (t *cobraTestRunner) Run() (bytes.Buffer, bytes.Buffer, error) { - t.RunBackground() - err := <-t.errch - return t.stdout, t.stderr, err -} - -// Like [require.Eventually] but errors if the underlying command has failed. -func (c *cobraTestRunner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) { - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - // Kick off condition check immediately. - go func() { ch <- condition() }() - - for tick := ticker.C; ; { - select { - case err := <-c.errch: - require.Fail(c, "Command failed", err) - return - case <-timer.C: - require.Fail(c, "Condition never satisfied", msgAndArgs...) - return - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return - } - tick = ticker.C - } - } -} - -func (t *cobraTestRunner) RunAndExpectOutput(heredoc string) { - stdout, _, err := t.Run() - require.NoError(t, err) - require.Equal(t, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) -} - -func (t *cobraTestRunner) RunAndParseJSON(v any) { - stdout, _, err := t.Run() - require.NoError(t, err) - err = json.Unmarshal(stdout.Bytes(), &v) - require.NoError(t, err) -} - -func NewCobraTestRunner(t testutil.TestingT, args ...string) *cobraTestRunner { - return &cobraTestRunner{ - TestingT: t, - - ctx: context.Background(), - args: args, - } -} - -func NewCobraTestRunnerWithContext(t testutil.TestingT, ctx context.Context, args ...string) *cobraTestRunner { - return &cobraTestRunner{ - TestingT: t, - - ctx: ctx, - args: args, - } -} - -func RequireSuccessfulRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer) { - t.Logf("run args: [%s]", strings.Join(args, ", ")) - c := NewCobraTestRunner(t, args...) - stdout, stderr, err := c.Run() - require.NoError(t, err) - return stdout, stderr -} - -func RequireErrorRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer, error) { - c := NewCobraTestRunner(t, args...) 
- stdout, stderr, err := c.Run() - require.Error(t, err) - return stdout, stderr, err -} - func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { tasks := make([]jobs.SubmitTask, 0) for i := 0; i < len(versions); i++ { diff --git a/internal/init_test.go b/internal/init_test.go index e219e265cf..e2d96b3e15 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/iamutil" "github.com/databricks/databricks-sdk-go" @@ -22,7 +23,7 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() - _, _, err := RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) + _, _, err := testcli.RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") } @@ -63,7 +64,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { // Run bundle init assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) + testcli.RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) // Assert that the README.md file was created assert.FileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) @@ -71,17 +72,17 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { // Validate the stack testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) - RequireSuccessfulRun(t, "bundle", "validate") + testcli.RequireSuccessfulRun(t, "bundle", "validate") // Deploy the stack - RequireSuccessfulRun(t, "bundle", "deploy") + testcli.RequireSuccessfulRun(t, "bundle", "deploy") t.Cleanup(func() { // Delete the stack - RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") + testcli.RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") }) // Get summary of the bundle deployment - stdout, _ := RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") + stdout, _ := testcli.RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") summary := &config.Root{} err = json.Unmarshal(stdout.Bytes(), summary) require.NoError(t, err) @@ -159,7 +160,7 @@ func TestAccBundleInitHelpers(t *testing.T) { require.NoError(t, err) // Run bundle init. - RequireSuccessfulRun(t, "bundle", "init", tmpDir, "--output-dir", tmpDir2) + testcli.RequireSuccessfulRun(t, "bundle", "init", tmpDir, "--output-dir", tmpDir2) // Assert that the helper function was correctly computed. 
assertLocalFileContents(t, filepath.Join(tmpDir2, "foo.txt"), test.expected) diff --git a/internal/jobs_test.go b/internal/jobs_test.go index e9132cea01..2781323e6a 100644 --- a/internal/jobs_test.go +++ b/internal/jobs_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,10 +18,10 @@ func TestAccCreateJob(t *testing.T) { if env != "azure" { t.Skipf("Not running test on cloud %s", env) } - stdout, stderr := RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug") + stdout, stderr := testcli.RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug") assert.Empty(t, stderr.String()) var output map[string]int err := json.Unmarshal(stdout.Bytes(), &output) require.NoError(t, err) - RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") + testcli.RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") } diff --git a/internal/repos_test.go b/internal/repos_test.go index b681ff7e50..a89217fe1f 100644 --- a/internal/repos_test.go +++ b/internal/repos_test.go @@ -6,6 +6,7 @@ import ( "strconv" "testing" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" @@ -52,7 +53,7 @@ func TestAccReposCreateWithProvider(t *testing.T) { require.NoError(t, err) repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := RequireSuccessfulRun(t, "repos", "create", repoUrl, "gitHub", "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", repoUrl, "gitHub", "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. @@ -69,7 +70,7 @@ func TestAccReposCreateWithoutProvider(t *testing.T) { require.NoError(t, err) repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := RequireSuccessfulRun(t, "repos", "create", repoUrl, "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", repoUrl, "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. 
@@ -88,22 +89,22 @@ func TestAccReposGet(t *testing.T) { repoId, repoPath := createTemporaryRepo(t, w, ctx) // Get by ID - byIdOutput, stderr := RequireSuccessfulRun(t, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json") + byIdOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json") assert.Equal(t, "", stderr.String()) // Get by path - byPathOutput, stderr := RequireSuccessfulRun(t, "repos", "get", repoPath, "--output=json") + byPathOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "get", repoPath, "--output=json") assert.Equal(t, "", stderr.String()) // Output should be the same assert.Equal(t, byIdOutput.String(), byPathOutput.String()) // Get by path fails - _, stderr, err = RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json") + _, stderr, err = testcli.RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json") assert.ErrorContains(t, err, "failed to look up repo") // Get by path resolves to something other than a repo - _, stderr, err = RequireErrorRun(t, "repos", "get", "/Repos", "--output=json") + _, stderr, err = testcli.RequireErrorRun(t, "repos", "get", "/Repos", "--output=json") assert.ErrorContains(t, err, "is not a repo") } @@ -117,11 +118,11 @@ func TestAccReposUpdate(t *testing.T) { repoId, repoPath := createTemporaryRepo(t, w, ctx) // Update by ID - byIdOutput, stderr := RequireSuccessfulRun(t, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide") + byIdOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide") assert.Equal(t, "", stderr.String()) // Update by path - byPathOutput, stderr := RequireSuccessfulRun(t, "repos", "update", repoPath, "--branch", "ide") + byPathOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "update", repoPath, "--branch", "ide") assert.Equal(t, "", stderr.String()) // Output should be the same @@ -138,7 +139,7 @@ func TestAccReposDeleteByID(t *testing.T) { repoId, _ := createTemporaryRepo(t, w, ctx) // Delete by ID - stdout, stderr := RequireSuccessfulRun(t, "repos", "delete", strconv.FormatInt(repoId, 10)) + stdout, stderr := testcli.RequireSuccessfulRun(t, "repos", "delete", strconv.FormatInt(repoId, 10)) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) @@ -157,7 +158,7 @@ func TestAccReposDeleteByPath(t *testing.T) { repoId, repoPath := createTemporaryRepo(t, w, ctx) // Delete by path - stdout, stderr := RequireSuccessfulRun(t, "repos", "delete", repoPath) + stdout, stderr := testcli.RequireSuccessfulRun(t, "repos", "delete", repoPath) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) diff --git a/internal/secrets_test.go b/internal/secrets_test.go index d780654bd4..0ad9cece71 100644 --- a/internal/secrets_test.go +++ b/internal/secrets_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" @@ -14,7 +15,7 @@ import ( ) func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "secrets", "create-scope") + _, _, err := testcli.RequireErrorRun(t, "secrets", "create-scope") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } @@ -68,7 +69,7 @@ func TestAccSecretsPutSecretStringValue(tt *testing.T) { key := "test-key" value := 
"test-value\nwith-newlines\n" - stdout, stderr := RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--string-value", value) + stdout, stderr := testcli.RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--string-value", value) assert.Empty(t, stdout) assert.Empty(t, stderr) @@ -82,7 +83,7 @@ func TestAccSecretsPutSecretBytesValue(tt *testing.T) { key := "test-key" value := []byte{0x00, 0x01, 0x02, 0x03} - stdout, stderr := RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) + stdout, stderr := testcli.RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) assert.Empty(t, stdout) assert.Empty(t, stderr) diff --git a/internal/storage_credentials_test.go b/internal/storage_credentials_test.go index e937a9e7c4..b7fe1aa56a 100644 --- a/internal/storage_credentials_test.go +++ b/internal/storage_credentials_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -14,7 +15,7 @@ func TestAccStorageCredentialsListRendersResponse(t *testing.T) { // Check if metastore is assigned for the workspace, otherwise test will fail t.Log(testutil.GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) - stdout, stderr := RequireSuccessfulRun(t, "storage-credentials", "list") + stdout, stderr := testcli.RequireSuccessfulRun(t, "storage-credentials", "list") assert.NotEmpty(t, stdout) assert.Empty(t, stderr) } diff --git a/internal/sync_test.go b/internal/sync_test.go index 3699ce376c..848c60ce66 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - _ "github.com/databricks/cli/cmd/sync" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/sync" @@ -64,7 +64,7 @@ func setupRepo(t *testing.T, wsc *databricks.WorkspaceClient, ctx context.Contex type syncTest struct { t *testing.T - c *cobraTestRunner + c *testcli.Runner w *databricks.WorkspaceClient f filer.Filer localRoot string @@ -89,7 +89,7 @@ func setupSyncTest(t *testing.T, args ...string) *syncTest { "json", }, args...) - c := NewCobraTestRunner(t, args...) + c := testcli.NewRunner(t, args...) c.RunBackground() return &syncTest{ @@ -110,7 +110,7 @@ func (s *syncTest) waitForCompletionMarker() { select { case <-ctx.Done(): s.t.Fatal("timed out waiting for sync to complete") - case line := <-s.c.stdoutLines: + case line := <-s.c.StdoutLines: var event sync.EventBase err := json.Unmarshal([]byte(line), &event) require.NoError(s.t, err) diff --git a/internal/testcli/README.md b/internal/testcli/README.md new file mode 100644 index 0000000000..b37ae3bc93 --- /dev/null +++ b/internal/testcli/README.md @@ -0,0 +1,7 @@ +# testcli + +This package provides a way to run the CLI from tests as if it were a separate process. +By running the CLI inline we can still set breakpoints and step through execution. + +It transitively imports pretty much this entire repository, which is why we +intentionally keep this package _separate_ from `testutil`. 
diff --git a/internal/testcli/runner.go b/internal/testcli/runner.go new file mode 100644 index 0000000000..55e3faca12 --- /dev/null +++ b/internal/testcli/runner.go @@ -0,0 +1,315 @@ +package testcli + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "reflect" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/stretchr/testify/require" + + "github.com/databricks/cli/cmd" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" +) + +// Helper for running the root command in the background. +// It ensures that the background goroutine terminates upon +// test completion through cancelling the command context. +type Runner struct { + testutil.TestingT + + args []string + stdout bytes.Buffer + stderr bytes.Buffer + stdinR *io.PipeReader + stdinW *io.PipeWriter + + ctx context.Context + + // Line-by-line output. + // Background goroutines populate these channels by reading from stdout/stderr pipes. + StdoutLines <-chan string + StderrLines <-chan string + + errch <-chan error +} + +func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { + ch := make(chan string, 30000) + wg.Add(1) + go func() { + defer close(ch) + defer wg.Done() + scanner := bufio.NewScanner(r) + for scanner.Scan() { + // We expect to be able to always send these lines into the channel. + // If we can't, it means the channel is full and likely there is a problem + // in either the test or the code under test. + select { + case <-ctx.Done(): + return + case ch <- scanner.Text(): + continue + default: + panic("line buffer is full") + } + } + }() + return ch +} + +func (r *Runner) registerFlagCleanup(c *cobra.Command) { + // Find target command that will be run. Example: if the command run is `databricks fs cp`, + // target command corresponds to `cp` + targetCmd, _, err := c.Find(r.args) + if err != nil && strings.HasPrefix(err.Error(), "unknown command") { + // even if command is unknown, we can proceed + require.NotNil(r, targetCmd) + } else { + require.NoError(r, err) + } + + // Force initialization of default flags. + // These are initialized by cobra at execution time and would otherwise + // not be cleaned up by the cleanup function below. + targetCmd.InitDefaultHelpFlag() + targetCmd.InitDefaultVersionFlag() + + // Restore flag values to their original value on test completion. + targetCmd.Flags().VisitAll(func(f *pflag.Flag) { + v := reflect.ValueOf(f.Value) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // Store copy of the current flag value. 
+ reset := reflect.New(v.Type()).Elem() + reset.Set(v) + r.Cleanup(func() { + v.Set(reset) + }) + }) +} + +// Like [Runner.Eventually], but more specific +func (r *Runner) WaitForTextPrinted(text string, timeout time.Duration) { + r.Eventually(func() bool { + currentStdout := r.stdout.String() + return strings.Contains(currentStdout, text) + }, timeout, 50*time.Millisecond) +} + +func (r *Runner) WaitForOutput(text string, timeout time.Duration) { + require.Eventually(r, func() bool { + currentStdout := r.stdout.String() + currentErrout := r.stderr.String() + return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) + }, timeout, 50*time.Millisecond) +} + +func (r *Runner) WithStdin() { + reader, writer := io.Pipe() + r.stdinR = reader + r.stdinW = writer +} + +func (r *Runner) CloseStdin() { + if r.stdinW == nil { + panic("no standard input configured") + } + r.stdinW.Close() +} + +func (r *Runner) SendText(text string) { + if r.stdinW == nil { + panic("no standard input configured") + } + _, err := r.stdinW.Write([]byte(text + "\n")) + if err != nil { + panic("Failed to to write to t.stdinW") + } +} + +func (r *Runner) RunBackground() { + var stdoutR, stderrR io.Reader + var stdoutW, stderrW io.WriteCloser + stdoutR, stdoutW = io.Pipe() + stderrR, stderrW = io.Pipe() + ctx := cmdio.NewContext(r.ctx, &cmdio.Logger{ + Mode: flags.ModeAppend, + Reader: bufio.Reader{}, + Writer: stderrW, + }) + + cli := cmd.New(ctx) + cli.SetOut(stdoutW) + cli.SetErr(stderrW) + cli.SetArgs(r.args) + if r.stdinW != nil { + cli.SetIn(r.stdinR) + } + + // Register cleanup function to restore flags to their original values + // once test has been executed. This is needed because flag values reside + // in a global singleton data-structure, and thus subsequent tests might + // otherwise interfere with each other + r.registerFlagCleanup(cli) + + errch := make(chan error) + ctx, cancel := context.WithCancel(ctx) + + // Tee stdout/stderr to buffers. + stdoutR = io.TeeReader(stdoutR, &r.stdout) + stderrR = io.TeeReader(stderrR, &r.stderr) + + // Consume stdout/stderr line-by-line. + var wg sync.WaitGroup + r.StdoutLines = consumeLines(ctx, &wg, stdoutR) + r.StderrLines = consumeLines(ctx, &wg, stderrR) + + // Run command in background. + go func() { + err := root.Execute(ctx, cli) + if err != nil { + r.Logf("Error running command: %s", err) + } + + // Close pipes to signal EOF. + stdoutW.Close() + stderrW.Close() + + // Wait for the [consumeLines] routines to finish now that + // the pipes they're reading from have closed. + wg.Wait() + + if r.stdout.Len() > 0 { + // Make a copy of the buffer such that it remains "unread". + scanner := bufio.NewScanner(bytes.NewBuffer(r.stdout.Bytes())) + for scanner.Scan() { + r.Logf("[databricks stdout]: %s", scanner.Text()) + } + } + + if r.stderr.Len() > 0 { + // Make a copy of the buffer such that it remains "unread". + scanner := bufio.NewScanner(bytes.NewBuffer(r.stderr.Bytes())) + for scanner.Scan() { + r.Logf("[databricks stderr]: %s", scanner.Text()) + } + } + + // Reset context on command for the next test. + // These commands are globals so we have to clean up to the best of our ability after each run. + // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 + //nolint:staticcheck // cobra sets the context and doesn't clear it + cli.SetContext(nil) + + // Make caller aware of error. + errch <- err + close(errch) + }() + + // Ensure command terminates upon test completion (success or failure). 
+ r.Cleanup(func() { + // Signal termination of command. + cancel() + // Wait for goroutine to finish. + <-errch + }) + + r.errch = errch +} + +func (r *Runner) Run() (bytes.Buffer, bytes.Buffer, error) { + r.RunBackground() + err := <-r.errch + return r.stdout, r.stderr, err +} + +// Like [require.Eventually] but errors if the underlying command has failed. +func (r *Runner) Eventually(condition func() bool, waitFor, tick time.Duration, msgAndArgs ...any) { + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + // Kick off condition check immediately. + go func() { ch <- condition() }() + + for tick := ticker.C; ; { + select { + case err := <-r.errch: + require.Fail(r, "Command failed", err) + return + case <-timer.C: + require.Fail(r, "Condition never satisfied", msgAndArgs...) + return + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { + return + } + tick = ticker.C + } + } +} + +func (r *Runner) RunAndExpectOutput(heredoc string) { + stdout, _, err := r.Run() + require.NoError(r, err) + require.Equal(r, cmdio.Heredoc(heredoc), strings.TrimSpace(stdout.String())) +} + +func (r *Runner) RunAndParseJSON(v any) { + stdout, _, err := r.Run() + require.NoError(r, err) + err = json.Unmarshal(stdout.Bytes(), &v) + require.NoError(r, err) +} + +func NewRunner(t testutil.TestingT, args ...string) *Runner { + return &Runner{ + TestingT: t, + + ctx: context.Background(), + args: args, + } +} + +func NewRunnerWithContext(t testutil.TestingT, ctx context.Context, args ...string) *Runner { + return &Runner{ + TestingT: t, + + ctx: ctx, + args: args, + } +} + +func RequireSuccessfulRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer) { + t.Logf("run args: [%s]", strings.Join(args, ", ")) + r := NewRunner(t, args...) + stdout, stderr, err := r.Run() + require.NoError(t, err) + return stdout, stderr +} + +func RequireErrorRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer, error) { + r := NewRunner(t, args...) 
+ stdout, stderr, err := r.Run() + require.Error(t, err) + return stdout, stderr, err +} diff --git a/internal/unknown_command_test.go b/internal/unknown_command_test.go index 62b84027f1..96cb4fa5e8 100644 --- a/internal/unknown_command_test.go +++ b/internal/unknown_command_test.go @@ -3,11 +3,12 @@ package internal import ( "testing" + "github.com/databricks/cli/internal/testcli" assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestUnknownCommand(t *testing.T) { - stdout, stderr, err := RequireErrorRun(t, "unknown-command") + stdout, stderr, err := testcli.RequireErrorRun(t, "unknown-command") assert.Error(t, err, "unknown command", `unknown command "unknown-command" for "databricks"`) assert.Equal(t, "", stdout.String()) diff --git a/internal/version_test.go b/internal/version_test.go index 7dba63cd8c..5c5c3a1005 100644 --- a/internal/version_test.go +++ b/internal/version_test.go @@ -6,31 +6,32 @@ import ( "testing" "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/internal/testcli" "github.com/stretchr/testify/assert" ) var expectedVersion = fmt.Sprintf("Databricks CLI v%s\n", build.GetInfo().Version) func TestVersionFlagShort(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "-v") + stdout, stderr := testcli.RequireSuccessfulRun(t, "-v") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionFlagLong(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "--version") + stdout, stderr := testcli.RequireSuccessfulRun(t, "--version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommand(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "version") + stdout, stderr := testcli.RequireSuccessfulRun(t, "version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommandWithJSONOutput(t *testing.T) { - stdout, stderr := RequireSuccessfulRun(t, "version", "--output", "json") + stdout, stderr := testcli.RequireSuccessfulRun(t, "version", "--output", "json") assert.NotEmpty(t, stdout.String()) assert.Equal(t, "", stderr.String()) diff --git a/internal/workspace_test.go b/internal/workspace_test.go index e159fc1dcf..7ff9b0bb8c 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" @@ -23,7 +24,7 @@ import ( func TestAccWorkspaceList(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, stderr := RequireSuccessfulRun(t, "workspace", "list", "/") + stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "list", "/") outStr := stdout.String() assert.Contains(t, outStr, "ID") assert.Contains(t, outStr, "Type") @@ -33,12 +34,12 @@ func TestAccWorkspaceList(t *testing.T) { } func TestWorkpaceListErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "workspace", "list") + _, _, err := testcli.RequireErrorRun(t, "workspace", "list") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { - _, _, err := RequireErrorRun(t, "workspace", "get-status") + _, _, err := testcli.RequireErrorRun(t, "workspace", "get-status") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } @@ -57,7 +58,7 @@ func 
TestAccWorkpaceExportPrintsContents(t *testing.T) { require.NoError(t, err) // Run export - stdout, stderr := RequireSuccessfulRun(t, "workspace", "export", path.Join(tmpdir, "file-a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(tmpdir, "file-a")) assert.Equal(t, contents, stdout.String()) assert.Equal(t, "", stderr.String()) } @@ -125,7 +126,7 @@ func TestAccExportDir(t *testing.T) { }, "\n") // Run Export - stdout, stderr := RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) assert.Equal(t, expectedLogs, stdout.String()) assert.Equal(t, "", stderr.String()) @@ -153,7 +154,7 @@ func TestAccExportDirDoesNotOverwrite(t *testing.T) { require.NoError(t, err) // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + testcli.RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) // Assert file is not overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "local content") @@ -174,7 +175,7 @@ func TestAccExportDirWithOverwriteFlag(t *testing.T) { require.NoError(t, err) // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") // Assert file has been overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "content from workspace") @@ -182,7 +183,7 @@ func TestAccExportDirWithOverwriteFlag(t *testing.T) { func TestAccImportDir(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - stdout, stderr := RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") + stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") expectedLogs := strings.Join([]string{ fmt.Sprintf("Importing files from %s", "./testdata/import_dir"), @@ -223,7 +224,7 @@ func TestAccImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir) + testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir) // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -251,7 +252,7 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -273,7 +274,7 @@ func TestAccExport(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a")) + stdout, _ := testcli.RequireSuccessfulRun(t, "workspace", 
"export", path.Join(sourceDir, "file-a")) b, err := io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "abc", string(b)) @@ -281,13 +282,13 @@ func TestAccExport(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook")) + stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "# Databricks notebook source\n", string(b)) // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") + stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Contains(t, string(b), `"cells":`, "jupyter notebooks contain the cells field") @@ -303,7 +304,7 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) + stdout, _ := testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) b, err := io.ReadAll(&stdout) require.NoError(t, err) // Expect nothing to be printed to stdout @@ -313,14 +314,14 @@ func TestAccExportWithFileFlag(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) + stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) assertLocalFileContents(t, filepath.Join(localTmpDir, "pyNb.py"), "# Databricks notebook source\n") // Export python notebook as jupyter - stdout, _ = RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) + stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) @@ -332,13 +333,13 @@ func TestAccImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `print(1)`. Uploaded as a notebook by default - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyScript"), + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyScript"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyScript", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyScript", workspace.ObjectTypeNotebook) // Import with content = `# Databricks notebook source\nprint(1)`. 
Uploaded as a notebook with the content just being print(1) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNb"), + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNb"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNb", "print(1)") @@ -349,19 +350,19 @@ func TestAccImportFileUsingContentFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Content = `# Databricks notebook source\nprint(1)`. Upload as notebook if path has py extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Content = `print(1)`. 
Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) @@ -369,15 +370,15 @@ func TestAccImportFileUsingContentFormatAuto(t *testing.T) { func TestAccImportFileFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNotebook", workspace.ObjectTypeNotebook) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") assertFilerFileContents(t, ctx, workspaceFiler, "scalaNotebook", "// Databricks notebook source\nprintln(\"scala\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "scalaNotebook", workspace.ObjectTypeNotebook) - _, _, err := RequireErrorRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") + _, _, err := testcli.RequireErrorRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. 
Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") } @@ -385,18 +386,18 @@ func TestAccImportFileFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Upload as file if path has no extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "print(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Upload as notebook if path has extension - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Upload as file if content is not notebook (even if path has .py extension) - RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") + testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } From 61b0c59137233b9ee969354e3f4aa5c13ca86f8a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 12 Dec 2024 22:28:04 +0100 Subject: [PATCH 40/63] Move test helpers from internal to `acc` and `testutil` (#2008) ## Changes This change moves fixture helpers to `internal/acc/fixtures.go`. These helpers create an ephemeral path or resource for the duration of a test. Call sites are updated to use `acc.WorkspaceTest()` to construct a workspace-focused test wrapper as needed. This change also moves the `GetNodeTypeID()` function to `testutil`. 
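As an illustrative sketch (not part of this commit), a call site after this change might look like the following; the test name is hypothetical and only uses helpers shown in the diffs below.

```go
package bundle

import (
	"testing"

	"github.com/databricks/cli/internal/acc"
	"github.com/databricks/cli/internal/testutil"
)

// Hypothetical call site showing the relocated helpers in use.
func TestExampleFixtureUsage(t *testing.T) {
	// Construct a workspace-focused test wrapper.
	ctx, wt := acc.WorkspaceTest(t)
	_ = ctx // the context would be passed to SDK calls in a real test

	// Ephemeral workspace directory, removed automatically on test cleanup.
	wsDir := acc.TemporaryWorkspaceDir(wt, "example-")

	// Cloud-appropriate node type, now resolved via testutil.
	nodeTypeId := testutil.GetCloud(t).NodeTypeID()

	wt.Logf("deploying to %s with node type %s", wsDir, nodeTypeId)
}
```
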
## Tests n/a --- internal/acc/fixtures.go | 133 +++++++++++ internal/acc/workspace.go | 30 --- internal/bundle/artifacts_test.go | 10 +- internal/bundle/basic_test.go | 5 +- internal/bundle/bind_resource_test.go | 11 +- internal/bundle/clusters_test.go | 4 +- internal/bundle/deploy_test.go | 7 +- .../deploy_then_remove_resources_test.go | 5 +- internal/bundle/deploy_to_shared_test.go | 5 +- internal/bundle/deployment_state_test.go | 3 +- internal/bundle/destroy_test.go | 5 +- internal/bundle/generate_job_test.go | 19 +- internal/bundle/generate_pipeline_test.go | 10 +- internal/bundle/job_metadata_test.go | 5 +- internal/bundle/local_state_staleness_test.go | 5 +- internal/bundle/python_wheel_test.go | 3 +- internal/bundle/spark_jar_test.go | 6 +- internal/dashboard_assumptions_test.go | 2 +- internal/fs_cat_test.go | 12 +- internal/helpers.go | 223 +----------------- internal/locker_test.go | 19 +- internal/python/python_tasks_test.go | 110 +++++++-- internal/sync_test.go | 7 +- internal/tags_test.go | 24 +- internal/testutil/cloud.go | 13 + internal/workspace_test.go | 13 +- 26 files changed, 309 insertions(+), 380 deletions(-) create mode 100644 internal/acc/fixtures.go diff --git a/internal/acc/fixtures.go b/internal/acc/fixtures.go new file mode 100644 index 0000000000..cd867fb3a9 --- /dev/null +++ b/internal/acc/fixtures.go @@ -0,0 +1,133 @@ +package acc + +import ( + "fmt" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/require" +) + +func TemporaryWorkspaceDir(t *WorkspaceT, name ...string) string { + ctx := t.ctx + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + // Prefix the name with "integration-test-" to make it easier to identify. + name = append([]string{"integration-test-"}, name...) + basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName(name...)) + + t.Logf("Creating workspace directory %s", basePath) + err = t.W.Workspace.MkdirsByPath(ctx, basePath) + require.NoError(t, err) + + // Remove test directory on test completion. + t.Cleanup(func() { + t.Logf("Removing workspace directory %s", basePath) + err := t.W.Workspace.Delete(ctx, workspace.Delete{ + Path: basePath, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) + }) + + return basePath +} + +func TemporaryDbfsDir(t *WorkspaceT, name ...string) string { + ctx := t.ctx + + // Prefix the name with "integration-test-" to make it easier to identify. + name = append([]string{"integration-test-"}, name...) + path := fmt.Sprintf("/tmp/%s", testutil.RandomName(name...)) + + t.Logf("Creating DBFS directory %s", path) + err := t.W.Dbfs.MkdirsByPath(ctx, path) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing DBFS directory %s", path) + err := t.W.Dbfs.Delete(ctx, files.Delete{ + Path: path, + Recursive: true, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove temporary DBFS directory %s: %#v", path, err) + }) + + return path +} + +func TemporaryRepo(t *WorkspaceT, url string) string { + ctx := t.ctx + me, err := t.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + // Prefix the path with "integration-test-" to make it easier to identify. 
+ path := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("integration-test-")) + + t.Logf("Creating repo: %s", path) + resp, err := t.W.Repos.Create(ctx, workspace.CreateRepoRequest{ + Url: url, + Path: path, + Provider: "gitHub", + }) + require.NoError(t, err) + + t.Cleanup(func() { + t.Logf("Removing repo: %s", path) + err := t.W.Repos.Delete(ctx, workspace.DeleteRepoRequest{ + RepoId: resp.Id, + }) + if err == nil || apierr.IsMissing(err) { + return + } + t.Logf("Unable to remove repo %s: %#v", path, err) + }) + + return path +} + +// Create a new Unity Catalog volume in a catalog called "main" in the workspace. +func TemporaryVolume(t *WorkspaceT) string { + ctx := t.ctx + w := t.W + + // Create a schema + schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Name: testutil.RandomName("test-schema-"), + }) + require.NoError(t, err) + t.Cleanup(func() { + err := w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ + FullName: schema.FullName, + }) + require.NoError(t, err) + }) + + // Create a volume + volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ + CatalogName: "main", + SchemaName: schema.Name, + Name: "my-volume", + VolumeType: catalog.VolumeTypeManaged, + }) + require.NoError(t, err) + t.Cleanup(func() { + err := w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ + Name: volume.FullName, + }) + require.NoError(t, err) + }) + + return fmt.Sprintf("/Volumes/%s/%s/%s", "main", schema.Name, volume.Name) +} diff --git a/internal/acc/workspace.go b/internal/acc/workspace.go index d640325b5f..2f8a5b8e7f 100644 --- a/internal/acc/workspace.go +++ b/internal/acc/workspace.go @@ -2,14 +2,11 @@ package acc import ( "context" - "fmt" "os" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" ) @@ -97,30 +94,3 @@ func (t *WorkspaceT) RunPython(code string) (string, error) { require.True(t, ok, "unexpected type %T", results.Data) return output, nil } - -func (t *WorkspaceT) TemporaryWorkspaceDir(name ...string) string { - ctx := context.Background() - me, err := t.W.CurrentUser.Me(ctx) - require.NoError(t, err) - - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName(name...)) - - t.Logf("Creating %s", basePath) - err = t.W.Workspace.MkdirsByPath(ctx, basePath) - require.NoError(t, err) - - // Remove test directory on test completion. 
- t.Cleanup(func() { - t.Logf("Removing %s", basePath) - err := t.W.Workspace.Delete(ctx, workspace.Delete{ - Path: basePath, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) - }) - - return basePath -} diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 8d4b0ae10a..7c8954a0cc 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -12,7 +12,6 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" @@ -34,12 +33,11 @@ func touchEmptyFile(t *testing.T, path string) { func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - wsDir := internal.TemporaryWorkspaceDir(t, w) + wsDir := acc.TemporaryWorkspaceDir(wt, "artifact-") b := &bundle.Bundle{ BundleRootPath: dir, @@ -99,12 +97,11 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - wsDir := internal.TemporaryWorkspaceDir(t, w) + wsDir := acc.TemporaryWorkspaceDir(wt, "artifact-") b := &bundle.Bundle{ BundleRootPath: dir, @@ -164,13 +161,12 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - w := wt.W if os.Getenv("TEST_METASTORE_ID") == "" { t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") } - volumePath := internal.TemporaryUcVolume(t, w) + volumePath := acc.TemporaryVolume(wt) dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") diff --git a/internal/bundle/basic_test.go b/internal/bundle/basic_test.go index c24ef0c054..13da6da9ba 100644 --- a/internal/bundle/basic_test.go +++ b/internal/bundle/basic_test.go @@ -5,9 +5,8 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/require" ) @@ -15,7 +14,7 @@ import ( func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { ctx, _ := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() root, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go index 0d54587833..7f8151ee9a 100644 --- a/internal/bundle/bind_resource_test.go +++ b/internal/bundle/bind_resource_test.go @@ -6,7 +6,6 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" @@ -22,9 +21,9 @@ func TestAccBindJobToExistingJob(t *testing.T) { 
t.Log(env) ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, @@ -87,9 +86,9 @@ func TestAccAbortBind(t *testing.T) { t.Log(env) ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, @@ -136,7 +135,7 @@ func TestAccGenerateAndBind(t *testing.T) { t.Log(env) ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ diff --git a/internal/bundle/clusters_test.go b/internal/bundle/clusters_test.go index 8849ff3bff..d79ded0b7d 100644 --- a/internal/bundle/clusters_test.go +++ b/internal/bundle/clusters_test.go @@ -4,10 +4,8 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/google/uuid" "github.com/stretchr/testify/require" @@ -20,7 +18,7 @@ func TestAccDeployBundleWithCluster(t *testing.T) { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() root, err := initTestTemplate(t, ctx, "clusters", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/deploy_test.go b/internal/bundle/deploy_test.go index 244cfecc8a..038705c37c 100644 --- a/internal/bundle/deploy_test.go +++ b/internal/bundle/deploy_test.go @@ -11,10 +11,9 @@ import ( "testing" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -133,7 +132,7 @@ func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, @@ -219,7 +218,7 @@ properties such as the 'catalog' or 'storage' are changed: func TestAccDeployBasicBundleLogs(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() root, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/internal/bundle/deploy_then_remove_resources_test.go index 66ec5c16ad..0cc7ab92d4 100644 --- 
a/internal/bundle/deploy_then_remove_resources_test.go +++ b/internal/bundle/deploy_then_remove_resources_test.go @@ -5,9 +5,8 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,7 +16,7 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/deploy_to_shared_test.go b/internal/bundle/deploy_to_shared_test.go index 6d9209d369..7b8d570293 100644 --- a/internal/bundle/deploy_to_shared_test.go +++ b/internal/bundle/deploy_to_shared_test.go @@ -4,9 +4,8 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/require" ) @@ -14,7 +13,7 @@ import ( func TestAccDeployBasicToSharedWorkspacePath(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() currentUser, err := wt.W.CurrentUser.Me(ctx) diff --git a/internal/bundle/deployment_state_test.go b/internal/bundle/deployment_state_test.go index 5e85740095..caa72d0a23 100644 --- a/internal/bundle/deployment_state_test.go +++ b/internal/bundle/deployment_state_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/databricks/cli/bundle/deploy" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" @@ -21,7 +20,7 @@ func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/destroy_test.go b/internal/bundle/destroy_test.go index baccf4e6fe..dad9c0874e 100644 --- a/internal/bundle/destroy_test.go +++ b/internal/bundle/destroy_test.go @@ -6,9 +6,8 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/apierr" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -19,7 +18,7 @@ func TestAccBundleDestroy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/generate_job_test.go b/internal/bundle/generate_job_test.go index 9e2f927aab..b254dac8c8 100644 --- a/internal/bundle/generate_job_test.go +++ b/internal/bundle/generate_job_test.go @@ -9,7 +9,6 @@ import ( 
"strings" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" @@ -23,7 +22,7 @@ import ( func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generateJobTest{T: t, w: wt.W} + gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ @@ -70,7 +69,7 @@ func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { } type generateJobTest struct { - T *testing.T + T *acc.WorkspaceT w *databricks.WorkspaceClient } @@ -78,17 +77,7 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { t := gt.T w := gt.w - var nodeTypeId string - switch testutil.GetCloud(t) { - case testutil.AWS: - nodeTypeId = "i3.xlarge" - case testutil.Azure: - nodeTypeId = "Standard_DS4_v2" - case testutil.GCP: - nodeTypeId = "n1-standard-4" - } - - tmpdir := internal.TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(t, "generate-job-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -103,7 +92,7 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { NewCluster: &compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, - NodeTypeId: nodeTypeId, + NodeTypeId: testutil.GetCloud(t).NodeTypeID(), SparkConf: map[string]string{ "spark.databricks.enableWsfs": "true", "spark.databricks.hive.metastore.glueCatalog.enabled": "true", diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index 194f6d3096..96d8d28285 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -9,7 +9,6 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" @@ -22,7 +21,7 @@ import ( func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - gt := &generatePipelineTest{T: t, w: wt.W} + gt := &generatePipelineTest{T: wt, w: wt.W} uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ @@ -78,7 +77,7 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { } type generatePipelineTest struct { - T *testing.T + T *acc.WorkspaceT w *databricks.WorkspaceClient } @@ -86,7 +85,7 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, t := gt.T w := gt.w - tmpdir := internal.TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(t, "generate-pipeline-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -96,8 +95,7 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() name := testutil.RandomName("generated-pipeline-") resp, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go index 21f1086ae5..de11d89a6f 100644 --- a/internal/bundle/job_metadata_test.go +++ b/internal/bundle/job_metadata_test.go @@ -10,9 +10,8 @@ import ( 
"github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -23,7 +22,7 @@ func TestAccJobsMetadataFile(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, diff --git a/internal/bundle/local_state_staleness_test.go b/internal/bundle/local_state_staleness_test.go index d11234667e..1c025b47a4 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/internal/bundle/local_state_staleness_test.go @@ -4,9 +4,8 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -25,7 +24,7 @@ func TestAccLocalStateStaleness(t *testing.T) { // Because of deploy (2), the locally cached state of bundle instance A should be stale. // Then for deploy (3), it must use the remote state over the stale local state. - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() initialize := func() string { root, err := initTestTemplate(t, ctx, "basic", map[string]any{ diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index 009417937e..1d8647396a 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -3,7 +3,6 @@ package bundle import ( "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" @@ -14,7 +13,7 @@ import ( func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonWheelWrapper bool) { ctx, _ := acc.WorkspaceTest(t) - nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{ "node_type_id": nodeTypeId, diff --git a/internal/bundle/spark_jar_test.go b/internal/bundle/spark_jar_test.go index cc67b30f2a..c2402deb60 100644 --- a/internal/bundle/spark_jar_test.go +++ b/internal/bundle/spark_jar_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" @@ -13,8 +12,7 @@ import ( ) func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, artifactPath string) { - cloudEnv := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - nodeTypeId := internal.GetNodeTypeId(cloudEnv) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() tmpDir := t.TempDir() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ @@ -42,7 +40,7 @@ func runSparkJarTestCommon(t *testing.T, ctx 
context.Context, sparkVersion, arti func runSparkJarTestFromVolume(t *testing.T, sparkVersion string) { ctx, wt := acc.UcWorkspaceTest(t) - volumePath := internal.TemporaryUcVolume(t, wt.W) + volumePath := acc.TemporaryVolume(wt) ctx = env.Set(ctx, "DATABRICKS_BUNDLE_TARGET", "volume") runSparkJarTestCommon(t, ctx, sparkVersion, volumePath) } diff --git a/internal/dashboard_assumptions_test.go b/internal/dashboard_assumptions_test.go index 1ad137b7f2..906efa7819 100644 --- a/internal/dashboard_assumptions_test.go +++ b/internal/dashboard_assumptions_test.go @@ -28,7 +28,7 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { dashboardPayload := []byte(`{"pages":[{"name":"2506f97a","displayName":"New Page"}]}`) warehouseId := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") - dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") + dir := acc.TemporaryWorkspaceDir(wt, "dashboard-assumptions-") dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ Dashboard: &dashboards.Dashboard{ diff --git a/internal/fs_cat_test.go b/internal/fs_cat_test.go index 1d9a0dbe18..e14121feaf 100644 --- a/internal/fs_cat_test.go +++ b/internal/fs_cat_test.go @@ -7,10 +7,10 @@ import ( "strings" "testing" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -79,14 +79,10 @@ func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { } func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + tmpDir := acc.TemporaryDbfsDir(wt, "fs-cat-") f, err := filer.NewDbfsClient(w, tmpDir) require.NoError(t, err) diff --git a/internal/helpers.go b/internal/helpers.go index f6cb199ea0..d57e9b9c2c 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -1,220 +1,20 @@ package internal import ( - "context" "errors" - "fmt" "net/http" "os" "path" "path/filepath" - "strings" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/service/catalog" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/files" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" ) -func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), - NotebookTask: &jobs.NotebookTask{ - NotebookPath: notebookPath, - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) 
[]jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), - SparkPythonTask: &jobs.SparkPythonTask{ - PythonFile: notebookPath, - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { - tasks := make([]jobs.SubmitTask, 0) - for i := 0; i < len(versions); i++ { - task := jobs.SubmitTask{ - TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), - PythonWheelTask: &jobs.PythonWheelTask{ - PackageName: "my_test_code", - EntryPoint: "run", - }, - NewCluster: &compute.ClusterSpec{ - SparkVersion: versions[i], - NumWorkers: 1, - NodeTypeId: nodeTypeId, - DataSecurityMode: compute.DataSecurityModeUserIsolation, - }, - Libraries: []compute.Library{ - {Whl: wheelPath}, - }, - } - tasks = append(tasks, task) - } - - return tasks -} - -func TemporaryWorkspaceDir(t testutil.TestingT, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - basePath := fmt.Sprintf("/Users/%s/%s", me.UserName, testutil.RandomName("integration-test-wsfs-")) - - t.Logf("Creating %s", basePath) - err = w.Workspace.MkdirsByPath(ctx, basePath) - require.NoError(t, err) - - // Remove test directory on test completion. - t.Cleanup(func() { - t.Logf("Removing %s", basePath) - err := w.Workspace.Delete(ctx, workspace.Delete{ - Path: basePath, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("Unable to remove temporary workspace directory %s: %#v", basePath, err) - }) - - return basePath -} - -func TemporaryDbfsDir(t testutil.TestingT, w *databricks.WorkspaceClient) string { - ctx := context.Background() - path := fmt.Sprintf("/tmp/%s", testutil.RandomName("integration-test-dbfs-")) - - t.Logf("Creating DBFS folder:%s", path) - err := w.Dbfs.MkdirsByPath(ctx, path) - require.NoError(t, err) - - t.Cleanup(func() { - t.Logf("Removing DBFS folder:%s", path) - err := w.Dbfs.Delete(ctx, files.Delete{ - Path: path, - Recursive: true, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove temporary dbfs directory %s: %#v", path, err) - }) - - return path -} - -// Create a new UC volume in a catalog called "main" in the workspace. 
-func TemporaryUcVolume(t testutil.TestingT, w *databricks.WorkspaceClient) string { - ctx := context.Background() - - // Create a schema - schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ - CatalogName: "main", - Name: testutil.RandomName("test-schema-"), - }) - require.NoError(t, err) - t.Cleanup(func() { - err := w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ - FullName: schema.FullName, - }) - require.NoError(t, err) - }) - - // Create a volume - volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ - CatalogName: "main", - SchemaName: schema.Name, - Name: "my-volume", - VolumeType: catalog.VolumeTypeManaged, - }) - require.NoError(t, err) - t.Cleanup(func() { - err := w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ - Name: volume.FullName, - }) - require.NoError(t, err) - }) - - return path.Join("/Volumes", "main", schema.Name, volume.Name) -} - -func TemporaryRepo(t testutil.TestingT, w *databricks.WorkspaceClient) string { - ctx := context.Background() - me, err := w.CurrentUser.Me(ctx) - require.NoError(t, err) - - repoPath := fmt.Sprintf("/Repos/%s/%s", me.UserName, testutil.RandomName("integration-test-repo-")) - - t.Logf("Creating repo:%s", repoPath) - repoInfo, err := w.Repos.Create(ctx, workspace.CreateRepoRequest{ - Url: "https://github.com/databricks/cli", - Provider: "github", - Path: repoPath, - }) - require.NoError(t, err) - - t.Cleanup(func() { - t.Logf("Removing repo: %s", repoPath) - err := w.Repos.Delete(ctx, workspace.DeleteRepoRequest{ - RepoId: repoInfo.Id, - }) - if err == nil || apierr.IsMissing(err) { - return - } - t.Logf("unable to remove repo %s: %#v", repoPath, err) - }) - - return repoPath -} - -func GetNodeTypeId(env string) string { - if env == "gcp" { - return "n1-standard-4" - } else if env == "aws" || env == "ucws" { - // aws-prod-ucws has CLOUD_ENV set to "ucws" - return "i3.xlarge" - } - return "Standard_DS4_v2" -} - func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) @@ -228,7 +28,7 @@ func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { func setupWsfsFiler(t testutil.TestingT) (filer.Filer, string) { ctx, wt := acc.WorkspaceTest(t) - tmpdir := TemporaryWorkspaceDir(t, wt.W) + tmpdir := acc.TemporaryWorkspaceDir(wt) f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) require.NoError(t, err) @@ -245,36 +45,31 @@ func setupWsfsFiler(t testutil.TestingT) (filer.Filer, string) { func setupWsfsExtensionsFiler(t testutil.TestingT) (filer.Filer, string) { _, wt := acc.WorkspaceTest(t) - tmpdir := TemporaryWorkspaceDir(t, wt.W) + tmpdir := acc.TemporaryWorkspaceDir(wt) f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) require.NoError(t, err) - return f, tmpdir } func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { _, wt := acc.WorkspaceTest(t) - tmpDir := TemporaryDbfsDir(t, wt.W) - f, err := filer.NewDbfsClient(wt.W, tmpDir) + tmpdir := acc.TemporaryDbfsDir(wt) + f, err := filer.NewDbfsClient(wt.W, tmpdir) require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) + return f, path.Join("dbfs:/", tmpdir) } func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + _, wt := acc.WorkspaceTest(t) if os.Getenv("TEST_METASTORE_ID") == "" { t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") } - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryUcVolume(t, w) - f, err := 
filer.NewFilesClient(w, tmpDir) + tmpdir := acc.TemporaryVolume(wt) + f, err := filer.NewFilesClient(wt.W, tmpdir) require.NoError(t, err) - return f, path.Join("dbfs:/", tmpDir) + return f, path.Join("dbfs:/", tmpdir) } diff --git a/internal/locker_test.go b/internal/locker_test.go index 3047273a06..87f74e9d85 100644 --- a/internal/locker_test.go +++ b/internal/locker_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" lockpkg "github.com/databricks/cli/libs/locker" @@ -164,14 +165,12 @@ func TestAccLock(t *testing.T) { assert.True(t, lockers[indexOfAnInactiveLocker].Active) } -func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer.Filer) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +func setupLockerTest(t *testing.T) (context.Context, *lockpkg.Locker, filer.Filer) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W // create temp wsfs dir - tmpDir := TemporaryWorkspaceDir(t, w) + tmpDir := acc.TemporaryWorkspaceDir(wt, "locker-") f, err := filer.NewWorkspaceFilesClient(w, tmpDir) require.NoError(t, err) @@ -179,12 +178,11 @@ func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer. locker, err := lockpkg.CreateLocker("redfoo@databricks.com", tmpDir, w) require.NoError(t, err) - return locker, f + return ctx, locker, f } func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { - ctx := context.Background() - locker, f := setupLockerTest(ctx, t) + ctx, locker, f := setupLockerTest(t) var err error // Acquire lock on tmp directory @@ -205,8 +203,7 @@ func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { } func TestAccLockUnlockWithAllowsLockFileNotExist(t *testing.T) { - ctx := context.Background() - locker, f := setupLockerTest(ctx, t) + ctx, locker, f := setupLockerTest(t) var err error // Acquire lock on tmp directory diff --git a/internal/python/python_tasks_test.go b/internal/python/python_tasks_test.go index b407973196..36efa4a1b8 100644 --- a/internal/python/python_tasks_test.go +++ b/internal/python/python_tasks_test.go @@ -14,10 +14,11 @@ import ( "time" "github.com/databricks/cli/bundle/run/output" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/require" @@ -127,14 +128,14 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { w := tw.w - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() tasks := make([]jobs.SubmitTask, 0) if opts.includeNotebookTasks { - tasks = append(tasks, internal.GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) + tasks = append(tasks, GenerateNotebookTasks(tw.pyNotebookPath, sparkVersions, nodeTypeId)...) } if opts.includeSparkPythonTasks { - tasks = append(tasks, internal.GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) + tasks = append(tasks, GenerateSparkPythonTasks(tw.sparkPythonPath, sparkVersions, nodeTypeId)...) 
} if opts.includeWheelTasks { @@ -142,7 +143,7 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { if len(opts.wheelSparkVersions) > 0 { versions = opts.wheelSparkVersions } - tasks = append(tasks, internal.GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) + tasks = append(tasks, GenerateWheelTasks(tw.wheelPath, versions, nodeTypeId)...) } ctx := context.Background() @@ -179,13 +180,13 @@ func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { } func prepareWorkspaceFiles(t *testing.T) *testFiles { - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + var err error + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - baseDir := internal.TemporaryWorkspaceDir(t, w) - pyNotebookPath := path.Join(baseDir, "test.py") + baseDir := acc.TemporaryWorkspaceDir(wt, "python-tasks-") + pyNotebookPath := path.Join(baseDir, "test.py") err = w.Workspace.Import(ctx, workspace.Import{ Path: pyNotebookPath, Overwrite: true, @@ -225,11 +226,12 @@ func prepareWorkspaceFiles(t *testing.T) *testFiles { } func prepareDBFSFiles(t *testing.T) *testFiles { - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + var err error + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryDbfsDir(wt, "python-tasks-") - baseDir := internal.TemporaryDbfsDir(t, w) f, err := filer.NewDbfsClient(w, baseDir) require.NoError(t, err) @@ -254,15 +256,83 @@ func prepareDBFSFiles(t *testing.T) *testFiles { } func prepareRepoFiles(t *testing.T) *testFiles { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + _, wt := acc.WorkspaceTest(t) + w := wt.W + + baseDir := acc.TemporaryRepo(wt, "https://github.com/databricks/cli") - repo := internal.TemporaryRepo(t, w) packagePath := "internal/python/testdata" return &testFiles{ w: w, - pyNotebookPath: path.Join(repo, packagePath, "test"), - sparkPythonPath: path.Join(repo, packagePath, "spark.py"), - wheelPath: path.Join(repo, packagePath, "my_test_code-0.0.1-py3-none-any.whl"), + pyNotebookPath: path.Join(baseDir, packagePath, "test"), + sparkPythonPath: path.Join(baseDir, packagePath, "spark.py"), + wheelPath: path.Join(baseDir, packagePath, "my_test_code-0.0.1-py3-none-any.whl"), + } +} + +func GenerateNotebookTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("notebook_%s", strings.ReplaceAll(versions[i], ".", "_")), + NotebookTask: &jobs.NotebookTask{ + NotebookPath: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) } + + return tasks +} + +func GenerateSparkPythonTasks(notebookPath string, versions []string, nodeTypeId string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("spark_%s", strings.ReplaceAll(versions[i], ".", "_")), + SparkPythonTask: &jobs.SparkPythonTask{ + PythonFile: notebookPath, + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + } + tasks = append(tasks, task) + } + + return tasks +} + +func GenerateWheelTasks(wheelPath string, versions []string, nodeTypeId 
string) []jobs.SubmitTask { + tasks := make([]jobs.SubmitTask, 0) + for i := 0; i < len(versions); i++ { + task := jobs.SubmitTask{ + TaskKey: fmt.Sprintf("whl_%s", strings.ReplaceAll(versions[i], ".", "_")), + PythonWheelTask: &jobs.PythonWheelTask{ + PackageName: "my_test_code", + EntryPoint: "run", + }, + NewCluster: &compute.ClusterSpec{ + SparkVersion: versions[i], + NumWorkers: 1, + NodeTypeId: nodeTypeId, + DataSecurityMode: compute.DataSecurityModeUserIsolation, + }, + Libraries: []compute.Library{ + {Whl: wheelPath}, + }, + } + tasks = append(tasks, task) + } + + return tasks } diff --git a/internal/sync_test.go b/internal/sync_test.go index 848c60ce66..9fff6055b3 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -15,6 +15,7 @@ import ( "testing" "time" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" @@ -72,11 +73,11 @@ type syncTest struct { } func setupSyncTest(t *testing.T, args ...string) *syncTest { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + _, wt := acc.WorkspaceTest(t) + w := wt.W - w := databricks.Must(databricks.NewWorkspaceClient()) localRoot := t.TempDir() - remoteRoot := TemporaryWorkspaceDir(t, w) + remoteRoot := acc.TemporaryWorkspaceDir(wt, "sync-") f, err := filer.NewWorkspaceFilesClient(w, remoteRoot) require.NoError(t, err) diff --git a/internal/tags_test.go b/internal/tags_test.go index 3db8527857..ea2c0db052 100644 --- a/internal/tags_test.go +++ b/internal/tags_test.go @@ -1,33 +1,19 @@ package internal import ( - "context" "strings" "testing" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) func testTags(t *testing.T, tags map[string]string) error { - var nodeTypeId string - switch testutil.GetCloud(t) { - case testutil.AWS: - nodeTypeId = "i3.xlarge" - case testutil.Azure: - nodeTypeId = "Standard_DS4_v2" - case testutil.GCP: - nodeTypeId = "n1-standard-4" - } - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - resp, err := w.Jobs.Create(ctx, jobs.CreateJob{ + ctx, wt := acc.WorkspaceTest(t) + resp, err := wt.W.Jobs.Create(ctx, jobs.CreateJob{ Name: testutil.RandomName("test-tags-"), Tasks: []jobs.Task{ { @@ -35,7 +21,7 @@ func testTags(t *testing.T, tags map[string]string) error { NewCluster: &compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, - NodeTypeId: nodeTypeId, + NodeTypeId: testutil.GetCloud(t).NodeTypeID(), }, SparkPythonTask: &jobs.SparkPythonTask{ PythonFile: "/doesnt_exist.py", @@ -47,7 +33,7 @@ func testTags(t *testing.T, tags map[string]string) error { if resp != nil { t.Cleanup(func() { - _ = w.Jobs.DeleteByJobId(ctx, resp.JobId) + _ = wt.W.Jobs.DeleteByJobId(ctx, resp.JobId) // Cannot enable errchecking there, tests fail with: // Error: Received unexpected error: // Job 0 does not exist. 
diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index 849ac817c8..4a8a89645d 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -28,6 +28,19 @@ func (c Cloud) String() string { } } +func (c Cloud) NodeTypeID() string { + switch c { + case AWS: + return "i3.xlarge" + case Azure: + return "Standard_DS4_v2" + case GCP: + return "n1-standard-4" + default: + return "unknown" + } +} + func GetCloud(t TestingT) Cloud { env := GetEnvOrSkipTest(t, "CLOUD_ENV") switch env { diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 7ff9b0bb8c..f16245aa29 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -15,7 +15,6 @@ import ( "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -44,11 +43,10 @@ func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { } func TestAccWorkpaceExportPrintsContents(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) + tmpdir := acc.TemporaryWorkspaceDir(wt, "workspace-export-") f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -65,9 +63,10 @@ func TestAccWorkpaceExportPrintsContents(t *testing.T) { func setupWorkspaceImportExportTest(t *testing.T) (context.Context, filer.Filer, string) { ctx, wt := acc.WorkspaceTest(t) + w := wt.W - tmpdir := TemporaryWorkspaceDir(t, wt.W) - f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) + tmpdir := acc.TemporaryWorkspaceDir(wt, "workspace-import-") + f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) return ctx, f, tmpdir From c95870209722697c4e9b08d1b742e7f7e999e995 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 13 Dec 2024 15:38:58 +0100 Subject: [PATCH 41/63] Move integration tests to `integration` package (#2009) ## Changes Objectives: * A dedicated directory for integration tests * It is not picked up by `go test ./...` * No need for a `TestAcc` test name prefix * More granular packages to improve test selection (future) The tree structure generally mirrors the source code tree structure. Requirements for new files in this directory: * Every package **must** be named after its directory with `_test` appended * Requiring a different package name for integration tests avoids aliasing with the main package. * Every integration test package **must** include a `main_test.go` file. These requirements are enforced by a unit test in the `integration` package. ## Tests Integration tests pass. The total run time regresses by about 10%. A follow-up change that increases the degree of test parallelism will address this. 
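For reference, the per-package `main_test.go` boilerplate introduced throughout this change looks like the sketch below; the package name here is a placeholder, since each real file uses its own directory name with `_test` appended, as described above.

```go
package example_test // placeholder; real packages are named after their directory with `_test` appended

import (
	"testing"

	"github.com/databricks/cli/integration/internal"
)

// TestMain is the entrypoint executed by the test runner.
// See [internal.Main] for prerequisites for running integration tests.
func TestMain(m *testing.M) {
	internal.Main(m)
}
```
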
--- Makefile | 2 +- integration/README.md | 37 ++++++ .../dashboard_assumptions_test.go | 2 +- integration/assumptions/main_test.go | 13 ++ .../bundle/artifacts_test.go | 2 +- .../bundle/basic_test.go | 2 +- .../bundle/bind_resource_test.go | 2 +- .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../basic/databricks_template_schema.json | 0 .../basic/template/databricks.yml.tmpl | 0 .../bundles/basic/template/hello_world.py | 0 .../clusters/databricks_template_schema.json | 0 .../clusters/template/databricks.yml.tmpl | 0 .../bundles/clusters/template/hello_world.py | 0 .../databricks_template_schema.json | 0 .../dashboards/template/dashboard.lvdash.json | 0 .../dashboards/template/databricks.yml.tmpl | 0 .../databricks_template_schema.json | 0 .../template/bar.py | 0 .../template/databricks.yml.tmpl | 0 .../template/foo.py | 0 .../template/resources.yml.tmpl | 0 .../databricks_template_schema.json | 0 .../bundles/job_metadata/template/a/b/bar.py | 0 .../template/a/b/resources.yml.tmpl | 0 .../job_metadata/template/databricks.yml.tmpl | 0 .../bundles/job_metadata/template/foo.py | 0 .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../python_wheel_task/template/setup.py.tmpl | 0 .../template/{{.project_name}}/__init__.py | 0 .../template/{{.project_name}}/__main__.py | 0 .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../template/setup.py.tmpl | 0 .../template/{{.project_name}}/__init__.py | 0 .../template/{{.project_name}}/__main__.py | 0 .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../template/setup.py.tmpl | 0 .../template/{{.project_name}}/__init__.py | 0 .../template/{{.project_name}}/__main__.py | 0 .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../bundles/recreate_pipeline/template/nb.sql | 0 .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../{{.project_name}}/META-INF/MANIFEST.MF | 0 .../template/{{.project_name}}/PrintArgs.java | 0 .../uc_schema/databricks_template_schema.json | 0 .../uc_schema/template/databricks.yml.tmpl | 0 .../bundle/bundles/uc_schema/template/nb.sql | 0 .../uc_schema/template/schema.yml.tmpl | 0 .../volume/databricks_template_schema.json | 0 .../volume/template/databricks.yml.tmpl | 0 .../bundle/bundles/volume/template/nb.sql | 0 .../databricks_template_schema.json | 0 .../template/databricks.yml.tmpl | 0 .../bundle/clusters_test.go | 2 +- .../bundle/dashboards_test.go | 2 +- .../bundle/deploy_test.go | 2 +- .../deploy_then_remove_resources_test.go | 2 +- .../bundle/deploy_to_shared_test.go | 2 +- .../bundle/deployment_state_test.go | 2 +- .../bundle/destroy_test.go | 2 +- .../bundle/empty_bundle_test.go | 2 +- .../bundle/environments_test.go | 2 +- .../bundle/generate_job_test.go | 2 +- .../bundle/generate_pipeline_test.go | 2 +- .../bundle/helpers_test.go | 2 +- {internal => integration/bundle}/init_test.go | 9 +- .../bundle/job_metadata_test.go | 2 +- .../bundle/local_state_staleness_test.go | 2 +- integration/bundle/main_test.go | 13 ++ .../bundle/python_wheel_test.go | 2 +- .../bundle/spark_jar_test.go | 2 +- .../databricks_template_schema.json | 0 .../field-does-not-exist/template/bar.tmpl | 0 .../bundle/validate_test.go | 2 +- .../cmd/alerts}/alerts_test.go | 2 +- integration/cmd/alerts/main_test.go | 13 ++ {internal => integration/cmd/api}/api_test.go | 2 +- integration/cmd/api/main_test.go | 13 ++ .../cmd/auth/describe_test.go | 6 +- integration/cmd/auth/main_test.go | 13 ++ 
.../cmd/clusters}/clusters_test.go | 2 +- integration/cmd/clusters/main_test.go | 13 ++ .../cmd/fs/cat_test.go | 2 +- .../cmd/fs/completion_test.go | 2 +- .../cmd/fs/cp_test.go | 2 +- integration/cmd/fs/helpers_test.go | 46 +++++++ .../cmd/fs/ls_test.go | 2 +- integration/cmd/fs/main_test.go | 13 ++ .../cmd/fs/mkdir_test.go | 2 +- .../cmd/fs/rm_test.go | 2 +- .../cmd/jobs}/jobs_test.go | 4 +- integration/cmd/jobs/main_test.go | 13 ++ .../testdata}/create_job_without_workers.json | 0 integration/cmd/main_test.go | 13 ++ integration/cmd/repos/main_test.go | 13 ++ .../cmd/repos}/repos_test.go | 4 +- integration/cmd/secrets/main_test.go | 13 ++ .../cmd/secrets}/secrets_test.go | 2 +- .../cmd/storage_credentials/main_test.go | 13 ++ .../storage_credentials_test.go | 2 +- integration/cmd/sync/main_test.go | 13 ++ .../cmd/sync}/sync_test.go | 2 +- .../cmd}/unknown_command_test.go | 2 +- integration/cmd/version/main_test.go | 13 ++ .../cmd/version}/version_test.go | 2 +- integration/cmd/workspace/main_test.go | 13 ++ .../testdata/import_dir/a/b/c/file-b | 0 .../cmd/workspace}/testdata/import_dir/file-a | 0 .../testdata/import_dir/jupyterNotebook.ipynb | 0 .../testdata/import_dir/pyNotebook.py | 0 .../testdata/import_dir/rNotebook.r | 0 .../testdata/import_dir/scalaNotebook.scala | 0 .../testdata/import_dir/sqlNotebook.sql | 0 .../cmd/workspace}/workspace_test.go | 2 +- integration/enforce_convention_test.go | 116 ++++++++++++++++++ integration/internal/main.go | 20 +++ .../libs/filer}/filer_test.go | 2 +- .../libs/filer/helpers_test.go | 2 +- integration/libs/filer/main_test.go | 13 ++ .../libs/filer}/testdata/notebooks/py1.ipynb | 0 .../libs/filer}/testdata/notebooks/py2.ipynb | 0 .../libs/filer}/testdata/notebooks/r1.ipynb | 0 .../libs/filer}/testdata/notebooks/r2.ipynb | 0 .../filer}/testdata/notebooks/scala1.ipynb | 0 .../filer}/testdata/notebooks/scala2.ipynb | 0 .../libs/filer}/testdata/notebooks/sql1.ipynb | 0 .../libs/filer}/testdata/notebooks/sql2.ipynb | 0 .../libs/git}/git_clone_test.go | 2 +- .../libs/git}/git_fetch_test.go | 2 +- integration/libs/git/main_test.go | 13 ++ .../libs/locker}/locker_test.go | 2 +- integration/libs/locker/main_test.go | 13 ++ integration/libs/tags/main_test.go | 13 ++ .../libs/tags}/tags_test.go | 2 +- integration/python/main_test.go | 13 ++ .../python/python_tasks_test.go | 2 +- .../my_test_code-0.0.1-py3-none-any.whl | Bin .../python/testdata/spark.py | 0 .../python/testdata/test.py | 0 145 files changed, 538 insertions(+), 52 deletions(-) create mode 100644 integration/README.md rename {internal => integration/assumptions}/dashboard_assumptions_test.go (99%) create mode 100644 integration/assumptions/main_test.go rename {internal => integration}/bundle/artifacts_test.go (99%) rename {internal => integration}/bundle/basic_test.go (98%) rename {internal => integration}/bundle/bind_resource_test.go (99%) rename {internal => integration}/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/basic/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/basic/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/basic/template/hello_world.py (100%) rename {internal => integration}/bundle/bundles/clusters/databricks_template_schema.json (100%) rename {internal => 
integration}/bundle/bundles/clusters/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/clusters/template/hello_world.py (100%) rename {internal => integration}/bundle/bundles/dashboards/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/dashboards/template/dashboard.lvdash.json (100%) rename {internal => integration}/bundle/bundles/dashboards/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/deploy_then_remove_resources/template/bar.py (100%) rename {internal => integration}/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/deploy_then_remove_resources/template/foo.py (100%) rename {internal => integration}/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/job_metadata/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/job_metadata/template/a/b/bar.py (100%) rename {internal => integration}/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/job_metadata/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/job_metadata/template/foo.py (100%) rename {internal => integration}/bundle/bundles/python_wheel_task/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/python_wheel_task/template/setup.py.tmpl (100%) rename {internal => integration}/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py (100%) rename {internal => integration}/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py (100%) rename {internal => integration}/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py (100%) rename {internal => integration}/bundle/bundles/recreate_pipeline/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/recreate_pipeline/template/nb.sql (100%) rename {internal => 
integration}/bundle/bundles/spark_jar_task/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF (100%) rename {internal => integration}/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java (100%) rename {internal => integration}/bundle/bundles/uc_schema/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/uc_schema/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/uc_schema/template/nb.sql (100%) rename {internal => integration}/bundle/bundles/uc_schema/template/schema.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/volume/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/volume/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/bundles/volume/template/nb.sql (100%) rename {internal => integration}/bundle/bundles/with_includes/databricks_template_schema.json (100%) rename {internal => integration}/bundle/bundles/with_includes/template/databricks.yml.tmpl (100%) rename {internal => integration}/bundle/clusters_test.go (98%) rename {internal => integration}/bundle/dashboards_test.go (99%) rename {internal => integration}/bundle/deploy_test.go (99%) rename {internal => integration}/bundle/deploy_then_remove_resources_test.go (98%) rename {internal => integration}/bundle/deploy_to_shared_test.go (97%) rename {internal => integration}/bundle/deployment_state_test.go (99%) rename {internal => integration}/bundle/destroy_test.go (99%) rename {internal => integration}/bundle/empty_bundle_test.go (97%) rename {internal => integration}/bundle/environments_test.go (98%) rename {internal => integration}/bundle/generate_job_test.go (99%) rename {internal => integration}/bundle/generate_pipeline_test.go (99%) rename internal/bundle/helpers.go => integration/bundle/helpers_test.go (99%) rename {internal => integration/bundle}/init_test.go (94%) rename {internal => integration}/bundle/job_metadata_test.go (99%) rename {internal => integration}/bundle/local_state_staleness_test.go (98%) create mode 100644 integration/bundle/main_test.go rename {internal => integration}/bundle/python_wheel_test.go (99%) rename {internal => integration}/bundle/spark_jar_test.go (99%) rename {internal => integration/bundle}/testdata/init/field-does-not-exist/databricks_template_schema.json (100%) rename {internal => integration/bundle}/testdata/init/field-does-not-exist/template/bar.tmpl (100%) rename {internal => integration}/bundle/validate_test.go (98%) rename {internal => integration/cmd/alerts}/alerts_test.go (95%) create mode 100644 integration/cmd/alerts/main_test.go rename {internal => integration/cmd/api}/api_test.go (98%) create mode 100644 integration/cmd/api/main_test.go rename internal/auth_describe_test.go => integration/cmd/auth/describe_test.go (90%) create mode 100644 integration/cmd/auth/main_test.go rename {internal => integration/cmd/clusters}/clusters_test.go (98%) create mode 100644 integration/cmd/clusters/main_test.go rename internal/fs_cat_test.go => integration/cmd/fs/cat_test.go (99%) rename internal/completer_test.go => integration/cmd/fs/completion_test.go (97%) rename internal/fs_cp_test.go => integration/cmd/fs/cp_test.go (99%) create mode 100644 integration/cmd/fs/helpers_test.go rename internal/fs_ls_test.go => 
integration/cmd/fs/ls_test.go (99%) create mode 100644 integration/cmd/fs/main_test.go rename internal/fs_mkdir_test.go => integration/cmd/fs/mkdir_test.go (99%) rename internal/fs_rm_test.go => integration/cmd/fs/rm_test.go (99%) rename {internal => integration/cmd/jobs}/jobs_test.go (88%) create mode 100644 integration/cmd/jobs/main_test.go rename {internal/testjsons => integration/cmd/jobs/testdata}/create_job_without_workers.json (100%) create mode 100644 integration/cmd/main_test.go create mode 100644 integration/cmd/repos/main_test.go rename {internal => integration/cmd/repos}/repos_test.go (98%) create mode 100644 integration/cmd/secrets/main_test.go rename {internal => integration/cmd/secrets}/secrets_test.go (99%) create mode 100644 integration/cmd/storage_credentials/main_test.go rename {internal => integration/cmd/storage_credentials}/storage_credentials_test.go (94%) create mode 100644 integration/cmd/sync/main_test.go rename {internal => integration/cmd/sync}/sync_test.go (99%) rename {internal => integration/cmd}/unknown_command_test.go (96%) create mode 100644 integration/cmd/version/main_test.go rename {internal => integration/cmd/version}/version_test.go (98%) create mode 100644 integration/cmd/workspace/main_test.go rename {internal => integration/cmd/workspace}/testdata/import_dir/a/b/c/file-b (100%) rename {internal => integration/cmd/workspace}/testdata/import_dir/file-a (100%) rename {internal => integration/cmd/workspace}/testdata/import_dir/jupyterNotebook.ipynb (100%) rename {internal => integration/cmd/workspace}/testdata/import_dir/pyNotebook.py (100%) rename {internal => integration/cmd/workspace}/testdata/import_dir/rNotebook.r (100%) rename {internal => integration/cmd/workspace}/testdata/import_dir/scalaNotebook.scala (100%) rename {internal => integration/cmd/workspace}/testdata/import_dir/sqlNotebook.sql (100%) rename {internal => integration/cmd/workspace}/workspace_test.go (99%) create mode 100644 integration/enforce_convention_test.go create mode 100644 integration/internal/main.go rename {internal => integration/libs/filer}/filer_test.go (99%) rename internal/helpers.go => integration/libs/filer/helpers_test.go (99%) create mode 100644 integration/libs/filer/main_test.go rename {internal => integration/libs/filer}/testdata/notebooks/py1.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/py2.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/r1.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/r2.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/scala1.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/scala2.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/sql1.ipynb (100%) rename {internal => integration/libs/filer}/testdata/notebooks/sql2.ipynb (100%) rename {internal => integration/libs/git}/git_clone_test.go (99%) rename {internal => integration/libs/git}/git_fetch_test.go (99%) create mode 100644 integration/libs/git/main_test.go rename {internal => integration/libs/locker}/locker_test.go (99%) create mode 100644 integration/libs/locker/main_test.go create mode 100644 integration/libs/tags/main_test.go rename {internal => integration/libs/tags}/tags_test.go (99%) create mode 100644 integration/python/main_test.go rename {internal => integration}/python/python_tasks_test.go (99%) rename {internal => integration}/python/testdata/my_test_code-0.0.1-py3-none-any.whl (100%) rename {internal => 
integration}/python/testdata/spark.py (100%) rename {internal => integration}/python/testdata/test.py (100%) diff --git a/Makefile b/Makefile index d10332c385..8b33e447c4 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,6 @@ vendor: @go mod vendor integration: - gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./internal/..." -- -run "TestAcc.*" -parallel 4 -timeout=2h + gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h .PHONY: fmt lint lintcheck test testonly coverage build snapshot vendor integration diff --git a/integration/README.md b/integration/README.md new file mode 100644 index 0000000000..1c1d7c6f65 --- /dev/null +++ b/integration/README.md @@ -0,0 +1,37 @@ +# Integration tests + +This directory contains integration tests for the project. + +The tree structure generally mirrors the source code tree structure. + +Requirements for new files in this directory: +* Every package **must** be named after its directory with `_test` appended + * Requiring a different package name for integration tests avoids aliasing with the main package. +* Every integration test package **must** include a `main_test.go` file. + +These requirements are enforced by a unit test in this directory. + +## Running integration tests + +Integration tests require the following environment variables: +* `CLOUD_ENV` - set to the cloud environment to use (e.g. `aws`, `azure`, `gcp`) +* `DATABRICKS_HOST` - set to the Databricks workspace to use +* `DATABRICKS_TOKEN` - set to the Databricks token to use + +Optional environment variables: +* `TEST_DEFAULT_WAREHOUSE_ID` - set to the default warehouse ID to use +* `TEST_METASTORE_ID` - set to the metastore ID to use +* `TEST_INSTANCE_POOL_ID` - set to the instance pool ID to use +* `TEST_BRICKS_CLUSTER_ID` - set to the cluster ID to use + +To run all integration tests, use the following command: + +```bash +go test ./integration/... +``` + +Alternatively: + +```bash +make integration +``` diff --git a/internal/dashboard_assumptions_test.go b/integration/assumptions/dashboard_assumptions_test.go similarity index 99% rename from internal/dashboard_assumptions_test.go rename to integration/assumptions/dashboard_assumptions_test.go index 906efa7819..9696aa9a4e 100644 --- a/internal/dashboard_assumptions_test.go +++ b/integration/assumptions/dashboard_assumptions_test.go @@ -1,4 +1,4 @@ -package internal +package assumptions_test import ( "encoding/base64" diff --git a/integration/assumptions/main_test.go b/integration/assumptions/main_test.go new file mode 100644 index 0000000000..be27613858 --- /dev/null +++ b/integration/assumptions/main_test.go @@ -0,0 +1,13 @@ +package assumptions_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go similarity index 99% rename from internal/bundle/artifacts_test.go rename to integration/bundle/artifacts_test.go index 7c8954a0cc..942c4c975c 100644 --- a/internal/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" diff --git a/internal/bundle/basic_test.go b/integration/bundle/basic_test.go similarity index 98% rename from internal/bundle/basic_test.go rename to integration/bundle/basic_test.go index 13da6da9ba..77befb5d0c 100644 --- a/internal/bundle/basic_test.go +++ b/integration/bundle/basic_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "os" diff --git a/internal/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go similarity index 99% rename from internal/bundle/bind_resource_test.go rename to integration/bundle/bind_resource_test.go index 7f8151ee9a..374c6f346e 100644 --- a/internal/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" diff --git a/internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json b/integration/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json rename to integration/bundle/bundles/artifact_path_with_volume/databricks_template_schema.json diff --git a/internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl b/integration/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl rename to integration/bundle/bundles/artifact_path_with_volume/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/basic/databricks_template_schema.json b/integration/bundle/bundles/basic/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/basic/databricks_template_schema.json rename to integration/bundle/bundles/basic/databricks_template_schema.json diff --git a/internal/bundle/bundles/basic/template/databricks.yml.tmpl b/integration/bundle/bundles/basic/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/basic/template/databricks.yml.tmpl rename to integration/bundle/bundles/basic/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/basic/template/hello_world.py b/integration/bundle/bundles/basic/template/hello_world.py similarity index 100% rename from internal/bundle/bundles/basic/template/hello_world.py rename to integration/bundle/bundles/basic/template/hello_world.py diff --git a/internal/bundle/bundles/clusters/databricks_template_schema.json b/integration/bundle/bundles/clusters/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/clusters/databricks_template_schema.json rename to integration/bundle/bundles/clusters/databricks_template_schema.json diff --git a/internal/bundle/bundles/clusters/template/databricks.yml.tmpl b/integration/bundle/bundles/clusters/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/clusters/template/databricks.yml.tmpl rename to integration/bundle/bundles/clusters/template/databricks.yml.tmpl diff --git 
a/internal/bundle/bundles/clusters/template/hello_world.py b/integration/bundle/bundles/clusters/template/hello_world.py similarity index 100% rename from internal/bundle/bundles/clusters/template/hello_world.py rename to integration/bundle/bundles/clusters/template/hello_world.py diff --git a/internal/bundle/bundles/dashboards/databricks_template_schema.json b/integration/bundle/bundles/dashboards/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/dashboards/databricks_template_schema.json rename to integration/bundle/bundles/dashboards/databricks_template_schema.json diff --git a/internal/bundle/bundles/dashboards/template/dashboard.lvdash.json b/integration/bundle/bundles/dashboards/template/dashboard.lvdash.json similarity index 100% rename from internal/bundle/bundles/dashboards/template/dashboard.lvdash.json rename to integration/bundle/bundles/dashboards/template/dashboard.lvdash.json diff --git a/internal/bundle/bundles/dashboards/template/databricks.yml.tmpl b/integration/bundle/bundles/dashboards/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/dashboards/template/databricks.yml.tmpl rename to integration/bundle/bundles/dashboards/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/integration/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json rename to integration/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py b/integration/bundle/bundles/deploy_then_remove_resources/template/bar.py similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/bar.py rename to integration/bundle/bundles/deploy_then_remove_resources/template/bar.py diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl b/integration/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl rename to integration/bundle/bundles/deploy_then_remove_resources/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/foo.py b/integration/bundle/bundles/deploy_then_remove_resources/template/foo.py similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/foo.py rename to integration/bundle/bundles/deploy_then_remove_resources/template/foo.py diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/integration/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl similarity index 100% rename from internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl rename to integration/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/databricks_template_schema.json b/integration/bundle/bundles/job_metadata/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/job_metadata/databricks_template_schema.json rename to integration/bundle/bundles/job_metadata/databricks_template_schema.json diff --git a/internal/bundle/bundles/job_metadata/template/a/b/bar.py 
b/integration/bundle/bundles/job_metadata/template/a/b/bar.py similarity index 100% rename from internal/bundle/bundles/job_metadata/template/a/b/bar.py rename to integration/bundle/bundles/job_metadata/template/a/b/bar.py diff --git a/internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl b/integration/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl similarity index 100% rename from internal/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl rename to integration/bundle/bundles/job_metadata/template/a/b/resources.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl b/integration/bundle/bundles/job_metadata/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/job_metadata/template/databricks.yml.tmpl rename to integration/bundle/bundles/job_metadata/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/job_metadata/template/foo.py b/integration/bundle/bundles/job_metadata/template/foo.py similarity index 100% rename from internal/bundle/bundles/job_metadata/template/foo.py rename to integration/bundle/bundles/job_metadata/template/foo.py diff --git a/internal/bundle/bundles/python_wheel_task/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task_with_cluster/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl 
b/integration/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task_with_cluster/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json b/integration/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json rename to integration/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl b/integration/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl rename to integration/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl b/integration/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl rename to integration/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py b/integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py similarity index 100% rename from internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py rename to integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py b/integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py similarity index 100% rename from 
internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py rename to integration/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py diff --git a/internal/bundle/bundles/recreate_pipeline/databricks_template_schema.json b/integration/bundle/bundles/recreate_pipeline/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/databricks_template_schema.json rename to integration/bundle/bundles/recreate_pipeline/databricks_template_schema.json diff --git a/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl b/integration/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl rename to integration/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/recreate_pipeline/template/nb.sql b/integration/bundle/bundles/recreate_pipeline/template/nb.sql similarity index 100% rename from internal/bundle/bundles/recreate_pipeline/template/nb.sql rename to integration/bundle/bundles/recreate_pipeline/template/nb.sql diff --git a/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json b/integration/bundle/bundles/spark_jar_task/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/spark_jar_task/databricks_template_schema.json rename to integration/bundle/bundles/spark_jar_task/databricks_template_schema.json diff --git a/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl b/integration/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl rename to integration/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF b/integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF rename to integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java b/integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java similarity index 100% rename from internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java rename to integration/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java diff --git a/internal/bundle/bundles/uc_schema/databricks_template_schema.json b/integration/bundle/bundles/uc_schema/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/uc_schema/databricks_template_schema.json rename to integration/bundle/bundles/uc_schema/databricks_template_schema.json diff --git a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl b/integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl rename to integration/bundle/bundles/uc_schema/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/uc_schema/template/nb.sql b/integration/bundle/bundles/uc_schema/template/nb.sql similarity index 100% rename from internal/bundle/bundles/uc_schema/template/nb.sql 
rename to integration/bundle/bundles/uc_schema/template/nb.sql diff --git a/internal/bundle/bundles/uc_schema/template/schema.yml.tmpl b/integration/bundle/bundles/uc_schema/template/schema.yml.tmpl similarity index 100% rename from internal/bundle/bundles/uc_schema/template/schema.yml.tmpl rename to integration/bundle/bundles/uc_schema/template/schema.yml.tmpl diff --git a/internal/bundle/bundles/volume/databricks_template_schema.json b/integration/bundle/bundles/volume/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/volume/databricks_template_schema.json rename to integration/bundle/bundles/volume/databricks_template_schema.json diff --git a/internal/bundle/bundles/volume/template/databricks.yml.tmpl b/integration/bundle/bundles/volume/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/volume/template/databricks.yml.tmpl rename to integration/bundle/bundles/volume/template/databricks.yml.tmpl diff --git a/internal/bundle/bundles/volume/template/nb.sql b/integration/bundle/bundles/volume/template/nb.sql similarity index 100% rename from internal/bundle/bundles/volume/template/nb.sql rename to integration/bundle/bundles/volume/template/nb.sql diff --git a/internal/bundle/bundles/with_includes/databricks_template_schema.json b/integration/bundle/bundles/with_includes/databricks_template_schema.json similarity index 100% rename from internal/bundle/bundles/with_includes/databricks_template_schema.json rename to integration/bundle/bundles/with_includes/databricks_template_schema.json diff --git a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl b/integration/bundle/bundles/with_includes/template/databricks.yml.tmpl similarity index 100% rename from internal/bundle/bundles/with_includes/template/databricks.yml.tmpl rename to integration/bundle/bundles/with_includes/template/databricks.yml.tmpl diff --git a/internal/bundle/clusters_test.go b/integration/bundle/clusters_test.go similarity index 98% rename from internal/bundle/clusters_test.go rename to integration/bundle/clusters_test.go index d79ded0b7d..d4b16c49b1 100644 --- a/internal/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" diff --git a/internal/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go similarity index 99% rename from internal/bundle/dashboards_test.go rename to integration/bundle/dashboards_test.go index 82ed419b4b..cbca560fb2 100644 --- a/internal/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" diff --git a/internal/bundle/deploy_test.go b/integration/bundle/deploy_test.go similarity index 99% rename from internal/bundle/deploy_test.go rename to integration/bundle/deploy_test.go index 038705c37c..c97149b9bb 100644 --- a/internal/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/integration/bundle/deploy_then_remove_resources_test.go similarity index 98% rename from internal/bundle/deploy_then_remove_resources_test.go rename to integration/bundle/deploy_then_remove_resources_test.go index 0cc7ab92d4..5e0b029ccb 100644 --- a/internal/bundle/deploy_then_remove_resources_test.go +++ b/integration/bundle/deploy_then_remove_resources_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test 
import ( "os" diff --git a/internal/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go similarity index 97% rename from internal/bundle/deploy_to_shared_test.go rename to integration/bundle/deploy_to_shared_test.go index 7b8d570293..d4f1ca9875 100644 --- a/internal/bundle/deploy_to_shared_test.go +++ b/integration/bundle/deploy_to_shared_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" diff --git a/internal/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go similarity index 99% rename from internal/bundle/deployment_state_test.go rename to integration/bundle/deployment_state_test.go index caa72d0a23..98b4ef530c 100644 --- a/internal/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "os" diff --git a/internal/bundle/destroy_test.go b/integration/bundle/destroy_test.go similarity index 99% rename from internal/bundle/destroy_test.go rename to integration/bundle/destroy_test.go index dad9c0874e..bb9beb5083 100644 --- a/internal/bundle/destroy_test.go +++ b/integration/bundle/destroy_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "errors" diff --git a/internal/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go similarity index 97% rename from internal/bundle/empty_bundle_test.go rename to integration/bundle/empty_bundle_test.go index 36883ae001..40dd95355c 100644 --- a/internal/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "fmt" diff --git a/internal/bundle/environments_test.go b/integration/bundle/environments_test.go similarity index 98% rename from internal/bundle/environments_test.go rename to integration/bundle/environments_test.go index 7c5b6db893..bbdc52cb24 100644 --- a/internal/bundle/environments_test.go +++ b/integration/bundle/environments_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "testing" diff --git a/internal/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go similarity index 99% rename from internal/bundle/generate_job_test.go rename to integration/bundle/generate_job_test.go index b254dac8c8..c466348393 100644 --- a/internal/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/internal/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go similarity index 99% rename from internal/bundle/generate_pipeline_test.go rename to integration/bundle/generate_pipeline_test.go index 96d8d28285..4b05cc2e41 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/internal/bundle/helpers.go b/integration/bundle/helpers_test.go similarity index 99% rename from internal/bundle/helpers.go rename to integration/bundle/helpers_test.go index 0512de8732..f1c87c0ef9 100644 --- a/internal/bundle/helpers.go +++ b/integration/bundle/helpers_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "bytes" diff --git a/internal/init_test.go b/integration/bundle/init_test.go similarity index 94% rename from internal/init_test.go rename to integration/bundle/init_test.go index e2d96b3e15..b1d314b531 100644 --- a/internal/init_test.go +++ b/integration/bundle/init_test.go @@ -1,4 +1,4 
@@ -package internal +package bundle_test import ( "context" @@ -67,8 +67,8 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { testcli.RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) // Assert that the README.md file was created - assert.FileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - assertLocalFileContents(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md"), fmt.Sprintf("# %s", projectName)) + contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + assert.Contains(t, contents, fmt.Sprintf("# %s", projectName)) // Validate the stack testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) @@ -163,6 +163,7 @@ func TestAccBundleInitHelpers(t *testing.T) { testcli.RequireSuccessfulRun(t, "bundle", "init", tmpDir, "--output-dir", tmpDir2) // Assert that the helper function was correctly computed. - assertLocalFileContents(t, filepath.Join(tmpDir2, "foo.txt"), test.expected) + contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "foo.txt")) + assert.Contains(t, contents, test.expected) } } diff --git a/internal/bundle/job_metadata_test.go b/integration/bundle/job_metadata_test.go similarity index 99% rename from internal/bundle/job_metadata_test.go rename to integration/bundle/job_metadata_test.go index de11d89a6f..a8d7a927c2 100644 --- a/internal/bundle/job_metadata_test.go +++ b/integration/bundle/job_metadata_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/internal/bundle/local_state_staleness_test.go b/integration/bundle/local_state_staleness_test.go similarity index 98% rename from internal/bundle/local_state_staleness_test.go rename to integration/bundle/local_state_staleness_test.go index 1c025b47a4..29283c0022 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/integration/bundle/local_state_staleness_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/integration/bundle/main_test.go b/integration/bundle/main_test.go new file mode 100644 index 0000000000..1c44d0aaf6 --- /dev/null +++ b/integration/bundle/main_test.go @@ -0,0 +1,13 @@ +package bundle_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go similarity index 99% rename from internal/bundle/python_wheel_test.go rename to integration/bundle/python_wheel_test.go index 1d8647396a..3f53dc95f7 100644 --- a/internal/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "testing" diff --git a/internal/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go similarity index 99% rename from internal/bundle/spark_jar_test.go rename to integration/bundle/spark_jar_test.go index c2402deb60..2dc5cbaa82 100644 --- a/internal/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/internal/testdata/init/field-does-not-exist/databricks_template_schema.json b/integration/bundle/testdata/init/field-does-not-exist/databricks_template_schema.json similarity index 100% rename from internal/testdata/init/field-does-not-exist/databricks_template_schema.json rename to integration/bundle/testdata/init/field-does-not-exist/databricks_template_schema.json diff --git a/internal/testdata/init/field-does-not-exist/template/bar.tmpl b/integration/bundle/testdata/init/field-does-not-exist/template/bar.tmpl similarity index 100% rename from internal/testdata/init/field-does-not-exist/template/bar.tmpl rename to integration/bundle/testdata/init/field-does-not-exist/template/bar.tmpl diff --git a/internal/bundle/validate_test.go b/integration/bundle/validate_test.go similarity index 98% rename from internal/bundle/validate_test.go rename to integration/bundle/validate_test.go index 0942275bc5..ebf525a72d 100644 --- a/internal/bundle/validate_test.go +++ b/integration/bundle/validate_test.go @@ -1,4 +1,4 @@ -package bundle +package bundle_test import ( "context" diff --git a/internal/alerts_test.go b/integration/cmd/alerts/alerts_test.go similarity index 95% rename from internal/alerts_test.go rename to integration/cmd/alerts/alerts_test.go index d7be16d8bb..9113f882c3 100644 --- a/internal/alerts_test.go +++ b/integration/cmd/alerts/alerts_test.go @@ -1,4 +1,4 @@ -package internal +package alerts_test import ( "testing" diff --git a/integration/cmd/alerts/main_test.go b/integration/cmd/alerts/main_test.go new file mode 100644 index 0000000000..6987ade02b --- /dev/null +++ b/integration/cmd/alerts/main_test.go @@ -0,0 +1,13 @@ +package alerts_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/api_test.go b/integration/cmd/api/api_test.go similarity index 98% rename from internal/api_test.go rename to integration/cmd/api/api_test.go index 1c7312ddbd..7242a0bfc6 100644 --- a/internal/api_test.go +++ b/integration/cmd/api/api_test.go @@ -1,4 +1,4 @@ -package internal +package api_test import ( "encoding/json" diff --git a/integration/cmd/api/main_test.go b/integration/cmd/api/main_test.go new file mode 100644 index 0000000000..70d021790d --- /dev/null +++ b/integration/cmd/api/main_test.go @@ -0,0 +1,13 @@ +package api_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. 
+// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/auth_describe_test.go b/integration/cmd/auth/describe_test.go similarity index 90% rename from internal/auth_describe_test.go rename to integration/cmd/auth/describe_test.go index e0b335c592..492c678676 100644 --- a/internal/auth_describe_test.go +++ b/integration/cmd/auth/describe_test.go @@ -1,4 +1,4 @@ -package internal +package auth_test import ( "context" @@ -14,6 +14,8 @@ import ( func TestAuthDescribeSuccess(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") + stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe") outStr := stdout.String() @@ -35,6 +37,8 @@ func TestAuthDescribeSuccess(t *testing.T) { func TestAuthDescribeFailure(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") + stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") outStr := stdout.String() diff --git a/integration/cmd/auth/main_test.go b/integration/cmd/auth/main_test.go new file mode 100644 index 0000000000..97b1d740b4 --- /dev/null +++ b/integration/cmd/auth/main_test.go @@ -0,0 +1,13 @@ +package auth_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/clusters_test.go b/integration/cmd/clusters/clusters_test.go similarity index 98% rename from internal/clusters_test.go rename to integration/cmd/clusters/clusters_test.go index cc1b2aa07c..132b554f12 100644 --- a/internal/clusters_test.go +++ b/integration/cmd/clusters/clusters_test.go @@ -1,4 +1,4 @@ -package internal +package clusters_test import ( "fmt" diff --git a/integration/cmd/clusters/main_test.go b/integration/cmd/clusters/main_test.go new file mode 100644 index 0000000000..ccd5660e70 --- /dev/null +++ b/integration/cmd/clusters/main_test.go @@ -0,0 +1,13 @@ +package clusters_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/fs_cat_test.go b/integration/cmd/fs/cat_test.go similarity index 99% rename from internal/fs_cat_test.go rename to integration/cmd/fs/cat_test.go index e14121feaf..67e21310b1 100644 --- a/internal/fs_cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" diff --git a/internal/completer_test.go b/integration/cmd/fs/completion_test.go similarity index 97% rename from internal/completer_test.go rename to integration/cmd/fs/completion_test.go index 52a1f6c08b..b90c20f789 100644 --- a/internal/completer_test.go +++ b/integration/cmd/fs/completion_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" diff --git a/internal/fs_cp_test.go b/integration/cmd/fs/cp_test.go similarity index 99% rename from internal/fs_cp_test.go rename to integration/cmd/fs/cp_test.go index 16318d5c15..fa507830eb 100644 --- a/internal/fs_cp_test.go +++ b/integration/cmd/fs/cp_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" diff --git a/integration/cmd/fs/helpers_test.go b/integration/cmd/fs/helpers_test.go new file mode 100644 index 0000000000..cf46f48acd --- /dev/null +++ b/integration/cmd/fs/helpers_test.go @@ -0,0 +1,46 @@ +package fs_test + +import ( + "os" + "path" + "path/filepath" + + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" + + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/require" +) + +func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { + t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupDbfsFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + tmpdir := acc.TemporaryDbfsDir(wt) + f, err := filer.NewDbfsClient(wt.W, tmpdir) + require.NoError(t, err) + return f, path.Join("dbfs:/", tmpdir) +} + +func setupUcVolumesFiler(t testutil.TestingT) (filer.Filer, string) { + _, wt := acc.WorkspaceTest(t) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + tmpdir := acc.TemporaryVolume(wt) + f, err := filer.NewFilesClient(wt.W, tmpdir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpdir) +} diff --git a/internal/fs_ls_test.go b/integration/cmd/fs/ls_test.go similarity index 99% rename from internal/fs_ls_test.go rename to integration/cmd/fs/ls_test.go index d56fc8bb00..41d9fc053b 100644 --- a/internal/fs_ls_test.go +++ b/integration/cmd/fs/ls_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" diff --git a/integration/cmd/fs/main_test.go b/integration/cmd/fs/main_test.go new file mode 100644 index 0000000000..b9402f0b29 --- /dev/null +++ b/integration/cmd/fs/main_test.go @@ -0,0 +1,13 @@ +package fs_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/fs_mkdir_test.go b/integration/cmd/fs/mkdir_test.go similarity index 99% rename from internal/fs_mkdir_test.go rename to integration/cmd/fs/mkdir_test.go index 197f5d4b5d..93845a3dd4 100644 --- a/internal/fs_mkdir_test.go +++ b/integration/cmd/fs/mkdir_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" diff --git a/internal/fs_rm_test.go b/integration/cmd/fs/rm_test.go similarity index 99% rename from internal/fs_rm_test.go rename to integration/cmd/fs/rm_test.go index 1af779fd27..c3a5349070 100644 --- a/internal/fs_rm_test.go +++ b/integration/cmd/fs/rm_test.go @@ -1,4 +1,4 @@ -package internal +package fs_test import ( "context" diff --git a/internal/jobs_test.go b/integration/cmd/jobs/jobs_test.go similarity index 88% rename from internal/jobs_test.go rename to integration/cmd/jobs/jobs_test.go index 2781323e6a..b9dc2976c7 100644 --- a/internal/jobs_test.go +++ b/integration/cmd/jobs/jobs_test.go @@ -1,4 +1,4 @@ -package internal +package jobs_test import ( "encoding/json" @@ -18,7 +18,7 @@ func TestAccCreateJob(t *testing.T) { if env != "azure" { t.Skipf("Not running test on cloud %s", env) } - stdout, stderr := testcli.RequireSuccessfulRun(t, "jobs", "create", "--json", "@testjsons/create_job_without_workers.json", "--log-level=debug") + stdout, stderr := testcli.RequireSuccessfulRun(t, "jobs", "create", "--json", "@testdata/create_job_without_workers.json", "--log-level=debug") assert.Empty(t, stderr.String()) var output map[string]int err := json.Unmarshal(stdout.Bytes(), &output) diff --git a/integration/cmd/jobs/main_test.go b/integration/cmd/jobs/main_test.go new file mode 100644 index 0000000000..46369a5264 --- /dev/null +++ b/integration/cmd/jobs/main_test.go @@ -0,0 +1,13 @@ +package jobs_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/testjsons/create_job_without_workers.json b/integration/cmd/jobs/testdata/create_job_without_workers.json similarity index 100% rename from internal/testjsons/create_job_without_workers.json rename to integration/cmd/jobs/testdata/create_job_without_workers.json diff --git a/integration/cmd/main_test.go b/integration/cmd/main_test.go new file mode 100644 index 0000000000..a1a5586b64 --- /dev/null +++ b/integration/cmd/main_test.go @@ -0,0 +1,13 @@ +package cmd_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/integration/cmd/repos/main_test.go b/integration/cmd/repos/main_test.go new file mode 100644 index 0000000000..7eaa174bce --- /dev/null +++ b/integration/cmd/repos/main_test.go @@ -0,0 +1,13 @@ +package repos_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/repos_test.go b/integration/cmd/repos/repos_test.go similarity index 98% rename from internal/repos_test.go rename to integration/cmd/repos/repos_test.go index a89217fe1f..69f7e31148 100644 --- a/internal/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -1,4 +1,4 @@ -package internal +package repos_test import ( "context" @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/require" ) +const repoUrl = "https://github.com/databricks/databricks-empty-ide-project.git" + func synthesizeTemporaryRepoPath(t *testing.T, w *databricks.WorkspaceClient, ctx context.Context) string { me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) diff --git a/integration/cmd/secrets/main_test.go b/integration/cmd/secrets/main_test.go new file mode 100644 index 0000000000..a44d306711 --- /dev/null +++ b/integration/cmd/secrets/main_test.go @@ -0,0 +1,13 @@ +package secrets_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/secrets_test.go b/integration/cmd/secrets/secrets_test.go similarity index 99% rename from internal/secrets_test.go rename to integration/cmd/secrets/secrets_test.go index 0ad9cece71..8f06699d76 100644 --- a/internal/secrets_test.go +++ b/integration/cmd/secrets/secrets_test.go @@ -1,4 +1,4 @@ -package internal +package secrets_test import ( "context" diff --git a/integration/cmd/storage_credentials/main_test.go b/integration/cmd/storage_credentials/main_test.go new file mode 100644 index 0000000000..14d00d9660 --- /dev/null +++ b/integration/cmd/storage_credentials/main_test.go @@ -0,0 +1,13 @@ +package storage_credentials_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/storage_credentials_test.go b/integration/cmd/storage_credentials/storage_credentials_test.go similarity index 94% rename from internal/storage_credentials_test.go rename to integration/cmd/storage_credentials/storage_credentials_test.go index b7fe1aa56a..41f21f224d 100644 --- a/internal/storage_credentials_test.go +++ b/integration/cmd/storage_credentials/storage_credentials_test.go @@ -1,4 +1,4 @@ -package internal +package storage_credentials_test import ( "testing" diff --git a/integration/cmd/sync/main_test.go b/integration/cmd/sync/main_test.go new file mode 100644 index 0000000000..8d9f3ca252 --- /dev/null +++ b/integration/cmd/sync/main_test.go @@ -0,0 +1,13 @@ +package sync_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/sync_test.go b/integration/cmd/sync/sync_test.go similarity index 99% rename from internal/sync_test.go rename to integration/cmd/sync/sync_test.go index 9fff6055b3..82124cc953 100644 --- a/internal/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -1,4 +1,4 @@ -package internal +package sync_test import ( "context" diff --git a/internal/unknown_command_test.go b/integration/cmd/unknown_command_test.go similarity index 96% rename from internal/unknown_command_test.go rename to integration/cmd/unknown_command_test.go index 96cb4fa5e8..e18493940c 100644 --- a/internal/unknown_command_test.go +++ b/integration/cmd/unknown_command_test.go @@ -1,4 +1,4 @@ -package internal +package cmd_test import ( "testing" diff --git a/integration/cmd/version/main_test.go b/integration/cmd/version/main_test.go new file mode 100644 index 0000000000..4aa5e046ac --- /dev/null +++ b/integration/cmd/version/main_test.go @@ -0,0 +1,13 @@ +package version_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/version_test.go b/integration/cmd/version/version_test.go similarity index 98% rename from internal/version_test.go rename to integration/cmd/version/version_test.go index 5c5c3a1005..ab00e7242f 100644 --- a/internal/version_test.go +++ b/integration/cmd/version/version_test.go @@ -1,4 +1,4 @@ -package internal +package version_test import ( "encoding/json" diff --git a/integration/cmd/workspace/main_test.go b/integration/cmd/workspace/main_test.go new file mode 100644 index 0000000000..40d140eac9 --- /dev/null +++ b/integration/cmd/workspace/main_test.go @@ -0,0 +1,13 @@ +package workspace_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/testdata/import_dir/a/b/c/file-b b/integration/cmd/workspace/testdata/import_dir/a/b/c/file-b similarity index 100% rename from internal/testdata/import_dir/a/b/c/file-b rename to integration/cmd/workspace/testdata/import_dir/a/b/c/file-b diff --git a/internal/testdata/import_dir/file-a b/integration/cmd/workspace/testdata/import_dir/file-a similarity index 100% rename from internal/testdata/import_dir/file-a rename to integration/cmd/workspace/testdata/import_dir/file-a diff --git a/internal/testdata/import_dir/jupyterNotebook.ipynb b/integration/cmd/workspace/testdata/import_dir/jupyterNotebook.ipynb similarity index 100% rename from internal/testdata/import_dir/jupyterNotebook.ipynb rename to integration/cmd/workspace/testdata/import_dir/jupyterNotebook.ipynb diff --git a/internal/testdata/import_dir/pyNotebook.py b/integration/cmd/workspace/testdata/import_dir/pyNotebook.py similarity index 100% rename from internal/testdata/import_dir/pyNotebook.py rename to integration/cmd/workspace/testdata/import_dir/pyNotebook.py diff --git a/internal/testdata/import_dir/rNotebook.r b/integration/cmd/workspace/testdata/import_dir/rNotebook.r similarity index 100% rename from internal/testdata/import_dir/rNotebook.r rename to integration/cmd/workspace/testdata/import_dir/rNotebook.r diff --git a/internal/testdata/import_dir/scalaNotebook.scala b/integration/cmd/workspace/testdata/import_dir/scalaNotebook.scala similarity index 100% rename from internal/testdata/import_dir/scalaNotebook.scala rename to integration/cmd/workspace/testdata/import_dir/scalaNotebook.scala diff --git a/internal/testdata/import_dir/sqlNotebook.sql b/integration/cmd/workspace/testdata/import_dir/sqlNotebook.sql similarity index 100% rename from internal/testdata/import_dir/sqlNotebook.sql rename to integration/cmd/workspace/testdata/import_dir/sqlNotebook.sql diff --git a/internal/workspace_test.go b/integration/cmd/workspace/workspace_test.go similarity index 99% rename from internal/workspace_test.go rename to integration/cmd/workspace/workspace_test.go index f16245aa29..28405a09b1 100644 --- a/internal/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -1,4 +1,4 @@ -package internal +package workspace_test import ( "context" diff --git a/integration/enforce_convention_test.go b/integration/enforce_convention_test.go new file mode 100644 index 0000000000..cc822a6a37 --- /dev/null +++ b/integration/enforce_convention_test.go @@ -0,0 +1,116 @@ +package integration + +import ( + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" + "testing" + "text/template" + + "golang.org/x/exp/maps" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type packageInfo struct { + Name string + Files []string +} + +func enumeratePackages(t *testing.T) map[string]packageInfo { + pkgmap := make(map[string]packageInfo) + err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip files. + if !info.IsDir() { + return nil + } + + // Skip the root directory and the "internal" directory. + if path == "." || strings.HasPrefix(path, "internal") { + return nil + } + + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, path, nil, parser.ParseComments) + require.NoError(t, err) + if len(pkgs) == 0 { + return nil + } + + // Expect one package per directory. 
+ require.Len(t, pkgs, 1, "Directory %s contains more than one package", path) + v := maps.Values(pkgs)[0] + + // Record the package. + pkgmap[path] = packageInfo{ + Name: v.Name, + Files: maps.Keys(v.Files), + } + return nil + }) + require.NoError(t, err) + return pkgmap +} + +// TestEnforcePackageNames checks that all integration test package names use the "_test" suffix. +// We enforce this package name to avoid package name aliasing. +func TestEnforcePackageNames(t *testing.T) { + pkgmap := enumeratePackages(t) + for _, pkg := range pkgmap { + assert.True(t, strings.HasSuffix(pkg.Name, "_test"), "Package name %s does not end with _test", pkg.Name) + } +} + +var mainTestTemplate = template.Must(template.New("main_test").Parse( + `package {{.Name}} + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} +`)) + +func TestEnforceMainTest(t *testing.T) { + pkgmap := enumeratePackages(t) + for dir, pkg := range pkgmap { + found := false + for _, file := range pkg.Files { + if filepath.Base(file) == "main_test.go" { + found = true + break + } + } + + // Expect a "main_test.go" file in each package. + assert.True(t, found, "Directory %s does not contain a main_test.go file", dir) + } +} + +func TestWriteMainTest(t *testing.T) { + t.Skip("Uncomment to write main_test.go files") + + pkgmap := enumeratePackages(t) + for dir, pkg := range pkgmap { + // Write a "main_test.go" file to the package. + // This file is required to run the integration tests. + f, err := os.Create(filepath.Join(dir, "main_test.go")) + require.NoError(t, err) + defer f.Close() + err = mainTestTemplate.Execute(f, pkg) + require.NoError(t, err) + } +} diff --git a/integration/internal/main.go b/integration/internal/main.go new file mode 100644 index 0000000000..6d69dcf70e --- /dev/null +++ b/integration/internal/main.go @@ -0,0 +1,20 @@ +package internal + +import ( + "fmt" + "os" + "testing" +) + +// Main is the entry point for integration tests. +// We use this for all integration tests defined in this subtree to ensure +// they are not inadvertently executed when calling `go test ./...`. 
+func Main(m *testing.M) { + value := os.Getenv("CLOUD_ENV") + if value == "" { + fmt.Println("CLOUD_ENV is not set, skipping integration tests") + return + } + + m.Run() +} diff --git a/internal/filer_test.go b/integration/libs/filer/filer_test.go similarity index 99% rename from internal/filer_test.go rename to integration/libs/filer/filer_test.go index 4b7a0b53eb..a4cfa9db3f 100644 --- a/internal/filer_test.go +++ b/integration/libs/filer/filer_test.go @@ -1,4 +1,4 @@ -package internal +package filer_test import ( "bytes" diff --git a/internal/helpers.go b/integration/libs/filer/helpers_test.go similarity index 99% rename from internal/helpers.go rename to integration/libs/filer/helpers_test.go index d57e9b9c2c..49031b3922 100644 --- a/internal/helpers.go +++ b/integration/libs/filer/helpers_test.go @@ -1,4 +1,4 @@ -package internal +package filer_test import ( "errors" diff --git a/integration/libs/filer/main_test.go b/integration/libs/filer/main_test.go new file mode 100644 index 0000000000..ca866d9526 --- /dev/null +++ b/integration/libs/filer/main_test.go @@ -0,0 +1,13 @@ +package filer_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/testdata/notebooks/py1.ipynb b/integration/libs/filer/testdata/notebooks/py1.ipynb similarity index 100% rename from internal/testdata/notebooks/py1.ipynb rename to integration/libs/filer/testdata/notebooks/py1.ipynb diff --git a/internal/testdata/notebooks/py2.ipynb b/integration/libs/filer/testdata/notebooks/py2.ipynb similarity index 100% rename from internal/testdata/notebooks/py2.ipynb rename to integration/libs/filer/testdata/notebooks/py2.ipynb diff --git a/internal/testdata/notebooks/r1.ipynb b/integration/libs/filer/testdata/notebooks/r1.ipynb similarity index 100% rename from internal/testdata/notebooks/r1.ipynb rename to integration/libs/filer/testdata/notebooks/r1.ipynb diff --git a/internal/testdata/notebooks/r2.ipynb b/integration/libs/filer/testdata/notebooks/r2.ipynb similarity index 100% rename from internal/testdata/notebooks/r2.ipynb rename to integration/libs/filer/testdata/notebooks/r2.ipynb diff --git a/internal/testdata/notebooks/scala1.ipynb b/integration/libs/filer/testdata/notebooks/scala1.ipynb similarity index 100% rename from internal/testdata/notebooks/scala1.ipynb rename to integration/libs/filer/testdata/notebooks/scala1.ipynb diff --git a/internal/testdata/notebooks/scala2.ipynb b/integration/libs/filer/testdata/notebooks/scala2.ipynb similarity index 100% rename from internal/testdata/notebooks/scala2.ipynb rename to integration/libs/filer/testdata/notebooks/scala2.ipynb diff --git a/internal/testdata/notebooks/sql1.ipynb b/integration/libs/filer/testdata/notebooks/sql1.ipynb similarity index 100% rename from internal/testdata/notebooks/sql1.ipynb rename to integration/libs/filer/testdata/notebooks/sql1.ipynb diff --git a/internal/testdata/notebooks/sql2.ipynb b/integration/libs/filer/testdata/notebooks/sql2.ipynb similarity index 100% rename from internal/testdata/notebooks/sql2.ipynb rename to integration/libs/filer/testdata/notebooks/sql2.ipynb diff --git a/internal/git_clone_test.go b/integration/libs/git/git_clone_test.go similarity index 99% rename from internal/git_clone_test.go rename to integration/libs/git/git_clone_test.go index d5e6762dde..53cfe6fb98 100644 --- 
a/internal/git_clone_test.go +++ b/integration/libs/git/git_clone_test.go @@ -1,4 +1,4 @@ -package internal +package git_test import ( "context" diff --git a/internal/git_fetch_test.go b/integration/libs/git/git_fetch_test.go similarity index 99% rename from internal/git_fetch_test.go rename to integration/libs/git/git_fetch_test.go index e692970c77..849770c3b8 100644 --- a/internal/git_fetch_test.go +++ b/integration/libs/git/git_fetch_test.go @@ -1,4 +1,4 @@ -package internal +package git_test import ( "os" diff --git a/integration/libs/git/main_test.go b/integration/libs/git/main_test.go new file mode 100644 index 0000000000..5d68e08512 --- /dev/null +++ b/integration/libs/git/main_test.go @@ -0,0 +1,13 @@ +package git_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/locker_test.go b/integration/libs/locker/locker_test.go similarity index 99% rename from internal/locker_test.go rename to integration/libs/locker/locker_test.go index 87f74e9d85..fcc4b346a9 100644 --- a/internal/locker_test.go +++ b/integration/libs/locker/locker_test.go @@ -1,4 +1,4 @@ -package internal +package locker_test import ( "context" diff --git a/integration/libs/locker/main_test.go b/integration/libs/locker/main_test.go new file mode 100644 index 0000000000..33a883768d --- /dev/null +++ b/integration/libs/locker/main_test.go @@ -0,0 +1,13 @@ +package locker_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/integration/libs/tags/main_test.go b/integration/libs/tags/main_test.go new file mode 100644 index 0000000000..4eaf54a20f --- /dev/null +++ b/integration/libs/tags/main_test.go @@ -0,0 +1,13 @@ +package tags_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. +func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/tags_test.go b/integration/libs/tags/tags_test.go similarity index 99% rename from internal/tags_test.go rename to integration/libs/tags/tags_test.go index ea2c0db052..2abec29a52 100644 --- a/internal/tags_test.go +++ b/integration/libs/tags/tags_test.go @@ -1,4 +1,4 @@ -package internal +package tags_test import ( "strings" diff --git a/integration/python/main_test.go b/integration/python/main_test.go new file mode 100644 index 0000000000..b35da21e1d --- /dev/null +++ b/integration/python/main_test.go @@ -0,0 +1,13 @@ +package python_test + +import ( + "testing" + + "github.com/databricks/cli/integration/internal" +) + +// TestMain is the entrypoint executed by the test runner. +// See [internal.Main] for prerequisites for running integration tests. 
+func TestMain(m *testing.M) { + internal.Main(m) +} diff --git a/internal/python/python_tasks_test.go b/integration/python/python_tasks_test.go similarity index 99% rename from internal/python/python_tasks_test.go rename to integration/python/python_tasks_test.go index 36efa4a1b8..18191291ce 100644 --- a/internal/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -1,4 +1,4 @@ -package python +package python_test import ( "bytes" diff --git a/internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl b/integration/python/testdata/my_test_code-0.0.1-py3-none-any.whl similarity index 100% rename from internal/python/testdata/my_test_code-0.0.1-py3-none-any.whl rename to integration/python/testdata/my_test_code-0.0.1-py3-none-any.whl diff --git a/internal/python/testdata/spark.py b/integration/python/testdata/spark.py similarity index 100% rename from internal/python/testdata/spark.py rename to integration/python/testdata/spark.py diff --git a/internal/python/testdata/test.py b/integration/python/testdata/test.py similarity index 100% rename from internal/python/testdata/test.py rename to integration/python/testdata/test.py From 4e95cb226c774311d7b3135b400fd746fa5047c2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 13 Dec 2024 15:47:50 +0100 Subject: [PATCH 42/63] Remove superfluous name prefix for integration tests (#2012) ## Changes Mechanical rename of "TestAcc" -> "Test" in the test name prefix. ## Tests n/a --- .../assumptions/dashboard_assumptions_test.go | 2 +- integration/bundle/artifacts_test.go | 10 +++---- integration/bundle/basic_test.go | 2 +- integration/bundle/bind_resource_test.go | 6 ++-- integration/bundle/clusters_test.go | 2 +- integration/bundle/dashboards_test.go | 2 +- integration/bundle/deploy_test.go | 12 ++++---- .../deploy_then_remove_resources_test.go | 2 +- integration/bundle/deploy_to_shared_test.go | 2 +- integration/bundle/deployment_state_test.go | 2 +- integration/bundle/destroy_test.go | 2 +- integration/bundle/empty_bundle_test.go | 2 +- integration/bundle/environments_test.go | 2 +- integration/bundle/generate_job_test.go | 2 +- integration/bundle/generate_pipeline_test.go | 2 +- integration/bundle/init_test.go | 6 ++-- integration/bundle/job_metadata_test.go | 2 +- .../bundle/local_state_staleness_test.go | 2 +- integration/bundle/python_wheel_test.go | 6 ++-- integration/bundle/spark_jar_test.go | 4 +-- integration/bundle/validate_test.go | 2 +- integration/cmd/alerts/alerts_test.go | 2 +- integration/cmd/api/api_test.go | 4 +-- integration/cmd/clusters/clusters_test.go | 4 +-- integration/cmd/fs/cat_test.go | 10 +++---- integration/cmd/fs/completion_test.go | 2 +- integration/cmd/fs/cp_test.go | 26 ++++++++--------- integration/cmd/fs/ls_test.go | 12 ++++---- integration/cmd/fs/mkdir_test.go | 8 +++--- integration/cmd/fs/rm_test.go | 10 +++---- integration/cmd/jobs/jobs_test.go | 2 +- integration/cmd/repos/repos_test.go | 12 ++++---- integration/cmd/secrets/secrets_test.go | 4 +-- .../storage_credentials_test.go | 2 +- integration/cmd/sync/sync_test.go | 24 ++++++++-------- integration/cmd/workspace/workspace_test.go | 28 +++++++++---------- integration/libs/filer/filer_test.go | 26 ++++++++--------- integration/libs/git/git_clone_test.go | 6 ++-- integration/libs/git/git_fetch_test.go | 10 +++---- integration/libs/locker/locker_test.go | 6 ++-- integration/libs/tags/tags_test.go | 12 ++++---- integration/python/python_tasks_test.go | 6 ++-- 42 files changed, 145 insertions(+), 145 deletions(-) diff --git 
a/integration/assumptions/dashboard_assumptions_test.go b/integration/assumptions/dashboard_assumptions_test.go index 9696aa9a4e..51a8094980 100644 --- a/integration/assumptions/dashboard_assumptions_test.go +++ b/integration/assumptions/dashboard_assumptions_test.go @@ -19,7 +19,7 @@ import ( // Verify that importing a dashboard through the Workspace API retains the identity of the underying resource, // as well as properties exclusively accessible through the dashboards API. -func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { +func TestDashboardAssumptions_WorkspaceImport(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) t.Parallel() diff --git a/integration/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go index 942c4c975c..180c55332b 100644 --- a/integration/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -31,7 +31,7 @@ func touchEmptyFile(t *testing.T, path string) { f.Close() } -func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") @@ -95,7 +95,7 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { ) } -func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") @@ -159,7 +159,7 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) ) } -func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { +func TestUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) if os.Getenv("TEST_METASTORE_ID") == "" { @@ -228,7 +228,7 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { ) } -func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { +func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -265,7 +265,7 @@ func TestAccUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { assert.Equal(t, "", stderr.String()) } -func TestAccUploadArtifactToVolumeNotYetDeployed(t *testing.T) { +func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W diff --git a/integration/bundle/basic_test.go b/integration/bundle/basic_test.go index 77befb5d0c..820c105e97 100644 --- a/integration/bundle/basic_test.go +++ b/integration/bundle/basic_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { +func TestBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { ctx, _ := acc.WorkspaceTest(t) nodeTypeId := testutil.GetCloud(t).NodeTypeID() diff --git a/integration/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go index 374c6f346e..e324cb27b6 100644 --- a/integration/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBindJobToExistingJob(t *testing.T) { +func TestBindJobToExistingJob(t *testing.T) { env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) @@ -81,7 +81,7 @@ func TestAccBindJobToExistingJob(t *testing.T) { require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") } -func TestAccAbortBind(t 
*testing.T) { +func TestAbortBind(t *testing.T) { env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) @@ -130,7 +130,7 @@ func TestAccAbortBind(t *testing.T) { require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test") } -func TestAccGenerateAndBind(t *testing.T) { +func TestGenerateAndBind(t *testing.T) { env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) diff --git a/integration/bundle/clusters_test.go b/integration/bundle/clusters_test.go index d4b16c49b1..e52f5a93cc 100644 --- a/integration/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccDeployBundleWithCluster(t *testing.T) { +func TestDeployBundleWithCluster(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) if testutil.IsAWSCloud(wt) { diff --git a/integration/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go index cbca560fb2..8eb43c5763 100644 --- a/integration/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccDashboards(t *testing.T) { +func TestDashboards(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") diff --git a/integration/bundle/deploy_test.go b/integration/bundle/deploy_test.go index c97149b9bb..12920bcb2a 100644 --- a/integration/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -81,7 +81,7 @@ func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.Worksp return bundleRoot } -func TestAccBundleDeployUcSchema(t *testing.T) { +func TestBundleDeployUcSchema(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -106,7 +106,7 @@ func TestAccBundleDeployUcSchema(t *testing.T) { assert.Equal(t, "SCHEMA_DOES_NOT_EXIST", apiErr.ErrorCode) } -func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { +func TestBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W @@ -128,7 +128,7 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { +func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W @@ -176,7 +176,7 @@ properties such as the 'catalog' or 'storage' are changed: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { +func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W uniqueId := uuid.New().String() @@ -215,7 +215,7 @@ properties such as the 'catalog' or 'storage' are changed: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") } -func TestAccDeployBasicBundleLogs(t *testing.T) { +func TestDeployBasicBundleLogs(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) nodeTypeId := testutil.GetCloud(t).NodeTypeID() @@ -245,7 +245,7 @@ func TestAccDeployBasicBundleLogs(t *testing.T) { assert.Equal(t, "", stdout) } -func TestAccDeployUcVolume(t *testing.T) { +func TestDeployUcVolume(t *testing.T) { ctx, wt := acc.UcWorkspaceTest(t) w := wt.W diff --git a/integration/bundle/deploy_then_remove_resources_test.go b/integration/bundle/deploy_then_remove_resources_test.go index 5e0b029ccb..911826852b 100644 --- a/integration/bundle/deploy_then_remove_resources_test.go +++ b/integration/bundle/deploy_then_remove_resources_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBundleDeployThenRemoveResources(t *testing.T) { +func TestBundleDeployThenRemoveResources(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W diff --git a/integration/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go index d4f1ca9875..f9ca44d78c 100644 --- a/integration/bundle/deploy_to_shared_test.go +++ b/integration/bundle/deploy_to_shared_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccDeployBasicToSharedWorkspacePath(t *testing.T) { +func TestDeployBasicToSharedWorkspacePath(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) nodeTypeId := testutil.GetCloud(t).NodeTypeID() diff --git a/integration/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go index 98b4ef530c..8e7228f8a8 100644 --- a/integration/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { +func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) diff --git a/integration/bundle/destroy_test.go b/integration/bundle/destroy_test.go index bb9beb5083..dc18ab9a53 100644 --- a/integration/bundle/destroy_test.go +++ b/integration/bundle/destroy_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBundleDestroy(t *testing.T) { +func TestBundleDestroy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W diff --git a/integration/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go index 40dd95355c..52ea012202 100644 --- a/integration/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccEmptyBundleDeploy(t *testing.T) { +func TestEmptyBundleDeploy(t *testing.T) { ctx, _ := acc.WorkspaceTest(t) // create empty bundle diff --git a/integration/bundle/environments_test.go b/integration/bundle/environments_test.go index bbdc52cb24..28f64bf414 100644 --- a/integration/bundle/environments_test.go +++ b/integration/bundle/environments_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { +func TestPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { t.Skip("Skipping test until serveless is enabled") ctx, _ := acc.WorkspaceTest(t) diff --git a/integration/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go index c466348393..e2dd9617e4 100644 --- a/integration/bundle/generate_job_test.go +++ 
b/integration/bundle/generate_job_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { +func TestGenerateFromExistingJobAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) gt := &generateJobTest{T: wt, w: wt.W} diff --git a/integration/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go index 4b05cc2e41..a76db2e35e 100644 --- a/integration/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { +func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) gt := &generatePipelineTest{T: wt, w: wt.W} diff --git a/integration/bundle/init_test.go b/integration/bundle/init_test.go index b1d314b531..d6efcc80fd 100644 --- a/integration/bundle/init_test.go +++ b/integration/bundle/init_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { +func TestBundleInitErrorOnUnknownFields(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() @@ -39,7 +39,7 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { // 2. While rare and to be avoided if possible, the CLI reserves the right to // make changes that can break the MLOps Stacks DAB. In which case we should // skip this test until the MLOps Stacks DAB is updated to work again. -func TestAccBundleInitOnMlopsStacks(t *testing.T) { +func TestBundleInitOnMlopsStacks(t *testing.T) { env := testutil.GetCloud(t).String() tmpDir1 := t.TempDir() @@ -101,7 +101,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { assert.Contains(t, job.Settings.Name, fmt.Sprintf("dev-%s-batch-inference-job", projectName)) } -func TestAccBundleInitHelpers(t *testing.T) { +func TestBundleInitHelpers(t *testing.T) { env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) diff --git a/integration/bundle/job_metadata_test.go b/integration/bundle/job_metadata_test.go index a8d7a927c2..aa22ec34f0 100644 --- a/integration/bundle/job_metadata_test.go +++ b/integration/bundle/job_metadata_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccJobsMetadataFile(t *testing.T) { +func TestJobsMetadataFile(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W diff --git a/integration/bundle/local_state_staleness_test.go b/integration/bundle/local_state_staleness_test.go index 29283c0022..3c80c24135 100644 --- a/integration/bundle/local_state_staleness_test.go +++ b/integration/bundle/local_state_staleness_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccLocalStateStaleness(t *testing.T) { +func TestLocalStateStaleness(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W diff --git a/integration/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go index 3f53dc95f7..c967eaf366 100644 --- a/integration/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -45,15 +45,15 @@ func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonW require.Contains(t, out, "['my_test_code', 'param1', 'param2']") } -func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { +func TestPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { runPythonWheelTest(t, "python_wheel_task", "13.3.x-snapshot-scala2.12", false) } -func 
TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { +func TestPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { runPythonWheelTest(t, "python_wheel_task", "12.2.x-scala2.12", true) } -func TestAccPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { +func TestPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { _, wt := acc.WorkspaceTest(t) if testutil.IsAWSCloud(wt) { diff --git a/integration/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go index 2dc5cbaa82..5354bd9537 100644 --- a/integration/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -51,7 +51,7 @@ func runSparkJarTestFromWorkspace(t *testing.T, sparkVersion string) { runSparkJarTestCommon(t, ctx, sparkVersion, "n/a") } -func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { +func TestSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") testutil.RequireJDK(t, context.Background(), "1.8.0") @@ -75,7 +75,7 @@ func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { } } -func TestAccSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { +func TestSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") testutil.RequireJDK(t, context.Background(), "1.8.0") diff --git a/integration/bundle/validate_test.go b/integration/bundle/validate_test.go index ebf525a72d..01b99c579a 100644 --- a/integration/bundle/validate_test.go +++ b/integration/bundle/validate_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccBundleValidate(t *testing.T) { +func TestBundleValidate(t *testing.T) { testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") tmpDir := t.TempDir() diff --git a/integration/cmd/alerts/alerts_test.go b/integration/cmd/alerts/alerts_test.go index 9113f882c3..6e940e5a84 100644 --- a/integration/cmd/alerts/alerts_test.go +++ b/integration/cmd/alerts/alerts_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) { +func TestAlertsCreateErrWhenNoArguments(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := testcli.RequireErrorRun(t, "alerts-legacy", "create") diff --git a/integration/cmd/api/api_test.go b/integration/cmd/api/api_test.go index 7242a0bfc6..d2aac37adc 100644 --- a/integration/cmd/api/api_test.go +++ b/integration/cmd/api/api_test.go @@ -15,7 +15,7 @@ import ( "github.com/databricks/cli/internal/testutil" ) -func TestAccApiGet(t *testing.T) { +func TestApiGet(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, _ := testcli.RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") @@ -30,7 +30,7 @@ func TestAccApiGet(t *testing.T) { assert.NotNil(t, out["id"]) } -func TestAccApiPost(t *testing.T) { +func TestApiPost(t *testing.T) { env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) if env == "gcp" { diff --git a/integration/cmd/clusters/clusters_test.go b/integration/cmd/clusters/clusters_test.go index 132b554f12..6357c738f9 100644 --- a/integration/cmd/clusters/clusters_test.go +++ b/integration/cmd/clusters/clusters_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccClustersList(t *testing.T) { +func TestClustersList(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "list") @@ -29,7 +29,7 @@ func TestAccClustersList(t *testing.T) { assert.NotEmpty(t, clusterId) } -func TestAccClustersGet(t 
*testing.T) { +func TestClustersGet(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) clusterId := findValidClusterID(t) diff --git a/integration/cmd/fs/cat_test.go b/integration/cmd/fs/cat_test.go index 67e21310b1..4250f9c50f 100644 --- a/integration/cmd/fs/cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccFsCat(t *testing.T) { +func TestFsCat(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -35,7 +35,7 @@ func TestAccFsCat(t *testing.T) { } } -func TestAccFsCatOnADir(t *testing.T) { +func TestFsCatOnADir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -54,7 +54,7 @@ func TestAccFsCatOnADir(t *testing.T) { } } -func TestAccFsCatOnNonExistentFile(t *testing.T) { +func TestFsCatOnNonExistentFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -71,14 +71,14 @@ func TestAccFsCatOnNonExistentFile(t *testing.T) { } } -func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { +func TestFsCatForDbfsInvalidScheme(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := testcli.RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } -func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { +func TestFsCatDoesNotSupportOutputModeJson(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W diff --git a/integration/cmd/fs/completion_test.go b/integration/cmd/fs/completion_test.go index b90c20f789..8072a33cab 100644 --- a/integration/cmd/fs/completion_test.go +++ b/integration/cmd/fs/completion_test.go @@ -18,7 +18,7 @@ func setupCompletionFile(t *testing.T, f filer.Filer) { require.NoError(t, err) } -func TestAccFsCompletion(t *testing.T) { +func TestFsCompletion(t *testing.T) { f, tmpDir := setupDbfsFiler(t) setupCompletionFile(t, f) diff --git a/integration/cmd/fs/cp_test.go b/integration/cmd/fs/cp_test.go index fa507830eb..755d2d304a 100644 --- a/integration/cmd/fs/cp_test.go +++ b/integration/cmd/fs/cp_test.go @@ -122,7 +122,7 @@ func copyTests() []cpTest { } } -func TestAccFsCpDir(t *testing.T) { +func TestFsCpDir(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -142,7 +142,7 @@ func TestAccFsCpDir(t *testing.T) { } } -func TestAccFsCpFileToFile(t *testing.T) { +func TestFsCpFileToFile(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -162,7 +162,7 @@ func TestAccFsCpFileToFile(t *testing.T) { } } -func TestAccFsCpFileToDir(t *testing.T) { +func TestFsCpFileToDir(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -182,7 +182,7 @@ func TestAccFsCpFileToDir(t *testing.T) { } } -func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { +func TestFsCpFileToDirForWindowsPaths(t *testing.T) { if runtime.GOOS != "windows" { t.Skip("Skipping test on non-windows OS") } @@ -198,7 +198,7 @@ func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { assertTargetFile(t, ctx, targetFiler, "foo.txt") } -func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { +func TestFsCpDirToDirFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -223,7 +223,7 @@ func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { } } -func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { +func TestFsCpFileToDirFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -246,7 +246,7 @@ func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { } } -func 
TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { +func TestFsCpFileToFileFileNotOverwritten(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -269,7 +269,7 @@ func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { } } -func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { +func TestFsCpDirToDirWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -292,7 +292,7 @@ func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { } } -func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { +func TestFsCpFileToFileWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -315,7 +315,7 @@ func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { } } -func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { +func TestFsCpFileToDirWithOverwriteFlag(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { @@ -338,7 +338,7 @@ func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { } } -func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { +func TestFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -356,14 +356,14 @@ func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { } } -func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { +func TestFsCpErrorsOnInvalidScheme(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) _, _, err := testcli.RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) } -func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { +func TestFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { t.Parallel() for _, testCase := range copyTests() { diff --git a/integration/cmd/fs/ls_test.go b/integration/cmd/fs/ls_test.go index 41d9fc053b..d985d57c93 100644 --- a/integration/cmd/fs/ls_test.go +++ b/integration/cmd/fs/ls_test.go @@ -40,7 +40,7 @@ func setupLsFiles(t *testing.T, f filer.Filer) { require.NoError(t, err) } -func TestAccFsLs(t *testing.T) { +func TestFsLs(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -73,7 +73,7 @@ func TestAccFsLs(t *testing.T) { } } -func TestAccFsLsWithAbsolutePaths(t *testing.T) { +func TestFsLsWithAbsolutePaths(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -106,7 +106,7 @@ func TestAccFsLsWithAbsolutePaths(t *testing.T) { } } -func TestAccFsLsOnFile(t *testing.T) { +func TestFsLsOnFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -124,7 +124,7 @@ func TestAccFsLsOnFile(t *testing.T) { } } -func TestAccFsLsOnEmptyDir(t *testing.T) { +func TestFsLsOnEmptyDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -147,7 +147,7 @@ func TestAccFsLsOnEmptyDir(t *testing.T) { } } -func TestAccFsLsForNonexistingDir(t *testing.T) { +func TestFsLsForNonexistingDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -165,7 +165,7 @@ func TestAccFsLsForNonexistingDir(t *testing.T) { } } -func TestAccFsLsWithoutScheme(t *testing.T) { +func TestFsLsWithoutScheme(t *testing.T) { t.Parallel() t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) diff --git a/integration/cmd/fs/mkdir_test.go b/integration/cmd/fs/mkdir_test.go index 93845a3dd4..de53e804ca 100644 --- a/integration/cmd/fs/mkdir_test.go +++ b/integration/cmd/fs/mkdir_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccFsMkdir(t *testing.T) { +func TestFsMkdir(t *testing.T) { t.Parallel() for _, 
testCase := range fsTests { @@ -38,7 +38,7 @@ func TestAccFsMkdir(t *testing.T) { } } -func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { +func TestFsMkdirCreatesIntermediateDirectories(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -75,7 +75,7 @@ func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { } } -func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { +func TestFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -98,7 +98,7 @@ func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { } } -func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { +func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { t.Parallel() t.Run("dbfs", func(t *testing.T) { diff --git a/integration/cmd/fs/rm_test.go b/integration/cmd/fs/rm_test.go index c3a5349070..a2832aba1d 100644 --- a/integration/cmd/fs/rm_test.go +++ b/integration/cmd/fs/rm_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccFsRmFile(t *testing.T) { +func TestFsRmFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -43,7 +43,7 @@ func TestAccFsRmFile(t *testing.T) { } } -func TestAccFsRmEmptyDir(t *testing.T) { +func TestFsRmEmptyDir(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -73,7 +73,7 @@ func TestAccFsRmEmptyDir(t *testing.T) { } } -func TestAccFsRmNonEmptyDirectory(t *testing.T) { +func TestFsRmNonEmptyDirectory(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -103,7 +103,7 @@ func TestAccFsRmNonEmptyDirectory(t *testing.T) { } } -func TestAccFsRmForNonExistentFile(t *testing.T) { +func TestFsRmForNonExistentFile(t *testing.T) { t.Parallel() for _, testCase := range fsTests { @@ -121,7 +121,7 @@ func TestAccFsRmForNonExistentFile(t *testing.T) { } } -func TestAccFsRmDirRecursively(t *testing.T) { +func TestFsRmDirRecursively(t *testing.T) { t.Parallel() for _, testCase := range fsTests { diff --git a/integration/cmd/jobs/jobs_test.go b/integration/cmd/jobs/jobs_test.go index b9dc2976c7..4513a15973 100644 --- a/integration/cmd/jobs/jobs_test.go +++ b/integration/cmd/jobs/jobs_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccCreateJob(t *testing.T) { +func TestCreateJob(t *testing.T) { acc.WorkspaceTest(t) env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") if env != "azure" { diff --git a/integration/cmd/repos/repos_test.go b/integration/cmd/repos/repos_test.go index 69f7e31148..365d48d6d1 100644 --- a/integration/cmd/repos/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -47,7 +47,7 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex return repoInfo.Id, repoPath } -func TestAccReposCreateWithProvider(t *testing.T) { +func TestReposCreateWithProvider(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -64,7 +64,7 @@ func TestAccReposCreateWithProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestAccReposCreateWithoutProvider(t *testing.T) { +func TestReposCreateWithoutProvider(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -81,7 +81,7 @@ func TestAccReposCreateWithoutProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestAccReposGet(t *testing.T) { +func TestReposGet(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -110,7 +110,7 @@ func 
TestAccReposGet(t *testing.T) { assert.ErrorContains(t, err, "is not a repo") } -func TestAccReposUpdate(t *testing.T) { +func TestReposUpdate(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -131,7 +131,7 @@ func TestAccReposUpdate(t *testing.T) { assert.Equal(t, byIdOutput.String(), byPathOutput.String()) } -func TestAccReposDeleteByID(t *testing.T) { +func TestReposDeleteByID(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -150,7 +150,7 @@ func TestAccReposDeleteByID(t *testing.T) { assert.True(t, apierr.IsMissing(err), err) } -func TestAccReposDeleteByPath(t *testing.T) { +func TestReposDeleteByPath(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() diff --git a/integration/cmd/secrets/secrets_test.go b/integration/cmd/secrets/secrets_test.go index 8f06699d76..83312f5028 100644 --- a/integration/cmd/secrets/secrets_test.go +++ b/integration/cmd/secrets/secrets_test.go @@ -63,7 +63,7 @@ func assertSecretBytesValue(t *acc.WorkspaceT, scope, key string, expected []byt assert.Equal(t, expected, decoded) } -func TestAccSecretsPutSecretStringValue(tt *testing.T) { +func TestSecretsPutSecretStringValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" @@ -77,7 +77,7 @@ func TestAccSecretsPutSecretStringValue(tt *testing.T) { assertSecretBytesValue(t, scope, key, []byte(value)) } -func TestAccSecretsPutSecretBytesValue(tt *testing.T) { +func TestSecretsPutSecretBytesValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" diff --git a/integration/cmd/storage_credentials/storage_credentials_test.go b/integration/cmd/storage_credentials/storage_credentials_test.go index 41f21f224d..834086794b 100644 --- a/integration/cmd/storage_credentials/storage_credentials_test.go +++ b/integration/cmd/storage_credentials/storage_credentials_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestAccStorageCredentialsListRendersResponse(t *testing.T) { +func TestStorageCredentialsListRendersResponse(t *testing.T) { _, _ = acc.WorkspaceTest(t) // Check if metastore is assigned for the workspace, otherwise test will fail diff --git a/integration/cmd/sync/sync_test.go b/integration/cmd/sync/sync_test.go index 82124cc953..c483c66e40 100644 --- a/integration/cmd/sync/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -230,7 +230,7 @@ func (a *syncTest) snapshotContains(files []string) { assert.Equal(a.t, len(files), len(s.LastModifiedTimes)) } -func TestAccSyncFullFileSync(t *testing.T) { +func TestSyncFullFileSync(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--full", "--watch") @@ -262,7 +262,7 @@ func TestAccSyncFullFileSync(t *testing.T) { assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore")) } -func TestAccSyncIncrementalFileSync(t *testing.T) { +func TestSyncIncrementalFileSync(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -296,7 +296,7 @@ func TestAccSyncIncrementalFileSync(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncNestedFolderSync(t *testing.T) { +func TestSyncNestedFolderSync(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -324,7 +324,7 @@ func TestAccSyncNestedFolderSync(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } 
-func TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { +func TestSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -357,7 +357,7 @@ func TestAccSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { assertSync.remoteExists(ctx, "dir1") } -func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { +func TestSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -393,7 +393,7 @@ func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { // // In the above scenario sync should delete the empty folder and add foo to the remote // file system -func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { +func TestSyncIncrementalFileOverwritesFolder(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -423,7 +423,7 @@ func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo")) } -func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { +func TestSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -454,7 +454,7 @@ func TestAccSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore")) } -func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { +func TestSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -478,7 +478,7 @@ func TestAccSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo.py")) } -func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { +func TestSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { ctx := context.Background() assertSync := setupSyncTest(t, "--watch") @@ -500,7 +500,7 @@ func TestAccSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore")) } -func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { +func TestSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) wsc := databricks.Must(databricks.NewWorkspaceClient()) @@ -520,7 +520,7 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { assert.ErrorContains(t, err, " does not exist; please create it first") } -func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { +func TestSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) wsc := databricks.Must(databricks.NewWorkspaceClient()) @@ -542,7 +542,7 @@ func TestAccSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { require.Equal(t, workspace.ObjectTypeDirectory, info.ObjectType) } -func TestAccSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { +func TestSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) wsc := databricks.Must(databricks.NewWorkspaceClient()) diff --git a/integration/cmd/workspace/workspace_test.go b/integration/cmd/workspace/workspace_test.go index 28405a09b1..90c6c66055 100644 --- a/integration/cmd/workspace/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" ) 
-func TestAccWorkspaceList(t *testing.T) { +func TestWorkspaceList(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "list", "/") @@ -42,7 +42,7 @@ func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } -func TestAccWorkpaceExportPrintsContents(t *testing.T) { +func TestWorkpaceExportPrintsContents(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W @@ -93,7 +93,7 @@ func assertWorkspaceFileType(t *testing.T, ctx context.Context, f filer.Filer, p assert.Equal(t, fileType, info.Sys().(workspace.ObjectInfo).ObjectType) } -func TestAccExportDir(t *testing.T) { +func TestExportDir(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -138,7 +138,7 @@ func TestAccExportDir(t *testing.T) { assertLocalFileContents(t, filepath.Join(targetDir, "a/b/c/file-b"), "def") } -func TestAccExportDirDoesNotOverwrite(t *testing.T) { +func TestExportDirDoesNotOverwrite(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -159,7 +159,7 @@ func TestAccExportDirDoesNotOverwrite(t *testing.T) { assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "local content") } -func TestAccExportDirWithOverwriteFlag(t *testing.T) { +func TestExportDirWithOverwriteFlag(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) targetDir := t.TempDir() @@ -180,7 +180,7 @@ func TestAccExportDirWithOverwriteFlag(t *testing.T) { assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "content from workspace") } -func TestAccImportDir(t *testing.T) { +func TestImportDir(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") @@ -209,7 +209,7 @@ func TestAccImportDir(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "jupyterNotebook", "# Databricks notebook source\nprint(\"jupyter\")") } -func TestAccImportDirDoesNotOverwrite(t *testing.T) { +func TestImportDirDoesNotOverwrite(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) var err error @@ -237,7 +237,7 @@ func TestAccImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") } -func TestAccImportDirWithOverwriteFlag(t *testing.T) { +func TestImportDirWithOverwriteFlag(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) var err error @@ -265,7 +265,7 @@ func TestAccImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") } -func TestAccExport(t *testing.T) { +func TestExport(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) var err error @@ -294,7 +294,7 @@ func TestAccExport(t *testing.T) { assert.Contains(t, string(b), `"metadata":`, "jupyter notebooks contain the metadata field") } -func TestAccExportWithFileFlag(t *testing.T) { +func TestExportWithFileFlag(t *testing.T) { ctx, f, sourceDir := setupWorkspaceImportExportTest(t) localTmpDir := t.TempDir() @@ -328,7 +328,7 @@ func TestAccExportWithFileFlag(t *testing.T) { assertLocalFileContents(t, filepath.Join(localTmpDir, "jupyterNb.ipynb"), `"metadata":`) } -func 
TestAccImportFileUsingContentFormatSource(t *testing.T) { +func TestImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `print(1)`. Uploaded as a notebook by default @@ -345,7 +345,7 @@ func TestAccImportFileUsingContentFormatSource(t *testing.T) { assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNb", workspace.ObjectTypeNotebook) } -func TestAccImportFileUsingContentFormatAuto(t *testing.T) { +func TestImportFileUsingContentFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. @@ -367,7 +367,7 @@ func TestAccImportFileUsingContentFormatAuto(t *testing.T) { assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } -func TestAccImportFileFormatSource(t *testing.T) { +func TestImportFileFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") @@ -381,7 +381,7 @@ func TestAccImportFileFormatSource(t *testing.T) { assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") } -func TestAccImportFileFormatAuto(t *testing.T) { +func TestImportFileFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Upload as file if path has no extension diff --git a/integration/libs/filer/filer_test.go b/integration/libs/filer/filer_test.go index a4cfa9db3f..766f9817bd 100644 --- a/integration/libs/filer/filer_test.go +++ b/integration/libs/filer/filer_test.go @@ -117,7 +117,7 @@ func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.F assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}) } -func TestAccFilerRecursiveDelete(t *testing.T) { +func TestFilerRecursiveDelete(t *testing.T) { t.Parallel() for _, testCase := range []struct { @@ -228,7 +228,7 @@ func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) assert.True(t, errors.Is(err, fs.ErrInvalid)) } -func TestAccFilerReadWrite(t *testing.T) { +func TestFilerReadWrite(t *testing.T) { t.Parallel() for _, testCase := range []struct { @@ -337,7 +337,7 @@ func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[0].IsDir()) } -func TestAccFilerReadDir(t *testing.T) { +func TestFilerReadDir(t *testing.T) { t.Parallel() for _, testCase := range []struct { @@ -362,7 +362,7 @@ func TestAccFilerReadDir(t *testing.T) { } } -func TestAccFilerWorkspaceNotebook(t *testing.T) { +func TestFilerWorkspaceNotebook(t *testing.T) { t.Parallel() ctx := context.Background() @@ -471,7 +471,7 @@ func TestAccFilerWorkspaceNotebook(t *testing.T) { } } -func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { t.Parallel() files := []struct { @@ -575,7 +575,7 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { return wf } -func 
TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsRead(t *testing.T) { t.Parallel() ctx := context.Background() @@ -612,7 +612,7 @@ func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsDelete(t *testing.T) { t.Parallel() ctx := context.Background() @@ -661,7 +661,7 @@ func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { filerTest{t, wf}.assertNotExists(ctx, "dir") } -func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { +func TestFilerWorkspaceFilesExtensionsStat(t *testing.T) { t.Parallel() ctx := context.Background() @@ -708,7 +708,7 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { } } -func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { +func TestWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { t.Parallel() ctx := context.Background() @@ -723,7 +723,7 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() @@ -742,7 +742,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() @@ -761,7 +761,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { +func TestWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { t.Parallel() ctx := context.Background() @@ -780,7 +780,7 @@ func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) assert.NoError(t, err) } -func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { +func TestWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { t.Parallel() // Case 1: Writing source notebooks. 
diff --git a/integration/libs/git/git_clone_test.go b/integration/libs/git/git_clone_test.go index 53cfe6fb98..2d960cab9f 100644 --- a/integration/libs/git/git_clone_test.go +++ b/integration/libs/git/git_clone_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestAccGitClone(t *testing.T) { +func TestGitClone(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() @@ -33,7 +33,7 @@ func TestAccGitClone(t *testing.T) { assert.Contains(t, string(b), "ide") } -func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { +func TestGitCloneOnNonDefaultBranch(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() @@ -54,7 +54,7 @@ func TestAccGitCloneOnNonDefaultBranch(t *testing.T) { assert.Contains(t, string(b), "dais-2022") } -func TestAccGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { +func TestGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) tmpDir := t.TempDir() diff --git a/integration/libs/git/git_fetch_test.go b/integration/libs/git/git_fetch_test.go index 849770c3b8..0bb684a29b 100644 --- a/integration/libs/git/git_fetch_test.go +++ b/integration/libs/git/git_fetch_test.go @@ -39,7 +39,7 @@ func assertSparseGitInfo(t *testing.T, expectedRoot string, info git.RepositoryI assert.Equal(t, expectedRoot, info.WorktreeRoot) } -func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { +func TestFetchRepositoryInfoAPI_FromRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) me, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) @@ -66,7 +66,7 @@ func TestAccFetchRepositoryInfoAPI_FromRepo(t *testing.T) { } } -func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { +func TestFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) me, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) @@ -112,7 +112,7 @@ func TestAccFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { } } -func TestAccFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) repo := cloneRepoLocally(t, examplesRepoUrl) @@ -139,7 +139,7 @@ func cloneRepoLocally(t *testing.T, repoUrl string) string { return localRoot } -func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) tempDir := t.TempDir() @@ -161,7 +161,7 @@ func TestAccFetchRepositoryInfoDotGit_FromNonGitRepo(t *testing.T) { } } -func TestAccFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { +func TestFetchRepositoryInfoDotGit_FromBrokenGitRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) tempDir := t.TempDir() diff --git a/integration/libs/locker/locker_test.go b/integration/libs/locker/locker_test.go index fcc4b346a9..20a4f0ae50 100644 --- a/integration/libs/locker/locker_test.go +++ b/integration/libs/locker/locker_test.go @@ -45,7 +45,7 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr return remoteProjectRoot } -func TestAccLock(t *testing.T) { +func TestLock(t *testing.T) { t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.TODO() wsc, err := databricks.NewWorkspaceClient() @@ -181,7 +181,7 @@ func setupLockerTest(t *testing.T) (context.Context, *lockpkg.Locker, filer.File return ctx, locker, f } -func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { +func 
TestLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { ctx, locker, f := setupLockerTest(t) var err error @@ -202,7 +202,7 @@ func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccLockUnlockWithAllowsLockFileNotExist(t *testing.T) { +func TestLockUnlockWithAllowsLockFileNotExist(t *testing.T) { ctx, locker, f := setupLockerTest(t) var err error diff --git a/integration/libs/tags/tags_test.go b/integration/libs/tags/tags_test.go index 2abec29a52..b7c47b5f5a 100644 --- a/integration/libs/tags/tags_test.go +++ b/integration/libs/tags/tags_test.go @@ -80,7 +80,7 @@ func runTagTestCases(t *testing.T, cases []tagTestCase) { } } -func TestAccTagKeyAWS(t *testing.T) { +func TestTagKeyAWS(t *testing.T) { testutil.Require(t, testutil.AWS) t.Parallel() @@ -112,7 +112,7 @@ func TestAccTagKeyAWS(t *testing.T) { }) } -func TestAccTagValueAWS(t *testing.T) { +func TestTagValueAWS(t *testing.T) { testutil.Require(t, testutil.AWS) t.Parallel() @@ -138,7 +138,7 @@ func TestAccTagValueAWS(t *testing.T) { }) } -func TestAccTagKeyAzure(t *testing.T) { +func TestTagKeyAzure(t *testing.T) { testutil.Require(t, testutil.Azure) t.Parallel() @@ -170,7 +170,7 @@ func TestAccTagKeyAzure(t *testing.T) { }) } -func TestAccTagValueAzure(t *testing.T) { +func TestTagValueAzure(t *testing.T) { testutil.Require(t, testutil.Azure) t.Parallel() @@ -190,7 +190,7 @@ func TestAccTagValueAzure(t *testing.T) { }) } -func TestAccTagKeyGCP(t *testing.T) { +func TestTagKeyGCP(t *testing.T) { testutil.Require(t, testutil.GCP) t.Parallel() @@ -222,7 +222,7 @@ func TestAccTagKeyGCP(t *testing.T) { }) } -func TestAccTagValueGCP(t *testing.T) { +func TestTagValueGCP(t *testing.T) { testutil.Require(t, testutil.GCP) t.Parallel() diff --git a/integration/python/python_tasks_test.go b/integration/python/python_tasks_test.go index 18191291ce..1f8a9f667b 100644 --- a/integration/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -75,7 +75,7 @@ var sparkVersions = []string{ "14.1.x-scala2.12", } -func TestAccRunPythonTaskWorkspace(t *testing.T) { +func TestRunPythonTaskWorkspace(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") @@ -96,7 +96,7 @@ func TestAccRunPythonTaskWorkspace(t *testing.T) { }) } -func TestAccRunPythonTaskDBFS(t *testing.T) { +func TestRunPythonTaskDBFS(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") @@ -109,7 +109,7 @@ func TestAccRunPythonTaskDBFS(t *testing.T) { }) } -func TestAccRunPythonTaskRepo(t *testing.T) { +func TestRunPythonTaskRepo(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") From 58dfa70e50d4f36eab1052fd335de33dc355eeb0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 13 Dec 2024 16:55:49 +0100 Subject: [PATCH 43/63] Upgrade TF provider to 1.61.0 (#2011) ## Changes Upgraded to TF provider 1.61.0 ### New Features and Improvements - Added databricks_app resource and data source (https://github.com/databricks/terraform-provider-databricks/pull/4099). 
- Added databricks_credentials resource and data source --- .../internal/tf/codegen/generator/walker.go | 56 +++++---- bundle/internal/tf/codegen/schema/version.go | 2 +- .../tf/codegen/templates/root.go.tmpl | 6 +- bundle/internal/tf/schema/data_source_app.go | 107 ++++++++++++++++++ bundle/internal/tf/schema/data_source_apps.go | 106 +++++++++++++++++ .../tf/schema/data_source_functions.go | 8 +- bundle/internal/tf/schema/data_source_jobs.go | 5 +- .../data_source_notification_destinations.go | 2 +- .../tf/schema/data_source_registered_model.go | 2 +- .../data_source_registered_model_versions.go | 4 +- .../schema/data_source_serving_endpoints.go | 18 +-- bundle/internal/tf/schema/data_sources.go | 4 + bundle/internal/tf/schema/resource_app.go | 102 +++++++++++++++++ .../schema/resource_azure_adls_gen2_mount.go | 1 + .../internal/tf/schema/resource_credential.go | 52 +++++++++ bundle/internal/tf/schema/resource_grant.go | 1 + bundle/internal/tf/schema/resource_grants.go | 1 + .../tf/schema/resource_permissions.go | 1 + bundle/internal/tf/schema/resources.go | 4 + bundle/internal/tf/schema/root.go | 2 +- 20 files changed, 440 insertions(+), 44 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_app.go create mode 100644 bundle/internal/tf/schema/data_source_apps.go create mode 100644 bundle/internal/tf/schema/resource_app.go create mode 100644 bundle/internal/tf/schema/resource_credential.go diff --git a/bundle/internal/tf/codegen/generator/walker.go b/bundle/internal/tf/codegen/generator/walker.go index 2ed044c3d1..0e9d73c4e5 100644 --- a/bundle/internal/tf/codegen/generator/walker.go +++ b/bundle/internal/tf/codegen/generator/walker.go @@ -2,9 +2,8 @@ package generator import ( "fmt" - "strings" - "slices" + "strings" tfjson "github.com/hashicorp/terraform-json" "github.com/iancoleman/strcase" @@ -70,6 +69,25 @@ func nestedBlockKeys(block *tfjson.SchemaBlock) []string { return keys } +func nestedField(name []string, k string, isRef bool) field { + // Collect field properties. + fieldName := strcase.ToCamel(k) + fieldTypePrefix := "" + if isRef { + fieldTypePrefix = "*" + } else { + fieldTypePrefix = "[]" + } + fieldType := fmt.Sprintf("%s%s", fieldTypePrefix, strings.Join(append(name, strcase.ToCamel(k)), "")) + fieldTag := fmt.Sprintf("%s,omitempty", k) + + return field{ + Name: fieldName, + Type: fieldType, + Tag: fieldTag, + } +} + func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { // Produce nested types before this block itself. // This ensures types are defined before they are referenced. @@ -91,10 +109,24 @@ func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { v := block.Attributes[k] // Assert the attribute type is always set. - if v.AttributeType == cty.NilType { + if v.AttributeType == cty.NilType && v.AttributeNestedType == nil { return fmt.Errorf("unexpected nil type for attribute %s", k) } + // If there is a nested type, walk it and continue to next attribute. + if v.AttributeNestedType != nil { + nestedBlock := &tfjson.SchemaBlock{ + Attributes: v.AttributeNestedType.Attributes, + } + err := w.walk(nestedBlock, append(name, strcase.ToCamel(k))) + if err != nil { + return err + } + // Append to list of fields for type. + typ.Fields = append(typ.Fields, nestedField(name, k, v.AttributeNestedType.NestingMode == tfjson.SchemaNestingModeSingle)) + continue + } + // Collect field properties. 
fieldName := strcase.ToCamel(k) fieldType := processAttributeType(v.AttributeType) @@ -117,24 +149,8 @@ func (w *walker) walk(block *tfjson.SchemaBlock, name []string) error { // Declare nested blocks. for _, k := range nestedBlockKeys(block) { v := block.NestedBlocks[k] - - // Collect field properties. - fieldName := strcase.ToCamel(k) - fieldTypePrefix := "" - if v.MaxItems == 1 { - fieldTypePrefix = "*" - } else { - fieldTypePrefix = "[]" - } - fieldType := fmt.Sprintf("%s%s", fieldTypePrefix, strings.Join(append(name, strcase.ToCamel(k)), "")) - fieldTag := fmt.Sprintf("%s,omitempty", k) - // Append to list of fields for type. - typ.Fields = append(typ.Fields, field{ - Name: fieldName, - Type: fieldType, - Tag: fieldTag, - }) + typ.Fields = append(typ.Fields, nestedField(name, k, v.MaxItems == 1)) } // Append type to list of structs. diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index a778e0232f..a3c5c62819 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.59.0" +const ProviderVersion = "1.61.0" diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index e03e978f08..b5c53c1615 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -25,9 +25,9 @@ const ProviderVersion = "{{ .ProviderVersion }}" func NewRoot() *Root { return &Root{ - Terraform: map[string]interface{}{ - "required_providers": map[string]interface{}{ - "databricks": map[string]interface{}{ + Terraform: map[string]any{ + "required_providers": map[string]any{ + "databricks": map[string]any{ "source": ProviderSource, "version": ProviderVersion, }, diff --git a/bundle/internal/tf/schema/data_source_app.go b/bundle/internal/tf/schema/data_source_app.go new file mode 100644 index 0000000000..9b4ef077e7 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_app.go @@ -0,0 +1,107 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceAppAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type DataSourceAppAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppAppResources struct { + Description string `json:"description,omitempty"` + Job *DataSourceAppAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *DataSourceAppAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *DataSourceAppAppResourcesServingEndpoint `json:"serving_endpoint,omitempty"` + SqlWarehouse *DataSourceAppAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type DataSourceAppApp struct { + ActiveDeployment *DataSourceAppAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *DataSourceAppAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *DataSourceAppAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *DataSourceAppAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []DataSourceAppAppResources `json:"resources,omitempty"` 
+ ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} + +type DataSourceApp struct { + App *DataSourceAppApp `json:"app,omitempty"` + Name string `json:"name"` +} diff --git a/bundle/internal/tf/schema/data_source_apps.go b/bundle/internal/tf/schema/data_source_apps.go new file mode 100644 index 0000000000..dd381eabff --- /dev/null +++ b/bundle/internal/tf/schema/data_source_apps.go @@ -0,0 +1,106 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceAppsAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppsAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppsAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppsAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppsAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type DataSourceAppsAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type DataSourceAppsAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *DataSourceAppsAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *DataSourceAppsAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type DataSourceAppsAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type DataSourceAppsAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type DataSourceAppsAppResources struct { + Description string `json:"description,omitempty"` + Job *DataSourceAppsAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *DataSourceAppsAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *DataSourceAppsAppResourcesServingEndpoint 
`json:"serving_endpoint,omitempty"` + SqlWarehouse *DataSourceAppsAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type DataSourceAppsApp struct { + ActiveDeployment *DataSourceAppsAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *DataSourceAppsAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *DataSourceAppsAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *DataSourceAppsAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []DataSourceAppsAppResources `json:"resources,omitempty"` + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int `json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} + +type DataSourceApps struct { + App []DataSourceAppsApp `json:"app,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_functions.go b/bundle/internal/tf/schema/data_source_functions.go index 6085d7522f..416db8fc83 100644 --- a/bundle/internal/tf/schema/data_source_functions.go +++ b/bundle/internal/tf/schema/data_source_functions.go @@ -69,6 +69,7 @@ type DataSourceFunctionsFunctions struct { FullDataType string `json:"full_data_type,omitempty"` FullName string `json:"full_name,omitempty"` FunctionId string `json:"function_id,omitempty"` + InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` IsDeterministic bool `json:"is_deterministic,omitempty"` IsNullCall bool `json:"is_null_call,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` @@ -76,8 +77,10 @@ type DataSourceFunctionsFunctions struct { Owner string `json:"owner,omitempty"` ParameterStyle string `json:"parameter_style,omitempty"` Properties string `json:"properties,omitempty"` + ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` RoutineBody string `json:"routine_body,omitempty"` RoutineDefinition string `json:"routine_definition,omitempty"` + RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` SchemaName string `json:"schema_name,omitempty"` SecurityType string `json:"security_type,omitempty"` SpecificName string `json:"specific_name,omitempty"` @@ -85,14 +88,11 @@ type DataSourceFunctionsFunctions struct { SqlPath string `json:"sql_path,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` - InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` - ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` - RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` } type DataSourceFunctions struct { CatalogName string `json:"catalog_name"` + Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` IncludeBrowse bool `json:"include_browse,omitempty"` SchemaName string `json:"schema_name"` - Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_jobs.go 
b/bundle/internal/tf/schema/data_source_jobs.go index 98533c0c81..643f7a9f9f 100644 --- a/bundle/internal/tf/schema/data_source_jobs.go +++ b/bundle/internal/tf/schema/data_source_jobs.go @@ -3,6 +3,7 @@ package schema type DataSourceJobs struct { - Id string `json:"id,omitempty"` - Ids map[string]string `json:"ids,omitempty"` + Id string `json:"id,omitempty"` + Ids map[string]string `json:"ids,omitempty"` + JobNameContains string `json:"job_name_contains,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_notification_destinations.go b/bundle/internal/tf/schema/data_source_notification_destinations.go index c95ad6db93..8447b497bc 100644 --- a/bundle/internal/tf/schema/data_source_notification_destinations.go +++ b/bundle/internal/tf/schema/data_source_notification_destinations.go @@ -10,6 +10,6 @@ type DataSourceNotificationDestinationsNotificationDestinations struct { type DataSourceNotificationDestinations struct { DisplayNameContains string `json:"display_name_contains,omitempty"` - Type string `json:"type,omitempty"` NotificationDestinations []DataSourceNotificationDestinationsNotificationDestinations `json:"notification_destinations,omitempty"` + Type string `json:"type,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_registered_model.go b/bundle/internal/tf/schema/data_source_registered_model.go index e19e0849ad..41d69ff8f3 100644 --- a/bundle/internal/tf/schema/data_source_registered_model.go +++ b/bundle/internal/tf/schema/data_source_registered_model.go @@ -8,6 +8,7 @@ type DataSourceRegisteredModelModelInfoAliases struct { } type DataSourceRegisteredModelModelInfo struct { + Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` BrowseOnly bool `json:"browse_only,omitempty"` CatalogName string `json:"catalog_name,omitempty"` Comment string `json:"comment,omitempty"` @@ -21,7 +22,6 @@ type DataSourceRegisteredModelModelInfo struct { StorageLocation string `json:"storage_location,omitempty"` UpdatedAt int `json:"updated_at,omitempty"` UpdatedBy string `json:"updated_by,omitempty"` - Aliases []DataSourceRegisteredModelModelInfoAliases `json:"aliases,omitempty"` } type DataSourceRegisteredModel struct { diff --git a/bundle/internal/tf/schema/data_source_registered_model_versions.go b/bundle/internal/tf/schema/data_source_registered_model_versions.go index f70e58f85f..1a670dfbc9 100644 --- a/bundle/internal/tf/schema/data_source_registered_model_versions.go +++ b/bundle/internal/tf/schema/data_source_registered_model_versions.go @@ -25,6 +25,7 @@ type DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies stru } type DataSourceRegisteredModelVersionsModelVersions struct { + Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` BrowseOnly bool `json:"browse_only,omitempty"` CatalogName string `json:"catalog_name,omitempty"` Comment string `json:"comment,omitempty"` @@ -33,6 +34,7 @@ type DataSourceRegisteredModelVersionsModelVersions struct { Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` ModelName string `json:"model_name,omitempty"` + ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` RunId string `json:"run_id,omitempty"` RunWorkspaceId int `json:"run_workspace_id,omitempty"` SchemaName string `json:"schema_name,omitempty"` @@ -42,8 +44,6 @@ type DataSourceRegisteredModelVersionsModelVersions struct { UpdatedAt int `json:"updated_at,omitempty"` 
UpdatedBy string `json:"updated_by,omitempty"` Version int `json:"version,omitempty"` - Aliases []DataSourceRegisteredModelVersionsModelVersionsAliases `json:"aliases,omitempty"` - ModelVersionDependencies []DataSourceRegisteredModelVersionsModelVersionsModelVersionDependencies `json:"model_version_dependencies,omitempty"` } type DataSourceRegisteredModelVersions struct { diff --git a/bundle/internal/tf/schema/data_source_serving_endpoints.go b/bundle/internal/tf/schema/data_source_serving_endpoints.go index 028121b5ad..bdfd778e03 100644 --- a/bundle/internal/tf/schema/data_source_serving_endpoints.go +++ b/bundle/internal/tf/schema/data_source_serving_endpoints.go @@ -8,9 +8,9 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii struct { type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInput struct { InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` Safety bool `json:"safety,omitempty"` ValidTopics []string `json:"valid_topics,omitempty"` - Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsInputPii `json:"pii,omitempty"` } type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { @@ -19,9 +19,9 @@ type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii struct { type DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutput struct { InvalidKeywords []string `json:"invalid_keywords,omitempty"` + Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` Safety bool `json:"safety,omitempty"` ValidTopics []string `json:"valid_topics,omitempty"` - Pii []DataSourceServingEndpointsEndpointsAiGatewayGuardrailsOutputPii `json:"pii,omitempty"` } type DataSourceServingEndpointsEndpointsAiGatewayGuardrails struct { @@ -111,17 +111,17 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmCon } type DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel struct { - Name string `json:"name"` - Provider string `json:"provider"` - Task string `json:"task"` Ai21LabsConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAi21LabsConfig `json:"ai21labs_config,omitempty"` AmazonBedrockConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` AnthropicConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` CohereConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` DatabricksModelServingConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` GoogleCloudVertexAiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"` + Name string `json:"name"` OpenaiConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` PalmConfig []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` + Provider string `json:"provider"` + Task string `json:"task"` } type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel struct { @@ -134,9 +134,9 @@ type DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel 
stru type DataSourceServingEndpointsEndpointsConfigServedEntities struct { EntityName string `json:"entity_name,omitempty"` EntityVersion string `json:"entity_version,omitempty"` - Name string `json:"name,omitempty"` ExternalModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesExternalModel `json:"external_model,omitempty"` FoundationModel []DataSourceServingEndpointsEndpointsConfigServedEntitiesFoundationModel `json:"foundation_model,omitempty"` + Name string `json:"name,omitempty"` } type DataSourceServingEndpointsEndpointsConfigServedModels struct { @@ -161,16 +161,16 @@ type DataSourceServingEndpointsEndpointsTags struct { } type DataSourceServingEndpointsEndpoints struct { + AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` + Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` CreationTimestamp int `json:"creation_timestamp,omitempty"` Creator string `json:"creator,omitempty"` Id string `json:"id,omitempty"` LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` Name string `json:"name,omitempty"` - Task string `json:"task,omitempty"` - AiGateway []DataSourceServingEndpointsEndpointsAiGateway `json:"ai_gateway,omitempty"` - Config []DataSourceServingEndpointsEndpointsConfig `json:"config,omitempty"` State []DataSourceServingEndpointsEndpointsState `json:"state,omitempty"` Tags []DataSourceServingEndpointsEndpointsTags `json:"tags,omitempty"` + Task string `json:"task,omitempty"` } type DataSourceServingEndpoints struct { diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 3a59bf8c37..1880db25a7 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,6 +3,8 @@ package schema type DataSources struct { + App map[string]any `json:"databricks_app,omitempty"` + Apps map[string]any `json:"databricks_apps,omitempty"` AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` @@ -66,6 +68,8 @@ type DataSources struct { func NewDataSources() *DataSources { return &DataSources{ + App: make(map[string]any), + Apps: make(map[string]any), AwsAssumeRolePolicy: make(map[string]any), AwsBucketPolicy: make(map[string]any), AwsCrossaccountPolicy: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_app.go b/bundle/internal/tf/schema/resource_app.go new file mode 100644 index 0000000000..14c93b7937 --- /dev/null +++ b/bundle/internal/tf/schema/resource_app.go @@ -0,0 +1,102 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceAppActiveDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type ResourceAppActiveDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppActiveDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *ResourceAppActiveDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *ResourceAppActiveDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type ResourceAppAppStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppComputeStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppPendingDeploymentDeploymentArtifacts struct { + SourceCodePath string `json:"source_code_path,omitempty"` +} + +type ResourceAppPendingDeploymentStatus struct { + Message string `json:"message,omitempty"` + State string `json:"state,omitempty"` +} + +type ResourceAppPendingDeployment struct { + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DeploymentArtifacts *ResourceAppPendingDeploymentDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + DeploymentId string `json:"deployment_id,omitempty"` + Mode string `json:"mode,omitempty"` + SourceCodePath string `json:"source_code_path,omitempty"` + Status *ResourceAppPendingDeploymentStatus `json:"status,omitempty"` + UpdateTime string `json:"update_time,omitempty"` +} + +type ResourceAppResourcesJob struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type ResourceAppResourcesSecret struct { + Key string `json:"key"` + Permission string `json:"permission"` + Scope string `json:"scope"` +} + +type ResourceAppResourcesServingEndpoint struct { + Name string `json:"name"` + Permission string `json:"permission"` +} + +type ResourceAppResourcesSqlWarehouse struct { + Id string `json:"id"` + Permission string `json:"permission"` +} + +type ResourceAppResources struct { + Description string `json:"description,omitempty"` + Job *ResourceAppResourcesJob `json:"job,omitempty"` + Name string `json:"name"` + Secret *ResourceAppResourcesSecret `json:"secret,omitempty"` + ServingEndpoint *ResourceAppResourcesServingEndpoint `json:"serving_endpoint,omitempty"` + SqlWarehouse *ResourceAppResourcesSqlWarehouse `json:"sql_warehouse,omitempty"` +} + +type ResourceApp struct { + ActiveDeployment *ResourceAppActiveDeployment `json:"active_deployment,omitempty"` + AppStatus *ResourceAppAppStatus `json:"app_status,omitempty"` + ComputeStatus *ResourceAppComputeStatus `json:"compute_status,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Creator string `json:"creator,omitempty"` + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + PendingDeployment *ResourceAppPendingDeployment `json:"pending_deployment,omitempty"` + Resources []ResourceAppResources `json:"resources,omitempty"` + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + ServicePrincipalId int 
`json:"service_principal_id,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Updater string `json:"updater,omitempty"` + Url string `json:"url,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go b/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go index d0f96d54ea..6e2ea08e82 100644 --- a/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go +++ b/bundle/internal/tf/schema/resource_azure_adls_gen2_mount.go @@ -9,6 +9,7 @@ type ResourceAzureAdlsGen2Mount struct { ClusterId string `json:"cluster_id,omitempty"` ContainerName string `json:"container_name"` Directory string `json:"directory,omitempty"` + Environment string `json:"environment,omitempty"` Id string `json:"id,omitempty"` InitializeFileSystem bool `json:"initialize_file_system"` MountName string `json:"mount_name"` diff --git a/bundle/internal/tf/schema/resource_credential.go b/bundle/internal/tf/schema/resource_credential.go new file mode 100644 index 0000000000..9d47219ea9 --- /dev/null +++ b/bundle/internal/tf/schema/resource_credential.go @@ -0,0 +1,52 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceCredentialAwsIamRole struct { + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn,omitempty"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} + +type ResourceCredentialAzureManagedIdentity struct { + AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` +} + +type ResourceCredentialAzureServicePrincipal struct { + ApplicationId string `json:"application_id"` + ClientSecret string `json:"client_secret"` + DirectoryId string `json:"directory_id"` +} + +type ResourceCredentialDatabricksGcpServiceAccount struct { + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` + PrivateKeyId string `json:"private_key_id,omitempty"` +} + +type ResourceCredential struct { + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` + FullName string `json:"full_name,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Purpose string `json:"purpose"` + ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` + AwsIamRole *ResourceCredentialAwsIamRole `json:"aws_iam_role,omitempty"` + AzureManagedIdentity *ResourceCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` + AzureServicePrincipal *ResourceCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` + DatabricksGcpServiceAccount *ResourceCredentialDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_grant.go b/bundle/internal/tf/schema/resource_grant.go index 
d8569f3042..6ed97791cc 100644 --- a/bundle/internal/tf/schema/resource_grant.go +++ b/bundle/internal/tf/schema/resource_grant.go @@ -4,6 +4,7 @@ package schema type ResourceGrant struct { Catalog string `json:"catalog,omitempty"` + Credential string `json:"credential,omitempty"` ExternalLocation string `json:"external_location,omitempty"` ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index dd00152fba..474a9950fc 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -9,6 +9,7 @@ type ResourceGrantsGrant struct { type ResourceGrants struct { Catalog string `json:"catalog,omitempty"` + Credential string `json:"credential,omitempty"` ExternalLocation string `json:"external_location,omitempty"` ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` diff --git a/bundle/internal/tf/schema/resource_permissions.go b/bundle/internal/tf/schema/resource_permissions.go index a3d05e6f2c..7dfb84b5ff 100644 --- a/bundle/internal/tf/schema/resource_permissions.go +++ b/bundle/internal/tf/schema/resource_permissions.go @@ -10,6 +10,7 @@ type ResourcePermissionsAccessControl struct { } type ResourcePermissions struct { + AppName string `json:"app_name,omitempty"` Authorization string `json:"authorization,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterPolicyId string `json:"cluster_policy_id,omitempty"` diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index ea5b618fd7..b57c2711a0 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -5,6 +5,7 @@ package schema type Resources struct { AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` Alert map[string]any `json:"databricks_alert,omitempty"` + App map[string]any `json:"databricks_app,omitempty"` ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` @@ -18,6 +19,7 @@ type Resources struct { ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` Connection map[string]any `json:"databricks_connection,omitempty"` + Credential map[string]any `json:"databricks_credential,omitempty"` CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"` Dashboard map[string]any `json:"databricks_dashboard,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` @@ -111,6 +113,7 @@ func NewResources() *Resources { return &Resources{ AccessControlRuleSet: make(map[string]any), Alert: make(map[string]any), + App: make(map[string]any), ArtifactAllowlist: make(map[string]any), AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), AwsS3Mount: make(map[string]any), @@ -124,6 +127,7 @@ func NewResources() *Resources { ClusterPolicy: make(map[string]any), ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), Connection: make(map[string]any), + Credential: make(map[string]any), CustomAppIntegration: make(map[string]any), Dashboard: 
make(map[string]any), DbfsFile: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 2cadb80905..6befba596c 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.59.0" +const ProviderVersion = "1.61.0" func NewRoot() *Root { return &Root{ From 3b00d7861ecaf79bacc9ffb5e60d8b020b3a7da9 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 13 Dec 2024 17:09:51 +0100 Subject: [PATCH 44/63] Remove calls to `testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")` (#2014) ## Changes These calls are no longer necessary now that integration tests use a main function that performs this check. This change updates integration tests that call this function. Of those, the call sites that initialize a workspace client are updated to use `acc.WorkspaceTest(t)` to get one. ## Tests n/a --- integration/bundle/bind_resource_test.go | 9 ---- integration/bundle/deployment_state_test.go | 3 -- integration/bundle/init_test.go | 30 ++++++------- integration/bundle/spark_jar_test.go | 2 - integration/bundle/validate_test.go | 2 - integration/cmd/alerts/alerts_test.go | 3 -- integration/cmd/api/api_test.go | 6 +-- integration/cmd/auth/describe_test.go | 5 --- integration/cmd/clusters/clusters_test.go | 5 --- integration/cmd/fs/cat_test.go | 3 -- integration/cmd/fs/cp_test.go | 2 - integration/cmd/fs/helpers_test.go | 2 - integration/cmd/fs/ls_test.go | 2 - integration/cmd/jobs/jobs_test.go | 7 +-- integration/cmd/repos/repos_test.go | 49 +++++++-------------- integration/cmd/sync/sync_test.go | 16 +++---- integration/cmd/workspace/workspace_test.go | 3 -- integration/libs/filer/helpers_test.go | 2 - integration/libs/git/git_clone_test.go | 7 --- integration/libs/locker/locker_test.go | 6 +-- integration/python/python_tasks_test.go | 6 --- 21 files changed, 39 insertions(+), 131 deletions(-) diff --git a/integration/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go index e324cb27b6..127b160571 100644 --- a/integration/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -17,9 +17,6 @@ import ( ) func TestBindJobToExistingJob(t *testing.T) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - ctx, wt := acc.WorkspaceTest(t) gt := &generateJobTest{T: wt, w: wt.W} @@ -82,9 +79,6 @@ func TestBindJobToExistingJob(t *testing.T) { } func TestAbortBind(t *testing.T) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - ctx, wt := acc.WorkspaceTest(t) gt := &generateJobTest{T: wt, w: wt.W} @@ -131,9 +125,6 @@ func TestAbortBind(t *testing.T) { } func TestGenerateAndBind(t *testing.T) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - ctx, wt := acc.WorkspaceTest(t) gt := &generateJobTest{T: wt, w: wt.W} diff --git a/integration/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go index 8e7228f8a8..10b95e3986 100644 --- a/integration/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -14,9 +14,6 @@ import ( ) func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - ctx, wt := acc.WorkspaceTest(t) w := wt.W diff --git a/integration/bundle/init_test.go b/integration/bundle/init_test.go index d6efcc80fd..e36b09c619 100644 --- a/integration/bundle/init_test.go +++ 
b/integration/bundle/init_test.go @@ -11,17 +11,15 @@ import ( "testing" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/iamutil" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestBundleInitErrorOnUnknownFields(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - tmpDir := t.TempDir() _, _, err := testcli.RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") @@ -40,15 +38,14 @@ func TestBundleInitErrorOnUnknownFields(t *testing.T) { // make changes that can break the MLOps Stacks DAB. In which case we should // skip this test until the MLOps Stacks DAB is updated to work again. func TestBundleInitOnMlopsStacks(t *testing.T) { - env := testutil.GetCloud(t).String() + _, wt := acc.WorkspaceTest(t) + w := wt.W tmpDir1 := t.TempDir() tmpDir2 := t.TempDir() - w, err := databricks.NewWorkspaceClient(&databricks.Config{}) - require.NoError(t, err) - projectName := testutil.RandomName("project_name_") + env := testutil.GetCloud(t).String() // Create a config file with the project name and root dir initConfig := map[string]string{ @@ -102,23 +99,22 @@ func TestBundleInitOnMlopsStacks(t *testing.T) { } func TestBundleInitHelpers(t *testing.T) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w, err := databricks.NewWorkspaceClient(&databricks.Config{}) - require.NoError(t, err) - - me, err := w.CurrentUser.Me(context.Background()) + me, err := w.CurrentUser.Me(ctx) require.NoError(t, err) var smallestNode string - switch env { - case "azure": + switch testutil.GetCloud(t) { + case testutil.Azure: smallestNode = "Standard_D3_v2" - case "gcp": + case testutil.GCP: smallestNode = "n1-standard-4" - default: + case testutil.AWS: smallestNode = "i3.xlarge" + default: + t.Fatal("Unknown cloud environment") } tests := []struct { diff --git a/integration/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go index 5354bd9537..be389d64ba 100644 --- a/integration/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -52,7 +52,6 @@ func runSparkJarTestFromWorkspace(t *testing.T, sparkVersion string) { } func TestSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: @@ -76,7 +75,6 @@ func TestSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { } func TestSparkJarTaskDeployAndRunOnWorkspace(t *testing.T) { - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") testutil.RequireJDK(t, context.Background(), "1.8.0") // Failure on earlier DBR versions: diff --git a/integration/bundle/validate_test.go b/integration/bundle/validate_test.go index 01b99c579a..2dd8ada67d 100644 --- a/integration/bundle/validate_test.go +++ b/integration/bundle/validate_test.go @@ -14,8 +14,6 @@ import ( ) func TestBundleValidate(t *testing.T) { - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - tmpDir := t.TempDir() testutil.WriteFile(t, filepath.Join(tmpDir, "databricks.yml"), ` diff --git a/integration/cmd/alerts/alerts_test.go b/integration/cmd/alerts/alerts_test.go index 6e940e5a84..c892e81692 100644 --- 
a/integration/cmd/alerts/alerts_test.go +++ b/integration/cmd/alerts/alerts_test.go @@ -4,13 +4,10 @@ import ( "testing" "github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" ) func TestAlertsCreateErrWhenNoArguments(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := testcli.RequireErrorRun(t, "alerts-legacy", "create") assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) } diff --git a/integration/cmd/api/api_test.go b/integration/cmd/api/api_test.go index d2aac37adc..0e97527574 100644 --- a/integration/cmd/api/api_test.go +++ b/integration/cmd/api/api_test.go @@ -16,8 +16,6 @@ import ( ) func TestApiGet(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, _ := testcli.RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") // Deserialize SCIM API response. @@ -31,9 +29,7 @@ func TestApiGet(t *testing.T) { } func TestApiPost(t *testing.T) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - if env == "gcp" { + if testutil.GetCloud(t) == testutil.GCP { t.Skip("DBFS REST API is disabled on gcp") } diff --git a/integration/cmd/auth/describe_test.go b/integration/cmd/auth/describe_test.go index 492c678676..cbc782d370 100644 --- a/integration/cmd/auth/describe_test.go +++ b/integration/cmd/auth/describe_test.go @@ -6,14 +6,11 @@ import ( "testing" "github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) func TestAuthDescribeSuccess(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe") @@ -35,8 +32,6 @@ func TestAuthDescribeSuccess(t *testing.T) { } func TestAuthDescribeFailure(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") diff --git a/integration/cmd/clusters/clusters_test.go b/integration/cmd/clusters/clusters_test.go index 6357c738f9..d62586cd45 100644 --- a/integration/cmd/clusters/clusters_test.go +++ b/integration/cmd/clusters/clusters_test.go @@ -7,7 +7,6 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" @@ -15,8 +14,6 @@ import ( ) func TestClustersList(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "list") outStr := stdout.String() assert.Contains(t, outStr, "ID") @@ -30,8 +27,6 @@ func TestClustersList(t *testing.T) { } func TestClustersGet(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - clusterId := findValidClusterID(t) stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "get", clusterId) outStr := stdout.String() diff --git a/integration/cmd/fs/cat_test.go b/integration/cmd/fs/cat_test.go index 4250f9c50f..015a937609 100644 --- a/integration/cmd/fs/cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/internal/acc" 
"github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -72,8 +71,6 @@ func TestFsCatOnNonExistentFile(t *testing.T) { } func TestFsCatForDbfsInvalidScheme(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := testcli.RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } diff --git a/integration/cmd/fs/cp_test.go b/integration/cmd/fs/cp_test.go index 755d2d304a..a44d93a679 100644 --- a/integration/cmd/fs/cp_test.go +++ b/integration/cmd/fs/cp_test.go @@ -357,8 +357,6 @@ func TestFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { } func TestFsCpErrorsOnInvalidScheme(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := testcli.RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) } diff --git a/integration/cmd/fs/helpers_test.go b/integration/cmd/fs/helpers_test.go index cf46f48acd..da4fd48cff 100644 --- a/integration/cmd/fs/helpers_test.go +++ b/integration/cmd/fs/helpers_test.go @@ -13,8 +13,6 @@ import ( ) func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - tmp := t.TempDir() f, err := filer.NewLocalClient(tmp) require.NoError(t, err) diff --git a/integration/cmd/fs/ls_test.go b/integration/cmd/fs/ls_test.go index d985d57c93..e8b955ff8c 100644 --- a/integration/cmd/fs/ls_test.go +++ b/integration/cmd/fs/ls_test.go @@ -168,8 +168,6 @@ func TestFsLsForNonexistingDir(t *testing.T) { func TestFsLsWithoutScheme(t *testing.T) { t.Parallel() - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := testcli.RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/integration/cmd/jobs/jobs_test.go b/integration/cmd/jobs/jobs_test.go index 4513a15973..60f3f8d22a 100644 --- a/integration/cmd/jobs/jobs_test.go +++ b/integration/cmd/jobs/jobs_test.go @@ -5,7 +5,6 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" @@ -13,11 +12,7 @@ import ( ) func TestCreateJob(t *testing.T) { - acc.WorkspaceTest(t) - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - if env != "azure" { - t.Skipf("Not running test on cloud %s", env) - } + testutil.Require(t, testutil.Azure) stdout, stderr := testcli.RequireSuccessfulRun(t, "jobs", "create", "--json", "@testdata/create_job_without_workers.json", "--log-level=debug") assert.Empty(t, stderr.String()) var output map[string]int diff --git a/integration/cmd/repos/repos_test.go b/integration/cmd/repos/repos_test.go index 365d48d6d1..e3297e8ff5 100644 --- a/integration/cmd/repos/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -6,6 +6,7 @@ import ( "strconv" "testing" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" @@ -48,11 +49,8 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex } func TestReposCreateWithProvider(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + 
ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) _, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", repoUrl, "gitHub", "--path", repoPath) @@ -65,11 +63,8 @@ func TestReposCreateWithProvider(t *testing.T) { } func TestReposCreateWithoutProvider(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) _, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", repoUrl, "--path", repoPath) @@ -82,11 +77,8 @@ func TestReposCreateWithoutProvider(t *testing.T) { } func TestReposGet(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) @@ -102,7 +94,7 @@ func TestReposGet(t *testing.T) { assert.Equal(t, byIdOutput.String(), byPathOutput.String()) // Get by path fails - _, stderr, err = testcli.RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json") + _, stderr, err := testcli.RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json") assert.ErrorContains(t, err, "failed to look up repo") // Get by path resolves to something other than a repo @@ -111,11 +103,8 @@ func TestReposGet(t *testing.T) { } func TestReposUpdate(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) @@ -132,11 +121,8 @@ func TestReposUpdate(t *testing.T) { } func TestReposDeleteByID(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, _ := createTemporaryRepo(t, w, ctx) @@ -146,16 +132,13 @@ func TestReposDeleteByID(t *testing.T) { assert.Equal(t, "", stderr.String()) // Check it was actually deleted - _, err = w.Repos.GetByRepoId(ctx, repoId) + _, err := w.Repos.GetByRepoId(ctx, repoId) assert.True(t, apierr.IsMissing(err), err) } func TestReposDeleteByPath(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W repoId, repoPath := createTemporaryRepo(t, w, ctx) @@ -165,6 +148,6 @@ func TestReposDeleteByPath(t *testing.T) { assert.Equal(t, "", stderr.String()) // Check it was actually deleted - _, err = w.Repos.GetByRepoId(ctx, repoId) + _, err := w.Repos.GetByRepoId(ctx, repoId) assert.True(t, apierr.IsMissing(err), err) } diff --git a/integration/cmd/sync/sync_test.go b/integration/cmd/sync/sync_test.go index c483c66e40..4949700161 100644 --- a/integration/cmd/sync/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -501,10 +501,8 @@ func TestSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { } func TestSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W me, err := 
wsc.CurrentUser.Me(ctx) require.NoError(t, err) @@ -521,10 +519,9 @@ func TestSyncEnsureRemotePathIsUsableIfRepoDoesntExist(t *testing.T) { } func TestSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() _, remoteRepoPath := setupRepo(t, wsc, ctx) // Repo itself is usable. @@ -543,10 +540,9 @@ func TestSyncEnsureRemotePathIsUsableIfRepoExists(t *testing.T) { } func TestSyncEnsureRemotePathIsUsableInWorkspace(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W - wsc := databricks.Must(databricks.NewWorkspaceClient()) - ctx := context.Background() me, err := wsc.CurrentUser.Me(ctx) require.NoError(t, err) diff --git a/integration/cmd/workspace/workspace_test.go b/integration/cmd/workspace/workspace_test.go index 90c6c66055..57b6a36242 100644 --- a/integration/cmd/workspace/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -13,7 +13,6 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" @@ -21,8 +20,6 @@ import ( ) func TestWorkspaceList(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "list", "/") outStr := stdout.String() assert.Contains(t, outStr, "ID") diff --git a/integration/libs/filer/helpers_test.go b/integration/libs/filer/helpers_test.go index 49031b3922..2383ae352f 100644 --- a/integration/libs/filer/helpers_test.go +++ b/integration/libs/filer/helpers_test.go @@ -16,8 +16,6 @@ import ( ) func setupLocalFiler(t testutil.TestingT) (filer.Filer, string) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - tmp := t.TempDir() f, err := filer.NewLocalClient(tmp) require.NoError(t, err) diff --git a/integration/libs/git/git_clone_test.go b/integration/libs/git/git_clone_test.go index 2d960cab9f..cbc2d091dd 100644 --- a/integration/libs/git/git_clone_test.go +++ b/integration/libs/git/git_clone_test.go @@ -6,14 +6,11 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" ) func TestGitClone(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - tmpDir := t.TempDir() ctx := context.Background() var err error @@ -34,8 +31,6 @@ func TestGitClone(t *testing.T) { } func TestGitCloneOnNonDefaultBranch(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - tmpDir := t.TempDir() ctx := context.Background() var err error @@ -55,8 +50,6 @@ func TestGitCloneOnNonDefaultBranch(t *testing.T) { } func TestGitCloneErrorsWhenRepositoryDoesNotExist(t *testing.T) { - t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - tmpDir := t.TempDir() err := git.Clone(context.Background(), "https://github.com/monalisa/doesnot-exist.git", "", tmpDir) diff --git a/integration/libs/locker/locker_test.go b/integration/libs/locker/locker_test.go index 20a4f0ae50..7624afdee4 100644 --- a/integration/libs/locker/locker_test.go +++ b/integration/libs/locker/locker_test.go @@ -46,10 +46,8 @@ func createRemoteTestProject(t *testing.T, projectNamePrefix string, wsc *databr } func TestLock(t *testing.T) { - 
t.Log(testutil.GetEnvOrSkipTest(t, "CLOUD_ENV")) - ctx := context.TODO() - wsc, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + wsc := wt.W remoteProjectRoot := createRemoteTestProject(t, "lock-acc-", wsc) // 5 lockers try to acquire a lock at the same time diff --git a/integration/python/python_tasks_test.go b/integration/python/python_tasks_test.go index 1f8a9f667b..d0a92e084d 100644 --- a/integration/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -78,7 +78,6 @@ var sparkVersions = []string{ func TestRunPythonTaskWorkspace(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") unsupportedSparkVersionsForWheel := []string{ "11.3.x-scala2.12", @@ -99,7 +98,6 @@ func TestRunPythonTaskWorkspace(t *testing.T) { func TestRunPythonTaskDBFS(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") runPythonTasks(t, prepareDBFSFiles(t), testOpts{ name: "Python tasks from DBFS", @@ -112,7 +110,6 @@ func TestRunPythonTaskDBFS(t *testing.T) { func TestRunPythonTaskRepo(t *testing.T) { // TODO: remove RUN_PYTHON_TASKS_TEST when ready to be executed as part of nightly testutil.GetEnvOrSkipTest(t, "RUN_PYTHON_TASKS_TEST") - testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") runPythonTasks(t, prepareRepoFiles(t), testOpts{ name: "Python tasks from Repo", @@ -123,9 +120,6 @@ func TestRunPythonTaskRepo(t *testing.T) { } func runPythonTasks(t *testing.T, tw *testFiles, opts testOpts) { - env := testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - w := tw.w nodeTypeId := testutil.GetCloud(t).NodeTypeID() From daf0f48143ca227ba967926969c9532abb87fcc6 Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Fri, 13 Dec 2024 17:13:21 +0100 Subject: [PATCH 45/63] Remove unused vscode settings in the templates (#2013) ## Changes VSCode extension no longer uses `databricks.python.envFile ` setting. And older extension versions will use the same default value anyway. 
## Tests None --- .vscode/settings.json | 1 - .../template/{{.project_name}}/.vscode/settings.json.tmpl | 1 - .../template/{{.project_name}}/.vscode/settings.json | 1 - .../template/{{.project_name}}/.vscode/settings.json.tmpl | 1 - 4 files changed, 4 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index a7830d4ae9..f8b04f1269 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -15,7 +15,6 @@ "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "python.envFile": "${workspaceRoot}/.env", - "databricks.python.envFile": "${workspaceFolder}/.env", "python.analysis.stubPath": ".vscode", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------" diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl index 562ba136f5..3eca012261 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json index f19498daa3..8ee87c30d4 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json +++ b/libs/template/templates/default-python/template/{{.project_name}}/.vscode/settings.json @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl index c63af24b4b..03a365f9de 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -1,6 +1,5 @@ { "python.analysis.stubPath": ".vscode", - "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", "python.testing.pytestArgs": [ From 9f9d892db9a3d43b14dae084b663990d729ca7fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 08:55:19 +0100 Subject: [PATCH 46/63] Bump golang.org/x/crypto from 0.24.0 to 0.31.0 (#2006) Bumps 
[golang.org/x/crypto](https://github.com/golang/crypto) from 0.24.0 to 0.31.0.
Commits
- b4f1988 ssh: make the public key cache a 1-entry FIFO cache
- 7042ebc openpgp/clearsign: just use rand.Reader in tests
- 3e90321 go.mod: update golang.org/x dependencies
- 8c4e668 x509roots/fallback: update bundle
- 6018723 go.mod: update golang.org/x dependencies
- 71ed71b README: don't recommend go get
- 750a45f sha3: add MarshalBinary, AppendBinary, and UnmarshalBinary
- 36b1725 sha3: avoid trailing permutation
- 80ea76e sha3: fix padding for long cSHAKE parameters
- c17aa50 sha3: avoid buffer copy
- Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/crypto&package-manager=go_modules&previous-version=0.24.0&new-version=0.31.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/databricks/cli/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 6ad5a8cdf1..c9a008fb3e 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.10.0 golang.org/x/term v0.27.0 - golang.org/x/text v0.20.0 + golang.org/x/text v0.21.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -62,7 +62,7 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.24.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index d8fd63d00d..63bf2be333 100644 --- a/go.sum +++ b/go.sum @@ -176,8 +176,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -218,8 +218,8 @@ golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From d929ea3eefebc675d761b300a840baf7e1d59781 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 08:55:33 +0100 Subject: [PATCH 47/63] Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen (#2005) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.30.0 to 0.31.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/crypto&package-manager=go_modules&previous-version=0.30.0&new-version=0.31.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/databricks/cli/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- bundle/internal/tf/codegen/go.mod | 2 +- bundle/internal/tf/codegen/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bundle/internal/tf/codegen/go.mod b/bundle/internal/tf/codegen/go.mod index 508ff6ffba..6279003ccc 100644 --- a/bundle/internal/tf/codegen/go.mod +++ b/bundle/internal/tf/codegen/go.mod @@ -20,7 +20,7 @@ require ( github.com/cloudflare/circl v1.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect - golang.org/x/crypto v0.30.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect diff --git a/bundle/internal/tf/codegen/go.sum b/bundle/internal/tf/codegen/go.sum index df59b9d056..1ce56777f0 100644 --- a/bundle/internal/tf/codegen/go.sum +++ b/bundle/internal/tf/codegen/go.sum @@ -60,8 +60,8 @@ github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d h1:0olWaB5pg3+oychR51GUVCEsGkeCU/2JxjBgIo4f3M0= golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= From 70b7bbfd817b761aa5b3568f9d4d5a3119edb364 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 16 Dec 2024 12:34:37 +0100 Subject: [PATCH 48/63] Remove calls to `t.Setenv` from integration tests (#2018) ## Changes The `Setenv` helper function configures an environment variable and resets it to its original value when exiting the test scope. It is incompatible with running tests in parallel because it modifies process-wide state. The `libs/env` package defines functions to interact with the environment but records `Setenv` calls on a `context.Context`. This enables us to override/specialize the environment scoped to a context. 
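As a minimal sketch of the pattern (the test name and value below are illustrative; `env.Set` and `env.All` are the `libs/env` helpers this change relies on):

```go
package example_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/libs/env"
)

func TestContextScopedEnv(t *testing.T) {
	// Safe to run in parallel: nothing touches the process-wide environment.
	t.Parallel()

	ctx := context.Background()

	// Record the override on the context instead of calling t.Setenv.
	ctx = env.Set(ctx, "BUNDLE_ROOT", "/tmp/example-bundle")

	// Code that reads the environment through libs/env sees the override;
	// env.All merges context overrides with the process environment.
	if env.All(ctx)["BUNDLE_ROOT"] != "/tmp/example-bundle" {
		t.Fatal("expected BUNDLE_ROOT override to be visible via libs/env")
	}
}
```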
Pre-requisites for removing the `t.Setenv` calls: * Make `cmdio.NewIO` accept a context and use it with `libs/env` * Make all `internal/testcli` functions use a context The rest of this change: * Modifies integration tests to initialize a context to use if there wasn't already one * Updates `t.Setenv` calls to use `env.Set` ## Tests n/a --- bundle/run/job_test.go | 4 +- bundle/run/pipeline_test.go | 2 +- cmd/labs/installed_test.go | 2 +- cmd/labs/list_test.go | 2 +- cmd/labs/project/command_test.go | 8 +-- cmd/labs/project/installer_test.go | 6 +- cmd/root/io.go | 5 +- integration/bundle/artifacts_test.go | 9 +-- integration/bundle/bind_resource_test.go | 19 ++++--- integration/bundle/deploy_test.go | 33 +++++------ integration/bundle/deployment_state_test.go | 3 +- integration/bundle/generate_job_test.go | 5 +- integration/bundle/generate_pipeline_test.go | 5 +- integration/bundle/helpers_test.go | 26 +++++---- integration/bundle/init_test.go | 17 +++--- integration/cmd/alerts/alerts_test.go | 4 +- integration/cmd/api/api_test.go | 11 +++- integration/cmd/auth/describe_test.go | 6 +- integration/cmd/clusters/clusters_test.go | 10 +++- integration/cmd/fs/cat_test.go | 16 ++++-- integration/cmd/fs/completion_test.go | 3 +- integration/cmd/fs/cp_test.go | 38 ++++++++----- integration/cmd/fs/ls_test.go | 19 +++++-- integration/cmd/fs/mkdir_test.go | 15 +++-- integration/cmd/fs/rm_test.go | 15 +++-- integration/cmd/jobs/jobs_test.go | 6 +- integration/cmd/repos/repos_test.go | 20 +++---- integration/cmd/secrets/secrets_test.go | 7 ++- .../storage_credentials_test.go | 4 +- integration/cmd/sync/sync_test.go | 35 +++++------- integration/cmd/unknown_command_test.go | 4 +- integration/cmd/version/version_test.go | 13 +++-- integration/cmd/workspace/workspace_test.go | 57 ++++++++++--------- integration/libs/git/git_fetch_test.go | 34 +++++------ internal/testcli/runner.go | 19 ++----- libs/cmdio/io.go | 4 +- libs/cmdio/render_test.go | 5 +- .../databrickscfg/cfgpickers/clusters_test.go | 4 +- libs/template/helpers_test.go | 2 +- 39 files changed, 274 insertions(+), 223 deletions(-) diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index 2461f61bde..5d19ca4fff 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -160,7 +160,7 @@ func TestJobRunnerRestart(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) jobApi := m.GetMockJobsAPI() @@ -231,7 +231,7 @@ func TestJobRunnerRestartForContinuousUnpausedJobs(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) jobApi := m.GetMockJobsAPI() diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go index e4608061c0..66f9d86be7 100644 --- a/bundle/run/pipeline_test.go +++ b/bundle/run/pipeline_test.go @@ -76,7 +76,7 @@ func TestPipelineRunnerRestart(t *testing.T) { } 
b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) ctx = cmdio.NewContext(ctx, cmdio.NewLogger(flags.ModeAppend)) mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ diff --git a/cmd/labs/installed_test.go b/cmd/labs/installed_test.go index 0418e5a09b..3c38e5e11c 100644 --- a/cmd/labs/installed_test.go +++ b/cmd/labs/installed_test.go @@ -11,7 +11,7 @@ import ( func TestListsInstalledProjects(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - r := testcli.NewRunnerWithContext(t, ctx, "labs", "installed") + r := testcli.NewRunner(t, ctx, "labs", "installed") r.RunAndExpectOutput(` Name Description Version blueprint Blueprint Project v0.3.15 diff --git a/cmd/labs/list_test.go b/cmd/labs/list_test.go index eec942d414..4388fdd0e9 100644 --- a/cmd/labs/list_test.go +++ b/cmd/labs/list_test.go @@ -12,7 +12,7 @@ import ( func TestListingWorks(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "project/testdata/installed-in-home") - c := testcli.NewRunnerWithContext(t, ctx, "labs", "list") + c := testcli.NewRunner(t, ctx, "labs", "list") stdout, _, err := c.Run() require.NoError(t, err) require.Contains(t, stdout.String(), "ucx") diff --git a/cmd/labs/project/command_test.go b/cmd/labs/project/command_test.go index f6326b2d1b..453329e1d1 100644 --- a/cmd/labs/project/command_test.go +++ b/cmd/labs/project/command_test.go @@ -30,7 +30,7 @@ func devEnvContext(t *testing.T) context.Context { func TestRunningBlueprintEcho(t *testing.T) { ctx := devEnvContext(t) - r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "echo") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "echo") var out echoOut r.RunAndParseJSON(&out) assert.Equal(t, "echo", out.Command) @@ -41,14 +41,14 @@ func TestRunningBlueprintEcho(t *testing.T) { func TestRunningBlueprintEchoProfileWrongOverride(t *testing.T) { ctx := devEnvContext(t) - r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "echo", "--profile", "workspace-profile") _, _, err := r.Run() assert.ErrorIs(t, err, databricks.ErrNotAccountClient) } func TestRunningCommand(t *testing.T) { ctx := devEnvContext(t) - r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "foo") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "foo") r.WithStdin() defer r.CloseStdin() @@ -60,7 +60,7 @@ func TestRunningCommand(t *testing.T) { func TestRenderingTable(t *testing.T) { ctx := devEnvContext(t) - r := testcli.NewRunnerWithContext(t, ctx, "labs", "blueprint", "table") + r := testcli.NewRunner(t, ctx, "labs", "blueprint", "table") r.RunAndExpectOutput(` Key Value First Second diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 367daccb33..a69389b315 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -236,7 +236,7 @@ func TestInstallerWorksForReleases(t *testing.T) { // │ │ │ └── site-packages // │ │ │ ├── ... 
// │ │ │ ├── distutils-precedence.pth - r := testcli.NewRunnerWithContext(t, ctx, "labs", "install", "blueprint", "--debug") + r := testcli.NewRunner(t, ctx, "labs", "install", "blueprint", "--debug") r.RunAndExpectOutput("setting up important infrastructure") } @@ -356,7 +356,7 @@ account_id = abc // └── databrickslabs-blueprint-releases.json // `databricks labs install .` means "verify this installer i'm developing does work" - r := testcli.NewRunnerWithContext(t, ctx, "labs", "install", ".") + r := testcli.NewRunner(t, ctx, "labs", "install", ".") r.WithStdin() defer r.CloseStdin() @@ -426,7 +426,7 @@ func TestUpgraderWorksForReleases(t *testing.T) { ctx = env.Set(ctx, "DATABRICKS_CLUSTER_ID", "installer-cluster") ctx = env.Set(ctx, "DATABRICKS_WAREHOUSE_ID", "installer-warehouse") - r := testcli.NewRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") + r := testcli.NewRunner(t, ctx, "labs", "upgrade", "blueprint") r.RunAndExpectOutput("setting up important infrastructure") // Check if the stub was called with the 'python -m pip install' command diff --git a/cmd/root/io.go b/cmd/root/io.go index 96c5a9a266..bba989a796 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -45,8 +45,9 @@ func (f *outputFlag) initializeIO(cmd *cobra.Command) error { headerTemplate = cmd.Annotations["headerTemplate"] } - cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) - ctx := cmdio.InContext(cmd.Context(), cmdIO) + ctx := cmd.Context() + cmdIO := cmdio.NewIO(ctx, f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) + ctx = cmdio.InContext(ctx, cmdIO) cmd.SetContext(ctx) return nil } diff --git a/integration/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go index 180c55332b..b070be92ca 100644 --- a/integration/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -253,8 +254,8 @@ func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { }) require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := testcli.RequireErrorRun(t, "bundle", "deploy") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") assert.Error(t, err) assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/doesnotexist does not exist: Not Found @@ -290,8 +291,8 @@ func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) { }) require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := testcli.RequireErrorRun(t, "bundle", "deploy") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") assert.Error(t, err) assert.Equal(t, fmt.Sprintf(`Error: volume /Volumes/main/%s/my_volume does not exist: Not Found diff --git a/integration/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go index 127b160571..823d5c1457 100644 --- a/integration/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" 
"github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -35,8 +36,8 @@ func TestBindJobToExistingJob(t *testing.T) { require.NoError(t, err) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := testcli.NewRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) @@ -58,7 +59,7 @@ func TestBindJobToExistingJob(t *testing.T) { require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") - c = testcli.NewRunner(t, "bundle", "deployment", "unbind", "foo") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "foo") _, _, err = c.Run() require.NoError(t, err) @@ -99,9 +100,9 @@ func TestAbortBind(t *testing.T) { }) // Bind should fail because prompting is not possible. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := testcli.NewRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) // Expect error suggesting to use --auto-approve _, _, err = c.Run() @@ -147,8 +148,8 @@ func TestGenerateAndBind(t *testing.T) { } }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "generate", "job", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", "--key", "test_job_key", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), @@ -164,7 +165,7 @@ func TestGenerateAndBind(t *testing.T) { require.Len(t, matches, 1) - c = testcli.NewRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") _, _, err = c.Run() require.NoError(t, err) diff --git a/integration/bundle/deploy_test.go b/integration/bundle/deploy_test.go index 12920bcb2a..64c59b3142 100644 --- a/integration/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -118,9 +119,9 @@ func TestBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { require.NoError(t, err) // Redeploy the bundle - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -162,9 +163,9 @@ func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { require.NoError(t, err) // Redeploy the bundle. 
Expect it to fail because deleting the pipeline requires --auto-approve. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -201,9 +202,9 @@ func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { require.Equal(t, pipelineName, pipeline.Name) // Redeploy the bundle, pointing the DLT pipeline to a different UC catalog. - t.Setenv("BUNDLE_ROOT", bundleRoot) - t.Setenv("TERM", "dumb") - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--var=\"catalog=whatever\"") stdout, stderr, err := c.Run() assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) @@ -235,7 +236,7 @@ func TestDeployBasicBundleLogs(t *testing.T) { currentUser, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) - stdout, stderr := blackBoxRun(t, root, "bundle", "deploy") + stdout, stderr := blackBoxRun(t, ctx, root, "bundle", "deploy") assert.Equal(t, strings.Join([]string{ fmt.Sprintf("Uploading bundle files to /Workspace/Users/%s/.bundle/%s/files...", currentUser.UserName, uniqueId), "Deploying resources...", @@ -282,9 +283,9 @@ func TestDeployUcVolume(t *testing.T) { assert.Equal(t, []catalog.Privilege{catalog.PrivilegeWriteVolume}, grants.PrivilegeAssignments[0].Privileges) // Recreation of the volume without --auto-approve should fail since prompting is not possible - t.Setenv("TERM", "dumb") - t.Setenv("BUNDLE_ROOT", bundleRoot) - stdout, stderr, err := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + stdout, stderr, err := testcli.NewRunner(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}").Run() assert.Error(t, err) assert.Contains(t, stderr.String(), `This action will result in the deletion or recreation of the following volumes. For managed volumes, the files stored in the volume are also deleted from your @@ -294,9 +295,9 @@ is removed from the catalog, but the underlying files are not deleted: assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") // Successfully recreate the volume with --auto-approve - t.Setenv("TERM", "dumb") - t.Setenv("BUNDLE_ROOT", bundleRoot) - _, _, err = testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "TERM", "dumb") + _, _, err = testcli.NewRunner(t, ctx, "bundle", "deploy", "--var=schema_name=${resources.schemas.schema2.name}", "--auto-approve").Run() assert.NoError(t, err) // Assert the volume is updated successfully diff --git a/integration/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go index 10b95e3986..43a19018ee 100644 --- a/integration/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) @@ -26,7 +27,7 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { }) require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", bundleRoot) + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) // Add some test file to the bundle err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644) diff --git a/integration/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go index e2dd9617e4..331a75d627 100644 --- a/integration/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" @@ -35,8 +36,8 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) { gt.destroyJob(ctx, jobId) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "generate", "job", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "job", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) diff --git a/integration/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go index a76db2e35e..5be1ff7ddb 100644 --- a/integration/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -34,8 +35,8 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { gt.destroyPipeline(ctx, pipelineId) }) - t.Setenv("BUNDLE_ROOT", bundleRoot) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", + ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) + c := testcli.NewRunner(t, ctx, "bundle", "generate", "pipeline", "--existing-pipeline-id", fmt.Sprint(pipelineId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", 
filepath.Join(bundleRoot, "src")) diff --git a/integration/bundle/helpers_test.go b/integration/bundle/helpers_test.go index f1c87c0ef9..c2deb67a94 100644 --- a/integration/bundle/helpers_test.go +++ b/integration/bundle/helpers_test.go @@ -40,7 +40,7 @@ func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, te } ctx = root.SetWorkspaceClient(ctx, nil) - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") + cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) out, err := filer.NewLocalClient(bundleRoot) @@ -65,7 +65,7 @@ func writeConfigFile(t testutil.TestingT, config map[string]any) (string, error) func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + c := testcli.NewRunner(t, ctx, "bundle", "validate", "--output", "json") stdout, _, err := c.Run() return stdout.Bytes(), err } @@ -85,7 +85,7 @@ func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle { func deployBundle(t testutil.TestingT, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") _, _, err := c.Run() return err } @@ -93,7 +93,7 @@ func deployBundle(t testutil.TestingT, ctx context.Context, path string) error { func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args = append([]string{"bundle", "deploy"}, args...) - c := testcli.NewRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) stdout, stderr, err := c.Run() return stdout.String(), stderr.String(), err } @@ -102,7 +102,7 @@ func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} args = append(args, flags...) - c := testcli.NewRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) _, _, err := c.Run() return err } @@ -111,7 +111,7 @@ func runResource(t testutil.TestingT, ctx context.Context, path, key string) (st ctx = env.Set(ctx, "BUNDLE_ROOT", path) ctx = cmdio.NewContext(ctx, cmdio.Default()) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "run", key) + c := testcli.NewRunner(t, ctx, "bundle", "run", key) stdout, _, err := c.Run() return stdout.String(), err } @@ -123,14 +123,14 @@ func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key s args := make([]string, 0) args = append(args, "bundle", "run", key) args = append(args, params...) - c := testcli.NewRunnerWithContext(t, ctx, args...) + c := testcli.NewRunner(t, ctx, args...) 
stdout, _, err := c.Run() return stdout.String(), err } func destroyBundle(t testutil.TestingT, ctx context.Context, path string) error { ctx = env.Set(ctx, "BUNDLE_ROOT", path) - c := testcli.NewRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") + c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() return err } @@ -143,16 +143,20 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, return root } -func blackBoxRun(t testutil.TestingT, root string, args ...string) (stdout, stderr string) { +func blackBoxRun(t testutil.TestingT, ctx context.Context, root string, args ...string) (stdout, stderr string) { gitRoot, err := folders.FindDirWithLeaf(".", ".git") require.NoError(t, err) - t.Setenv("BUNDLE_ROOT", root) - // Create the command cmd := exec.Command("go", append([]string{"run", "main.go"}, args...)...) cmd.Dir = gitRoot + // Configure the environment + ctx = env.Set(ctx, "BUNDLE_ROOT", root) + for key, value := range env.All(ctx) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) + } + // Create buffers to capture output var outBuffer, errBuffer bytes.Buffer cmd.Stdout = &outBuffer diff --git a/integration/bundle/init_test.go b/integration/bundle/init_test.go index e36b09c619..bc3757fdea 100644 --- a/integration/bundle/init_test.go +++ b/integration/bundle/init_test.go @@ -20,8 +20,9 @@ import ( ) func TestBundleInitErrorOnUnknownFields(t *testing.T) { + ctx := context.Background() tmpDir := t.TempDir() - _, _, err := testcli.RequireErrorRun(t, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) + _, _, err := testcli.RequireErrorRun(t, ctx, "bundle", "init", "./testdata/init/field-does-not-exist", "--output-dir", tmpDir) assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") } @@ -38,7 +39,7 @@ func TestBundleInitErrorOnUnknownFields(t *testing.T) { // make changes that can break the MLOps Stacks DAB. In which case we should // skip this test until the MLOps Stacks DAB is updated to work again. 
func TestBundleInitOnMlopsStacks(t *testing.T) { - _, wt := acc.WorkspaceTest(t) + ctx, wt := acc.WorkspaceTest(t) w := wt.W tmpDir1 := t.TempDir() @@ -61,7 +62,7 @@ func TestBundleInitOnMlopsStacks(t *testing.T) { // Run bundle init assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) - testcli.RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) + testcli.RequireSuccessfulRun(t, ctx, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) // Assert that the README.md file was created contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) @@ -69,17 +70,17 @@ func TestBundleInitOnMlopsStacks(t *testing.T) { // Validate the stack testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) - testcli.RequireSuccessfulRun(t, "bundle", "validate") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "validate") // Deploy the stack - testcli.RequireSuccessfulRun(t, "bundle", "deploy") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "deploy") t.Cleanup(func() { // Delete the stack - testcli.RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") + testcli.RequireSuccessfulRun(t, ctx, "bundle", "destroy", "--auto-approve") }) // Get summary of the bundle deployment - stdout, _ := testcli.RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "bundle", "summary", "--output", "json") summary := &config.Root{} err = json.Unmarshal(stdout.Bytes(), summary) require.NoError(t, err) @@ -156,7 +157,7 @@ func TestBundleInitHelpers(t *testing.T) { require.NoError(t, err) // Run bundle init. - testcli.RequireSuccessfulRun(t, "bundle", "init", tmpDir, "--output-dir", tmpDir2) + testcli.RequireSuccessfulRun(t, ctx, "bundle", "init", tmpDir, "--output-dir", tmpDir2) // Assert that the helper function was correctly computed. contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "foo.txt")) diff --git a/integration/cmd/alerts/alerts_test.go b/integration/cmd/alerts/alerts_test.go index c892e81692..ca17198138 100644 --- a/integration/cmd/alerts/alerts_test.go +++ b/integration/cmd/alerts/alerts_test.go @@ -1,6 +1,7 @@ package alerts_test import ( + "context" "testing" "github.com/databricks/cli/internal/testcli" @@ -8,6 +9,7 @@ import ( ) func TestAlertsCreateErrWhenNoArguments(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "alerts-legacy", "create") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "alerts-legacy", "create") assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) } diff --git a/integration/cmd/api/api_test.go b/integration/cmd/api/api_test.go index 0e97527574..4cb9b1737c 100644 --- a/integration/cmd/api/api_test.go +++ b/integration/cmd/api/api_test.go @@ -1,6 +1,7 @@ package api_test import ( + "context" "encoding/json" "fmt" "path" @@ -16,7 +17,9 @@ import ( ) func TestApiGet(t *testing.T) { - stdout, _ := testcli.RequireSuccessfulRun(t, "api", "get", "/api/2.0/preview/scim/v2/Me") + ctx := context.Background() + + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "api", "get", "/api/2.0/preview/scim/v2/Me") // Deserialize SCIM API response. 
var out map[string]any @@ -29,6 +32,8 @@ func TestApiGet(t *testing.T) { } func TestApiPost(t *testing.T) { + ctx := context.Background() + if testutil.GetCloud(t) == testutil.GCP { t.Skip("DBFS REST API is disabled on gcp") } @@ -41,11 +46,11 @@ func TestApiPost(t *testing.T) { // Post to mkdir { - testcli.RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") + testcli.RequireSuccessfulRun(t, ctx, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/mkdirs") } // Post to delete { - testcli.RequireSuccessfulRun(t, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") + testcli.RequireSuccessfulRun(t, ctx, "api", "post", "--json=@"+requestPath, "/api/2.0/dbfs/delete") } } diff --git a/integration/cmd/auth/describe_test.go b/integration/cmd/auth/describe_test.go index cbc782d370..41288dce6f 100644 --- a/integration/cmd/auth/describe_test.go +++ b/integration/cmd/auth/describe_test.go @@ -13,7 +13,8 @@ import ( func TestAuthDescribeSuccess(t *testing.T) { t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") - stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe") + ctx := context.Background() + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe") outStr := stdout.String() w, err := databricks.NewWorkspaceClient(&databricks.Config{}) @@ -34,7 +35,8 @@ func TestAuthDescribeSuccess(t *testing.T) { func TestAuthDescribeFailure(t *testing.T) { t.Skipf("Skipping because of https://github.com/databricks/cli/issues/2010") - stdout, _ := testcli.RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") + ctx := context.Background() + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "auth", "describe", "--profile", "nonexistent") outStr := stdout.String() require.NotEmpty(t, outStr) diff --git a/integration/cmd/clusters/clusters_test.go b/integration/cmd/clusters/clusters_test.go index d62586cd45..4cc6cb6581 100644 --- a/integration/cmd/clusters/clusters_test.go +++ b/integration/cmd/clusters/clusters_test.go @@ -1,6 +1,7 @@ package clusters_test import ( + "context" "fmt" "regexp" "testing" @@ -14,7 +15,8 @@ import ( ) func TestClustersList(t *testing.T) { - stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "list") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "clusters", "list") outStr := stdout.String() assert.Contains(t, outStr, "ID") assert.Contains(t, outStr, "Name") @@ -27,15 +29,17 @@ func TestClustersList(t *testing.T) { } func TestClustersGet(t *testing.T) { + ctx := context.Background() clusterId := findValidClusterID(t) - stdout, stderr := testcli.RequireSuccessfulRun(t, "clusters", "get", clusterId) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "clusters", "get", clusterId) outStr := stdout.String() assert.Contains(t, outStr, fmt.Sprintf(`"cluster_id":"%s"`, clusterId)) assert.Equal(t, "", stderr.String()) } func TestClusterCreateErrorWhenNoArguments(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "clusters", "create") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "clusters", "create") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } diff --git a/integration/cmd/fs/cat_test.go b/integration/cmd/fs/cat_test.go index 015a937609..b0f99ae4e5 100644 --- a/integration/cmd/fs/cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -23,11 +23,13 @@ func TestFsCat(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := 
tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) require.NoError(t, err) - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "cat", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "abcd", stdout.String()) }) @@ -43,11 +45,13 @@ func TestFsCatOnADir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "dir1") require.NoError(t, err) - _, _, err = testcli.RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cat", path.Join(tmpDir, "dir1")) assert.ErrorAs(t, err, &filer.NotAFile{}) }) } @@ -62,16 +66,18 @@ func TestFsCatOnNonExistentFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := testcli.RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cat", path.Join(tmpDir, "non-existent-file")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } } func TestFsCatForDbfsInvalidScheme(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "fs", "cat", "dab:/non-existent-file") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cat", "dab:/non-existent-file") assert.ErrorContains(t, err, "invalid scheme: dab") } @@ -86,6 +92,6 @@ func TestFsCatDoesNotSupportOutputModeJson(t *testing.T) { err = f.Write(ctx, "hello.txt", strings.NewReader("abc")) require.NoError(t, err) - _, _, err = testcli.RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") assert.ErrorContains(t, err, "json output not supported") } diff --git a/integration/cmd/fs/completion_test.go b/integration/cmd/fs/completion_test.go index 8072a33cab..88ce2fcc1d 100644 --- a/integration/cmd/fs/completion_test.go +++ b/integration/cmd/fs/completion_test.go @@ -19,10 +19,11 @@ func setupCompletionFile(t *testing.T, f filer.Filer) { } func TestFsCompletion(t *testing.T) { + ctx := context.Background() f, tmpDir := setupDbfsFiler(t) setupCompletionFile(t, f) - stdout, _ := testcli.RequireSuccessfulRun(t, "__complete", "fs", "ls", tmpDir+"/") + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "__complete", "fs", "ls", tmpDir+"/") expectedOutput := fmt.Sprintf("%s/dir1/\n:2\n", tmpDir) assert.Equal(t, expectedOutput, stdout.String()) } diff --git a/integration/cmd/fs/cp_test.go b/integration/cmd/fs/cp_test.go index a44d93a679..76aef7acf4 100644 --- a/integration/cmd/fs/cp_test.go +++ b/integration/cmd/fs/cp_test.go @@ -131,11 +131,12 @@ func TestFsCpDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) - testcli.RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive") assertTargetDir(t, context.Background(), targetFiler) }) @@ -151,11 +152,12 @@ func TestFsCpFileToFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := 
context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) assertTargetFile(t, context.Background(), targetFiler, "bar.txt") }) @@ -171,11 +173,12 @@ func TestFsCpFileToDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceFile(t, context.Background(), sourceFiler) - testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) assertTargetFile(t, context.Background(), targetFiler, "foo.txt") }) @@ -194,7 +197,7 @@ func TestFsCpFileToDirForWindowsPaths(t *testing.T) { windowsPath := filepath.Join(filepath.FromSlash(sourceDir), "foo.txt") - testcli.RequireSuccessfulRun(t, "fs", "cp", windowsPath, targetDir) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", windowsPath, targetDir) assertTargetFile(t, ctx, targetFiler, "foo.txt") } @@ -207,6 +210,7 @@ func TestFsCpDirToDirFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -215,7 +219,7 @@ func TestFsCpDirToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") assertFileContent(t, context.Background(), targetFiler, "query.sql", "SELECT 1") assertFileContent(t, context.Background(), targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") @@ -232,6 +236,7 @@ func TestFsCpFileToDirFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -240,7 +245,7 @@ func TestFsCpFileToDirFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") }) } @@ -255,6 +260,7 @@ func TestFsCpFileToFileFileNotOverwritten(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), 
sourceFiler) @@ -263,7 +269,7 @@ func TestFsCpFileToFileFileNotOverwritten(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/dontoverwrite.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) assertFileContent(t, context.Background(), targetFiler, "a/b/c/dontoverwrite.txt", "this should not be overwritten") }) } @@ -278,6 +284,7 @@ func TestFsCpDirToDirWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -286,7 +293,7 @@ func TestFsCpDirToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") assertTargetDir(t, context.Background(), targetFiler) }) } @@ -301,6 +308,7 @@ func TestFsCpFileToFileWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -309,7 +317,7 @@ func TestFsCpFileToFileWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/overwritten.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/overwritten.txt", "hello, world\n") }) } @@ -324,6 +332,7 @@ func TestFsCpFileToDirWithOverwriteFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -332,7 +341,7 @@ func TestFsCpFileToDirWithOverwriteFlag(t *testing.T) { err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "hello, world\n") }) } @@ -347,9 +356,10 @@ func TestFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err 
:= testcli.RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) r := regexp.MustCompile("source path .* is a directory. Please specify the --recursive flag") assert.Regexp(t, r, err.Error()) }) @@ -357,7 +367,8 @@ func TestFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { } func TestFsCpErrorsOnInvalidScheme(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "cp", "dbfs:/a", "https:/b") assert.Equal(t, "invalid scheme: https", err.Error()) } @@ -370,6 +381,7 @@ func TestFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() sourceFiler, sourceDir := tc.setupSource(t) targetFiler, targetDir := tc.setupTarget(t) setupSourceDir(t, context.Background(), sourceFiler) @@ -378,7 +390,7 @@ func TestFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { err := targetFiler.Write(context.Background(), "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) require.NoError(t, err) - _, _, err = testcli.RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") assert.Error(t, err) }) } diff --git a/integration/cmd/fs/ls_test.go b/integration/cmd/fs/ls_test.go index e8b955ff8c..58e776d8a9 100644 --- a/integration/cmd/fs/ls_test.go +++ b/integration/cmd/fs/ls_test.go @@ -49,10 +49,11 @@ func TestFsLs(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -82,10 +83,11 @@ func TestFsLsWithAbsolutePaths(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json", "--absolute") assert.Equal(t, "", stderr.String()) var parsedStdout []map[string]any @@ -114,10 +116,12 @@ func TestFsLsOnFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + + ctx := context.Background() f, tmpDir := tc.setupFiler(t) setupLsFiles(t, f) - _, _, err := testcli.RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) assert.ErrorAs(t, err, &filer.NotADirectory{}) }) @@ -133,9 +137,10 @@ func TestFsLsOnEmptyDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "ls", tmpDir, "--output=json") assert.Equal(t, "", 
stderr.String()) var parsedStdout []map[string]any err := json.Unmarshal(stdout.Bytes(), &parsedStdout) @@ -156,9 +161,10 @@ func TestFsLsForNonexistingDir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) - _, _, err := testcli.RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error()) }) @@ -168,6 +174,7 @@ func TestFsLsForNonexistingDir(t *testing.T) { func TestFsLsWithoutScheme(t *testing.T) { t.Parallel() - _, _, err := testcli.RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/integration/cmd/fs/mkdir_test.go b/integration/cmd/fs/mkdir_test.go index de53e804ca..f332bb5263 100644 --- a/integration/cmd/fs/mkdir_test.go +++ b/integration/cmd/fs/mkdir_test.go @@ -22,10 +22,11 @@ func TestFsMkdir(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a" - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -47,10 +48,11 @@ func TestFsMkdirCreatesIntermediateDirectories(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a/b/c" - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -84,6 +86,7 @@ func TestFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // create directory "a" @@ -91,7 +94,7 @@ func TestFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { require.NoError(t, err) // assert run is successful without any errors - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) }) @@ -104,6 +107,7 @@ func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { t.Run("dbfs", func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := setupDbfsFiler(t) // create file "hello" @@ -111,7 +115,7 @@ func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = testcli.RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "hello")) // Different cloud providers or cloud configurations return different errors. 
regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$|(^|: )"The specified path already exists.".*$`) @@ -121,6 +125,7 @@ func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { t.Run("uc-volumes", func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := setupUcVolumesFiler(t) // create file "hello" @@ -128,7 +133,7 @@ func TestFsMkdirWhenFileExistsAtPath(t *testing.T) { require.NoError(t, err) // assert mkdir fails - _, _, err = testcli.RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "mkdir", path.Join(tmpDir, "hello")) assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{}) }) diff --git a/integration/cmd/fs/rm_test.go b/integration/cmd/fs/rm_test.go index a2832aba1d..018c7920e4 100644 --- a/integration/cmd/fs/rm_test.go +++ b/integration/cmd/fs/rm_test.go @@ -23,6 +23,7 @@ func TestFsRmFile(t *testing.T) { t.Parallel() // Create a file + ctx := context.Background() f, tmpDir := tc.setupFiler(t) err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) require.NoError(t, err) @@ -32,7 +33,7 @@ func TestFsRmFile(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "hello.txt")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -53,6 +54,7 @@ func TestFsRmEmptyDir(t *testing.T) { t.Parallel() // Create a directory + ctx := context.Background() f, tmpDir := tc.setupFiler(t) err := f.Mkdir(context.Background(), "a") require.NoError(t, err) @@ -62,7 +64,7 @@ func TestFsRmEmptyDir(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := testcli.RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a")) assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) @@ -83,6 +85,7 @@ func TestFsRmNonEmptyDirectory(t *testing.T) { t.Parallel() // Create a directory + ctx := context.Background() f, tmpDir := tc.setupFiler(t) err := f.Mkdir(context.Background(), "a") require.NoError(t, err) @@ -96,7 +99,7 @@ func TestFsRmNonEmptyDirectory(t *testing.T) { assert.NoError(t, err) // Run rm command - _, _, err = testcli.RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a")) + _, _, err = testcli.RequireErrorRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a")) assert.ErrorIs(t, err, fs.ErrInvalid) assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) }) @@ -112,10 +115,11 @@ func TestFsRmForNonExistentFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() _, tmpDir := tc.setupFiler(t) // Expect error if file does not exist - _, _, err := testcli.RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) + _, _, err := testcli.RequireErrorRun(t, ctx, "fs", "rm", path.Join(tmpDir, "does-not-exist")) assert.ErrorIs(t, err, fs.ErrNotExist) }) } @@ -130,6 +134,7 @@ func TestFsRmDirRecursively(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() f, tmpDir := tc.setupFiler(t) // Create a directory @@ -145,7 +150,7 @@ func TestFsRmDirRecursively(t *testing.T) { assert.NoError(t, err) // Run rm command - stdout, stderr := 
testcli.RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") assert.Equal(t, "", stderr.String()) assert.Equal(t, "", stdout.String()) diff --git a/integration/cmd/jobs/jobs_test.go b/integration/cmd/jobs/jobs_test.go index 60f3f8d22a..b6bcfc5b3c 100644 --- a/integration/cmd/jobs/jobs_test.go +++ b/integration/cmd/jobs/jobs_test.go @@ -1,6 +1,7 @@ package jobs_test import ( + "context" "encoding/json" "fmt" "testing" @@ -13,10 +14,11 @@ import ( func TestCreateJob(t *testing.T) { testutil.Require(t, testutil.Azure) - stdout, stderr := testcli.RequireSuccessfulRun(t, "jobs", "create", "--json", "@testdata/create_job_without_workers.json", "--log-level=debug") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "jobs", "create", "--json", "@testdata/create_job_without_workers.json", "--log-level=debug") assert.Empty(t, stderr.String()) var output map[string]int err := json.Unmarshal(stdout.Bytes(), &output) require.NoError(t, err) - testcli.RequireSuccessfulRun(t, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") + testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug") } diff --git a/integration/cmd/repos/repos_test.go b/integration/cmd/repos/repos_test.go index e3297e8ff5..b5ad120d65 100644 --- a/integration/cmd/repos/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -53,7 +53,7 @@ func TestReposCreateWithProvider(t *testing.T) { w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", repoUrl, "gitHub", "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "create", repoUrl, "gitHub", "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. @@ -67,7 +67,7 @@ func TestReposCreateWithoutProvider(t *testing.T) { w := wt.W repoPath := synthesizeTemporaryRepoPath(t, w, ctx) - _, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", repoUrl, "--path", repoPath) + _, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "create", repoUrl, "--path", repoPath) assert.Equal(t, "", stderr.String()) // Confirm the repo was created. 
@@ -83,22 +83,22 @@ func TestReposGet(t *testing.T) { repoId, repoPath := createTemporaryRepo(t, w, ctx) // Get by ID - byIdOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json") + byIdOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "get", strconv.FormatInt(repoId, 10), "--output=json") assert.Equal(t, "", stderr.String()) // Get by path - byPathOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "get", repoPath, "--output=json") + byPathOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "get", repoPath, "--output=json") assert.Equal(t, "", stderr.String()) // Output should be the same assert.Equal(t, byIdOutput.String(), byPathOutput.String()) // Get by path fails - _, stderr, err := testcli.RequireErrorRun(t, "repos", "get", repoPath+"-doesntexist", "--output=json") + _, stderr, err := testcli.RequireErrorRun(t, ctx, "repos", "get", repoPath+"-doesntexist", "--output=json") assert.ErrorContains(t, err, "failed to look up repo") // Get by path resolves to something other than a repo - _, stderr, err = testcli.RequireErrorRun(t, "repos", "get", "/Repos", "--output=json") + _, stderr, err = testcli.RequireErrorRun(t, ctx, "repos", "get", "/Repos", "--output=json") assert.ErrorContains(t, err, "is not a repo") } @@ -109,11 +109,11 @@ func TestReposUpdate(t *testing.T) { repoId, repoPath := createTemporaryRepo(t, w, ctx) // Update by ID - byIdOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide") + byIdOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "update", strconv.FormatInt(repoId, 10), "--branch", "ide") assert.Equal(t, "", stderr.String()) // Update by path - byPathOutput, stderr := testcli.RequireSuccessfulRun(t, "repos", "update", repoPath, "--branch", "ide") + byPathOutput, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "update", repoPath, "--branch", "ide") assert.Equal(t, "", stderr.String()) // Output should be the same @@ -127,7 +127,7 @@ func TestReposDeleteByID(t *testing.T) { repoId, _ := createTemporaryRepo(t, w, ctx) // Delete by ID - stdout, stderr := testcli.RequireSuccessfulRun(t, "repos", "delete", strconv.FormatInt(repoId, 10)) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "delete", strconv.FormatInt(repoId, 10)) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) @@ -143,7 +143,7 @@ func TestReposDeleteByPath(t *testing.T) { repoId, repoPath := createTemporaryRepo(t, w, ctx) // Delete by path - stdout, stderr := testcli.RequireSuccessfulRun(t, "repos", "delete", repoPath) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "repos", "delete", repoPath) assert.Equal(t, "", stdout.String()) assert.Equal(t, "", stderr.String()) diff --git a/integration/cmd/secrets/secrets_test.go b/integration/cmd/secrets/secrets_test.go index 83312f5028..4dd133c251 100644 --- a/integration/cmd/secrets/secrets_test.go +++ b/integration/cmd/secrets/secrets_test.go @@ -15,7 +15,8 @@ import ( ) func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "secrets", "create-scope") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "secrets", "create-scope") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } @@ -69,7 +70,7 @@ func TestSecretsPutSecretStringValue(tt *testing.T) { key := "test-key" value := "test-value\nwith-newlines\n" - stdout, stderr := testcli.RequireSuccessfulRun(t, 
"secrets", "put-secret", scope, key, "--string-value", value) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "secrets", "put-secret", scope, key, "--string-value", value) assert.Empty(t, stdout) assert.Empty(t, stderr) @@ -83,7 +84,7 @@ func TestSecretsPutSecretBytesValue(tt *testing.T) { key := "test-key" value := []byte{0x00, 0x01, 0x02, 0x03} - stdout, stderr := testcli.RequireSuccessfulRun(t, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "secrets", "put-secret", scope, key, "--bytes-value", string(value)) assert.Empty(t, stdout) assert.Empty(t, stderr) diff --git a/integration/cmd/storage_credentials/storage_credentials_test.go b/integration/cmd/storage_credentials/storage_credentials_test.go index 834086794b..73727a875e 100644 --- a/integration/cmd/storage_credentials/storage_credentials_test.go +++ b/integration/cmd/storage_credentials/storage_credentials_test.go @@ -10,12 +10,12 @@ import ( ) func TestStorageCredentialsListRendersResponse(t *testing.T) { - _, _ = acc.WorkspaceTest(t) + ctx, _ := acc.WorkspaceTest(t) // Check if metastore is assigned for the workspace, otherwise test will fail t.Log(testutil.GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) - stdout, stderr := testcli.RequireSuccessfulRun(t, "storage-credentials", "list") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "storage-credentials", "list") assert.NotEmpty(t, stdout) assert.Empty(t, stderr) } diff --git a/integration/cmd/sync/sync_test.go b/integration/cmd/sync/sync_test.go index 4949700161..077a06079a 100644 --- a/integration/cmd/sync/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -72,8 +72,8 @@ type syncTest struct { remoteRoot string } -func setupSyncTest(t *testing.T, args ...string) *syncTest { - _, wt := acc.WorkspaceTest(t) +func setupSyncTest(t *testing.T, args ...string) (context.Context, *syncTest) { + ctx, wt := acc.WorkspaceTest(t) w := wt.W localRoot := t.TempDir() @@ -90,10 +90,10 @@ func setupSyncTest(t *testing.T, args ...string) *syncTest { "json", }, args...) - c := testcli.NewRunner(t, args...) + c := testcli.NewRunner(t, ctx, args...) 
c.RunBackground() - return &syncTest{ + return ctx, &syncTest{ t: t, c: c, w: w, @@ -231,8 +231,7 @@ func (a *syncTest) snapshotContains(files []string) { } func TestSyncFullFileSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--full", "--watch") + ctx, assertSync := setupSyncTest(t, "--full", "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -263,8 +262,7 @@ func TestSyncFullFileSync(t *testing.T) { } func TestSyncIncrementalFileSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -297,8 +295,7 @@ func TestSyncIncrementalFileSync(t *testing.T) { } func TestSyncNestedFolderSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -325,8 +322,7 @@ func TestSyncNestedFolderSync(t *testing.T) { } func TestSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -358,8 +354,7 @@ func TestSyncNestedFolderDoesntFailOnNonEmptyDirectory(t *testing.T) { } func TestSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // .gitignore is created by the sync process to enforce .databricks is not synced assertSync.waitForCompletionMarker() @@ -394,8 +389,7 @@ func TestSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { // In the above scenario sync should delete the empty folder and add foo to the remote // file system func TestSyncIncrementalFileOverwritesFolder(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // create foo/bar.txt localFilePath := filepath.Join(assertSync.localRoot, "foo/bar.txt") @@ -424,8 +418,7 @@ func TestSyncIncrementalFileOverwritesFolder(t *testing.T) { } func TestSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // create python notebook localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -455,8 +448,7 @@ func TestSyncIncrementalSyncPythonNotebookToFile(t *testing.T) { } func TestSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // create vanilla python file localFilePath := filepath.Join(assertSync.localRoot, "foo.py") @@ -479,8 +471,7 @@ func TestSyncIncrementalSyncFileToPythonNotebook(t *testing.T) { } func TestSyncIncrementalSyncPythonNotebookDelete(t *testing.T) { - ctx := context.Background() - assertSync := setupSyncTest(t, "--watch") + ctx, assertSync := setupSyncTest(t, "--watch") // create python notebook localFilePath := filepath.Join(assertSync.localRoot, "foo.py") diff --git 
a/integration/cmd/unknown_command_test.go b/integration/cmd/unknown_command_test.go index e18493940c..fd87a77ff8 100644 --- a/integration/cmd/unknown_command_test.go +++ b/integration/cmd/unknown_command_test.go @@ -1,6 +1,7 @@ package cmd_test import ( + "context" "testing" "github.com/databricks/cli/internal/testcli" @@ -8,7 +9,8 @@ import ( ) func TestUnknownCommand(t *testing.T) { - stdout, stderr, err := testcli.RequireErrorRun(t, "unknown-command") + ctx := context.Background() + stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "unknown-command") assert.Error(t, err, "unknown command", `unknown command "unknown-command" for "databricks"`) assert.Equal(t, "", stdout.String()) diff --git a/integration/cmd/version/version_test.go b/integration/cmd/version/version_test.go index ab00e7242f..b12974d693 100644 --- a/integration/cmd/version/version_test.go +++ b/integration/cmd/version/version_test.go @@ -1,6 +1,7 @@ package version_test import ( + "context" "encoding/json" "fmt" "testing" @@ -13,25 +14,29 @@ import ( var expectedVersion = fmt.Sprintf("Databricks CLI v%s\n", build.GetInfo().Version) func TestVersionFlagShort(t *testing.T) { - stdout, stderr := testcli.RequireSuccessfulRun(t, "-v") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "-v") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionFlagLong(t *testing.T) { - stdout, stderr := testcli.RequireSuccessfulRun(t, "--version") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "--version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommand(t *testing.T) { - stdout, stderr := testcli.RequireSuccessfulRun(t, "version") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "version") assert.Equal(t, expectedVersion, stdout.String()) assert.Equal(t, "", stderr.String()) } func TestVersionCommandWithJSONOutput(t *testing.T) { - stdout, stderr := testcli.RequireSuccessfulRun(t, "version", "--output", "json") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "version", "--output", "json") assert.NotEmpty(t, stdout.String()) assert.Equal(t, "", stderr.String()) diff --git a/integration/cmd/workspace/workspace_test.go b/integration/cmd/workspace/workspace_test.go index 57b6a36242..9338f60aa2 100644 --- a/integration/cmd/workspace/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -20,7 +20,8 @@ import ( ) func TestWorkspaceList(t *testing.T) { - stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "list", "/") + ctx := context.Background() + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "list", "/") outStr := stdout.String() assert.Contains(t, outStr, "ID") assert.Contains(t, outStr, "Type") @@ -30,12 +31,14 @@ func TestWorkspaceList(t *testing.T) { } func TestWorkpaceListErrorWhenNoArguments(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "workspace", "list") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "list") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { - _, _, err := testcli.RequireErrorRun(t, "workspace", "get-status") + ctx := context.Background() + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "get-status") assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") 
} @@ -53,7 +56,7 @@ func TestWorkpaceExportPrintsContents(t *testing.T) { require.NoError(t, err) // Run export - stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(tmpdir, "file-a")) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(tmpdir, "file-a")) assert.Equal(t, contents, stdout.String()) assert.Equal(t, "", stderr.String()) } @@ -122,7 +125,7 @@ func TestExportDir(t *testing.T) { }, "\n") // Run Export - stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir) assert.Equal(t, expectedLogs, stdout.String()) assert.Equal(t, "", stderr.String()) @@ -150,7 +153,7 @@ func TestExportDirDoesNotOverwrite(t *testing.T) { require.NoError(t, err) // Run Export - testcli.RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir) // Assert file is not overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "local content") @@ -171,7 +174,7 @@ func TestExportDirWithOverwriteFlag(t *testing.T) { require.NoError(t, err) // Run Export - testcli.RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "export-dir", sourceDir, targetDir, "--overwrite") // Assert file has been overwritten assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "content from workspace") @@ -179,7 +182,7 @@ func TestExportDirWithOverwriteFlag(t *testing.T) { func TestImportDir(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - stdout, stderr := testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") + stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") expectedLogs := strings.Join([]string{ fmt.Sprintf("Importing files from %s", "./testdata/import_dir"), @@ -220,7 +223,7 @@ func TestImportDirDoesNotOverwrite(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir) + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir) // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -248,7 +251,7 @@ func TestImportDirWithOverwriteFlag(t *testing.T) { assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "old file") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"old notebook\")") - testcli.RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--overwrite") // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "a/b/c/file-b", "file-in-dir") @@ -270,7 +273,7 @@ func TestExport(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := testcli.RequireSuccessfulRun(t, "workspace", "export", 
path.Join(sourceDir, "file-a")) + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "file-a")) b, err := io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "abc", string(b)) @@ -278,13 +281,13 @@ func TestExport(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "# Databricks notebook source\n", string(b)) // Export python notebook as jupyter - stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER") b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Contains(t, string(b), `"cells":`, "jupyter notebooks contain the cells field") @@ -300,7 +303,7 @@ func TestExportWithFileFlag(t *testing.T) { // Export vanilla file err = f.Write(ctx, "file-a", strings.NewReader("abc")) require.NoError(t, err) - stdout, _ := testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) + stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "file-a"), "--file", filepath.Join(localTmpDir, "file.txt")) b, err := io.ReadAll(&stdout) require.NoError(t, err) // Expect nothing to be printed to stdout @@ -310,14 +313,14 @@ func TestExportWithFileFlag(t *testing.T) { // Export python notebook err = f.Write(ctx, "pyNotebook.py", strings.NewReader("# Databricks notebook source")) require.NoError(t, err) - stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--file", filepath.Join(localTmpDir, "pyNb.py")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) assertLocalFileContents(t, filepath.Join(localTmpDir, "pyNb.py"), "# Databricks notebook source\n") // Export python notebook as jupyter - stdout, _ = testcli.RequireSuccessfulRun(t, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) + stdout, _ = testcli.RequireSuccessfulRun(t, ctx, "workspace", "export", path.Join(sourceDir, "pyNotebook"), "--format", "JUPYTER", "--file", filepath.Join(localTmpDir, "jupyterNb.ipynb")) b, err = io.ReadAll(&stdout) require.NoError(t, err) assert.Equal(t, "", string(b)) @@ -329,13 +332,13 @@ func TestImportFileUsingContentFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `print(1)`. 
Uploaded as a notebook by default - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyScript"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyScript"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyScript", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyScript", workspace.ObjectTypeNotebook) // Import with content = `# Databricks notebook source\nprint(1)`. Uploaded as a notebook with the content just being print(1) - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNb"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyNb"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNb", "print(1)") @@ -346,19 +349,19 @@ func TestImportFileUsingContentFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Content = `# Databricks notebook source\nprint(1)`. Upload as file if path has no extension. - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Content = `# Databricks notebook source\nprint(1)`. Upload as notebook if path has py extension - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("`# Databricks notebook source\nprint(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Content = `print(1)`. 
Upload as file if content is not notebook (even if path has .py extension) - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--content", base64.StdEncoding.EncodeToString([]byte("print(1)")), "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "print(1)") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) @@ -366,15 +369,15 @@ func TestImportFileUsingContentFormatAuto(t *testing.T) { func TestImportFileFormatSource(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "pyNotebook"), "--file", "./testdata/import_dir/pyNotebook.py", "--language=PYTHON") assertFilerFileContents(t, ctx, workspaceFiler, "pyNotebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "pyNotebook", workspace.ObjectTypeNotebook) - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala", "--language=SCALA") assertFilerFileContents(t, ctx, workspaceFiler, "scalaNotebook", "// Databricks notebook source\nprintln(\"scala\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "scalaNotebook", workspace.ObjectTypeNotebook) - _, _, err := testcli.RequireErrorRun(t, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") + _, _, err := testcli.RequireErrorRun(t, ctx, "workspace", "import", path.Join(targetDir, "scalaNotebook"), "--file", "./testdata/import_dir/scalaNotebook.scala") assert.ErrorContains(t, err, "The zip file may not be valid or may be an unsupported version. Hint: Objects imported using format=SOURCE are expected to be zip encoded databricks source notebook(s) by default. 
Please specify a language using the --language flag if you are trying to import a single uncompressed notebook") } @@ -382,18 +385,18 @@ func TestImportFileFormatAuto(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) // Upload as file if path has no extension - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-file"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "# Databricks notebook source") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-file", "print(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-file", workspace.ObjectTypeFile) // Upload as notebook if path has extension - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "py-nb-as-notebook.py"), "--file", "./testdata/import_dir/pyNotebook.py", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "py-nb-as-notebook", "# Databricks notebook source\nprint(\"python\")") assertWorkspaceFileType(t, ctx, workspaceFiler, "py-nb-as-notebook", workspace.ObjectTypeNotebook) // Upload as file if content is not notebook (even if path has .py extension) - testcli.RequireSuccessfulRun(t, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") + testcli.RequireSuccessfulRun(t, ctx, "workspace", "import", path.Join(targetDir, "not-a-notebook.py"), "--file", "./testdata/import_dir/file-a", "--format=AUTO") assertFilerFileContents(t, ctx, workspaceFiler, "not-a-notebook.py", "hello, world") assertWorkspaceFileType(t, ctx, workspaceFiler, "not-a-notebook.py", workspace.ObjectTypeFile) } diff --git a/integration/libs/git/git_fetch_test.go b/integration/libs/git/git_fetch_test.go index 0bb684a29b..e53b5469a5 100644 --- a/integration/libs/git/git_fetch_test.go +++ b/integration/libs/git/git_fetch_test.go @@ -5,11 +5,10 @@ import ( "os/exec" "path" "path/filepath" + "strings" "testing" "github.com/databricks/cli/internal/acc" - "github.com/databricks/cli/internal/testcli" - "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" @@ -39,19 +38,18 @@ func assertSparseGitInfo(t *testing.T, expectedRoot string, info git.RepositoryI assert.Equal(t, expectedRoot, info.WorktreeRoot) } +func ensureWorkspacePrefix(root string) string { + // The fixture helper doesn't include /Workspace, so include it here. 
+ if !strings.HasPrefix(root, "/Workspace/") { + return path.Join("/Workspace", root) + } + return root +} + func TestFetchRepositoryInfoAPI_FromRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - me, err := wt.W.CurrentUser.Me(ctx) - require.NoError(t, err) + targetPath := ensureWorkspacePrefix(acc.TemporaryRepo(wt, examplesRepoUrl)) - targetPath := testutil.RandomName(path.Join("/Workspace/Users", me.UserName, "/testing-clone-bundle-examples-")) - stdout, stderr := testcli.RequireSuccessfulRun(t, "repos", "create", examplesRepoUrl, examplesRepoProvider, "--path", targetPath) - t.Cleanup(func() { - testcli.RequireSuccessfulRun(t, "repos", "delete", targetPath) - }) - - assert.Empty(t, stderr.String()) - assert.NotEmpty(t, stdout.String()) ctx = dbr.MockRuntime(ctx, true) for _, inputPath := range []string{ @@ -68,16 +66,12 @@ func TestFetchRepositoryInfoAPI_FromRepo(t *testing.T) { func TestFetchRepositoryInfoAPI_FromNonRepo(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) - me, err := wt.W.CurrentUser.Me(ctx) - require.NoError(t, err) + rootPath := ensureWorkspacePrefix(acc.TemporaryWorkspaceDir(wt, "testing-nonrepo-")) - rootPath := testutil.RandomName(path.Join("/Workspace/Users", me.UserName, "testing-nonrepo-")) - _, stderr := testcli.RequireSuccessfulRun(t, "workspace", "mkdirs", path.Join(rootPath, "a/b/c")) - t.Cleanup(func() { - testcli.RequireSuccessfulRun(t, "workspace", "delete", "--recursive", rootPath) - }) + // Create directory inside this root path (this is cleaned up as part of the root path). + err := wt.W.Workspace.MkdirsByPath(ctx, path.Join(rootPath, "a/b/c")) + require.NoError(t, err) - assert.Empty(t, stderr.String()) ctx = dbr.MockRuntime(ctx, true) tests := []struct { diff --git a/internal/testcli/runner.go b/internal/testcli/runner.go index 55e3faca12..95073b57c2 100644 --- a/internal/testcli/runner.go +++ b/internal/testcli/runner.go @@ -281,16 +281,7 @@ func (r *Runner) RunAndParseJSON(v any) { require.NoError(r, err) } -func NewRunner(t testutil.TestingT, args ...string) *Runner { - return &Runner{ - TestingT: t, - - ctx: context.Background(), - args: args, - } -} - -func NewRunnerWithContext(t testutil.TestingT, ctx context.Context, args ...string) *Runner { +func NewRunner(t testutil.TestingT, ctx context.Context, args ...string) *Runner { return &Runner{ TestingT: t, @@ -299,16 +290,16 @@ func NewRunnerWithContext(t testutil.TestingT, ctx context.Context, args ...stri } } -func RequireSuccessfulRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer) { +func RequireSuccessfulRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer) { t.Logf("run args: [%s]", strings.Join(args, ", ")) - r := NewRunner(t, args...) + r := NewRunner(t, ctx, args...) stdout, stderr, err := r.Run() require.NoError(t, err) return stdout, stderr } -func RequireErrorRun(t testutil.TestingT, args ...string) (bytes.Buffer, bytes.Buffer, error) { - r := NewRunner(t, args...) +func RequireErrorRun(t testutil.TestingT, ctx context.Context, args ...string) (bytes.Buffer, bytes.Buffer, error) { + r := NewRunner(t, ctx, args...) 
stdout, stderr, err := r.Run() require.Error(t, err) return stdout, stderr, err diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index f7987c2335..c0e9e868a8 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -31,9 +31,9 @@ type cmdIO struct { err io.Writer } -func NewIO(outputFormat flags.Output, in io.Reader, out, err io.Writer, headerTemplate, template string) *cmdIO { +func NewIO(ctx context.Context, outputFormat flags.Output, in io.Reader, out, err io.Writer, headerTemplate, template string) *cmdIO { // The check below is similar to color.NoColor but uses the specified err writer. - dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" + dumb := env.Get(ctx, "NO_COLOR") != "" || env.Get(ctx, "TERM") == "dumb" if f, ok := err.(*os.File); ok && !dumb { dumb = !isatty.IsTerminal(f.Fd()) && !isatty.IsCygwinTerminal(f.Fd()) } diff --git a/libs/cmdio/render_test.go b/libs/cmdio/render_test.go index 6bde446c4e..f26190a23a 100644 --- a/libs/cmdio/render_test.go +++ b/libs/cmdio/render_test.go @@ -171,8 +171,9 @@ func TestRender(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { output := &bytes.Buffer{} - cmdIO := NewIO(c.outputFormat, nil, output, output, c.headerTemplate, c.template) - ctx := InContext(context.Background(), cmdIO) + ctx := context.Background() + cmdIO := NewIO(ctx, c.outputFormat, nil, output, output, c.headerTemplate, c.template) + ctx = InContext(ctx, cmdIO) var err error if vv, ok := c.v.(listing.Iterator[*provisioning.Workspace]); ok { err = RenderIterator(ctx, vv) diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index d17e86d4ae..cde09aa443 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -115,7 +115,7 @@ func TestFirstCompatibleCluster(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.NoError(t, err) require.Equal(t, "bcd-id", clusterID) @@ -162,7 +162,7 @@ func TestNoCompatibleClusters(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(ctx, flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) _, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.Equal(t, ErrNoCompatibleClusters, err) } diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index a2e8269d28..d98f40b240 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -162,7 +162,7 @@ func TestWorkspaceHost(t *testing.T) { func TestWorkspaceHostNotConfigured(t *testing.T) { ctx := context.Background() - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") + cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") ctx = cmdio.InContext(ctx, cmd) w := &databricks.WorkspaceClient{ From e5b836a6ac182692386ea29c955168061cd73d57 Mon Sep 17 
00:00:00 2001 From: Denis Bilenko Date: Mon, 16 Dec 2024 13:41:32 +0100 Subject: [PATCH 49/63] Refactor initTestTemplate/deployBundle/destroyBundle to not return errors (#2017) ## Changes These test helpers were updated to handle the error internally and not return it. Since they have a testing.T object, they can do so directly. On the caller side, these functions were always followed by require.NoError(t, err), which has now been cleaned up. This approach helps reduce the setup/teardown boilerplate in the test cases. ## Tests Existing tests. --- integration/bundle/artifacts_test.go | 6 +-- integration/bundle/basic_test.go | 15 +++---- integration/bundle/bind_resource_test.go | 32 +++++--------- integration/bundle/clusters_test.go | 9 ++-- integration/bundle/dashboards_test.go | 14 +++---- integration/bundle/deploy_test.go | 42 +++++++------------ .../deploy_then_remove_resources_test.go | 12 ++---- integration/bundle/deploy_to_shared_test.go | 9 ++-- integration/bundle/deployment_state_test.go | 13 +++--- integration/bundle/destroy_test.go | 11 ++--- integration/bundle/empty_bundle_test.go | 6 +-- integration/bundle/environments_test.go | 9 ++-- integration/bundle/generate_job_test.go | 11 ++--- integration/bundle/generate_pipeline_test.go | 11 ++--- integration/bundle/helpers_test.go | 41 +++++++++--------- integration/bundle/job_metadata_test.go | 9 ++-- .../bundle/local_state_staleness_test.go | 15 +++---- integration/bundle/python_wheel_test.go | 9 ++-- integration/bundle/spark_jar_test.go | 9 ++-- 19 files changed, 105 insertions(+), 178 deletions(-) diff --git a/integration/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go index b070be92ca..3a5da721cc 100644 --- a/integration/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -247,12 +247,11 @@ func TestUploadArtifactFileToVolumeThatDoesNotExist(t *testing.T) { require.NoError(t, err) }) - bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ "unique_id": uuid.New().String(), "schema_name": schemaName, "volume_name": "doesnotexist", }) - require.NoError(t, err) ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") @@ -284,12 +283,11 @@ func TestUploadArtifactToVolumeNotYetDeployed(t *testing.T) { require.NoError(t, err) }) - bundleRoot, err := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "artifact_path_with_volume", map[string]any{ "unique_id": uuid.New().String(), "schema_name": schemaName, "volume_name": "my_volume", }) - require.NoError(t, err) ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) stdout, stderr, err := testcli.RequireErrorRun(t, ctx, "bundle", "deploy") diff --git a/integration/bundle/basic_test.go b/integration/bundle/basic_test.go index 820c105e97..e458706e03 100644 --- a/integration/bundle/basic_test.go +++ b/integration/bundle/basic_test.go @@ -16,27 +16,22 @@ func TestBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, 
root) }) // deploy empty bundle - err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) - require.NoError(t, err) + deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) // Remove .databricks directory to simulate a fresh deployment - err = os.RemoveAll(filepath.Join(root, ".databricks")) - require.NoError(t, err) + require.NoError(t, os.RemoveAll(filepath.Join(root, ".databricks"))) // deploy empty bundle again - err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) - require.NoError(t, err) + deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) } diff --git a/integration/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go index 823d5c1457..b512357704 100644 --- a/integration/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -23,30 +23,27 @@ func TestBindJobToExistingJob(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) - require.NoError(t, err) }) ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") - _, _, err = c.Run() + _, _, err := c.Run() require.NoError(t, err) // Remove .databricks directory to simulate a fresh deployment err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -67,8 +64,7 @@ func TestBindJobToExistingJob(t *testing.T) { err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) require.NoError(t, err) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // Check that job is unbound and exists after bundle is destroyed job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ @@ -85,18 +81,16 @@ func TestAbortBind(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": "13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { gt.destroyJob(ctx, jobId) - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // Bind should fail because prompting is not possible. @@ -105,12 +99,11 @@ func TestAbortBind(t *testing.T) { c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) // Expect error suggesting to use --auto-approve - _, _, err = c.Run() + _, _, err := c.Run() assert.ErrorContains(t, err, "failed to bind the resource") assert.ErrorContains(t, err, "This bind operation requires user confirmation, but the current console does not support prompting. 
Please specify --auto-approve if you would like to skip prompts and proceed") - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -130,10 +123,9 @@ func TestGenerateAndBind(t *testing.T) { gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) w, err := databricks.NewWorkspaceClient() require.NoError(t, err) @@ -169,11 +161,9 @@ func TestGenerateAndBind(t *testing.T) { _, _, err = c.Run() require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // Check that job is bound and does not extsts after bundle is destroyed _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ diff --git a/integration/bundle/clusters_test.go b/integration/bundle/clusters_test.go index e52f5a93cc..7992ecd7da 100644 --- a/integration/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -20,16 +20,14 @@ func TestDeployBundleWithCluster(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "clusters", map[string]any{ + root := initTestTemplate(t, ctx, "clusters", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) if err != nil { @@ -39,8 +37,7 @@ func TestDeployBundleWithCluster(t *testing.T) { } }) - err = deployBundle(t, ctx, root) - require.NoError(t, err) + deployBundle(t, ctx, root) // Cluster should exists after bundle deployment cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId)) diff --git a/integration/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go index 8eb43c5763..985ef8611f 100644 --- a/integration/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -18,19 +18,16 @@ func TestDashboards(t *testing.T) { warehouseID := testutil.GetEnvOrSkipTest(t, "TEST_DEFAULT_WAREHOUSE_ID") uniqueID := uuid.New().String() - root, err := initTestTemplate(t, ctx, "dashboards", map[string]any{ + root := initTestTemplate(t, ctx, "dashboards", map[string]any{ "unique_id": uniqueID, "warehouse_id": warehouseID, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) - err = deployBundle(t, ctx, root) - require.NoError(t, err) + deployBundle(t, ctx, root) // Load bundle configuration by running the validate command. b := unmarshalConfig(t, mustValidateBundle(t, ctx, root)) @@ -55,12 +52,11 @@ func TestDashboards(t *testing.T) { require.NoError(t, err) // Try to redeploy the bundle and confirm that the out of band modification is detected. 
- stdout, _, err := deployBundleWithArgs(t, ctx, root) + stdout, _, err := deployBundleWithArgsErr(t, ctx, root) require.Error(t, err) assert.Contains(t, stdout, `Error: dashboard "file_reference" has been modified remotely`+"\n") // Redeploy the bundle with the --force flag and confirm that the out of band modification is ignored. - _, stderr, err := deployBundleWithArgs(t, ctx, root, "--force") - require.NoError(t, err) + _, stderr := deployBundleWithArgs(t, ctx, root, "--force") assert.Contains(t, stderr, `Deployment complete!`+"\n") } diff --git a/integration/bundle/deploy_test.go b/integration/bundle/deploy_test.go index 64c59b3142..2c31101909 100644 --- a/integration/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -25,17 +25,14 @@ import ( ) func setupUcSchemaBundle(t *testing.T, ctx context.Context, w *databricks.WorkspaceClient, uniqueId string) string { - bundleRoot, err := initTestTemplate(t, ctx, "uc_schema", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "uc_schema", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // Assert the schema is created @@ -97,8 +94,7 @@ func TestBundleDeployUcSchema(t *testing.T) { require.NoError(t, err) // Redeploy the bundle - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Assert the schema is deleted _, err = w.Schemas.GetByFullName(ctx, strings.Join([]string{catalogName, schemaName}, ".")) @@ -135,16 +131,14 @@ func TestBundlePipelineDeleteWithoutAutoApprove(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy pipeline - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId @@ -182,17 +176,14 @@ func TestBundlePipelineRecreateWithoutAutoApprove(t *testing.T) { w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "recreate_pipeline", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // Assert the pipeline is created @@ -221,16 +212,14 @@ func TestDeployBasicBundleLogs(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) currentUser, err := wt.W.CurrentUser.Me(ctx) @@ -251,17 +240,14 @@ func 
TestDeployUcVolume(t *testing.T) { w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "volume", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "volume", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // Assert the volume is created successfully diff --git a/integration/bundle/deploy_then_remove_resources_test.go b/integration/bundle/deploy_then_remove_resources_test.go index 911826852b..b792d3623f 100644 --- a/integration/bundle/deploy_then_remove_resources_test.go +++ b/integration/bundle/deploy_then_remove_resources_test.go @@ -18,16 +18,14 @@ func TestBundleDeployThenRemoveResources(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy pipeline - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId @@ -46,8 +44,7 @@ func TestBundleDeployThenRemoveResources(t *testing.T) { require.NoError(t, err) // deploy again - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // assert pipeline is deleted _, err = w.Pipelines.GetByName(ctx, pipelineName) @@ -58,7 +55,6 @@ func TestBundleDeployThenRemoveResources(t *testing.T) { assert.ErrorContains(t, err, "does not exist") t.Cleanup(func() { - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) } diff --git a/integration/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go index f9ca44d78c..c97933217c 100644 --- a/integration/bundle/deploy_to_shared_test.go +++ b/integration/bundle/deploy_to_shared_test.go @@ -19,19 +19,16 @@ func TestDeployBasicToSharedWorkspacePath(t *testing.T) { currentUser, err := wt.W.CurrentUser.Me(ctx) require.NoError(t, err) - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, "root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName), }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(wt, ctx, bundleRoot) - require.NoError(wt, err) + destroyBundle(wt, ctx, bundleRoot) }) - err = deployBundle(wt, ctx, bundleRoot) - require.NoError(wt, err) + deployBundle(wt, ctx, bundleRoot) } diff --git a/integration/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go index 43a19018ee..a7e01643d9 100644 --- a/integration/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -20,17 +20,16 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "spark_version": 
"13.3.x-scala2.12", "node_type_id": nodeTypeId, }) - require.NoError(t, err) ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot) // Add some test file to the bundle - err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644) + err := os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0o644) require.NoError(t, err) err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0o644) @@ -40,11 +39,10 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0o644) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - require.NoError(t, destroyBundle(t, ctx, bundleRoot)) + destroyBundle(t, ctx, bundleRoot) }) remoteRoot := getBundleRemoteRootPath(w, t, uniqueId) @@ -80,8 +78,7 @@ func TestFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0o644) require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Check that removed file is not in workspace anymore _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) diff --git a/integration/bundle/destroy_test.go b/integration/bundle/destroy_test.go index dc18ab9a53..59d1816e01 100644 --- a/integration/bundle/destroy_test.go +++ b/integration/bundle/destroy_test.go @@ -20,22 +20,20 @@ func TestBundleDestroy(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots") // Assert the snapshot file does not exist - _, err = os.ReadDir(snapshotsDir) + _, err := os.ReadDir(snapshotsDir) assert.ErrorIs(t, err, os.ErrNotExist) // deploy resources - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Assert the snapshot file exists entries, err := os.ReadDir(snapshotsDir) @@ -60,8 +58,7 @@ func TestBundleDestroy(t *testing.T) { assert.Equal(t, job.Settings.Name, jobName) // destroy bundle - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) // assert pipeline is deleted _, err = w.Pipelines.GetByName(ctx, pipelineName) diff --git a/integration/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go index 52ea012202..0cf9aad345 100644 --- a/integration/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -26,11 +26,9 @@ func TestEmptyBundleDeploy(t *testing.T) { f.Close() // deploy empty bundle - err = deployBundle(t, ctx, tmpDir) - require.NoError(t, err) + deployBundle(t, ctx, tmpDir) t.Cleanup(func() { - err = destroyBundle(t, ctx, tmpDir) - require.NoError(t, err) + destroyBundle(t, ctx, tmpDir) }) } diff --git a/integration/bundle/environments_test.go b/integration/bundle/environments_test.go index 28f64bf414..4f831cf681 100644 --- a/integration/bundle/environments_test.go +++ 
b/integration/bundle/environments_test.go @@ -13,17 +13,14 @@ func TestPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { ctx, _ := acc.WorkspaceTest(t) - bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ "unique_id": uuid.New().String(), }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) out, err := runResource(t, ctx, bundleRoot, "some_other_job") diff --git a/integration/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go index 331a75d627..9d0b466ba8 100644 --- a/integration/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -26,10 +26,9 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) { gt := &generateJobTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) jobId := gt.createTestJob(ctx) t.Cleanup(func() { @@ -41,7 +40,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) { "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) - _, _, err = c.Run() + _, _, err := c.Run() require.NoError(t, err) _, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py")) @@ -62,11 +61,9 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12") require.Contains(t, generatedYaml, "num_workers: 1") - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) } type generateJobTest struct { diff --git a/integration/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go index 5be1ff7ddb..303d5cb228 100644 --- a/integration/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -25,10 +25,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { gt := &generatePipelineTest{T: wt, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) - require.NoError(t, err) pipelineId, name := gt.createTestPipeline(ctx) t.Cleanup(func() { @@ -40,7 +39,7 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { "--existing-pipeline-id", fmt.Sprint(pipelineId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) - _, _, err = c.Run() + _, _, err := c.Run() require.NoError(t, err) _, err = os.Stat(filepath.Join(bundleRoot, "src", "notebook.py")) @@ -70,11 +69,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "- file:") require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py"))) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) - err = destroyBundle(t, ctx, bundleRoot) - 
require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) } type generatePipelineTest struct { diff --git a/integration/bundle/helpers_test.go b/integration/bundle/helpers_test.go index c2deb67a94..e884cd8c68 100644 --- a/integration/bundle/helpers_test.go +++ b/integration/bundle/helpers_test.go @@ -26,18 +26,15 @@ import ( const defaultSparkVersion = "13.3.x-snapshot-scala2.12" -func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) (string, error) { +func initTestTemplate(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any) string { bundleRoot := t.TempDir() return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot) } -func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { +func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, templateName string, config map[string]any, bundleRoot string) string { templateRoot := filepath.Join("bundles", templateName) - configFilePath, err := writeConfigFile(t, config) - if err != nil { - return "", err - } + configFilePath := writeConfigFile(t, config) ctx = root.SetWorkspaceClient(ctx, nil) cmd := cmdio.NewIO(ctx, flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") @@ -46,21 +43,21 @@ func initTestTemplateWithBundleRoot(t testutil.TestingT, ctx context.Context, te out, err := filer.NewLocalClient(bundleRoot) require.NoError(t, err) err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out) - return bundleRoot, err + require.NoError(t, err) + return bundleRoot } -func writeConfigFile(t testutil.TestingT, config map[string]any) (string, error) { +func writeConfigFile(t testutil.TestingT, config map[string]any) string { bytes, err := json.Marshal(config) - if err != nil { - return "", err - } + require.NoError(t, err) dir := t.TempDir() filepath := filepath.Join(dir, "config.json") t.Log("Configuration for template: ", string(bytes)) err = os.WriteFile(filepath, bytes, 0o644) - return filepath, err + require.NoError(t, err) + return filepath } func validateBundle(t testutil.TestingT, ctx context.Context, path string) ([]byte, error) { @@ -83,14 +80,14 @@ func unmarshalConfig(t testutil.TestingT, data []byte) *bundle.Bundle { return bundle } -func deployBundle(t testutil.TestingT, ctx context.Context, path string) error { +func deployBundle(t testutil.TestingT, ctx context.Context, path string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) c := testcli.NewRunner(t, ctx, "bundle", "deploy", "--force-lock", "--auto-approve") _, _, err := c.Run() - return err + require.NoError(t, err) } -func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { +func deployBundleWithArgsErr(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string, error) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args = append([]string{"bundle", "deploy"}, args...) c := testcli.NewRunner(t, ctx, args...) 
@@ -98,13 +95,19 @@ func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, return stdout.String(), stderr.String(), err } -func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) error { +func deployBundleWithArgs(t testutil.TestingT, ctx context.Context, path string, args ...string) (string, string) { + stdout, stderr, err := deployBundleWithArgsErr(t, ctx, path, args...) + require.NoError(t, err) + return stdout, stderr +} + +func deployBundleWithFlags(t testutil.TestingT, ctx context.Context, path string, flags []string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) args := []string{"bundle", "deploy", "--force-lock"} args = append(args, flags...) c := testcli.NewRunner(t, ctx, args...) _, _, err := c.Run() - return err + require.NoError(t, err) } func runResource(t testutil.TestingT, ctx context.Context, path, key string) (string, error) { @@ -128,11 +131,11 @@ func runResourceWithParams(t testutil.TestingT, ctx context.Context, path, key s return stdout.String(), err } -func destroyBundle(t testutil.TestingT, ctx context.Context, path string) error { +func destroyBundle(t testutil.TestingT, ctx context.Context, path string) { ctx = env.Set(ctx, "BUNDLE_ROOT", path) c := testcli.NewRunner(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() - return err + require.NoError(t, err) } func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t testutil.TestingT, uniqueId string) string { diff --git a/integration/bundle/job_metadata_test.go b/integration/bundle/job_metadata_test.go index aa22ec34f0..f470b37da4 100644 --- a/integration/bundle/job_metadata_test.go +++ b/integration/bundle/job_metadata_test.go @@ -24,21 +24,18 @@ func TestJobsMetadataFile(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ + bundleRoot := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) // deploy bundle - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) // Cleanup the deployed bundle t.Cleanup(func() { - err = destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) // assert job 1 is created diff --git a/integration/bundle/local_state_staleness_test.go b/integration/bundle/local_state_staleness_test.go index 3c80c24135..c02a38bb1a 100644 --- a/integration/bundle/local_state_staleness_test.go +++ b/integration/bundle/local_state_staleness_test.go @@ -27,16 +27,14 @@ func TestLocalStateStaleness(t *testing.T) { nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() initialize := func() string { - root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + root := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": defaultSparkVersion, }) - require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, ctx, root) - require.NoError(t, err) + destroyBundle(t, ctx, root) }) return root @@ -48,16 +46,13 @@ func TestLocalStateStaleness(t *testing.T) { bundleB := initialize() // 1) Deploy bundle A - err = deployBundle(t, ctx, bundleA) - require.NoError(t, err) + deployBundle(t, ctx, bundleA) // 2) Deploy bundle B - err = deployBundle(t, ctx, bundleB) - require.NoError(t, err) + deployBundle(t, ctx, 
bundleB) // 3) Deploy bundle A again - err = deployBundle(t, ctx, bundleA) - require.NoError(t, err) + deployBundle(t, ctx, bundleA) // Assert that there is only a single job in the workspace corresponding to this bundle. iter := w.Jobs.List(context.Background(), jobs.ListJobsRequest{ diff --git a/integration/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go index c967eaf366..a90642ecc9 100644 --- a/integration/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -15,21 +15,18 @@ func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonW nodeTypeId := testutil.GetCloud(t).NodeTypeID() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplate(t, ctx, templateName, map[string]any{ + bundleRoot := initTestTemplate(t, ctx, templateName, map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, "python_wheel_wrapper": pythonWheelWrapper, "instance_pool_id": instancePoolId, }) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) out, err := runResource(t, ctx, bundleRoot, "some_other_job") diff --git a/integration/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go index be389d64ba..98acb75acb 100644 --- a/integration/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -15,7 +15,7 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti nodeTypeId := testutil.GetCloud(t).NodeTypeID() tmpDir := t.TempDir() instancePoolId := env.Get(ctx, "TEST_INSTANCE_POOL_ID") - bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ + bundleRoot := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, @@ -23,14 +23,11 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti "artifact_path": artifactPath, "instance_pool_id": instancePoolId, }, tmpDir) - require.NoError(t, err) - err = deployBundle(t, ctx, bundleRoot) - require.NoError(t, err) + deployBundle(t, ctx, bundleRoot) t.Cleanup(func() { - err := destroyBundle(t, ctx, bundleRoot) - require.NoError(t, err) + destroyBundle(t, ctx, bundleRoot) }) out, err := runResource(t, ctx, bundleRoot, "jar_job") From b6f299974ffdf601421b53870a6935b3f5822e87 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Mon, 16 Dec 2024 17:21:20 +0100 Subject: [PATCH 50/63] Fix testutil.RandomName to use the full character set (#2020) ## Changes It was using first 12 chars, that does not seem intended. ## Tests Existing tests. 
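For reference, a minimal sketch of the corrected helper, assuming a simple alphanumeric charset (the exact constant lives in `internal/testutil/helpers.go` and is not reproduced here); the point is that the index is drawn from the full charset rather than from only its first `randLen` bytes:

```go
package testutil

import (
	"fmt"
	"math/rand"
	"strings"
)

// charset is assumed here for illustration; the real constant may differ.
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"

// RandomName returns a 12-character random suffix, optionally prepended with a prefix.
func RandomName(prefix ...string) string {
	randLen := 12
	b := make([]byte, randLen)
	for i := range b {
		// The fix: index into the whole charset instead of only its first randLen characters.
		b[i] = charset[rand.Intn(len(charset))]
	}
	if len(prefix) > 0 {
		return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b)
	}
	return string(b)
}
```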
--- internal/testutil/helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/testutil/helpers.go b/internal/testutil/helpers.go index 69ed7595b7..019a8e6186 100644 --- a/internal/testutil/helpers.go +++ b/internal/testutil/helpers.go @@ -23,7 +23,7 @@ func RandomName(prefix ...string) string { randLen := 12 b := make([]byte, randLen) for i := range b { - b[i] = charset[rand.Intn(randLen)] + b[i] = charset[rand.Intn(len(charset))] } if len(prefix) > 0 { return fmt.Sprintf("%s%s", strings.Join(prefix, ""), b) From e60fe1bff2fa24751e358801abe34f60050c8084 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 16 Dec 2024 17:34:22 +0100 Subject: [PATCH 51/63] Fixed downloading arm64 binaries (#2021) ## Changes Fixed downloading arm64 binaries Go 1.23 changed the way built binaries are prefixed on amd64, more details here: https://tip.golang.org/doc/go1.23#arm64 --- internal/bugbash/exec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/bugbash/exec.sh b/internal/bugbash/exec.sh index ac25b16edf..4a087dc662 100755 --- a/internal/bugbash/exec.sh +++ b/internal/bugbash/exec.sh @@ -31,7 +31,7 @@ function cli_snapshot_directory() { dir="${dir}_386" ;; arm64|aarch64) - dir="${dir}_arm64" + dir="${dir}_arm64_v8.0" ;; armv7l|armv8l) dir="${dir}_arm_6" From d7eac598cda4bd12aa75d7ff23f076189c13be46 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 17 Dec 2024 08:45:58 +0100 Subject: [PATCH 52/63] Move integration test helpers to `integration/internal` (#2022) ## Changes The `acc` package is exclusively used by integration tests, so it belongs under `integration/internal`. It's not the best name we can rename later. ## Tests n/a --- integration/assumptions/dashboard_assumptions_test.go | 2 +- integration/bundle/artifacts_test.go | 2 +- integration/bundle/basic_test.go | 2 +- integration/bundle/bind_resource_test.go | 2 +- integration/bundle/clusters_test.go | 2 +- integration/bundle/dashboards_test.go | 2 +- integration/bundle/deploy_test.go | 2 +- integration/bundle/deploy_then_remove_resources_test.go | 2 +- integration/bundle/deploy_to_shared_test.go | 2 +- integration/bundle/deployment_state_test.go | 2 +- integration/bundle/destroy_test.go | 2 +- integration/bundle/empty_bundle_test.go | 2 +- integration/bundle/environments_test.go | 2 +- integration/bundle/generate_job_test.go | 2 +- integration/bundle/generate_pipeline_test.go | 2 +- integration/bundle/init_test.go | 2 +- integration/bundle/job_metadata_test.go | 2 +- integration/bundle/local_state_staleness_test.go | 2 +- integration/bundle/python_wheel_test.go | 2 +- integration/bundle/spark_jar_test.go | 2 +- integration/cmd/clusters/clusters_test.go | 2 +- integration/cmd/fs/cat_test.go | 2 +- integration/cmd/fs/helpers_test.go | 2 +- integration/cmd/repos/repos_test.go | 2 +- integration/cmd/secrets/secrets_test.go | 2 +- integration/cmd/storage_credentials/storage_credentials_test.go | 2 +- integration/cmd/sync/sync_test.go | 2 +- integration/cmd/workspace/workspace_test.go | 2 +- {internal => integration/internal}/acc/debug.go | 0 {internal => integration/internal}/acc/fixtures.go | 0 {internal => integration/internal}/acc/workspace.go | 0 integration/libs/filer/helpers_test.go | 2 +- integration/libs/git/git_fetch_test.go | 2 +- integration/libs/locker/locker_test.go | 2 +- integration/libs/tags/tags_test.go | 2 +- integration/python/python_tasks_test.go | 2 +- 36 files changed, 33 insertions(+), 33 deletions(-) rename {internal => integration/internal}/acc/debug.go 
(100%) rename {internal => integration/internal}/acc/fixtures.go (100%) rename {internal => integration/internal}/acc/workspace.go (100%) diff --git a/integration/assumptions/dashboard_assumptions_test.go b/integration/assumptions/dashboard_assumptions_test.go index 51a8094980..3a1dcc9073 100644 --- a/integration/assumptions/dashboard_assumptions_test.go +++ b/integration/assumptions/dashboard_assumptions_test.go @@ -4,7 +4,7 @@ import ( "encoding/base64" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/convert" diff --git a/integration/bundle/artifacts_test.go b/integration/bundle/artifacts_test.go index 3a5da721cc..1b71a1c3d1 100644 --- a/integration/bundle/artifacts_test.go +++ b/integration/bundle/artifacts_test.go @@ -12,7 +12,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" diff --git a/integration/bundle/basic_test.go b/integration/bundle/basic_test.go index e458706e03..79301b850a 100644 --- a/integration/bundle/basic_test.go +++ b/integration/bundle/basic_test.go @@ -5,7 +5,7 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/require" diff --git a/integration/bundle/bind_resource_test.go b/integration/bundle/bind_resource_test.go index b512357704..508aa34107 100644 --- a/integration/bundle/bind_resource_test.go +++ b/integration/bundle/bind_resource_test.go @@ -6,7 +6,7 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" diff --git a/integration/bundle/clusters_test.go b/integration/bundle/clusters_test.go index 7992ecd7da..f9e5c9b648 100644 --- a/integration/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/google/uuid" diff --git a/integration/bundle/dashboards_test.go b/integration/bundle/dashboards_test.go index 985ef8611f..83b4b8b03b 100644 --- a/integration/bundle/dashboards_test.go +++ b/integration/bundle/dashboards_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/databricks-sdk-go/service/workspace" diff --git a/integration/bundle/deploy_test.go b/integration/bundle/deploy_test.go index 2c31101909..0b37e5630c 100644 --- a/integration/bundle/deploy_test.go +++ b/integration/bundle/deploy_test.go @@ -11,7 +11,7 @@ import ( "testing" "github.com/databricks/cli/cmd/root" - 
"github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" diff --git a/integration/bundle/deploy_then_remove_resources_test.go b/integration/bundle/deploy_then_remove_resources_test.go index b792d3623f..052d84dd6e 100644 --- a/integration/bundle/deploy_then_remove_resources_test.go +++ b/integration/bundle/deploy_then_remove_resources_test.go @@ -5,7 +5,7 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/assert" diff --git a/integration/bundle/deploy_to_shared_test.go b/integration/bundle/deploy_to_shared_test.go index c97933217c..b4395f4c63 100644 --- a/integration/bundle/deploy_to_shared_test.go +++ b/integration/bundle/deploy_to_shared_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/google/uuid" "github.com/stretchr/testify/require" diff --git a/integration/bundle/deployment_state_test.go b/integration/bundle/deployment_state_test.go index a7e01643d9..fff1504d2b 100644 --- a/integration/bundle/deployment_state_test.go +++ b/integration/bundle/deployment_state_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/databricks/cli/bundle/deploy" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" diff --git a/integration/bundle/destroy_test.go b/integration/bundle/destroy_test.go index 59d1816e01..f18138ce53 100644 --- a/integration/bundle/destroy_test.go +++ b/integration/bundle/destroy_test.go @@ -6,7 +6,7 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/apierr" "github.com/google/uuid" diff --git a/integration/bundle/empty_bundle_test.go b/integration/bundle/empty_bundle_test.go index 0cf9aad345..1ab240d136 100644 --- a/integration/bundle/empty_bundle_test.go +++ b/integration/bundle/empty_bundle_test.go @@ -6,7 +6,7 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) diff --git a/integration/bundle/environments_test.go b/integration/bundle/environments_test.go index 4f831cf681..e0dc915321 100644 --- a/integration/bundle/environments_test.go +++ b/integration/bundle/environments_test.go @@ -3,7 +3,7 @@ package bundle_test import ( "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) diff --git a/integration/bundle/generate_job_test.go b/integration/bundle/generate_job_test.go index 9d0b466ba8..b68bb7d611 100644 --- a/integration/bundle/generate_job_test.go +++ b/integration/bundle/generate_job_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" 
"github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" diff --git a/integration/bundle/generate_pipeline_test.go b/integration/bundle/generate_pipeline_test.go index 303d5cb228..7843ec0c3c 100644 --- a/integration/bundle/generate_pipeline_test.go +++ b/integration/bundle/generate_pipeline_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" diff --git a/integration/bundle/init_test.go b/integration/bundle/init_test.go index bc3757fdea..f5c263ca3d 100644 --- a/integration/bundle/init_test.go +++ b/integration/bundle/init_test.go @@ -11,7 +11,7 @@ import ( "testing" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/iamutil" diff --git a/integration/bundle/job_metadata_test.go b/integration/bundle/job_metadata_test.go index f470b37da4..a7290c6e32 100644 --- a/integration/bundle/job_metadata_test.go +++ b/integration/bundle/job_metadata_test.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/google/uuid" diff --git a/integration/bundle/local_state_staleness_test.go b/integration/bundle/local_state_staleness_test.go index c02a38bb1a..3984815044 100644 --- a/integration/bundle/local_state_staleness_test.go +++ b/integration/bundle/local_state_staleness_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/jobs" diff --git a/integration/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go index a90642ecc9..5cca1cb53f 100644 --- a/integration/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -3,7 +3,7 @@ package bundle_test import ( "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" diff --git a/integration/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go index 98acb75acb..65893052e0 100644 --- a/integration/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/google/uuid" diff --git a/integration/cmd/clusters/clusters_test.go b/integration/cmd/clusters/clusters_test.go index 4cc6cb6581..4e20a05581 100644 --- a/integration/cmd/clusters/clusters_test.go +++ b/integration/cmd/clusters/clusters_test.go @@ -6,7 +6,7 @@ import ( "regexp" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" 
"github.com/databricks/cli/internal/testcli" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/compute" diff --git a/integration/cmd/fs/cat_test.go b/integration/cmd/fs/cat_test.go index b0f99ae4e5..3e964fe6ee 100644 --- a/integration/cmd/fs/cat_test.go +++ b/integration/cmd/fs/cat_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" diff --git a/integration/cmd/fs/helpers_test.go b/integration/cmd/fs/helpers_test.go index da4fd48cff..e1bebb28f4 100644 --- a/integration/cmd/fs/helpers_test.go +++ b/integration/cmd/fs/helpers_test.go @@ -5,7 +5,7 @@ import ( "path" "path/filepath" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" diff --git a/integration/cmd/repos/repos_test.go b/integration/cmd/repos/repos_test.go index b5ad120d65..7526a14ca5 100644 --- a/integration/cmd/repos/repos_test.go +++ b/integration/cmd/repos/repos_test.go @@ -6,7 +6,7 @@ import ( "strconv" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go" diff --git a/integration/cmd/secrets/secrets_test.go b/integration/cmd/secrets/secrets_test.go index 4dd133c251..43ad54de28 100644 --- a/integration/cmd/secrets/secrets_test.go +++ b/integration/cmd/secrets/secrets_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/workspace" diff --git a/integration/cmd/storage_credentials/storage_credentials_test.go b/integration/cmd/storage_credentials/storage_credentials_test.go index 73727a875e..e4b861312b 100644 --- a/integration/cmd/storage_credentials/storage_credentials_test.go +++ b/integration/cmd/storage_credentials/storage_credentials_test.go @@ -3,7 +3,7 @@ package storage_credentials_test import ( "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" diff --git a/integration/cmd/sync/sync_test.go b/integration/cmd/sync/sync_test.go index 077a06079a..984f6ac49d 100644 --- a/integration/cmd/sync/sync_test.go +++ b/integration/cmd/sync/sync_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" diff --git a/integration/cmd/workspace/workspace_test.go b/integration/cmd/workspace/workspace_test.go index 9338f60aa2..4edbbfc834 100644 --- a/integration/cmd/workspace/workspace_test.go +++ b/integration/cmd/workspace/workspace_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testcli" 
"github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go/service/workspace" diff --git a/internal/acc/debug.go b/integration/internal/acc/debug.go similarity index 100% rename from internal/acc/debug.go rename to integration/internal/acc/debug.go diff --git a/internal/acc/fixtures.go b/integration/internal/acc/fixtures.go similarity index 100% rename from internal/acc/fixtures.go rename to integration/internal/acc/fixtures.go diff --git a/internal/acc/workspace.go b/integration/internal/acc/workspace.go similarity index 100% rename from internal/acc/workspace.go rename to integration/internal/acc/workspace.go diff --git a/integration/libs/filer/helpers_test.go b/integration/libs/filer/helpers_test.go index 2383ae352f..a3a3aaae58 100644 --- a/integration/libs/filer/helpers_test.go +++ b/integration/libs/filer/helpers_test.go @@ -7,7 +7,7 @@ import ( "path" "path/filepath" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" diff --git a/integration/libs/git/git_fetch_test.go b/integration/libs/git/git_fetch_test.go index e53b5469a5..0998d775bd 100644 --- a/integration/libs/git/git_fetch_test.go +++ b/integration/libs/git/git_fetch_test.go @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/git" "github.com/stretchr/testify/assert" diff --git a/integration/libs/locker/locker_test.go b/integration/libs/locker/locker_test.go index 7624afdee4..c51972b90c 100644 --- a/integration/libs/locker/locker_test.go +++ b/integration/libs/locker/locker_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" lockpkg "github.com/databricks/cli/libs/locker" diff --git a/integration/libs/tags/tags_test.go b/integration/libs/tags/tags_test.go index b7c47b5f5a..8a54a966bf 100644 --- a/integration/libs/tags/tags_test.go +++ b/integration/libs/tags/tags_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" diff --git a/integration/python/python_tasks_test.go b/integration/python/python_tasks_test.go index d0a92e084d..9ad3ed5def 100644 --- a/integration/python/python_tasks_test.go +++ b/integration/python/python_tasks_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/databricks/cli/bundle/run/output" - "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/integration/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" From 2fa3b48083438482703b63584ae36b5765b548d7 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Tue, 17 Dec 2024 16:34:54 +0100 Subject: [PATCH 53/63] Remove 'make fmt' and 'fmt' workflow (#2026) Remove unnecessary make command and github workflow - it's a subset of "lint" now. However, keep "mod tidy" separately, don't think the linter does that. 
--- .github/workflows/push.yml | 33 ++++----------------------------- Makefile | 8 +------- 2 files changed, 5 insertions(+), 36 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index a497a7d7b7..6e07f46157 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -57,46 +57,21 @@ jobs: - name: Publish test coverage uses: codecov/codecov-action@v4 - fmt: + golangci: + name: lint runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: 1.23.2 - - # No need to download cached dependencies when running gofmt. - cache: false - - - name: Install goimports - run: | - go install golang.org/x/tools/cmd/goimports@latest - - - name: Run make fmt - run: | - make fmt - - name: Run go mod tidy run: | go mod tidy - - name: Fail on differences run: | # Exit with status code 1 if there are differences (i.e. unformatted files) git diff --exit-code - - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: 1.23.2 - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: diff --git a/Makefile b/Makefile index 8b33e447c4..6c849986e4 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,5 @@ default: build -fmt: - @echo "✓ Formatting source code with goimports ..." - @goimports -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") - @echo "✓ Formatting source code with gofmt ..." - @gofmt -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") - lint: vendor @echo "✓ Linting source code with https://golangci-lint.run/ (with --fix)..." @golangci-lint run --fix ./... @@ -39,4 +33,4 @@ vendor: integration: gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h -.PHONY: fmt lint lintcheck test testonly coverage build snapshot vendor integration +.PHONY: lint lintcheck test testonly coverage build snapshot vendor integration From 23ddee8023bae3d069d2097f518905f4bb217fa8 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 17 Dec 2024 18:16:58 +0100 Subject: [PATCH 54/63] Skip job runs during integration testing for PRs (#2024) ## Changes A small subset of tests trigger cluster creation to run jobs. These tests comprise a substantial amount of the total integration test runtime. We can skip them on PRs and only run them on the main branch. ## Tests Confirmed the short runtime is ~20 mins. --- Makefile | 9 +++++++-- integration/bundle/clusters_test.go | 5 +++++ integration/bundle/python_wheel_test.go | 5 +++++ integration/bundle/spark_jar_test.go | 5 +++++ 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 6c849986e4..a7c0584fad 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,12 @@ vendor: @echo "✓ Filling vendor folder with library code ..." @go mod vendor +INTEGRATION = gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h + integration: - gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." 
-- -parallel 4 -timeout=2h + $(INTEGRATION) + +integration-short: + $(INTEGRATION) -short -.PHONY: lint lintcheck test testonly coverage build snapshot vendor integration +.PHONY: lint lintcheck test testonly coverage build snapshot vendor integration integration-short diff --git a/integration/bundle/clusters_test.go b/integration/bundle/clusters_test.go index f9e5c9b648..c153b87330 100644 --- a/integration/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -44,6 +44,11 @@ func TestDeployBundleWithCluster(t *testing.T) { require.NoError(t, err) require.NotNil(t, cluster) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, root, "foo") require.NoError(t, err) require.Contains(t, out, "Hello World!") diff --git a/integration/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go index 5cca1cb53f..f7fb1ff3f6 100644 --- a/integration/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -29,6 +29,11 @@ func runPythonWheelTest(t *testing.T, templateName, sparkVersion string, pythonW destroyBundle(t, ctx, bundleRoot) }) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, bundleRoot, "some_other_job") require.NoError(t, err) require.Contains(t, out, "Hello from my func") diff --git a/integration/bundle/spark_jar_test.go b/integration/bundle/spark_jar_test.go index 65893052e0..cbdf5a00c3 100644 --- a/integration/bundle/spark_jar_test.go +++ b/integration/bundle/spark_jar_test.go @@ -30,6 +30,11 @@ func runSparkJarTestCommon(t *testing.T, ctx context.Context, sparkVersion, arti destroyBundle(t, ctx, bundleRoot) }) + if testing.Short() { + t.Log("Skip the job run in short mode") + return + } + out, err := runResource(t, ctx, bundleRoot, "jar_job") require.NoError(t, err) require.Contains(t, out, "Hello from Jar!") From 13fa43e0f5af0c2b0d5a6397ca7273a36548cbc1 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 17 Dec 2024 18:34:09 +0100 Subject: [PATCH 55/63] Remove superfluous helper (#2028) ## Changes There was only one helper for AWS and not the other clouds. Found this when looking through double calls to `acc.WorkspaceTest()` (see `TestPythonWheelTaskDeployAndRunOnInteractiveCluster`). 
## Tests n/a --- integration/bundle/clusters_test.go | 6 +++--- integration/bundle/python_wheel_test.go | 4 +--- internal/testutil/cloud.go | 4 ---- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/integration/bundle/clusters_test.go b/integration/bundle/clusters_test.go index c153b87330..4492062089 100644 --- a/integration/bundle/clusters_test.go +++ b/integration/bundle/clusters_test.go @@ -12,12 +12,12 @@ import ( ) func TestDeployBundleWithCluster(t *testing.T) { - ctx, wt := acc.WorkspaceTest(t) - - if testutil.IsAWSCloud(wt) { + if testutil.GetCloud(t) == testutil.AWS { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } + ctx, wt := acc.WorkspaceTest(t) + nodeTypeId := testutil.GetCloud(t).NodeTypeID() uniqueId := uuid.New().String() root := initTestTemplate(t, ctx, "clusters", map[string]any{ diff --git a/integration/bundle/python_wheel_test.go b/integration/bundle/python_wheel_test.go index f7fb1ff3f6..62846f7b5c 100644 --- a/integration/bundle/python_wheel_test.go +++ b/integration/bundle/python_wheel_test.go @@ -56,9 +56,7 @@ func TestPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { } func TestPythonWheelTaskDeployAndRunOnInteractiveCluster(t *testing.T) { - _, wt := acc.WorkspaceTest(t) - - if testutil.IsAWSCloud(wt) { + if testutil.GetCloud(t) == testutil.AWS { t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") } diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index 4a8a89645d..33921db0cd 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -58,7 +58,3 @@ func GetCloud(t TestingT) Cloud { } return -1 } - -func IsAWSCloud(t TestingT) bool { - return GetCloud(t) == AWS -} From 5b84856b17178bd802fb58ea1256372d960aa845 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 17 Dec 2024 20:05:42 +0100 Subject: [PATCH 56/63] Correctly handle required query params in CLI generation (#2027) ## Changes If there's required query params, it is a top-level field of request object and not a field of nested request body. This is needed for upcoming changes from OpenAPI spec changes where such query parameters is introduced. No changes after regenerating CLI with current spec and the fix (appears we haven't had such params before) --- .codegen/service.go.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index ef7977e1bd..ee2c7b0fd9 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -411,5 +411,5 @@ func new{{.PascalName}}() *cobra.Command { {{- define "request-body-obj" -}} {{- $method := .Method -}} {{- $field := .Field -}} - {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} + {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (and (not $field.IsPath) (not $field.IsQuery))) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} {{- end -}} From 042c8d88c6f7af28221ad125d8aece8e6a7131d5 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Wed, 18 Dec 2024 11:19:14 +0100 Subject: [PATCH 57/63] Custom annotations for bundle-specific JSON schema fields (#1957) ## Changes Adds annotations to json-schema for fields which are not covered by OpenAPI spec. 
Custom descriptions were copy-pasted from documentation PR which is still WIP so descriptions for some fields are missing Further improvements: * documentation autogen based on json-schema * fix missing descriptions ## Tests This script is not part of CLI package so I didn't test all corner cases. Few high-level tests were added to be sure that schema annotations is in sync with actual config --------- Co-authored-by: Pieter Noordhuis --- .codegen.json | 2 +- .github/workflows/push.yml | 9 +- Makefile | 6 +- bundle/internal/schema/annotations.go | 209 ++ bundle/internal/schema/annotations.yml | 501 ++++ .../internal/schema/annotations_openapi.yml | 2585 +++++++++++++++++ .../schema/annotations_openapi_overrides.yml | 155 + bundle/internal/schema/annotations_test.go | 44 + bundle/internal/schema/main.go | 45 +- bundle/internal/schema/main_test.go | 125 + bundle/internal/schema/parser.go | 108 +- .../schema/testdata/pass/target_variable.yml | 5 + bundle/schema/embed_test.go | 6 +- bundle/schema/jsonschema.json | 601 ++-- libs/jsonschema/extension.go | 6 + libs/jsonschema/schema.go | 7 + 16 files changed, 4142 insertions(+), 272 deletions(-) create mode 100644 bundle/internal/schema/annotations.go create mode 100644 bundle/internal/schema/annotations.yml create mode 100644 bundle/internal/schema/annotations_openapi.yml create mode 100644 bundle/internal/schema/annotations_openapi_overrides.yml create mode 100644 bundle/internal/schema/annotations_test.go create mode 100644 bundle/internal/schema/main_test.go create mode 100644 bundle/internal/schema/testdata/pass/target_variable.yml diff --git a/.codegen.json b/.codegen.json index 73ab8c2a4f..735e1ee319 100644 --- a/.codegen.json +++ b/.codegen.json @@ -11,7 +11,7 @@ "required": ["go"], "post_generate": [ "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build", - "go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json", + "make schema", "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes", "echo 'go.sum linguist-generated=true' >> ./.gitattributes", "echo 'bundle/schema/jsonschema.json linguist-generated=true' >> ./.gitattributes" diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 6e07f46157..ea561805bf 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -99,14 +99,19 @@ jobs: # By default the ajv-cli runs in strict mode which will fail if the schema # itself is not valid. Strict mode is more strict than the JSON schema # specification. 
See for details: https://ajv.js.org/options.html#strict-mode-options + # The ajv-cli is configured to use the markdownDescription keyword which is not part of the JSON schema specification, + # but is used in editors like VSCode to render markdown in the description field - name: Validate bundle schema run: | go run main.go bundle schema > schema.json + # Add markdownDescription keyword to ajv + echo "module.exports=function(a){a.addKeyword('markdownDescription')}" >> keywords.js + for file in ./bundle/internal/schema/testdata/pass/*.yml; do - ajv test -s schema.json -d $file --valid + ajv test -s schema.json -d $file --valid -c=./keywords.js done for file in ./bundle/internal/schema/testdata/fail/*.yml; do - ajv test -s schema.json -d $file --invalid + ajv test -s schema.json -d $file --invalid -c=./keywords.js done diff --git a/Makefile b/Makefile index a7c0584fad..f8e7834a50 100644 --- a/Makefile +++ b/Makefile @@ -29,6 +29,10 @@ snapshot: vendor: @echo "✓ Filling vendor folder with library code ..." @go mod vendor + +schema: + @echo "✓ Generating json-schema ..." + @go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json INTEGRATION = gotestsum --format github-actions --rerun-fails --jsonfile output.json --packages "./integration/..." -- -parallel 4 -timeout=2h @@ -38,4 +42,4 @@ integration: integration-short: $(INTEGRATION) -short -.PHONY: lint lintcheck test testonly coverage build snapshot vendor integration integration-short +.PHONY: lint lintcheck test testonly coverage build snapshot vendor schema integration integration-short diff --git a/bundle/internal/schema/annotations.go b/bundle/internal/schema/annotations.go new file mode 100644 index 0000000000..aec5e68b0e --- /dev/null +++ b/bundle/internal/schema/annotations.go @@ -0,0 +1,209 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "reflect" + "regexp" + "strings" + + yaml3 "gopkg.in/yaml.v3" + + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/dyn/yamlsaver" + "github.com/databricks/cli/libs/jsonschema" +) + +type annotation struct { + Description string `json:"description,omitempty"` + MarkdownDescription string `json:"markdown_description,omitempty"` + Title string `json:"title,omitempty"` + Default any `json:"default,omitempty"` + Enum []any `json:"enum,omitempty"` +} + +type annotationHandler struct { + // Annotations read from all annotation files including all overrides + parsedAnnotations annotationFile + // Missing annotations for fields that are found in config that need to be added to the annotation file + missingAnnotations annotationFile +} + +/** + * Parsed file with annotations, expected format: + * github.com/databricks/cli/bundle/config.Bundle: + * cluster_id: + * description: "Description" + */ +type annotationFile map[string]map[string]annotation + +const Placeholder = "PLACEHOLDER" + +// Adds annotations to the JSON schema reading from the annotation files. 
+// More details https://json-schema.org/understanding-json-schema/reference/annotations +func newAnnotationHandler(sources []string) (*annotationHandler, error) { + prev := dyn.NilValue + for _, path := range sources { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + generated, err := yamlloader.LoadYAML(path, bytes.NewBuffer(b)) + if err != nil { + return nil, err + } + prev, err = merge.Merge(prev, generated) + if err != nil { + return nil, err + } + } + + var data annotationFile + + err := convert.ToTyped(&data, prev) + if err != nil { + return nil, err + } + + d := &annotationHandler{} + d.parsedAnnotations = data + d.missingAnnotations = annotationFile{} + return d, nil +} + +func (d *annotationHandler) addAnnotations(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + refPath := getPath(typ) + shouldHandle := strings.HasPrefix(refPath, "github.com") + if !shouldHandle { + return s + } + + annotations := d.parsedAnnotations[refPath] + if annotations == nil { + annotations = map[string]annotation{} + } + + rootTypeAnnotation, ok := annotations[RootTypeKey] + if ok { + assignAnnotation(&s, rootTypeAnnotation) + } + + for k, v := range s.Properties { + item := annotations[k] + if item.Description == "" { + item.Description = Placeholder + + emptyAnnotations := d.missingAnnotations[refPath] + if emptyAnnotations == nil { + emptyAnnotations = map[string]annotation{} + d.missingAnnotations[refPath] = emptyAnnotations + } + emptyAnnotations[k] = item + } + assignAnnotation(v, item) + } + return s +} + +// Writes missing annotations with placeholder values back to the annotation file +func (d *annotationHandler) syncWithMissingAnnotations(outputPath string) error { + existingFile, err := os.ReadFile(outputPath) + if err != nil { + return err + } + existing, err := yamlloader.LoadYAML("", bytes.NewBuffer(existingFile)) + if err != nil { + return err + } + missingAnnotations, err := convert.FromTyped(&d.missingAnnotations, dyn.NilValue) + if err != nil { + return err + } + + output, err := merge.Merge(existing, missingAnnotations) + if err != nil { + return err + } + + err = saveYamlWithStyle(outputPath, output) + if err != nil { + return err + } + return nil +} + +func getPath(typ reflect.Type) string { + return typ.PkgPath() + "." 
+ typ.Name() +} + +func assignAnnotation(s *jsonschema.Schema, a annotation) { + if a.Description != Placeholder { + s.Description = a.Description + } + + if a.Default != nil { + s.Default = a.Default + } + s.MarkdownDescription = convertLinksToAbsoluteUrl(a.MarkdownDescription) + s.Title = a.Title + s.Enum = a.Enum +} + +func saveYamlWithStyle(outputPath string, input dyn.Value) error { + style := map[string]yaml3.Style{} + file, _ := input.AsMap() + for _, v := range file.Keys() { + style[v.MustString()] = yaml3.LiteralStyle + } + + saver := yamlsaver.NewSaverWithStyle(style) + err := saver.SaveAsYAML(file, outputPath, true) + if err != nil { + return err + } + return nil +} + +func convertLinksToAbsoluteUrl(s string) string { + if s == "" { + return s + } + base := "https://docs.databricks.com" + referencePage := "/dev-tools/bundles/reference.html" + + // Regular expression to match Markdown-style links like [_](link) + re := regexp.MustCompile(`\[_\]\(([^)]+)\)`) + result := re.ReplaceAllStringFunc(s, func(match string) string { + matches := re.FindStringSubmatch(match) + if len(matches) < 2 { + return match + } + link := matches[1] + var text, absoluteURL string + + if strings.HasPrefix(link, "#") { + text = strings.TrimPrefix(link, "#") + absoluteURL = fmt.Sprintf("%s%s%s", base, referencePage, link) + + // Handle relative paths like /dev-tools/bundles/resources.html#dashboard + } else if strings.HasPrefix(link, "/") { + absoluteURL = strings.ReplaceAll(fmt.Sprintf("%s%s", base, link), ".md", ".html") + if strings.Contains(link, "#") { + parts := strings.Split(link, "#") + text = parts[1] + } else { + text = "link" + } + } else { + return match + } + + return fmt.Sprintf("[%s](%s)", text, absoluteURL) + }) + + return result +} diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml new file mode 100644 index 0000000000..e52189daa1 --- /dev/null +++ b/bundle/internal/schema/annotations.yml @@ -0,0 +1,501 @@ +github.com/databricks/cli/bundle/config.Artifact: + "build": + "description": |- + An optional set of non-default build commands that you want to run locally before deployment. + + For Python wheel builds, the Databricks CLI assumes that it can find a local install of the Python wheel package to run builds, and it runs the command python setup.py bdist_wheel by default during each bundle deployment. + + To specify multiple build commands, separate each command with double-ampersand (&&) characters. + "executable": + "description": |- + The executable type. + "files": + "description": |- + The source files for the artifact. + "markdown_description": |- + The source files for the artifact, defined as an [_](#artifact_file). + "path": + "description": |- + The location where the built artifact will be saved. + "type": + "description": |- + The type of the artifact. + "markdown_description": |- + The type of the artifact. Valid values are `wheel` or `jar` +github.com/databricks/cli/bundle/config.ArtifactFile: + "source": + "description": |- + The path of the files used to build the artifact. +github.com/databricks/cli/bundle/config.Bundle: + "cluster_id": + "description": |- + The ID of a cluster to use to run the bundle. + "markdown_description": |- + The ID of a cluster to use to run the bundle. See [_](/dev-tools/bundles/settings.md#cluster_id). + "compute_id": + "description": |- + PLACEHOLDER + "databricks_cli_version": + "description": |- + The Databricks CLI version to use for the bundle. 
+ "markdown_description": |- + The Databricks CLI version to use for the bundle. See [_](/dev-tools/bundles/settings.md#databricks_cli_version). + "deployment": + "description": |- + The definition of the bundle deployment + "markdown_description": |- + The definition of the bundle deployment. For supported attributes, see [_](#deployment) and [_](/dev-tools/bundles/deployment-modes.md). + "git": + "description": |- + The Git version control details that are associated with your bundle. + "markdown_description": |- + The Git version control details that are associated with your bundle. For supported attributes, see [_](#git) and [_](/dev-tools/bundles/settings.md#git). + "name": + "description": |- + The name of the bundle. + "uuid": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config.Deployment: + "fail_on_active_runs": + "description": |- + Whether to fail on active runs. If this is set to true a deployment that is running can be interrupted. + "lock": + "description": |- + The deployment lock attributes. + "markdown_description": |- + The deployment lock attributes. See [_](#lock). +github.com/databricks/cli/bundle/config.Experimental: + "pydabs": + "description": |- + The PyDABs configuration. + "python_wheel_wrapper": + "description": |- + Whether to use a Python wheel wrapper + "scripts": + "description": |- + The commands to run + "use_legacy_run_as": + "description": |- + Whether to use the legacy run_as behavior +github.com/databricks/cli/bundle/config.Git: + "branch": + "description": |- + The Git branch name. + "markdown_description": |- + The Git branch name. See [_](/dev-tools/bundles/settings.md#git). + "origin_url": + "description": |- + The origin URL of the repository. + "markdown_description": |- + The origin URL of the repository. See [_](/dev-tools/bundles/settings.md#git). +github.com/databricks/cli/bundle/config.Lock: + "enabled": + "description": |- + Whether this lock is enabled. + "force": + "description": |- + Whether to force this lock if it is enabled. +github.com/databricks/cli/bundle/config.Presets: + "jobs_max_concurrent_runs": + "description": |- + The maximum concurrent runs for a job. + "name_prefix": + "description": |- + The prefix for job runs of the bundle. + "pipelines_development": + "description": |- + Whether pipeline deployments should be locked in development mode. + "source_linked_deployment": + "description": |- + Whether to link the deployment to the bundle source. + "tags": + "description": |- + The tags for the bundle deployment. + "trigger_pause_status": + "description": |- + A pause status to apply to all job triggers and schedules. Valid values are PAUSED or UNPAUSED. +github.com/databricks/cli/bundle/config.PyDABs: + "enabled": + "description": |- + Whether or not PyDABs (Private Preview) is enabled + "import": + "description": |- + The PyDABs project to import to discover resources, resource generator and mutators + "venv_path": + "description": |- + The Python virtual environment path +github.com/databricks/cli/bundle/config.Resources: + "clusters": + "description": |- + The cluster definitions for the bundle. + "markdown_description": |- + The cluster definitions for the bundle. See [_](/dev-tools/bundles/resources.md#cluster) + "dashboards": + "description": |- + The dashboard definitions for the bundle. + "markdown_description": |- + The dashboard definitions for the bundle. See [_](/dev-tools/bundles/resources.md#dashboard) + "experiments": + "description": |- + The experiment definitions for the bundle. 
+ "markdown_description": |- + The experiment definitions for the bundle. See [_](/dev-tools/bundles/resources.md#experiment) + "jobs": + "description": |- + The job definitions for the bundle. + "markdown_description": |- + The job definitions for the bundle. See [_](/dev-tools/bundles/resources.md#job) + "model_serving_endpoints": + "description": |- + The model serving endpoint definitions for the bundle. + "markdown_description": |- + The model serving endpoint definitions for the bundle. See [_](/dev-tools/bundles/resources.md#model_serving_endpoint) + "models": + "description": |- + The model definitions for the bundle. + "markdown_description": |- + The model definitions for the bundle. See [_](/dev-tools/bundles/resources.md#model) + "pipelines": + "description": |- + The pipeline definitions for the bundle. + "markdown_description": |- + The pipeline definitions for the bundle. See [_](/dev-tools/bundles/resources.md#pipeline) + "quality_monitors": + "description": |- + The quality monitor definitions for the bundle. + "markdown_description": |- + The quality monitor definitions for the bundle. See [_](/dev-tools/bundles/resources.md#quality_monitor) + "registered_models": + "description": |- + The registered model definitions for the bundle. + "markdown_description": |- + The registered model definitions for the bundle. See [_](/dev-tools/bundles/resources.md#registered_model) + "schemas": + "description": |- + The schema definitions for the bundle. + "markdown_description": |- + The schema definitions for the bundle. See [_](/dev-tools/bundles/resources.md#schema) + "volumes": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config.Root: + "artifacts": + "description": |- + Defines the attributes to build an artifact + "bundle": + "description": |- + The attributes of the bundle. + "markdown_description": |- + The attributes of the bundle. See [_](/dev-tools/bundles/settings.md#bundle) + "experimental": + "description": |- + Defines attributes for experimental features. + "include": + "description": |- + Specifies a list of path globs that contain configuration files to include within the bundle. + "markdown_description": |- + Specifies a list of path globs that contain configuration files to include within the bundle. See [_](/dev-tools/bundles/settings.md#include) + "permissions": + "description": |- + Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle + "markdown_description": |- + Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle. See [_](/dev-tools/bundles/settings.md#permissions) and [_](/dev-tools/bundles/permissions.md). + "presets": + "description": |- + Defines bundle deployment presets. + "markdown_description": |- + Defines bundle deployment presets. See [_](/dev-tools/bundles/deployment-modes.md#presets). + "resources": + "description": |- + Specifies information about the Databricks resources used by the bundle + "markdown_description": |- + Specifies information about the Databricks resources used by the bundle. See [_](/dev-tools/bundles/resources.md). + "run_as": + "description": |- + The identity to use to run the bundle. + "sync": + "description": |- + The files and file paths to include or exclude in the bundle. + "markdown_description": |- + The files and file paths to include or exclude in the bundle. See [_](/dev-tools/bundles/) + "targets": + "description": |- + Defines deployment targets for the bundle. 
+ "variables": + "description": |- + A Map that defines the custom variables for the bundle, where each key is the name of the variable, and the value is a Map that defines the variable. + "workspace": + "description": |- + Defines the Databricks workspace for the bundle. +github.com/databricks/cli/bundle/config.Sync: + "exclude": + "description": |- + A list of files or folders to exclude from the bundle. + "include": + "description": |- + A list of files or folders to include in the bundle. + "paths": + "description": |- + The local folder paths, which can be outside the bundle root, to synchronize to the workspace when the bundle is deployed. +github.com/databricks/cli/bundle/config.Target: + "artifacts": + "description": |- + The artifacts to include in the target deployment. + "markdown_description": |- + The artifacts to include in the target deployment. See [_](#artifact) + "bundle": + "description": |- + The name of the bundle when deploying to this target. + "cluster_id": + "description": |- + The ID of the cluster to use for this target. + "compute_id": + "description": |- + Deprecated. The ID of the compute to use for this target. + "default": + "description": |- + Whether this target is the default target. + "git": + "description": |- + The Git version control settings for the target. + "markdown_description": |- + The Git version control settings for the target. See [_](#git). + "mode": + "description": |- + The deployment mode for the target. + "markdown_description": |- + The deployment mode for the target. Valid values are `development` or `production`. See [_](/dev-tools/bundles/deployment-modes.md). + "permissions": + "description": |- + The permissions for deploying and running the bundle in the target. + "markdown_description": |- + The permissions for deploying and running the bundle in the target. See [_](#permission). + "presets": + "description": |- + The deployment presets for the target. + "markdown_description": |- + The deployment presets for the target. See [_](#preset). + "resources": + "description": |- + The resource definitions for the target. + "markdown_description": |- + The resource definitions for the target. See [_](#resources). + "run_as": + "description": |- + The identity to use to run the bundle. + "markdown_description": |- + The identity to use to run the bundle. See [_](#job_run_as) and [_](/dev-tools/bundles/run_as.md). + "sync": + "description": |- + The local paths to sync to the target workspace when a bundle is run or deployed. + "markdown_description": |- + The local paths to sync to the target workspace when a bundle is run or deployed. See [_](#sync). + "variables": + "description": |- + The custom variable definitions for the target. + "markdown_description": |- + The custom variable definitions for the target. See [_](/dev-tools/bundles/settings.md#variables) and [_](/dev-tools/bundles/variables.md). + "workspace": + "description": |- + The Databricks workspace for the target. + "markdown_description": |- + The Databricks workspace for the target. [_](#workspace) +github.com/databricks/cli/bundle/config.Workspace: + "artifact_path": + "description": |- + The artifact path to use within the workspace for both deployments and workflow runs + "auth_type": + "description": |- + The authentication type. 
+ "azure_client_id": + "description": |- + The Azure client ID + "azure_environment": + "description": |- + The Azure environment + "azure_login_app_id": + "description": |- + The Azure login app ID + "azure_tenant_id": + "description": |- + The Azure tenant ID + "azure_use_msi": + "description": |- + Whether to use MSI for Azure + "azure_workspace_resource_id": + "description": |- + The Azure workspace resource ID + "client_id": + "description": |- + The client ID for the workspace + "file_path": + "description": |- + The file path to use within the workspace for both deployments and workflow runs + "google_service_account": + "description": |- + The Google service account name + "host": + "description": |- + The Databricks workspace host URL + "profile": + "description": |- + The Databricks workspace profile name + "resource_path": + "description": |- + The workspace resource path + "root_path": + "description": |- + The Databricks workspace root path + "state_path": + "description": |- + The workspace state path +github.com/databricks/cli/bundle/config/resources.Grant: + "principal": + "description": |- + The name of the principal that will be granted privileges + "privileges": + "description": |- + The privileges to grant to the specified entity +github.com/databricks/cli/bundle/config/resources.Permission: + "group_name": + "description": |- + The name of the group that has the permission set in level. + "level": + "description": |- + The allowed permission for user, group, service principal defined for this permission. + "service_principal_name": + "description": |- + The name of the service principal that has the permission set in level. + "user_name": + "description": |- + The name of the user that has the permission set in level. +github.com/databricks/cli/bundle/config/variable.Lookup: + "alert": + "description": |- + PLACEHOLDER + "cluster": + "description": |- + PLACEHOLDER + "cluster_policy": + "description": |- + PLACEHOLDER + "dashboard": + "description": |- + PLACEHOLDER + "instance_pool": + "description": |- + PLACEHOLDER + "job": + "description": |- + PLACEHOLDER + "metastore": + "description": |- + PLACEHOLDER + "notification_destination": + "description": |- + PLACEHOLDER + "pipeline": + "description": |- + PLACEHOLDER + "query": + "description": |- + PLACEHOLDER + "service_principal": + "description": |- + PLACEHOLDER + "warehouse": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/variable.TargetVariable: + "default": + "description": |- + PLACEHOLDER + "description": + "description": |- + The description of the variable. + "lookup": + "description": |- + The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID. + "type": + "description": |- + The type of the variable. + "markdown_description": + "description": |- + The type of the variable. +github.com/databricks/cli/bundle/config/variable.Variable: + "default": + "description": |- + PLACEHOLDER + "description": + "description": |- + The description of the variable + "lookup": + "description": |- + The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID. 
+ "markdown_description": |- + The name of the `alert`, `cluster_policy`, `cluster`, `dashboard`, `instance_pool`, `job`, `metastore`, `pipeline`, `query`, `service_principal`, or `warehouse` object for which to retrieve an ID." + "type": + "description": |- + The type of the variable. +github.com/databricks/databricks-sdk-go/service/serving.Ai21LabsConfig: + "ai21labs_api_key": + "description": |- + PLACEHOLDER + "ai21labs_api_key_plaintext": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/serving.GoogleCloudVertexAiConfig: + "private_key": + "description": |- + PLACEHOLDER + "private_key_plaintext": + "description": |- + PLACEHOLDER + "project_id": + "description": |- + PLACEHOLDER + "region": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/serving.OpenAiConfig: + "microsoft_entra_client_id": + "description": |- + PLACEHOLDER + "microsoft_entra_client_secret": + "description": |- + PLACEHOLDER + "microsoft_entra_client_secret_plaintext": + "description": |- + PLACEHOLDER + "microsoft_entra_tenant_id": + "description": |- + PLACEHOLDER + "openai_api_base": + "description": |- + PLACEHOLDER + "openai_api_key": + "description": |- + PLACEHOLDER + "openai_api_key_plaintext": + "description": |- + PLACEHOLDER + "openai_api_type": + "description": |- + PLACEHOLDER + "openai_api_version": + "description": |- + PLACEHOLDER + "openai_deployment_name": + "description": |- + PLACEHOLDER + "openai_organization": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/serving.PaLmConfig: + "palm_api_key": + "description": |- + PLACEHOLDER + "palm_api_key_plaintext": + "description": |- + PLACEHOLDER diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml new file mode 100644 index 0000000000..8cedada545 --- /dev/null +++ b/bundle/internal/schema/annotations_openapi.yml @@ -0,0 +1,2585 @@ +# This file is auto-generated. DO NOT EDIT. +github.com/databricks/cli/bundle/config/resources.Cluster: + apply_policy_default_values: + description: When set to true, fixed and default values from the policy will be + used for fields that are omitted. When set to false, only fixed values from + the policy will be applied. + autoscale: + description: |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + autotermination_minutes: + description: |- + Automatically terminates the cluster after it is inactive for this time in minutes. If not set, + this cluster will not be automatically terminated. If specified, the threshold must be between + 10 and 10000 minutes. + Users can also set this value to 0 to explicitly disable automatic termination. + aws_attributes: + description: |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + azure_attributes: + description: |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + cluster_log_conf: + description: |- + The configuration for delivering spark logs to a long-term storage destination. + Two kinds of destinations (dbfs and s3) are supported. Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. 
The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + cluster_name: + description: | + Cluster name requested by the user. This doesn't have to be unique. + If not specified at creation, the cluster name will be an empty string. + custom_tags: + description: |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + data_security_mode: {} + docker_image: {} + driver_instance_pool_id: + description: |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + driver_node_type_id: + description: | + The node type of the Spark driver. Note that this field is optional; + if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + enable_elastic_disk: + description: |- + Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk + space when its Spark workers are running low on disk space. This feature requires specific AWS + permissions to function correctly - refer to the User Guide for more details. + enable_local_disk_encryption: + description: Whether to enable LUKS on cluster VMs' local disks + gcp_attributes: + description: |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + init_scripts: + description: The configuration for storing init scripts. Any number of destinations + can be specified. The scripts are executed sequentially in the order provided. + If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + instance_pool_id: + description: The optional ID of the instance pool to which the cluster belongs. + node_type_id: + description: | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + num_workers: + description: |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + policy_id: + description: The ID of the cluster policy used to create the cluster if applicable. + runtime_engine: {} + single_user_name: + description: Single user name if data_security_mode is `SINGLE_USER` + spark_conf: + description: | + An object containing a set of optional, user-specified Spark configuration key-value pairs. 
+ Users can also pass in a string of extra JVM options to the driver and the executors via + `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively. + spark_env_vars: + description: |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + spark_version: + description: | + The Spark version of the cluster, e.g. `3.3.x-scala2.11`. + A list of available Spark versions can be retrieved by using + the :method:clusters/sparkVersions API call. + ssh_public_keys: + description: |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. + workload_type: {} +github.com/databricks/cli/bundle/config/resources.Dashboard: + create_time: + description: The timestamp of when the dashboard was created. + dashboard_id: + description: UUID identifying the dashboard. + display_name: + description: The display name of the dashboard. + etag: + description: |- + The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard + has not been modified since the last read. + This field is excluded in List Dashboards responses. + lifecycle_state: + description: The state of the dashboard resource. Used for tracking trashed status. + parent_path: + description: |- + The workspace path of the folder containing the dashboard. Includes leading slash and no + trailing slash. + This field is excluded in List Dashboards responses. + path: + description: |- + The workspace path of the dashboard asset, including the file name. + Exported dashboards always have the file extension `.lvdash.json`. + This field is excluded in List Dashboards responses. + serialized_dashboard: + description: |- + The contents of the dashboard in serialized string form. + This field is excluded in List Dashboards responses. + Use the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get) + to retrieve an example response, which includes the `serialized_dashboard` field. + This field provides the structure of the JSON string that represents the dashboard's + layout and components. + update_time: + description: |- + The timestamp of when the dashboard was last updated by the user. + This field is excluded in List Dashboards responses. + warehouse_id: + description: The warehouse ID used to run the dashboard. +github.com/databricks/cli/bundle/config/resources.Job: + budget_policy_id: + description: |- + The id of the user specified budget policy to use for this job. + If not specified, a default budget policy may be applied when creating or modifying the job. + See `effective_budget_policy_id` for the budget policy used by this workload. + continuous: + description: An optional continuous property for this job. 
The continuous property + will ensure that there is always one run executing. Only one of `schedule` and + `continuous` can be used. + deployment: + description: Deployment information for jobs managed by external sources. + description: + description: An optional description for the job. The maximum length is 27700 + characters in UTF-8 encoding. + edit_mode: + description: |- + Edit mode of the job. + + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. + * `EDITABLE`: The job is in an editable state and can be modified. + email_notifications: + description: An optional set of email addresses that is notified when runs of + this job begin or complete as well as when this job is deleted. + environments: + description: |- + A list of task execution environment specifications that can be referenced by serverless tasks of this job. + An environment is required to be present for serverless tasks. + For serverless notebook tasks, the environment is accessible in the notebook environment panel. + For other serverless tasks, the task environment is required to be specified using environment_key in the task settings. + format: + description: Used to tell what is the format of the job. This field is ignored + in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always + set to `"MULTI_TASK"`. + git_source: + description: |- + An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. + + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. + + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. + health: {} + job_clusters: + description: A list of job cluster specifications that can be shared and reused + by tasks of this job. Libraries cannot be declared in a shared job cluster. + You must declare dependent libraries in task settings. + max_concurrent_runs: + description: |- + An optional maximum allowed number of concurrent runs of the job. + Set this value if you want to be able to execute multiple runs of the same job concurrently. + This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters. + This setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. + However, from then on, new runs are skipped unless there are fewer than 3 active runs. + This value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped. + name: + description: An optional name for the job. The maximum length is 4096 bytes in + UTF-8 encoding. + notification_settings: + description: Optional notification settings that are used when sending notifications + to each of the `email_notifications` and `webhook_notifications` for this job. + parameters: + description: Job-level parameter definitions + queue: + description: The queue settings of the job. + run_as: {} + schedule: + description: An optional periodic schedule for this job. 
The default behavior + is that the job only runs when triggered by clicking “Run Now” in the Jobs UI + or sending an API request to `runNow`. + tags: + description: A map of tags associated with the job. These are forwarded to the + cluster as cluster tags for jobs clusters, and are subject to the same limitations + as cluster tags. A maximum of 25 tags can be added to the job. + tasks: + description: A list of task specifications to be executed by this job. + timeout_seconds: + description: An optional timeout applied to each run of this job. A value of `0` + means no timeout. + trigger: + description: A configuration to trigger a run when certain conditions are met. + The default behavior is that the job runs only when triggered by clicking “Run + Now” in the Jobs UI or sending an API request to `runNow`. + webhook_notifications: + description: A collection of system notification IDs to notify when runs of this + job begin or complete. +github.com/databricks/cli/bundle/config/resources.MlflowExperiment: + artifact_location: + description: Location where artifacts for the experiment are stored. + creation_time: + description: Creation time + experiment_id: + description: Unique identifier for the experiment. + last_update_time: + description: Last update time + lifecycle_stage: + description: |- + Current life cycle stage of the experiment: "active" or "deleted". + Deleted experiments are not returned by APIs. + name: + description: Human readable name that identifies the experiment. + tags: + description: 'Tags: Additional metadata key-value pairs.' +github.com/databricks/cli/bundle/config/resources.MlflowModel: + creation_timestamp: + description: Timestamp recorded when this `registered_model` was created. + description: + description: Description of this `registered_model`. + last_updated_timestamp: + description: Timestamp recorded when metadata for this `registered_model` was + last updated. + latest_versions: + description: |- + Collection of latest model versions for each stage. + Only contains models with current `READY` status. + name: + description: Unique name for the model. + tags: + description: 'Tags: Additional metadata key-value pairs for this `registered_model`.' + user_id: + description: User that created this `registered_model` +github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: + ai_gateway: + description: 'The AI Gateway configuration for the serving endpoint. NOTE: only + external model endpoints are supported as of now.' + config: + description: The core config of the serving endpoint. + name: + description: | + The name of the serving endpoint. This field is required and must be unique across a Databricks workspace. + An endpoint name can consist of alphanumeric characters, dashes, and underscores. + rate_limits: + description: 'Rate limits to be applied to the serving endpoint. NOTE: this field + is deprecated, please use AI Gateway to manage rate limits.' + route_optimized: + description: Enable route optimization for the serving endpoint. + tags: + description: Tags to be attached to the serving endpoint and automatically propagated + to billing logs. +github.com/databricks/cli/bundle/config/resources.Pipeline: + budget_policy_id: + description: Budget policy of this pipeline. + catalog: + description: A catalog in Unity Catalog to publish data from this pipeline to. + If `target` is specified, tables in this pipeline are published to a `target` + schema inside `catalog` (for example, `catalog`.`target`.`table`). 
If `target` + is not specified, no data is published to Unity Catalog. + channel: + description: DLT Release Channel that specifies which version to use. + clusters: + description: Cluster settings for this pipeline deployment. + configuration: + description: String-String configuration for this pipeline execution. + continuous: + description: Whether the pipeline is continuous or triggered. This replaces `trigger`. + deployment: + description: Deployment type of this pipeline. + development: + description: Whether the pipeline is in Development mode. Defaults to false. + edition: + description: Pipeline product edition. + filters: + description: Filters on which Pipeline packages to include in the deployed graph. + gateway_definition: + description: The definition of a gateway pipeline to support change data capture. + id: + description: Unique identifier for this pipeline. + ingestion_definition: + description: The configuration for a managed ingestion pipeline. These settings + cannot be used with the 'libraries', 'target' or 'catalog' settings. + libraries: + description: Libraries or code needed by this deployment. + name: + description: Friendly identifier for this pipeline. + notifications: + description: List of notification settings for this pipeline. + photon: + description: Whether Photon is enabled for this pipeline. + restart_window: + description: Restart window of this pipeline. + schema: + description: The default schema (database) where tables are read from or published + to. The presence of this field implies that the pipeline is in direct publishing + mode. + serverless: + description: Whether serverless compute is enabled for this pipeline. + storage: + description: DBFS root directory for storing checkpoints and tables. + target: + description: Target schema (database) to add tables in this pipeline to. If not + specified, no data is published to the Hive metastore or Unity Catalog. To publish + to Unity Catalog, also specify `catalog`. + trigger: + description: 'Which pipeline trigger to use. Deprecated: Use `continuous` instead.' +github.com/databricks/cli/bundle/config/resources.QualityMonitor: + assets_dir: + description: The directory to store monitoring assets (e.g. dashboard, metric + tables). + baseline_table_name: + description: | + Name of the baseline table from which drift metrics are computed from. + Columns in the monitored table should also be present in the baseline table. + custom_metrics: + description: | + Custom metrics to compute on the monitored table. These can be aggregate metrics, derived + metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time + windows). + data_classification_config: + description: The data classification config for the monitor. + inference_log: + description: Configuration for monitoring inference logs. + notifications: + description: The notification settings for the monitor. + output_schema_name: + description: Schema where output metric tables are created. + schedule: + description: The schedule for automatically updating and refreshing metric tables. + skip_builtin_dashboard: + description: Whether to skip creating a default dashboard summarizing data quality + metrics. + slicing_exprs: + description: | + List of column expressions to slice data with for targeted analysis. The data is grouped by + each expression independently, resulting in a separate slice for each predicate and its + complements. 
For high-cardinality columns, only the top 100 unique values by frequency will + generate slices. + snapshot: + description: Configuration for monitoring snapshot tables. + time_series: + description: Configuration for monitoring time series tables. + warehouse_id: + description: | + Optional argument to specify the warehouse for dashboard creation. If not specified, the first running + warehouse will be used. +github.com/databricks/cli/bundle/config/resources.RegisteredModel: + catalog_name: + description: The name of the catalog where the schema and the registered model + reside + comment: + description: The comment attached to the registered model + name: + description: The name of the registered model + schema_name: + description: The name of the schema where the registered model resides + storage_location: + description: The storage location on the cloud under which model version data + files are stored +github.com/databricks/cli/bundle/config/resources.Schema: + catalog_name: + description: Name of parent catalog. + comment: + description: User-provided free-form text description. + name: + description: Name of schema, relative to parent catalog. + properties: {} + storage_root: + description: Storage root URL for managed tables within schema. +github.com/databricks/cli/bundle/config/resources.Volume: + catalog_name: + description: The name of the catalog where the schema and the volume are + comment: + description: The comment attached to the volume + name: + description: The name of the volume + schema_name: + description: The name of the schema where the volume is + storage_location: + description: The storage location on the cloud + volume_type: {} +github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: + pause_status: + description: Read only field that indicates whether a schedule is paused or not. + quartz_cron_expression: + description: | + The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html). + timezone_id: + description: | + The timezone id (e.g., ``"PST"``) in which to evaluate the quartz expression. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus: + _: + description: Read only field that indicates whether a schedule is paused or not. + enum: + - UNPAUSED + - PAUSED +github.com/databricks/databricks-sdk-go/service/catalog.MonitorDataClassificationConfig: + enabled: + description: Whether data classification is enabled. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorDestination: + email_addresses: + description: The list of email addresses to send the notification to. A maximum + of 5 email addresses is supported. +github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: + granularities: + description: | + Granularities for aggregating data into time windows based on their timestamp. Currently the following static + granularities are supported: + {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + label_col: + description: Optional column that contains the ground truth for the prediction. + model_id_col: + description: | + Column that contains the id of the model generating the predictions. Metrics will be computed per model id by + default, and also across all model ids. + prediction_col: + description: Column that contains the output/prediction from the model. 
+ prediction_proba_col: + description: | + Optional column that contains the prediction probabilities for each class in a classification problem type. + The values in this column should be a map, mapping each class label to the prediction probability for a given + sample. The map should be of PySpark MapType(). + problem_type: + description: Problem type the model aims to solve. Determines the type of model-quality + metrics that will be computed. + timestamp_col: + description: | + Column that contains the timestamps of requests. The column must be one of the following: + - A ``TimestampType`` column + - A column whose values can be converted to timestamps through the pyspark + ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). +github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType: + _: + description: Problem type the model aims to solve. Determines the type of model-quality + metrics that will be computed. + enum: + - PROBLEM_TYPE_CLASSIFICATION + - PROBLEM_TYPE_REGRESSION +github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric: + definition: + description: Jinja template for a SQL expression that specifies how to compute + the metric. See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition). + input_columns: + description: | + A list of column names in the input table the metric should be computed for. + Can use ``":table"`` to indicate that the metric needs information from multiple columns. + name: + description: Name of the metric in the output tables. + output_data_type: + description: The output type of the custom metric. + type: + description: | + Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. + The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics + are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + baseline and input table, or across the two consecutive time windows. + - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table + - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics + - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics +github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType: + _: + description: | + Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. + The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics + are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + baseline and input table, or across the two consecutive time windows. + - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table + - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics + - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics + enum: + - CUSTOM_METRIC_TYPE_AGGREGATE + - CUSTOM_METRIC_TYPE_DERIVED + - CUSTOM_METRIC_TYPE_DRIFT +github.com/databricks/databricks-sdk-go/service/catalog.MonitorNotifications: + on_failure: + description: Who to send notifications to on monitor failure. + on_new_classification_tag_detected: + description: Who to send notifications to when new data classification tags are + detected. 
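+# Illustrative sketch only (not part of the generated schema): the monitor fields annotated
+# above map onto a bundle's quality_monitors resource roughly as follows. The resource key
+# `my_monitor`, the schema names, paths, and email address are made-up placeholders.
+#
+#   resources:
+#     quality_monitors:
+#       my_monitor:
+#         assets_dir: /Workspace/Users/someone@example.com/monitoring
+#         output_schema_name: main.monitoring
+#         inference_log:
+#           granularities: ["1 day"]
+#           timestamp_col: ts
+#           prediction_col: prediction
+#           model_id_col: model_id
+#           problem_type: PROBLEM_TYPE_REGRESSION
+#         schedule:
+#           quartz_cron_expression: "0 0 12 * * ?"
+#           timezone_id: UTC
+#         notifications:
+#           on_failure:
+#             email_addresses:
+#               - someone@example.com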
+github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot: {} +github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: + granularities: + description: | + Granularities for aggregating data into time windows based on their timestamp. Currently the following static + granularities are supported: + {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + timestamp_col: + description: | + Column that contains the timestamps of requests. The column must be one of the following: + - A ``TimestampType`` column + - A column whose values can be converted to timestamps through the pyspark + ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). +github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: + _: + enum: + - EXTERNAL + - MANAGED +github.com/databricks/databricks-sdk-go/service/compute.Adlsgen2Info: + destination: + description: abfss destination, e.g. `abfss://@.dfs.core.windows.net/`. +github.com/databricks/databricks-sdk-go/service/compute.AutoScale: + max_workers: + description: |- + The maximum number of workers to which the cluster can scale up when overloaded. + Note that `max_workers` must be strictly greater than `min_workers`. + min_workers: + description: |- + The minimum number of workers to which the cluster can scale down when underutilized. + It is also the initial number of workers the cluster will have after creation. +github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: + availability: {} + ebs_volume_count: + description: |- + The number of volumes launched for each instance. Users can choose up to 10 volumes. + This feature is only enabled for supported node types. Legacy node types cannot specify + custom EBS volumes. + For node types with no instance store, at least one EBS volume needs to be specified; + otherwise, cluster creation will fail. + + These EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc. + Instance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc. + + If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for + scratch storage because heterogenously sized scratch devices can lead to inefficient disk + utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance + store volumes. + + Please note that if EBS volumes are specified, then the Spark configuration `spark.local.dir` + will be overridden. + ebs_volume_iops: + description: If using gp3 volumes, what IOPS to use for the disk. If this is not + set, the maximum performance of a gp2 volume with the same volume size will + be used. + ebs_volume_size: + description: |- + The size of each EBS volume (in GiB) launched for each instance. For general purpose + SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, + this value must be within the range 500 - 4096. + ebs_volume_throughput: + description: If using gp3 volumes, what throughput to use for the disk. If this + is not set, the maximum performance of a gp2 volume with the same volume size + will be used. + ebs_volume_type: {} + first_on_demand: + description: |- + The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. + If this value is greater than 0, the cluster driver node in particular will be placed on an + on-demand instance. 
If this value is greater than or equal to the current cluster size, all + nodes will be placed on on-demand instances. If this value is less than the current cluster + size, `first_on_demand` nodes will be placed on on-demand instances and the remainder will + be placed on `availability` instances. Note that this value does not affect + cluster size and cannot currently be mutated over the lifetime of a cluster. + instance_profile_arn: + description: |- + Nodes for this cluster will only be placed on AWS instances with this instance profile. If + ommitted, nodes will be placed on instances without an IAM instance profile. The instance + profile must have previously been added to the Databricks environment by an account + administrator. + + This feature may only be available to certain customer plans. + + If this field is ommitted, we will pull in the default from the conf if it exists. + spot_bid_price_percent: + description: |- + The bid price for AWS spot instances, as a percentage of the corresponding instance type's + on-demand price. + For example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot + instance, then the bid price is half of the price of + on-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice + the price of on-demand `r3.xlarge` instances. If not specified, the default value is 100. + When spot instances are requested for this cluster, only spot instances whose bid price + percentage matches this field will be considered. + Note that, for safety, we enforce this field to be no more than 10000. + + The default value and documentation here should be kept consistent with + CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent. + zone_id: + description: |- + Identifier for the availability zone/datacenter in which the cluster resides. + This string will be of a form like "us-west-2a". The provided availability + zone must be in the same region as the Databricks deployment. For example, "us-west-2a" + is not a valid zone id if the Databricks deployment resides in the "us-east-1" region. + This is an optional field at cluster creation, and if not specified, a default zone will be used. + If the zone specified is "auto", will try to place cluster in a zone with high availability, + and will retry placement in a different AZ if there is not enough capacity. + The list of available zones as well as the default value can be found by using the + `List Zones` method. +github.com/databricks/databricks-sdk-go/service/compute.AwsAvailability: + _: + description: | + Availability type used for all subsequent nodes past the `first_on_demand` ones. + + Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster. + enum: + - SPOT + - ON_DEMAND + - SPOT_WITH_FALLBACK +github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes: + availability: {} + first_on_demand: + description: |- + The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. + This value should be greater than 0, to make sure the cluster driver node is placed on an + on-demand instance. If this value is greater than or equal to the current cluster size, all + nodes will be placed on on-demand instances. If this value is less than the current cluster + size, `first_on_demand` nodes will be placed on on-demand instances and the remainder will + be placed on `availability` instances. 
Note that this value does not affect + cluster size and cannot currently be mutated over the lifetime of a cluster. + log_analytics_info: + description: Defines values necessary to configure and run Azure Log Analytics + agent + spot_bid_max_price: + description: |- + The max bid price to be used for Azure spot instances. + The Max price for the bid cannot be higher than the on-demand price of the instance. + If not specified, the default value is -1, which specifies that the instance cannot be evicted + on the basis of price, and only on the basis of availability. Further, the value should > 0 or -1. +github.com/databricks/databricks-sdk-go/service/compute.AzureAvailability: + _: + description: |- + Availability type used for all subsequent nodes past the `first_on_demand` ones. + Note: If `first_on_demand` is zero (which only happens on pool clusters), this availability + type will be used for the entire cluster. + enum: + - SPOT_AZURE + - ON_DEMAND_AZURE + - SPOT_WITH_FALLBACK_AZURE +github.com/databricks/databricks-sdk-go/service/compute.ClientsTypes: + jobs: + description: With jobs set, the cluster can be used for jobs + notebooks: + description: With notebooks set, this cluster can be used for notebooks +github.com/databricks/databricks-sdk-go/service/compute.ClusterLogConf: + dbfs: + description: |- + destination needs to be provided. e.g. + `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }` + s3: + description: |- + destination and either the region or endpoint need to be provided. e.g. + `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` + Cluster iam role is used to access s3, please make sure the cluster iam role in + `instance_profile_arn` has permission to write data to the s3 destination. +github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: + apply_policy_default_values: + description: When set to true, fixed and default values from the policy will be + used for fields that are omitted. When set to false, only fixed values from + the policy will be applied. + autoscale: + description: |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + autotermination_minutes: + description: |- + Automatically terminates the cluster after it is inactive for this time in minutes. If not set, + this cluster will not be automatically terminated. If specified, the threshold must be between + 10 and 10000 minutes. + Users can also set this value to 0 to explicitly disable automatic termination. + aws_attributes: + description: |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + azure_attributes: + description: |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + cluster_log_conf: + description: |- + The configuration for delivering spark logs to a long-term storage destination. + Two kinds of destinations (dbfs and s3) are supported. Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + cluster_name: + description: | + Cluster name requested by the user. This doesn't have to be unique. 
+ If not specified at creation, the cluster name will be an empty string. + custom_tags: + description: |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + data_security_mode: {} + docker_image: {} + driver_instance_pool_id: + description: |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + driver_node_type_id: + description: | + The node type of the Spark driver. Note that this field is optional; + if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + enable_elastic_disk: + description: |- + Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk + space when its Spark workers are running low on disk space. This feature requires specific AWS + permissions to function correctly - refer to the User Guide for more details. + enable_local_disk_encryption: + description: Whether to enable LUKS on cluster VMs' local disks + gcp_attributes: + description: |- + Attributes related to clusters running on Google Cloud Platform. + If not specified at cluster creation, a set of default values will be used. + init_scripts: + description: The configuration for storing init scripts. Any number of destinations + can be specified. The scripts are executed sequentially in the order provided. + If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + instance_pool_id: + description: The optional ID of the instance pool to which the cluster belongs. + node_type_id: + description: | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + num_workers: + description: |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + policy_id: + description: The ID of the cluster policy used to create the cluster if applicable. + runtime_engine: {} + single_user_name: + description: Single user name if data_security_mode is `SINGLE_USER` + spark_conf: + description: | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + Users can also pass in a string of extra JVM options to the driver and the executors via + `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively. + spark_env_vars: + description: |- + An object containing a set of optional, user-specified environment variable key-value pairs. 
+ Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + spark_version: + description: | + The Spark version of the cluster, e.g. `3.3.x-scala2.11`. + A list of available Spark versions can be retrieved by using + the :method:clusters/sparkVersions API call. + ssh_public_keys: + description: |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. + workload_type: {} +github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode: + _: + description: | + Data security mode decides what data governance model to use when accessing data + from a cluster. + + * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode. + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode. + * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited. + + The following modes are deprecated starting with Databricks Runtime 15.0 and + will be removed for future Databricks Runtime versions: + + * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. + * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters. + * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters. + * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled. + enum: + - NONE + - SINGLE_USER + - USER_ISOLATION + - LEGACY_TABLE_ACL + - LEGACY_PASSTHROUGH + - LEGACY_SINGLE_USER + - LEGACY_SINGLE_USER_STANDARD +github.com/databricks/databricks-sdk-go/service/compute.DbfsStorageInfo: + destination: + description: dbfs destination, e.g. `dbfs:/my/path` +github.com/databricks/databricks-sdk-go/service/compute.DockerBasicAuth: + password: + description: Password of the user + username: + description: Name of the user +github.com/databricks/databricks-sdk-go/service/compute.DockerImage: + basic_auth: {} + url: + description: URL of the docker image. +github.com/databricks/databricks-sdk-go/service/compute.EbsVolumeType: + _: + description: The type of EBS volumes that will be launched with this cluster. + enum: + - GENERAL_PURPOSE_SSD + - THROUGHPUT_OPTIMIZED_HDD +github.com/databricks/databricks-sdk-go/service/compute.Environment: + _: + description: |- + The environment entity used to preserve serverless environment side panel and jobs' environment for non-notebook task. 
+ In this minimal environment spec, only pip dependencies are supported. + client: + description: |- + Client version used by the environment + The client is the user-facing environment of the runtime. + Each client comes with a specific set of pre-installed libraries. + The version is a string, consisting of the major client version. + dependencies: + description: |- + List of pip dependencies, as supported by the version of pip in this environment. + Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ + Allowed dependency could be , , (WSFS or Volumes in Databricks), + E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] +github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: + availability: {} + boot_disk_size: + description: boot disk size in GB + google_service_account: + description: |- + If provided, the cluster will impersonate the google service account when accessing + gcloud services (like GCS). The google service account + must have previously been added to the Databricks environment by an account + administrator. + local_ssd_count: + description: If provided, each node (workers and driver) in the cluster will have + this number of local SSDs attached. Each local SSD is 375GB in size. Refer to + [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) + for the supported number of local SSDs for each instance type. + use_preemptible_executors: + description: |- + This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default). + Note: Soon to be deprecated, use the availability field instead. + zone_id: + description: |- + Identifier for the availability zone in which the cluster resides. + This can be one of the following: + - "HA" => High availability, spread nodes across availability zones for a Databricks deployment region [default] + - "AUTO" => Databricks picks an availability zone to schedule the cluster on. + - A GCP availability zone => Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones. +github.com/databricks/databricks-sdk-go/service/compute.GcpAvailability: + _: + description: |- + This field determines whether the instance pool will contain preemptible + VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable. + enum: + - PREEMPTIBLE_GCP + - ON_DEMAND_GCP + - PREEMPTIBLE_WITH_FALLBACK_GCP +github.com/databricks/databricks-sdk-go/service/compute.GcsStorageInfo: + destination: + description: GCS destination/URI, e.g. `gs://my-bucket/some-prefix` +github.com/databricks/databricks-sdk-go/service/compute.InitScriptInfo: + abfss: + description: |- + destination needs to be provided. e.g. + `{ "abfss" : { "destination" : "abfss://@.dfs.core.windows.net/" } } + dbfs: + description: |- + destination needs to be provided. e.g. + `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }` + file: + description: |- + destination needs to be provided. e.g. + `{ "file" : { "destination" : "file:/my/local/file.sh" } }` + gcs: + description: |- + destination needs to be provided. e.g. + `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }` + s3: + description: |- + destination and either the region or endpoint need to be provided. e.g. 
+ `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` + Cluster iam role is used to access s3, please make sure the cluster iam role in + `instance_profile_arn` has permission to write data to the s3 destination. + volumes: + description: |- + destination needs to be provided. e.g. + `{ "volumes" : { "destination" : "/Volumes/my-init.sh" } }` + workspace: + description: |- + destination needs to be provided. e.g. + `{ "workspace" : { "destination" : "/Users/user1@databricks.com/my-init.sh" } }` +github.com/databricks/databricks-sdk-go/service/compute.Library: + cran: + description: Specification of a CRAN library to be installed as part of the library + egg: + description: Deprecated. URI of the egg library to install. Installing Python + egg files is deprecated and is not supported in Databricks Runtime 14.0 and + above. + jar: + description: |- + URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. + For example: `{ "jar": "/Workspace/path/to/library.jar" }`, `{ "jar" : "/Volumes/path/to/library.jar" }` or + `{ "jar": "s3://my-bucket/library.jar" }`. + If S3 is used, please make sure the cluster has read access on the library. You may need to + launch the cluster with an IAM role to access the S3 URI. + maven: + description: |- + Specification of a maven library to be installed. For example: + `{ "coordinates": "org.jsoup:jsoup:1.7.2" }` + pypi: + description: |- + Specification of a PyPi library to be installed. For example: + `{ "package": "simplejson" }` + requirements: + description: |- + URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported. + For example: `{ "requirements": "/Workspace/path/to/requirements.txt" }` or `{ "requirements" : "/Volumes/path/to/requirements.txt" }` + whl: + description: |- + URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. + For example: `{ "whl": "/Workspace/path/to/library.whl" }`, `{ "whl" : "/Volumes/path/to/library.whl" }` or + `{ "whl": "s3://my-bucket/library.whl" }`. + If S3 is used, please make sure the cluster has read access on the library. You may need to + launch the cluster with an IAM role to access the S3 URI. +github.com/databricks/databricks-sdk-go/service/compute.LocalFileInfo: + destination: + description: local file destination, e.g. `file:/my/local/file.sh` +github.com/databricks/databricks-sdk-go/service/compute.LogAnalyticsInfo: + log_analytics_primary_key: + description: + log_analytics_workspace_id: + description: +github.com/databricks/databricks-sdk-go/service/compute.MavenLibrary: + coordinates: + description: 'Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2".' + exclusions: + description: |- + List of dependences to exclude. For example: `["slf4j:slf4j", "*:hadoop-client"]`. + + Maven dependency exclusions: + https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html. + repo: + description: |- + Maven repo to install the Maven package from. If omitted, both Maven Central Repository + and Spark Packages are searched. +github.com/databricks/databricks-sdk-go/service/compute.PythonPyPiLibrary: + package: + description: |- + The name of the pypi package to install. An optional exact version specification is also + supported. Examples: "simplejson" and "simplejson==3.8.0". 
+ repo: + description: |- + The repository where the package can be found. If not specified, the default pip index is + used. +github.com/databricks/databricks-sdk-go/service/compute.RCranLibrary: + package: + description: The name of the CRAN package to install. + repo: + description: The repository where the package can be found. If not specified, + the default CRAN repo is used. +github.com/databricks/databricks-sdk-go/service/compute.RuntimeEngine: + _: + description: | + Determines the cluster's runtime engine, either standard or Photon. + + This field is not compatible with legacy `spark_version` values that contain `-photon-`. + Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. + + If left unspecified, the runtime engine defaults to standard unless the spark_version + contains -photon-, in which case Photon will be used. + enum: + - "NULL" + - STANDARD + - PHOTON +github.com/databricks/databricks-sdk-go/service/compute.S3StorageInfo: + canned_acl: + description: |- + (Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`. + If `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on + the destination bucket and prefix. The full list of possible canned acl can be found at + http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl. + Please also note that by default only the object owner gets full controls. If you are using cross account + role for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to + read the logs. + destination: + description: |- + S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using + cluster iam role, please make sure you set cluster iam role and the role has write access to the + destination. Please also note that you cannot use AWS keys to deliver logs. + enable_encryption: + description: (Optional) Flag to enable server side encryption, `false` by default. + encryption_type: + description: |- + (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when + encryption is enabled and the default type is `sse-s3`. + endpoint: + description: |- + S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set. + If both are set, endpoint will be used. + kms_key: + description: (Optional) Kms key which will be used if encryption is enabled and + encryption type is set to `sse-kms`. + region: + description: |- + S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set, + endpoint will be used. +github.com/databricks/databricks-sdk-go/service/compute.VolumesStorageInfo: + destination: + description: Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh` +github.com/databricks/databricks-sdk-go/service/compute.WorkloadType: + clients: + description: ' defined what type of clients can use the cluster. E.g. Notebooks, + Jobs' +github.com/databricks/databricks-sdk-go/service/compute.WorkspaceStorageInfo: + destination: + description: workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh` +github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState: + _: + enum: + - ACTIVE + - TRASHED +github.com/databricks/databricks-sdk-go/service/jobs.Condition: + _: + enum: + - ANY_UPDATED + - ALL_UPDATED +github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask: + left: + description: The left operand of the condition task. 
Can be either a string value + or a job state or parameter reference. + op: + description: |- + * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`. + * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`. + + The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison. + right: + description: The right operand of the condition task. Can be either a string value + or a job state or parameter reference. +github.com/databricks/databricks-sdk-go/service/jobs.ConditionTaskOp: + _: + description: |- + * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`. + * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`. + + The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison. + enum: + - EQUAL_TO + - GREATER_THAN + - GREATER_THAN_OR_EQUAL + - LESS_THAN + - LESS_THAN_OR_EQUAL + - NOT_EQUAL +github.com/databricks/databricks-sdk-go/service/jobs.Continuous: + pause_status: + description: Indicate whether the continuous execution of the job is paused or + not. Defaults to UNPAUSED. +github.com/databricks/databricks-sdk-go/service/jobs.CronSchedule: + pause_status: + description: Indicate whether this schedule is paused or not. + quartz_cron_expression: + description: A Cron expression using Quartz syntax that describes the schedule + for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) + for details. This field is required. + timezone_id: + description: A Java timezone ID. The schedule for a job is resolved with respect + to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) + for details. This field is required. +github.com/databricks/databricks-sdk-go/service/jobs.DbtTask: + catalog: + description: Optional name of the catalog to use. The value is the top level in + the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog + value can only be specified if a warehouse_id is specified. Requires dbt-databricks + >= 1.1.1. + commands: + description: A list of dbt commands to execute. All commands must start with `dbt`. + This parameter must not be empty. A maximum of up to 10 commands can be provided. + profiles_directory: + description: Optional (relative) path to the profiles directory. Can only be specified + if no warehouse_id is specified. If no warehouse_id is specified and this folder + is unset, the root directory is used. + project_directory: + description: |- + Path to the project directory. Optional for Git sourced tasks, in which + case if no value is provided, the root of the Git repository is used. + schema: + description: Optional schema to write to. This parameter is only used when a warehouse_id + is also provided. If not provided, the `default` schema is used. 
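+ # Illustrative sketch (not from the upstream schema): a `condition_task` using the operands and
+ # operator documented above. Per those notes, `GREATER_THAN_OR_EQUAL` compares numerically, so
+ # "12.0" >= "12" evaluates to true.
+ #   condition_task:
+ #     left: "12.0"
+ #     op: "GREATER_THAN_OR_EQUAL"
+ #     right: "12"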
+ source: + description: |- + Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved + from the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: Project is located in Databricks workspace. + * `GIT`: Project is located in cloud Git provider. + warehouse_id: + description: ID of the SQL warehouse to connect to. If provided, we automatically + generate and provide the profile and connection details to dbt. It can be overridden + on a per-command basis by using the `--profiles-dir` command line argument. +github.com/databricks/databricks-sdk-go/service/jobs.FileArrivalTriggerConfiguration: + min_time_between_triggers_seconds: + description: |- + If set, the trigger starts a run only after the specified amount of time passed since + the last time the trigger fired. The minimum allowed value is 60 seconds + url: + description: URL to be monitored for file arrivals. The path must point to the + root or a subpath of the external location. + wait_after_last_change_seconds: + description: |- + If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. + This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The + minimum allowed value is 60 seconds. +github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask: + concurrency: + description: |- + An optional maximum allowed number of concurrent runs of the task. + Set this value if you want to be able to execute multiple runs of the task concurrently. + inputs: + description: |- + Array for task to iterate on. This can be a JSON string or a reference to + an array parameter. + task: + description: Configuration for the task that will be run for each element in the + array +github.com/databricks/databricks-sdk-go/service/jobs.Format: + _: + enum: + - SINGLE_TASK + - MULTI_TASK +github.com/databricks/databricks-sdk-go/service/jobs.GitProvider: + _: + enum: + - gitHub + - bitbucketCloud + - azureDevOpsServices + - gitHubEnterprise + - bitbucketServer + - gitLab + - gitLabEnterpriseEdition + - awsCodeCommit +github.com/databricks/databricks-sdk-go/service/jobs.GitSnapshot: + _: + description: Read-only state of the remote repository at the time the job was + run. This field is only included on job runs. + used_commit: + description: Commit that was used to execute the run. If git_branch was specified, + this points to the HEAD of the branch at the time of the run; if git_tag was + specified, this points to the commit the tag points to. +github.com/databricks/databricks-sdk-go/service/jobs.GitSource: + _: + description: |- + An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. + + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. + + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. + git_branch: + description: Name of the branch to be checked out and used by this job. This field + cannot be specified in conjunction with git_tag or git_commit. 
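+ # Illustrative sketch (not from the upstream schema): a `git_source` block built from the fields
+ # documented above; the repository URL and branch name are hypothetical placeholders.
+ #   git_source:
+ #     git_url: "https://github.com/example-org/example-repo"
+ #     git_provider: "gitHub"
+ #     git_branch: "main"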
+ git_commit: + description: Commit to be checked out and used by this job. This field cannot + be specified in conjunction with git_branch or git_tag. + git_provider: + description: Unique identifier of the service used to host the Git repository. + The value is case insensitive. + git_snapshot: {} + git_tag: + description: Name of the tag to be checked out and used by this job. This field + cannot be specified in conjunction with git_branch or git_commit. + git_url: + description: URL of the repository to be cloned by this job. + job_source: + description: The source of the job specification in the remote repository when + the job is source controlled. +github.com/databricks/databricks-sdk-go/service/jobs.JobCluster: + job_cluster_key: + description: |- + A unique name for the job cluster. This field is required and must be unique within the job. + `JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution. + new_cluster: + description: If new_cluster, a description of a cluster that is created for each + task. +github.com/databricks/databricks-sdk-go/service/jobs.JobDeployment: + kind: + description: |- + The kind of deployment that manages the job. + + * `BUNDLE`: The job is managed by Databricks Asset Bundle. + metadata_file_path: + description: Path of the file that contains deployment metadata. +github.com/databricks/databricks-sdk-go/service/jobs.JobDeploymentKind: + _: + description: '* `BUNDLE`: The job is managed by Databricks Asset Bundle.' + enum: + - BUNDLE +github.com/databricks/databricks-sdk-go/service/jobs.JobEditMode: + _: + description: |- + Edit mode of the job. + + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. + * `EDITABLE`: The job is in an editable state and can be modified. + enum: + - UI_LOCKED + - EDITABLE +github.com/databricks/databricks-sdk-go/service/jobs.JobEmailNotifications: + no_alert_for_skipped_runs: + description: |- + If true, do not send email to recipients specified in `on_failure` if the run is skipped. + This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. + on_duration_warning_threshold_exceeded: + description: A list of email addresses to be notified when the duration of a run + exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the + `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified + in the `health` field for the job, notifications are not sent. + on_failure: + description: A list of email addresses to be notified when a run unsuccessfully + completes. A run is considered to have completed unsuccessfully if it ends with + an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. + If this is not specified on job creation, reset, or update the list is empty, + and notifications are not sent. + on_start: + description: A list of email addresses to be notified when a run begins. If not + specified on job creation, reset, or update, the list is empty, and notifications + are not sent. + on_streaming_backlog_exceeded: + description: |- + A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. 
If the issue persists, notifications are resent every 30 minutes. + on_success: + description: A list of email addresses to be notified when a run successfully + completes. A run is considered to have completed successfully if it ends with + a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified + on job creation, reset, or update, the list is empty, and notifications are + not sent. +github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: + environment_key: + description: The key of an environment. It has to be unique within a job. + spec: {} +github.com/databricks/databricks-sdk-go/service/jobs.JobNotificationSettings: + no_alert_for_canceled_runs: + description: If true, do not send notifications to recipients specified in `on_failure` + if the run is canceled. + no_alert_for_skipped_runs: + description: If true, do not send notifications to recipients specified in `on_failure` + if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.JobParameterDefinition: + default: + description: Default value of the parameter. + name: + description: The name of the defined parameter. May only contain alphanumeric + characters, `_`, `-`, and `.` +github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs: + _: + description: |- + Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job. + + Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown. + service_principal_name: + description: Application ID of an active service principal. Setting this field + requires the `servicePrincipal/user` role. + user_name: + description: The email of an active workspace user. Non-admin users can only set + this field to their own email. +github.com/databricks/databricks-sdk-go/service/jobs.JobSource: + _: + description: The source of the job specification in the remote repository when + the job is source controlled. + dirty_state: + description: |- + Dirty state indicates the job is not fully synced with the job specification in the remote repository. + + Possible values are: + * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced. + * `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced. + import_from_git_branch: + description: Name of the branch which the job is imported from. + job_config_path: + description: Path of the job YAML file that contains the job specification. +github.com/databricks/databricks-sdk-go/service/jobs.JobSourceDirtyState: + _: + description: |- + Dirty state indicates the job is not fully synced with the job specification + in the remote repository. + + Possible values are: + * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced. + * `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced. + enum: + - NOT_SYNCED + - DISCONNECTED +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthMetric: + _: + description: |- + Specifies the health metric that is being evaluated for a particular health rule. 
+ + * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. + * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview. + * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview. + * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview. + * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview. + enum: + - RUN_DURATION_SECONDS + - STREAMING_BACKLOG_BYTES + - STREAMING_BACKLOG_RECORDS + - STREAMING_BACKLOG_SECONDS + - STREAMING_BACKLOG_FILES +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthOperator: + _: + description: Specifies the operator used to compare the health metric value with + the specified threshold. + enum: + - GREATER_THAN +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRule: + metric: {} + op: {} + value: + description: Specifies the threshold value that the health metric should obey + to satisfy the health rule. +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules: + _: + description: An optional set of health rules that can be defined for this job. + rules: {} +github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask: + base_parameters: + description: |- + Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run + Now with parameters specified, the two parameters maps are merged. If the same key is specified in + `base_parameters` and in `run-now`, the value from `run-now` is used. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + If the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters, + the default value from the notebook is used. + + Retrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets). + + The JSON representation of this field cannot exceed 1MB. + notebook_path: + description: |- + The path of the notebook to be run in the Databricks workspace or remote repository. + For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. + For notebooks stored in a remote repository, the path must be relative. This field is required. + source: + description: |- + Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + * `WORKSPACE`: Notebook is located in Databricks workspace. + * `GIT`: Notebook is located in cloud Git provider. + warehouse_id: + description: |- + Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses. + + Note that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail. 
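+ # Illustrative sketch (not from the upstream schema): a `notebook_task` using the fields
+ # documented above; the notebook path is a hypothetical placeholder and the parameter values
+ # are taken from the examples in those descriptions.
+ #   notebook_task:
+ #     notebook_path: "/Users/user1@databricks.com/my_notebook"
+ #     source: "WORKSPACE"
+ #     base_parameters:
+ #       name: "john doe"
+ #       age: "35"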
+github.com/databricks/databricks-sdk-go/service/jobs.PauseStatus: + _: + enum: + - UNPAUSED + - PAUSED +github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfiguration: + interval: + description: The interval at which the trigger should run. + unit: + description: The unit of time for the interval. +github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfigurationTimeUnit: + _: + enum: + - HOURS + - DAYS + - WEEKS +github.com/databricks/databricks-sdk-go/service/jobs.PipelineParams: + full_refresh: + description: If true, triggers a full refresh on the delta live table. +github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask: + full_refresh: + description: If true, triggers a full refresh on the delta live table. + pipeline_id: + description: The full name of the pipeline task to execute. +github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask: + entry_point: + description: Named entry point to use, if it does not exist in the metadata of + the package it executes the function from the package directly using `$packageName.$entryPoint()` + named_parameters: + description: Command-line parameters passed to Python wheel task in the form of + `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if `parameters` + is not null. + package_name: + description: Name of the package to execute + parameters: + description: Command-line parameters passed to Python wheel task. Leave it empty + if `named_parameters` is not null. +github.com/databricks/databricks-sdk-go/service/jobs.QueueSettings: + enabled: + description: If true, enable queueing for the job. This is a required field. +github.com/databricks/databricks-sdk-go/service/jobs.RunIf: + _: + description: |- + An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + + Possible values are: + * `ALL_SUCCESS`: All dependencies have executed and succeeded + * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded + * `NONE_FAILED`: None of the dependencies have failed and at least one was executed + * `ALL_DONE`: All dependencies have been completed + * `AT_LEAST_ONE_FAILED`: At least one dependency failed + * `ALL_FAILED`: ALl dependencies have failed + enum: + - ALL_SUCCESS + - ALL_DONE + - NONE_FAILED + - AT_LEAST_ONE_SUCCESS + - ALL_FAILED + - AT_LEAST_ONE_FAILED +github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: + dbt_commands: + description: 'An array of commands to execute for jobs with the dbt task, for + example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt + run"]`' + jar_params: + description: |- + A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", "35"]`. + The parameters are used to invoke the main function of the main class specified in the Spark JAR task. + If not specified upon `run-now`, it defaults to an empty list. + jar_params cannot be specified in conjunction with notebook_params. + The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + job_id: + description: ID of the job to trigger. + job_parameters: + description: Job-level parameters used to trigger the job. 
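+ # Illustrative sketch (not from the upstream schema): a `run_job_task` that triggers another job
+ # with job-level parameters, per the fields above; the job ID and parameter values are
+ # hypothetical placeholders.
+ #   run_job_task:
+ #     job_id: 123456
+ #     job_parameters:
+ #       env: "dev"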
+ notebook_params: + description: |- + A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": "john doe", "age": "35"}`. + The map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function. + + If not specified upon `run-now`, the triggered run uses the job’s base parameters. + + notebook_params cannot be specified in conjunction with jar_params. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. + pipeline_params: + description: Controls whether the pipeline should perform a full refresh + python_named_params: {} + python_params: + description: |- + A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. + The parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite + the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) + cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + + Important + + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. + Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. + spark_submit_params: + description: |- + A list of parameters for jobs with spark submit task, for example `"spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"]`. + The parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the + parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) + cannot exceed 10,000 bytes. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs + + Important + + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. + Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. + sql_params: + description: 'A map from keys to values for jobs with SQL task, for example `"sql_params": + {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom + parameters.' +github.com/databricks/databricks-sdk-go/service/jobs.Source: + _: + description: |- + Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\ + from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in Databricks workspace. + * `GIT`: SQL file is located in cloud Git provider. + enum: + - WORKSPACE + - GIT +github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask: + jar_uri: + description: Deprecated since 04/2016. Provide a `jar` through the `libraries` + field instead. For an example, see :method:jobs/create. 
+ main_class_name: + description: |- + The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. + + The code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail. + parameters: + description: |- + Parameters passed to the main method. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask: + parameters: + description: |- + Command line parameters passed to the Python file. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + python_file: + description: The Python file to be executed. Cloud file URIs (such as dbfs:/, + s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored + in the Databricks workspace, the path must be absolute and begin with `/`. For + files stored in a remote repository, the path must be relative. This field is + required. + source: + description: |- + Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local + Databricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`, + the Python file will be retrieved from a Git repository defined in `git_source`. + + * `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI. + * `GIT`: The Python file is located in a remote Git repository. +github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask: + parameters: + description: |- + Command-line parameters passed to spark submit. + + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTask: + alert: + description: If alert, indicates that this job must refresh a SQL alert. + dashboard: + description: If dashboard, indicates that this job must refresh a SQL dashboard. + file: + description: If file, indicates that this job runs a SQL file in a remote Git + repository. + parameters: + description: Parameters to be used for each run of this job. The SQL alert task + does not support custom parameters. + query: + description: If query, indicates that this job must execute a SQL query. + warehouse_id: + description: The canonical identifier of the SQL warehouse. Recommended to use + with serverless or pro SQL warehouses. Classic SQL warehouses are only supported + for SQL alert, dashboard and query tasks and are limited to scheduled single-task + jobs. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskAlert: + alert_id: + description: The canonical identifier of the SQL alert. + pause_subscriptions: + description: If true, the alert notifications are not sent to subscribers. + subscriptions: + description: If specified, alert notifications are sent to subscribers. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskDashboard: + custom_subject: + description: Subject of the email sent to subscribers of this task. + dashboard_id: + description: The canonical identifier of the SQL dashboard. + pause_subscriptions: + description: If true, the dashboard snapshot is not taken, and emails are not + sent to subscribers. 
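+ # Illustrative sketch (not from the upstream schema): a `sql_task` that runs a saved query on a
+ # serverless or pro SQL warehouse, per the fields above; both identifiers are hypothetical
+ # placeholders.
+ #   sql_task:
+ #     warehouse_id: "<warehouse-id>"
+ #     query:
+ #       query_id: "<query-id>"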
+ subscriptions: + description: If specified, dashboard snapshots are sent to subscriptions. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskFile: + path: + description: Path of the SQL file. Must be relative if the source is a remote + Git repository and absolute for workspace paths. + source: + description: |- + Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved + from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository + defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in Databricks workspace. + * `GIT`: SQL file is located in cloud Git provider. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskQuery: + query_id: + description: The canonical identifier of the SQL query. +github.com/databricks/databricks-sdk-go/service/jobs.SqlTaskSubscription: + destination_id: + description: The canonical identifier of the destination to receive email notification. + This parameter is mutually exclusive with user_name. You cannot set both destination_id + and user_name for subscription notifications. + user_name: + description: The user name to receive the subscription email. This parameter is + mutually exclusive with destination_id. You cannot set both destination_id and + user_name for subscription notifications. +github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration: + condition: + description: The table(s) condition based on which to trigger a job run. + min_time_between_triggers_seconds: + description: |- + If set, the trigger starts a run only after the specified amount of time has passed since + the last time the trigger fired. The minimum allowed value is 60 seconds. + table_names: + description: A list of Delta tables to monitor for changes. The table name must + be in the format `catalog_name.schema_name.table_name`. + wait_after_last_change_seconds: + description: |- + If set, the trigger starts a run only after no table updates have occurred for the specified time + and can be used to wait for a series of table updates before triggering a run. The + minimum allowed value is 60 seconds. +github.com/databricks/databricks-sdk-go/service/jobs.Task: + condition_task: + description: |- + The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. + The condition task does not require a cluster to execute and does not support retries or notifications. + dbt_task: + description: The task runs one or more dbt commands when the `dbt_task` field + is present. The dbt task requires both Databricks SQL and the ability to use + a serverless or a pro SQL warehouse. + depends_on: + description: |- + An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true. + The key is `task_key`, and the value is the name assigned to the dependent task. + description: + description: An optional description for this task. + disable_auto_optimization: + description: An option to disable auto optimization in serverless + email_notifications: + description: An optional set of email addresses that is notified when runs of + this task begin or complete as well as when this task is deleted. The default + behavior is to not send any emails. 
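+ # Illustrative sketch (not from the upstream schema): a `table_update` trigger built from the
+ # fields above; the table name is a hypothetical placeholder. `condition` uses the
+ # ANY_UPDATED/ALL_UPDATED values documented earlier, and 60 is the minimum allowed wait.
+ #   table_update:
+ #     table_names: ["main.default.events"]
+ #     condition: "ALL_UPDATED"
+ #     min_time_between_triggers_seconds: 60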
+ environment_key:
+ description: The key that references an environment spec in a job. This field
+ is required for Python script, Python wheel and dbt tasks when using serverless
+ compute.
+ existing_cluster_id:
+ description: |-
+ If existing_cluster_id, the ID of an existing cluster that is used for all runs.
+ When running jobs or tasks on an existing cluster, you may need to manually restart
+ the cluster if it stops responding. We suggest running jobs and tasks on new clusters for
+ greater reliability.
+ for_each_task:
+ description: The task executes a nested task for every input provided when the
+ `for_each_task` field is present.
+ health: {}
+ job_cluster_key:
+ description: If job_cluster_key, this task is executed reusing the cluster specified
+ in `job.settings.job_clusters`.
+ libraries:
+ description: |-
+ An optional list of libraries to be installed on the cluster.
+ The default value is an empty list.
+ max_retries:
+ description: An optional maximum number of times to retry an unsuccessful run.
+ A run is considered to be unsuccessful if it completes with the `FAILED` result_state
+ or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely
+ and the value `0` means to never retry.
+ min_retry_interval_millis:
+ description: An optional minimal interval in milliseconds between the start of
+ the failed run and the subsequent retry run. The default behavior is that unsuccessful
+ runs are immediately retried.
+ new_cluster:
+ description: If new_cluster, a description of a new cluster that is created for
+ each run.
+ notebook_task:
+ description: The task runs a notebook when the `notebook_task` field is present.
+ notification_settings:
+ description: Optional notification settings that are used when sending notifications
+ to each of the `email_notifications` and `webhook_notifications` for this task.
+ pipeline_task:
+ description: The task triggers a pipeline update when the `pipeline_task` field
+ is present. Only pipelines configured to use triggered mode are supported.
+ python_wheel_task:
+ description: The task runs a Python wheel when the `python_wheel_task` field is
+ present.
+ retry_on_timeout:
+ description: |-
+ An optional policy to specify whether to retry a job when it times out. The default behavior
+ is to not retry on timeout.
+ run_if:
+ description: |-
+ An optional value specifying the condition determining whether the task is run once its dependencies have been completed.
+
+ * `ALL_SUCCESS`: All dependencies have executed and succeeded
+ * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded
+ * `NONE_FAILED`: None of the dependencies have failed and at least one was executed
+ * `ALL_DONE`: All dependencies have been completed
+ * `AT_LEAST_ONE_FAILED`: At least one dependency failed
+ * `ALL_FAILED`: All dependencies have failed
+ run_job_task:
+ description: The task triggers another job when the `run_job_task` field is present.
+ spark_jar_task:
+ description: The task runs a JAR when the `spark_jar_task` field is present.
+ spark_python_task:
+ description: The task runs a Python file when the `spark_python_task` field is
+ present.
+ spark_submit_task:
+ description: |-
+ (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.
+
+ In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. + + `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. + + By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. + + The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. + sql_task: + description: The task runs a SQL query or file, or it refreshes a SQL alert or + a legacy SQL dashboard when the `sql_task` field is present. + task_key: + description: |- + A unique name for the task. This field is used to refer to this task from other tasks. + This field is required and must be unique within its parent job. + On Update or Reset, this field is used to reference the tasks to be updated or reset. + timeout_seconds: + description: An optional timeout applied to each run of this job task. A value + of `0` means no timeout. + webhook_notifications: + description: A collection of system notification IDs to notify when runs of this + task begin or complete. The default behavior is to not send any system notifications. +github.com/databricks/databricks-sdk-go/service/jobs.TaskDependency: + outcome: + description: Can only be specified on condition task dependencies. The outcome + of the dependent task that must be met for this task to run. + task_key: + description: The name of the task this task depends on. +github.com/databricks/databricks-sdk-go/service/jobs.TaskEmailNotifications: + no_alert_for_skipped_runs: + description: |- + If true, do not send email to recipients specified in `on_failure` if the run is skipped. + This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. + on_duration_warning_threshold_exceeded: + description: A list of email addresses to be notified when the duration of a run + exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the + `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified + in the `health` field for the job, notifications are not sent. + on_failure: + description: A list of email addresses to be notified when a run unsuccessfully + completes. A run is considered to have completed unsuccessfully if it ends with + an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. + If this is not specified on job creation, reset, or update the list is empty, + and notifications are not sent. + on_start: + description: A list of email addresses to be notified when a run begins. If not + specified on job creation, reset, or update, the list is empty, and notifications + are not sent. + on_streaming_backlog_exceeded: + description: |- + A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + on_success: + description: A list of email addresses to be notified when a run successfully + completes. 
A run is considered to have completed successfully if it ends with + a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified + on job creation, reset, or update, the list is empty, and notifications are + not sent. +github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings: + alert_on_last_attempt: + description: If true, do not send notifications to recipients specified in `on_start` + for the retried runs and do not send notifications to recipients specified in + `on_failure` until the last retry of the run. + no_alert_for_canceled_runs: + description: If true, do not send notifications to recipients specified in `on_failure` + if the run is canceled. + no_alert_for_skipped_runs: + description: If true, do not send notifications to recipients specified in `on_failure` + if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: + file_arrival: + description: File arrival trigger settings. + pause_status: + description: Whether this trigger is paused or not. + periodic: + description: Periodic trigger settings. + table: + description: Old table trigger settings name. Deprecated in favor of `table_update`. + table_update: {} +github.com/databricks/databricks-sdk-go/service/jobs.Webhook: + id: {} +github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications: + on_duration_warning_threshold_exceeded: + description: An optional list of system notification IDs to call when the duration + of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric + in the `health` field. A maximum of 3 destinations can be specified for the + `on_duration_warning_threshold_exceeded` property. + on_failure: + description: An optional list of system notification IDs to call when the run + fails. A maximum of 3 destinations can be specified for the `on_failure` property. + on_start: + description: An optional list of system notification IDs to call when the run + starts. A maximum of 3 destinations can be specified for the `on_start` property. + on_streaming_backlog_exceeded: + description: |- + An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream. + Streaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. + Alerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes. + A maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property. + on_success: + description: An optional list of system notification IDs to call when the run + completes successfully. A maximum of 3 destinations can be specified for the + `on_success` property. +github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag: + key: + description: The tag key. + value: + description: The tag value. +github.com/databricks/databricks-sdk-go/service/ml.ModelTag: + key: + description: The tag key. + value: + description: The tag value. +github.com/databricks/databricks-sdk-go/service/ml.ModelVersion: + creation_timestamp: + description: Timestamp recorded when this `model_version` was created. + current_stage: + description: Current stage for this `model_version`. + description: + description: Description of this `model_version`. 
+ last_updated_timestamp: + description: Timestamp recorded when metadata for this `model_version` was last + updated. + name: + description: Unique name of the model + run_id: + description: |- + MLflow run ID used when creating `model_version`, if `source` was generated by an + experiment run stored in MLflow tracking server. + run_link: + description: 'Run Link: Direct link to the run that generated this version' + source: + description: URI indicating the location of the source model artifacts, used when + creating `model_version` + status: + description: Current status of `model_version` + status_message: + description: Details on current `status`, if it is pending or failed. + tags: + description: 'Tags: Additional metadata key-value pairs for this `model_version`.' + user_id: + description: User that created this `model_version`. + version: + description: Model's version number. +github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus: + _: + description: Current status of `model_version` + enum: + - PENDING_REGISTRATION + - FAILED_REGISTRATION + - READY +github.com/databricks/databricks-sdk-go/service/ml.ModelVersionTag: + key: + description: The tag key. + value: + description: The tag value. +github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: + quartz_cron_schedule: {} + timezone_id: {} +github.com/databricks/databricks-sdk-go/service/pipelines.DeploymentKind: + _: + description: | + The deployment method that manages the pipeline: + - BUNDLE: The pipeline is managed by a Databricks Asset Bundle. + enum: + - BUNDLE +github.com/databricks/databricks-sdk-go/service/pipelines.FileLibrary: + path: + description: The absolute path of the file. +github.com/databricks/databricks-sdk-go/service/pipelines.Filters: + exclude: + description: Paths to exclude. + include: + description: Paths to include. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionConfig: + report: + description: Select a specific source report. + schema: + description: Select all tables from a specific source schema. + table: + description: Select a specific source table. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition: + connection_id: + description: '[Deprecated, use connection_name instead] Immutable. The Unity Catalog + connection that this gateway pipeline uses to communicate with the source.' + connection_name: + description: Immutable. The Unity Catalog connection that this gateway pipeline + uses to communicate with the source. + gateway_storage_catalog: + description: Required, Immutable. The name of the catalog for the gateway pipeline's + storage location. + gateway_storage_name: + description: | + Optional. The Unity Catalog-compatible name for the gateway storage location. + This is the destination to use for the data that is extracted by the gateway. + Delta Live Tables system will automatically create the storage location under the catalog and schema. + gateway_storage_schema: + description: Required, Immutable. The name of the schema for the gateway pipelines's + storage location. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinition: + connection_name: + description: Immutable. The Unity Catalog connection that this ingestion pipeline + uses to communicate with the source. This is used with connectors for applications + like Salesforce, Workday, and so on. + ingestion_gateway_id: + description: Immutable. 
Identifier for the gateway that is used by this ingestion + pipeline to communicate with the source database. This is used with connectors + to databases like SQL Server. + objects: + description: Required. Settings specifying tables to replicate and the destination + for the replicated tables. + table_configuration: + description: Configuration settings to control the ingestion of tables. These + settings are applied to all tables in the pipeline. +github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} +github.com/databricks/databricks-sdk-go/service/pipelines.NotebookLibrary: + path: + description: The absolute path of the notebook. +github.com/databricks/databricks-sdk-go/service/pipelines.Notifications: + alerts: + description: | + A list of alerts that trigger the sending of notifications to the configured + destinations. The supported alerts are: + + * `on-update-success`: A pipeline update completes successfully. + * `on-update-failure`: Each time a pipeline update fails. + * `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error. + * `on-flow-failure`: A single data flow fails. + email_recipients: + description: | + A list of email addresses notified when a configured alert is triggered. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineCluster: + apply_policy_default_values: + description: 'Note: This field won''t be persisted. Only API users will check + this field.' + autoscale: + description: |- + Parameters needed in order to automatically scale clusters up and down based on load. + Note: autoscaling works best with DB runtime versions 3.0 or later. + aws_attributes: + description: |- + Attributes related to clusters running on Amazon Web Services. + If not specified at cluster creation, a set of default values will be used. + azure_attributes: + description: |- + Attributes related to clusters running on Microsoft Azure. + If not specified at cluster creation, a set of default values will be used. + cluster_log_conf: + description: | + The configuration for delivering spark logs to a long-term storage destination. + Only dbfs destinations are supported. Only one destination can be specified + for one cluster. If the conf is given, the logs will be delivered to the destination every + `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while + the destination of executor logs is `$destination/$clusterId/executor`. + custom_tags: + description: |- + Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS + instances and EBS volumes) with these tags in addition to `default_tags`. Notes: + + - Currently, Databricks allows at most 45 custom tags + + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags + driver_instance_pool_id: + description: |- + The optional ID of the instance pool for the driver of the cluster belongs. + The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not + assigned. + driver_node_type_id: + description: |- + The node type of the Spark driver. + Note that this field is optional; if unset, the driver node type will be set as the same value + as `node_type_id` defined above. + enable_local_disk_encryption: + description: Whether to enable local disk encryption for the cluster. + gcp_attributes: + description: |- + Attributes related to clusters running on Google Cloud Platform. 
+ If not specified at cluster creation, a set of default values will be used. + init_scripts: + description: The configuration for storing init scripts. Any number of destinations + can be specified. The scripts are executed sequentially in the order provided. + If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. + instance_pool_id: + description: The optional ID of the instance pool to which the cluster belongs. + label: + description: A label for the cluster specification, either `default` to configure + the default cluster, or `maintenance` to configure the maintenance cluster. + This field is optional. The default value is `default`. + node_type_id: + description: | + This field encodes, through a single value, the resources available to each of + the Spark nodes in this cluster. For example, the Spark nodes can be provisioned + and optimized for memory or compute intensive workloads. A list of available node + types can be retrieved by using the :method:clusters/listNodeTypes API call. + num_workers: + description: |- + Number of worker nodes that this cluster should have. A cluster has one Spark Driver + and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. + + Note: When reading the properties of a cluster, this field reflects the desired number + of workers rather than the actual current number of workers. For instance, if a cluster + is resized from 5 to 10 workers, this field will immediately be updated to reflect + the target size of 10 workers, whereas the workers listed in `spark_info` will gradually + increase from 5 to 10 as the new nodes are provisioned. + policy_id: + description: The ID of the cluster policy used to create the cluster if applicable. + spark_conf: + description: | + An object containing a set of optional, user-specified Spark configuration key-value pairs. + See :method:clusters/create for more details. + spark_env_vars: + description: |- + An object containing a set of optional, user-specified environment variable key-value pairs. + Please note that key-value pair of the form (X,Y) will be exported as is (i.e., + `export X='Y'`) while launching the driver and workers. + + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending + them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all + default databricks managed environmental variables are included as well. + + Example Spark environment variables: + `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or + `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + ssh_public_keys: + description: |- + SSH public key contents that will be added to each Spark node in this cluster. The + corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. + Up to 10 keys can be specified. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscale: + max_workers: + description: The maximum number of workers to which the cluster can scale up when + overloaded. `max_workers` must be strictly greater than `min_workers`. + min_workers: + description: |- + The minimum number of workers the cluster can scale down to when underutilized. + It is also the initial number of workers the cluster will have after creation. 
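+ # Illustrative sketch (not from the upstream schema): a pipeline cluster entry combining the
+ # fields documented above; the node type is a hypothetical placeholder and the environment
+ # variables reuse the example values from the `spark_env_vars` description.
+ #   clusters:
+ #     - label: "default"
+ #       node_type_id: "<node-type-id>"
+ #       autoscale:
+ #         min_workers: 1
+ #         max_workers: 5
+ #       spark_env_vars:
+ #         SPARK_WORKER_MEMORY: "28000m"
+ #         SPARK_LOCAL_DIRS: "/local_disk0"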
+ mode: + description: | + Databricks Enhanced Autoscaling optimizes cluster utilization by automatically + allocating cluster resources based on workload volume, with minimal impact to + the data processing latency of your pipelines. Enhanced Autoscaling is available + for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` + clusters. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode: + _: + description: | + Databricks Enhanced Autoscaling optimizes cluster utilization by automatically + allocating cluster resources based on workload volume, with minimal impact to + the data processing latency of your pipelines. Enhanced Autoscaling is available + for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` + clusters. + enum: + - ENHANCED + - LEGACY +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineDeployment: + kind: + description: The deployment method that manages the pipeline. + metadata_file_path: + description: The path to the file containing metadata about the deployment. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineLibrary: + file: + description: | + The path to a file that defines a pipeline and is stored in the Databricks Repos. + jar: + description: | + URI of the jar to be installed. Currently only DBFS is supported. + maven: + description: | + Specification of a maven library to be installed. + notebook: + description: | + The path to a notebook that defines a pipeline and is stored in the Databricks workspace. + whl: + description: URI of the whl to be installed. +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: + cron: {} + manual: {} +github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec: + destination_catalog: + description: Required. Destination catalog to store table. + destination_schema: + description: Required. Destination schema to store table. + destination_table: + description: Required. Destination table name. The pipeline fails if a table with + that name already exists. + source_url: + description: Required. Report URL in the source system. + table_configuration: + description: Configuration settings to control the ingestion of tables. These + settings override the table_configuration defined in the IngestionPipelineDefinition + object. +github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow: + days_of_week: + description: |- + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + If not specified all days of the week will be used. + start_hour: + description: |- + An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day. + Continuous pipeline restart is triggered only within a five-hour window starting at this hour. + time_zone_id: + description: |- + Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. + If not specified, UTC will be used. +github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek: + _: + description: |- + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + If not specified all days of the week will be used. 
+ enum: + - MONDAY + - TUESDAY + - WEDNESDAY + - THURSDAY + - FRIDAY + - SATURDAY + - SUNDAY +github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec: + destination_catalog: + description: Required. Destination catalog to store tables. + destination_schema: + description: Required. Destination schema to store tables in. Tables with the + same name as the source tables are created in this destination schema. The pipeline + fails If a table with the same name already exists. + source_catalog: + description: The source catalog name. Might be optional depending on the type + of source. + source_schema: + description: Required. Schema name in the source database. + table_configuration: + description: Configuration settings to control the ingestion of tables. These + settings are applied to all tables in this schema and override the table_configuration + defined in the IngestionPipelineDefinition object. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec: + destination_catalog: + description: Required. Destination catalog to store table. + destination_schema: + description: Required. Destination schema to store table. + destination_table: + description: Optional. Destination table name. The pipeline fails if a table with + that name already exists. If not set, the source table name is used. + source_catalog: + description: Source catalog name. Might be optional depending on the type of source. + source_schema: + description: Schema name in the source database. Might be optional depending on + the type of source. + source_table: + description: Required. Table name in the source database. + table_configuration: + description: Configuration settings to control the ingestion of tables. These + settings override the table_configuration defined in the IngestionPipelineDefinition + object and the SchemaSpec. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: + primary_keys: + description: The primary key of the table used to apply changes. + salesforce_include_formula_fields: + description: If true, formula fields defined in the table are included in the + ingestion. This setting is only valid for the Salesforce connector + scd_type: + description: The SCD type to use to ingest the table. + sequence_by: + description: The column names specifying the logical order of events in the source + data. Delta Live Tables uses this sequencing to handle change events that arrive + out of order. +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType: + _: + description: The SCD type to use to ingest the table. + enum: + - SCD_TYPE_1 + - SCD_TYPE_2 +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig: + guardrails: + description: Configuration for AI Guardrails to prevent unwanted data and unsafe + data in requests and responses. + inference_table_config: + description: Configuration for payload logging using inference tables. Use these + tables to monitor and audit data being sent to and received from model APIs + and to improve model quality. + rate_limits: + description: Configuration for rate limits which can be set to limit endpoint + traffic. + usage_tracking_config: + description: Configuration to enable usage tracking using system tables. These + tables allow you to monitor operational usage on endpoints and their associated + costs. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters: + invalid_keywords: + description: List of invalid keywords. 
AI guardrail uses keyword or string matching + to decide if the keyword exists in the request or response content. + pii: + description: Configuration for guardrail PII filter. + safety: + description: Indicates whether the safety filter is enabled. + valid_topics: + description: The list of allowed topics. Given a chat request, this guardrail + flags the request if its topic is not in the allowed topics. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehavior: + behavior: + description: Behavior for PII filter. Currently only 'BLOCK' is supported. If + 'BLOCK' is set for the input guardrail and the request contains PII, the request + is not sent to the model server and 400 status code is returned; if 'BLOCK' + is set for the output guardrail and the model response contains PII, the PII + info in the response is redacted and 400 status code is returned. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior: + _: + description: Behavior for PII filter. Currently only 'BLOCK' is supported. If + 'BLOCK' is set for the input guardrail and the request contains PII, the request + is not sent to the model server and 400 status code is returned; if 'BLOCK' + is set for the output guardrail and the model response contains PII, the PII + info in the response is redacted and 400 status code is returned. + enum: + - NONE + - BLOCK +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails: + input: + description: Configuration for input guardrail filters. + output: + description: Configuration for output guardrail filters. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig: + catalog_name: + description: 'The name of the catalog in Unity Catalog. Required when enabling + inference tables. NOTE: On update, you have to disable inference table first + in order to change the catalog name.' + enabled: + description: Indicates whether the inference table is enabled. + schema_name: + description: 'The name of the schema in Unity Catalog. Required when enabling + inference tables. NOTE: On update, you have to disable inference table first + in order to change the schema name.' + table_name_prefix: + description: 'The prefix of the table in Unity Catalog. NOTE: On update, you have + to disable inference table first in order to change the prefix name.' +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit: + calls: + description: Used to specify how many calls are allowed for a key within the renewal_period. + key: + description: Key field for a rate limit. Currently, only 'user' and 'endpoint' + are supported, with 'endpoint' being the default if not specified. + renewal_period: + description: Renewal period field for a rate limit. Currently, only 'minute' is + supported. +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey: + _: + description: Key field for a rate limit. Currently, only 'user' and 'endpoint' + are supported, with 'endpoint' being the default if not specified. + enum: + - user + - endpoint +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod: + _: + description: Renewal period field for a rate limit. Currently, only 'minute' is + supported. + enum: + - minute +github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingConfig: + enabled: + description: Whether to enable usage tracking. 
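The AI Gateway annotations above (guardrails, inference tables, rate limits, usage tracking) correspond to the `ai_gateway` block of a serving endpoint. A minimal sketch of how those fields fit together is shown below; the resource key, catalog and schema names, and the assumption that the block lives under `resources.model_serving_endpoints.<name>.ai_gateway` in a bundle are illustrative rather than taken from this patch.

# Hypothetical bundle fragment using the AI Gateway fields annotated above.
resources:
  model_serving_endpoints:
    docs_chat:                          # illustrative resource key
      name: docs-chat-endpoint
      ai_gateway:                       # assumed placement of serving.AiGatewayConfig
        usage_tracking_config:
          enabled: true                 # record operational usage in system tables
        inference_table_config:
          enabled: true
          catalog_name: main            # required when enabling inference tables
          schema_name: serving_logs
          table_name_prefix: docs_chat
        rate_limits:
          - calls: 100                  # allowed calls per renewal_period
            key: user                   # 'user' or 'endpoint' (default)
            renewal_period: minute      # only 'minute' is supported
        guardrails:
          input:
            pii:
              behavior: BLOCK           # reject requests that contain PII
            valid_topics:
              - databricks
              - data engineering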
+github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfig: + aws_access_key_id: + description: 'The Databricks secret key reference for an AWS access key ID with + permissions to interact with Bedrock services. If you prefer to paste your API + key directly, see `aws_access_key_id`. You must provide an API key using one + of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.' + aws_access_key_id_plaintext: + description: 'An AWS access key ID with permissions to interact with Bedrock services + provided as a plaintext string. If you prefer to reference your key using Databricks + Secrets, see `aws_access_key_id`. You must provide an API key using one of the + following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`.' + aws_region: + description: The AWS region to use. Bedrock has to be enabled there. + aws_secret_access_key: + description: 'The Databricks secret key reference for an AWS secret access key + paired with the access key ID, with permissions to interact with Bedrock services. + If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. + You must provide an API key using one of the following fields: `aws_secret_access_key` + or `aws_secret_access_key_plaintext`.' + aws_secret_access_key_plaintext: + description: 'An AWS secret access key paired with the access key ID, with permissions + to interact with Bedrock services provided as a plaintext string. If you prefer + to reference your key using Databricks Secrets, see `aws_secret_access_key`. + You must provide an API key using one of the following fields: `aws_secret_access_key` + or `aws_secret_access_key_plaintext`.' + bedrock_provider: + description: 'The underlying provider in Amazon Bedrock. Supported values (case + insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.' +github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider: + _: + description: 'The underlying provider in Amazon Bedrock. Supported values (case + insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.' + enum: + - anthropic + - cohere + - ai21labs + - amazon +github.com/databricks/databricks-sdk-go/service/serving.AnthropicConfig: + anthropic_api_key: + description: 'The Databricks secret key reference for an Anthropic API key. If + you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. + You must provide an API key using one of the following fields: `anthropic_api_key` + or `anthropic_api_key_plaintext`.' + anthropic_api_key_plaintext: + description: 'The Anthropic API key provided as a plaintext string. If you prefer + to reference your key using Databricks Secrets, see `anthropic_api_key`. You + must provide an API key using one of the following fields: `anthropic_api_key` + or `anthropic_api_key_plaintext`.' +github.com/databricks/databricks-sdk-go/service/serving.AutoCaptureConfigInput: + catalog_name: + description: 'The name of the catalog in Unity Catalog. NOTE: On update, you cannot + change the catalog name if the inference table is already enabled.' + enabled: + description: Indicates whether the inference table is enabled. + schema_name: + description: 'The name of the schema in Unity Catalog. NOTE: On update, you cannot + change the schema name if the inference table is already enabled.' + table_name_prefix: + description: 'The prefix of the table in Unity Catalog. NOTE: On update, you cannot + change the prefix name if the inference table is already enabled.' 
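The Bedrock and Anthropic annotations above all follow the same pattern: each credential can be supplied either as a Databricks secret reference or as a plaintext value, but not both. A rough sketch of the secret-reference form is below; the secret scope and key names, the `external_model` placement, and the task type are assumptions for illustration only.

# Hypothetical external_model snippet; secret scope and key names are illustrative.
external_model:
  name: claude-on-bedrock
  provider: amazon-bedrock
  task: llm/v1/chat
  amazon_bedrock_config:
    aws_region: us-east-1
    bedrock_provider: anthropic                                      # Anthropic, Cohere, AI21Labs, or Amazon
    aws_access_key_id: "{{secrets/bedrock/access_key_id}}"           # secret reference, not *_plaintext
    aws_secret_access_key: "{{secrets/bedrock/secret_access_key}}"   # paired secret access key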
+github.com/databricks/databricks-sdk-go/service/serving.CohereConfig: + cohere_api_base: + description: "This is an optional field to provide a customized base URL for the + Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n" + cohere_api_key: + description: 'The Databricks secret key reference for a Cohere API key. If you + prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must + provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.' + cohere_api_key_plaintext: + description: 'The Cohere API key provided as a plaintext string. If you prefer + to reference your key using Databricks Secrets, see `cohere_api_key`. You must + provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`.' +github.com/databricks/databricks-sdk-go/service/serving.DatabricksModelServingConfig: + databricks_api_token: + description: | + The Databricks secret key reference for a Databricks API token that corresponds to a user or service + principal with Can Query access to the model serving endpoint pointed to by this external model. + If you prefer to paste your API key directly, see `databricks_api_token_plaintext`. + You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`. + databricks_api_token_plaintext: + description: | + The Databricks API token that corresponds to a user or service + principal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string. + If you prefer to reference your key using Databricks Secrets, see `databricks_api_token`. + You must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`. + databricks_workspace_url: + description: | + The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model. +github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput: + auto_capture_config: + description: Configuration for Inference Tables which automatically logs requests + and responses to Unity Catalog. + served_entities: + description: A list of served entities for the endpoint to serve. A serving endpoint + can have up to 15 served entities. + served_models: + description: (Deprecated, use served_entities instead) A list of served models + for the endpoint to serve. A serving endpoint can have up to 15 served models. + traffic_config: + description: The traffic config defining how invocations to the serving endpoint + should be routed. +github.com/databricks/databricks-sdk-go/service/serving.EndpointTag: + key: + description: Key field for a serving endpoint tag. + value: + description: Optional value field for a serving endpoint tag. +github.com/databricks/databricks-sdk-go/service/serving.ExternalModel: + ai21labs_config: + description: AI21Labs Config. Only required if the provider is 'ai21labs'. + amazon_bedrock_config: + description: Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. + anthropic_config: + description: Anthropic Config. Only required if the provider is 'anthropic'. + cohere_config: + description: Cohere Config. Only required if the provider is 'cohere'. + databricks_model_serving_config: + description: Databricks Model Serving Config. Only required if the provider is + 'databricks-model-serving'. 
+ google_cloud_vertex_ai_config: + description: Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'. + name: + description: The name of the external model. + openai_config: + description: OpenAI Config. Only required if the provider is 'openai'. + palm_config: + description: PaLM Config. Only required if the provider is 'palm'. + provider: + description: | + The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', + 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + task: + description: The task type of the external model. +github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider: + _: + description: | + The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', + 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + enum: + - ai21labs + - anthropic + - amazon-bedrock + - cohere + - databricks-model-serving + - google-cloud-vertex-ai + - openai + - palm +github.com/databricks/databricks-sdk-go/service/serving.RateLimit: + calls: + description: Used to specify how many calls are allowed for a key within the renewal_period. + key: + description: Key field for a serving endpoint rate limit. Currently, only 'user' + and 'endpoint' are supported, with 'endpoint' being the default if not specified. + renewal_period: + description: Renewal period field for a serving endpoint rate limit. Currently, + only 'minute' is supported. +github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey: + _: + description: Key field for a serving endpoint rate limit. Currently, only 'user' + and 'endpoint' are supported, with 'endpoint' being the default if not specified. + enum: + - user + - endpoint +github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod: + _: + description: Renewal period field for a serving endpoint rate limit. Currently, + only 'minute' is supported. + enum: + - minute +github.com/databricks/databricks-sdk-go/service/serving.Route: + served_model_name: + description: The name of the served model this route configures traffic for. + traffic_percentage: + description: The percentage of endpoint traffic to send to this route. It must + be an integer between 0 and 100 inclusive. +github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: + entity_name: + description: | + The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), + or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of + __catalog_name__.__schema_name__.__model_name__. + entity_version: + description: The version of the model in Databricks Model Registry to be served + or empty if the entity is a FEATURE_SPEC. + environment_vars: + description: "An object containing a set of optional, user-specified environment + variable key-value pairs used for serving this entity.\nNote: this is an experimental + feature and subject to change. \nExample entity environment variables that refer + to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", + \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`" + external_model: + description: | + The external model to be served. 
NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) + can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, + it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. + The task type of all external models within an endpoint must be the same. + instance_profile_arn: + description: ARN of the instance profile that the served entity uses to access + AWS resources. + max_provisioned_throughput: + description: The maximum tokens per second that the endpoint can scale up to. + min_provisioned_throughput: + description: The minimum tokens per second that the endpoint can scale down to. + name: + description: | + The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. + If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other + entities, it defaults to -. + scale_to_zero_enabled: + description: Whether the compute resources for the served entity should scale + down to zero. + workload_size: + description: | + The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + workload_type: + description: | + The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: + environment_vars: + description: "An object containing a set of optional, user-specified environment + variable key-value pairs used for serving this model.\nNote: this is an experimental + feature and subject to change. \nExample model environment variables that refer + to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", + \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`" + instance_profile_arn: + description: ARN of the instance profile that the served model will use to access + AWS resources. + max_provisioned_throughput: + description: The maximum tokens per second that the endpoint can scale up to. + min_provisioned_throughput: + description: The minimum tokens per second that the endpoint can scale down to. + model_name: + description: | + The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, + in the form of __catalog_name__.__schema_name__.__model_name__. + model_version: + description: The version of the model in Databricks Model Registry or Unity Catalog + to be served. 
+ name: + description: | + The name of a served model. It must be unique across an endpoint. If not specified, this field will default to -. + A served model name can consist of alphanumeric characters, dashes, and underscores. + scale_to_zero_enabled: + description: Whether the compute resources for the served model should scale down + to zero. + workload_size: + description: | + The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. + workload_type: + description: | + The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize: + _: + description: | + The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. + A single unit of provisioned concurrency can process one request at a time. + Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. + enum: + - Small + - Medium + - Large +github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType: + _: + description: | + The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is + "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. + See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). + enum: + - CPU + - GPU_SMALL + - GPU_MEDIUM + - GPU_LARGE + - MULTIGPU_MEDIUM +github.com/databricks/databricks-sdk-go/service/serving.TrafficConfig: + routes: + description: The list of routes that define traffic to each served entity. 
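Taken together, the served-entity, workload, and traffic annotations above describe the shape of a model serving endpoint definition. The sketch below shows roughly how they compose in a bundle; the resource key, Unity Catalog model name, and version are illustrative, and the `resources`/`config` nesting is an assumption rather than something defined in this patch.

# Hypothetical databricks.yml fragment composing the serving fields annotated above.
resources:
  model_serving_endpoints:
    churn_model:                              # illustrative resource key
      name: churn-model-endpoint
      config:
        served_entities:
          - entity_name: main.ml.churn_model  # UC model: catalog.schema.model
            entity_version: "3"
            name: churn-model-3               # explicit served entity name
            workload_size: Small              # Small / Medium / Large
            workload_type: CPU                # or GPU_SMALL, GPU_MEDIUM, ...
            scale_to_zero_enabled: true
        traffic_config:
          routes:
            - served_model_name: churn-model-3  # routes reference served entity/model names
              traffic_percentage: 100           # integer between 0 and 100 inclusive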
diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml new file mode 100644 index 0000000000..e9e659e687 --- /dev/null +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -0,0 +1,155 @@ +github.com/databricks/cli/bundle/config/resources.Cluster: + "data_security_mode": + "description": |- + PLACEHOLDER + "docker_image": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "runtime_engine": + "description": |- + PLACEHOLDER + "workload_type": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Dashboard: + "embed_credentials": + "description": |- + PLACEHOLDER + "file_path": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Job: + "health": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "run_as": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.MlflowExperiment: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.MlflowModel: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Pipeline: + "permissions": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.QualityMonitor: + "table_name": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "grants": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Schema: + "grants": + "description": |- + PLACEHOLDER + "properties": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Volume: + "grants": + "description": |- + PLACEHOLDER + "volume_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: + "availability": + "description": |- + PLACEHOLDER + "ebs_volume_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes: + "availability": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: + "data_security_mode": + "description": |- + PLACEHOLDER + "docker_image": + "description": |- + PLACEHOLDER + "runtime_engine": + "description": |- + PLACEHOLDER + "workload_type": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.DockerImage: + "basic_auth": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: + "availability": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.GitSource: + "git_snapshot": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: + "spec": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRule: + "metric": + "description": |- + PLACEHOLDER + "op": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthRules: + "rules": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: + "python_named_params": + "description": |- + PLACEHOLDER 
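Each PLACEHOLDER above marks a field whose generated description was empty; the overrides file is where a hand-written description is filled in later. A filled-in entry would look roughly like the sketch below (the description wording is illustrative, not the final text).

# Sketch of an override entry after replacing its placeholder; wording is illustrative.
github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask:
  "python_named_params":
    "description": |-
      Named parameters passed to the Python wheel task of the job being run,
      for example {"name": "task", "data": "dbfs:/path/to/data.json"}.

Keeping hand-written text in the overrides file lets the generated annotations_openapi.yml remain fully machine-generated.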
+github.com/databricks/databricks-sdk-go/service/jobs.Task: + "health": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: + "table_update": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/jobs.Webhook: + "id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: + "quartz_cron_schedule": + "description": |- + PLACEHOLDER + "timezone_id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: + "cron": + "description": |- + PLACEHOLDER + "manual": + "description": |- + PLACEHOLDER diff --git a/bundle/internal/schema/annotations_test.go b/bundle/internal/schema/annotations_test.go new file mode 100644 index 0000000000..d7e2fea7cf --- /dev/null +++ b/bundle/internal/schema/annotations_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "testing" +) + +func TestConvertLinksToAbsoluteUrl(t *testing.T) { + tests := []struct { + input string + expected string + }{ + { + input: "", + expected: "", + }, + { + input: "Some text (not a link)", + expected: "Some text (not a link)", + }, + { + input: "This is a link to [_](#section)", + expected: "This is a link to [section](https://docs.databricks.com/dev-tools/bundles/reference.html#section)", + }, + { + input: "This is a link to [_](/dev-tools/bundles/resources.html#dashboard)", + expected: "This is a link to [dashboard](https://docs.databricks.com/dev-tools/bundles/resources.html#dashboard)", + }, + { + input: "This is a link to [_](/dev-tools/bundles/resources.html)", + expected: "This is a link to [link](https://docs.databricks.com/dev-tools/bundles/resources.html)", + }, + { + input: "This is a link to [external](https://external.com)", + expected: "This is a link to [external](https://external.com)", + }, + } + + for _, test := range tests { + result := convertLinksToAbsoluteUrl(test.input) + if result != test.expected { + t.Errorf("For input '%s', expected '%s', but got '%s'", test.input, test.expected, result) + } + } +} diff --git a/bundle/internal/schema/main.go b/bundle/internal/schema/main.go index cc06f0bbee..77927a9661 100644 --- a/bundle/internal/schema/main.go +++ b/bundle/internal/schema/main.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "os" + "path/filepath" "reflect" "github.com/databricks/cli/bundle/config" @@ -43,7 +44,8 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema. case jsonschema.ArrayType, jsonschema.ObjectType: // arrays and objects can have complex variable values specified. return jsonschema.Schema{ - AnyOf: []jsonschema.Schema{ + // OneOf is used because we don't expect more than 1 match and schema-based auto-complete works better with OneOf + OneOf: []jsonschema.Schema{ s, { Type: jsonschema.StringType, @@ -55,7 +57,7 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema. 
// primitives can have variable values, or references like ${bundle.xyz} // or ${workspace.xyz} return jsonschema.Schema{ - AnyOf: []jsonschema.Schema{ + OneOf: []jsonschema.Schema{ s, {Type: jsonschema.StringType, Pattern: interpolationPattern("resources")}, {Type: jsonschema.StringType, Pattern: interpolationPattern("bundle")}, @@ -113,37 +115,60 @@ func makeVolumeTypeOptional(typ reflect.Type, s jsonschema.Schema) jsonschema.Sc } func main() { - if len(os.Args) != 2 { - fmt.Println("Usage: go run main.go ") + if len(os.Args) != 3 { + fmt.Println("Usage: go run main.go ") os.Exit(1) } + // Directory with annotation files + workdir := os.Args[1] // Output file, where the generated JSON schema will be written to. - outputFile := os.Args[1] + outputFile := os.Args[2] + + generateSchema(workdir, outputFile) +} + +func generateSchema(workdir, outputFile string) { + annotationsPath := filepath.Join(workdir, "annotations.yml") + annotationsOpenApiPath := filepath.Join(workdir, "annotations_openapi.yml") + annotationsOpenApiOverridesPath := filepath.Join(workdir, "annotations_openapi_overrides.yml") // Input file, the databricks openapi spec. inputFile := os.Getenv("DATABRICKS_OPENAPI_SPEC") - if inputFile == "" { - log.Fatal("DATABRICKS_OPENAPI_SPEC environment variable not set") + if inputFile != "" { + p, err := newParser(inputFile) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Writing OpenAPI annotations to %s\n", annotationsOpenApiPath) + err = p.extractAnnotations(reflect.TypeOf(config.Root{}), annotationsOpenApiPath, annotationsOpenApiOverridesPath) + if err != nil { + log.Fatal(err) + } } - p, err := newParser(inputFile) + a, err := newAnnotationHandler([]string{annotationsOpenApiPath, annotationsOpenApiOverridesPath, annotationsPath}) if err != nil { log.Fatal(err) } // Generate the JSON schema from the bundle Go struct. s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ - p.addDescriptions, - p.addEnums, removeJobsFields, makeVolumeTypeOptional, + a.addAnnotations, addInterpolationPatterns, }) if err != nil { log.Fatal(err) } + // Overwrite the input annotation file, adding missing annotations + err = a.syncWithMissingAnnotations(annotationsPath) + if err != nil { + log.Fatal(err) + } + b, err := json.MarshalIndent(s, "", " ") if err != nil { log.Fatal(err) diff --git a/bundle/internal/schema/main_test.go b/bundle/internal/schema/main_test.go new file mode 100644 index 0000000000..f1389e61ad --- /dev/null +++ b/bundle/internal/schema/main_test.go @@ -0,0 +1,125 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "os" + "path" + "reflect" + "strings" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/jsonschema" + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" +) + +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, in) + if err != nil { + return err + } + + return out.Close() +} + +// Checks whether descriptions are added for new config fields in the annotations.yml file +// If this test fails either manually add descriptions to the `annotations.yml` or do the following: +// 1. 
run `make schema` from the repository root to add placeholder descriptions +// 2. replace all "PLACEHOLDER" values with the actual descriptions if possible +// 3. run `make schema` again to regenerate the schema with acutal descriptions +func TestRequiredAnnotationsForNewFields(t *testing.T) { + workdir := t.TempDir() + annotationsPath := path.Join(workdir, "annotations.yml") + annotationsOpenApiPath := path.Join(workdir, "annotations_openapi.yml") + annotationsOpenApiOverridesPath := path.Join(workdir, "annotations_openapi_overrides.yml") + + // Copy existing annotation files from the same folder as this test + err := copyFile("annotations.yml", annotationsPath) + assert.NoError(t, err) + err = copyFile("annotations_openapi.yml", annotationsOpenApiPath) + assert.NoError(t, err) + err = copyFile("annotations_openapi_overrides.yml", annotationsOpenApiOverridesPath) + assert.NoError(t, err) + + generateSchema(workdir, path.Join(t.TempDir(), "schema.json")) + + originalFile, err := os.ReadFile("annotations.yml") + assert.NoError(t, err) + currentFile, err := os.ReadFile(annotationsPath) + assert.NoError(t, err) + original, err := yamlloader.LoadYAML("", bytes.NewBuffer(originalFile)) + assert.NoError(t, err) + current, err := yamlloader.LoadYAML("", bytes.NewBuffer(currentFile)) + assert.NoError(t, err) + + // Collect added paths. + var updatedFieldPaths []string + _, err = merge.Override(original, current, merge.OverrideVisitor{ + VisitInsert: func(basePath dyn.Path, right dyn.Value) (dyn.Value, error) { + updatedFieldPaths = append(updatedFieldPaths, basePath.String()) + return right, nil + }, + }) + assert.NoError(t, err) + assert.Empty(t, updatedFieldPaths, fmt.Sprintf("Missing JSON-schema descriptions for new config fields in bundle/internal/schema/annotations.yml:\n%s", strings.Join(updatedFieldPaths, "\n"))) +} + +// Checks whether types in annotation files are still present in Config type +func TestNoDetachedAnnotations(t *testing.T) { + files := []string{ + "annotations.yml", + "annotations_openapi.yml", + "annotations_openapi_overrides.yml", + } + + types := map[string]bool{} + for _, file := range files { + annotations, err := getAnnotations(file) + assert.NoError(t, err) + for k := range annotations { + types[k] = false + } + } + + _, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ + func(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + delete(types, getPath(typ)) + return s + }, + }) + assert.NoError(t, err) + + for typ := range types { + t.Errorf("Type `%s` in annotations file is not found in `root.Config` type", typ) + } + assert.Empty(t, types, "Detached annotations found, regenerate schema and check for package path changes") +} + +func getAnnotations(path string) (annotationFile, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var data annotationFile + err = yaml.Unmarshal(b, &data) + return data, err +} diff --git a/bundle/internal/schema/parser.go b/bundle/internal/schema/parser.go index ef3d6e719b..6fb6e0047d 100644 --- a/bundle/internal/schema/parser.go +++ b/bundle/internal/schema/parser.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "encoding/json" "fmt" "os" @@ -8,6 +9,9 @@ import ( "reflect" "strings" + "github.com/ghodss/yaml" + + "github.com/databricks/cli/libs/dyn/yamlloader" "github.com/databricks/cli/libs/jsonschema" ) @@ -23,6 +27,8 @@ type openapiParser struct { ref map[string]jsonschema.Schema } +const RootTypeKey = "_" + func newParser(path 
string) (*openapiParser, error) { b, err := os.ReadFile(path) if err != nil { @@ -89,35 +95,95 @@ func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) { } // Use the OpenAPI spec to load descriptions for the given type. -func (p *openapiParser) addDescriptions(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { - ref, ok := p.findRef(typ) - if !ok { - return s +func (p *openapiParser) extractAnnotations(typ reflect.Type, outputPath, overridesPath string) error { + annotations := annotationFile{} + overrides := annotationFile{} + + b, err := os.ReadFile(overridesPath) + if err != nil { + return err + } + err = yaml.Unmarshal(b, &overrides) + if err != nil { + return err + } + if overrides == nil { + overrides = annotationFile{} } - s.Description = ref.Description - for k, v := range s.Properties { - if refProp, ok := ref.Properties[k]; ok { - v.Description = refProp.Description - } + _, err = jsonschema.FromType(typ, []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{ + func(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { + ref, ok := p.findRef(typ) + if !ok { + return s + } + + basePath := getPath(typ) + pkg := map[string]annotation{} + annotations[basePath] = pkg + + if ref.Description != "" || ref.Enum != nil { + pkg[RootTypeKey] = annotation{Description: ref.Description, Enum: ref.Enum} + } + + for k := range s.Properties { + if refProp, ok := ref.Properties[k]; ok { + pkg[k] = annotation{Description: refProp.Description, Enum: refProp.Enum} + if refProp.Description == "" { + addEmptyOverride(k, basePath, overrides) + } + } else { + addEmptyOverride(k, basePath, overrides) + } + } + return s + }, + }) + if err != nil { + return err } - return s + b, err = yaml.Marshal(overrides) + if err != nil { + return err + } + o, err := yamlloader.LoadYAML("", bytes.NewBuffer(b)) + if err != nil { + return err + } + err = saveYamlWithStyle(overridesPath, o) + if err != nil { + return err + } + b, err = yaml.Marshal(annotations) + if err != nil { + return err + } + b = bytes.Join([][]byte{[]byte("# This file is auto-generated. DO NOT EDIT."), b}, []byte("\n")) + err = os.WriteFile(outputPath, b, 0o644) + if err != nil { + return err + } + + return nil } -// Use the OpenAPI spec add enum values for the given type. -func (p *openapiParser) addEnums(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema { - ref, ok := p.findRef(typ) - if !ok { - return s +func addEmptyOverride(key, pkg string, overridesFile annotationFile) { + if overridesFile[pkg] == nil { + overridesFile[pkg] = map[string]annotation{} } - s.Enum = append(s.Enum, ref.Enum...) - for k, v := range s.Properties { - if refProp, ok := ref.Properties[k]; ok { - v.Enum = append(v.Enum, refProp.Enum...) 
- } + overrides := overridesFile[pkg] + if overrides[key].Description == "" { + overrides[key] = annotation{Description: Placeholder} } - return s + a, ok := overrides[key] + if !ok { + a = annotation{} + } + if a.Description == "" { + a.Description = Placeholder + } + overrides[key] = a } diff --git a/bundle/internal/schema/testdata/pass/target_variable.yml b/bundle/internal/schema/testdata/pass/target_variable.yml new file mode 100644 index 0000000000..34af946585 --- /dev/null +++ b/bundle/internal/schema/testdata/pass/target_variable.yml @@ -0,0 +1,5 @@ +targets: + production: + variables: + myvar: + default: true diff --git a/bundle/schema/embed_test.go b/bundle/schema/embed_test.go index ff2e7651b4..59f1458cb6 100644 --- a/bundle/schema/embed_test.go +++ b/bundle/schema/embed_test.go @@ -41,21 +41,21 @@ func TestJsonSchema(t *testing.T) { resourceJob := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Job") fields := []string{"name", "continuous", "tasks", "trigger"} for _, field := range fields { - assert.NotEmpty(t, resourceJob.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, resourceJob.OneOf[0].Properties[field].Description) } // Assert descriptions were also loaded for a job task definition. jobTask := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.Task") fields = []string{"notebook_task", "spark_jar_task", "spark_python_task", "spark_submit_task", "description", "depends_on", "environment_key", "for_each_task", "existing_cluster_id"} for _, field := range fields { - assert.NotEmpty(t, jobTask.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, jobTask.OneOf[0].Properties[field].Description) } // Assert descriptions are loaded for pipelines pipeline := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Pipeline") fields = []string{"name", "catalog", "clusters", "channel", "continuous", "development"} for _, field := range fields { - assert.NotEmpty(t, pipeline.AnyOf[0].Properties[field].Description) + assert.NotEmpty(t, pipeline.OneOf[0].Properties[field].Description) } providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider") diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index e813e44063..8ff7ff4d46 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1,7 +1,7 @@ { "$defs": { "bool": { - "anyOf": [ + "oneOf": [ { "type": "boolean" }, @@ -28,7 +28,7 @@ ] }, "float64": { - "anyOf": [ + "oneOf": [ { "type": "number" }, @@ -60,7 +60,7 @@ "bundle": { "config": { "resources.Cluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -181,7 +181,7 @@ ] }, "resources.Dashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -244,14 +244,16 @@ ] }, "resources.Grant": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "principal": { + "description": "The name of the principal that will be granted privileges", "$ref": "#/$defs/string" }, "privileges": { + "description": "The privileges to grant to the specified entity", "$ref": "#/$defs/slice/string" } }, @@ -268,7 +270,7 @@ ] }, "resources.Job": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -363,7 +365,7 @@ ] }, "resources.MlflowExperiment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -408,7 +410,7 @@ ] }, "resources.MlflowModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -453,7 
+455,7 @@ ] }, "resources.ModelServingEndpoint": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -498,20 +500,24 @@ ] }, "resources.Permission": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "group_name": { + "description": "The name of the group that has the permission set in level.", "$ref": "#/$defs/string" }, "level": { + "description": "The allowed permission for user, group, service principal defined for this permission.", "$ref": "#/$defs/string" }, "service_principal_name": { + "description": "The name of the service principal that has the permission set in level.", "$ref": "#/$defs/string" }, "user_name": { + "description": "The name of the user that has the permission set in level.", "$ref": "#/$defs/string" } }, @@ -527,7 +533,7 @@ ] }, "resources.Pipeline": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -636,7 +642,7 @@ ] }, "resources.QualityMonitor": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -710,7 +716,7 @@ ] }, "resources.RegisteredModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -752,7 +758,7 @@ ] }, "resources.Schema": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -792,7 +798,7 @@ ] }, "resources.Volume": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -837,7 +843,7 @@ ] }, "variable.Lookup": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -895,12 +901,15 @@ "$ref": "#/$defs/interface" }, "description": { + "description": "The description of the variable.", "$ref": "#/$defs/string" }, "lookup": { + "description": "The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup" }, "type": { + "description": "The type of the variable.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.VariableType" } }, @@ -916,12 +925,16 @@ "$ref": "#/$defs/interface" }, "description": { + "description": "The description of the variable", "$ref": "#/$defs/string" }, "lookup": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup" + "description": "The name of the alert, cluster_policy, cluster, dashboard, instance_pool, job, metastore, pipeline, query, service_principal, or warehouse object for which to retrieve an ID.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.Lookup", + "markdownDescription": "The name of the `alert`, `cluster_policy`, `cluster`, `dashboard`, `instance_pool`, `job`, `metastore`, `pipeline`, `query`, `service_principal`, or `warehouse` object for which to retrieve an ID.\"" }, "type": { + "description": "The type of the variable.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/variable.VariableType" } }, @@ -932,24 +945,31 @@ } }, "config.Artifact": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "build": { + "description": "An optional set of non-default build commands that you want to run locally before deployment.\n\nFor Python wheel builds, the Databricks CLI assumes that it can find a local install of the Python wheel package to run builds, and it runs the command python setup.py bdist_wheel by default during each bundle deployment.\n\nTo specify multiple build commands, separate each command with double-ampersand (\u0026\u0026) characters.", "$ref": "#/$defs/string" }, "executable": { + "description": "The executable type.", 
"$ref": "#/$defs/github.com/databricks/cli/libs/exec.ExecutableType" }, "files": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config.ArtifactFile" + "description": "The source files for the artifact.", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config.ArtifactFile", + "markdownDescription": "The source files for the artifact, defined as an [artifact_file](https://docs.databricks.com/dev-tools/bundles/reference.html#artifact_file)." }, "path": { + "description": "The location where the built artifact will be saved.", "$ref": "#/$defs/string" }, "type": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.ArtifactType" + "description": "The type of the artifact.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.ArtifactType", + "markdownDescription": "The type of the artifact. Valid values are `wheel` or `jar`" } }, "additionalProperties": false, @@ -964,11 +984,12 @@ ] }, "config.ArtifactFile": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "source": { + "description": "The path of the files used to build the artifact.", "$ref": "#/$defs/string" } }, @@ -987,26 +1008,35 @@ "type": "string" }, "config.Bundle": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "cluster_id": { - "$ref": "#/$defs/string" + "description": "The ID of a cluster to use to run the bundle.", + "$ref": "#/$defs/string", + "markdownDescription": "The ID of a cluster to use to run the bundle. See [cluster_id](https://docs.databricks.com/dev-tools/bundles/settings.html#cluster_id)." }, "compute_id": { "$ref": "#/$defs/string" }, "databricks_cli_version": { - "$ref": "#/$defs/string" + "description": "The Databricks CLI version to use for the bundle.", + "$ref": "#/$defs/string", + "markdownDescription": "The Databricks CLI version to use for the bundle. See [databricks_cli_version](https://docs.databricks.com/dev-tools/bundles/settings.html#databricks_cli_version)." }, "deployment": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Deployment" + "description": "The definition of the bundle deployment", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Deployment", + "markdownDescription": "The definition of the bundle deployment. For supported attributes, see [deployment](https://docs.databricks.com/dev-tools/bundles/reference.html#deployment) and [link](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html)." }, "git": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git" + "description": "The Git version control details that are associated with your bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git", + "markdownDescription": "The Git version control details that are associated with your bundle. For supported attributes, see [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git) and [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." }, "name": { + "description": "The name of the bundle.", "$ref": "#/$defs/string" }, "uuid": { @@ -1028,15 +1058,18 @@ "type": "string" }, "config.Deployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "fail_on_active_runs": { + "description": "Whether to fail on active runs. 
If this is set to true a deployment that is running can be interrupted.", "$ref": "#/$defs/bool" }, "lock": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Lock" + "description": "The deployment lock attributes.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Lock", + "markdownDescription": "The deployment lock attributes. See [lock](https://docs.databricks.com/dev-tools/bundles/reference.html#lock)." } }, "additionalProperties": false @@ -1048,20 +1081,24 @@ ] }, "config.Experimental": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "pydabs": { + "description": "The PyDABs configuration.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.PyDABs" }, "python_wheel_wrapper": { + "description": "Whether to use a Python wheel wrapper", "$ref": "#/$defs/bool" }, "scripts": { + "description": "The commands to run", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Command" }, "use_legacy_run_as": { + "description": "Whether to use the legacy run_as behavior", "$ref": "#/$defs/bool" } }, @@ -1074,15 +1111,19 @@ ] }, "config.Git": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "branch": { - "$ref": "#/$defs/string" + "description": "The Git branch name.", + "$ref": "#/$defs/string", + "markdownDescription": "The Git branch name. See [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." }, "origin_url": { - "$ref": "#/$defs/string" + "description": "The origin URL of the repository.", + "$ref": "#/$defs/string", + "markdownDescription": "The origin URL of the repository. See [git](https://docs.databricks.com/dev-tools/bundles/settings.html#git)." } }, "additionalProperties": false @@ -1094,14 +1135,16 @@ ] }, "config.Lock": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "enabled": { + "description": "Whether this lock is enabled.", "$ref": "#/$defs/bool" }, "force": { + "description": "Whether to force this lock if it is enabled.", "$ref": "#/$defs/bool" } }, @@ -1117,26 +1160,32 @@ "type": "string" }, "config.Presets": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "jobs_max_concurrent_runs": { + "description": "The maximum concurrent runs for a job.", "$ref": "#/$defs/int" }, "name_prefix": { + "description": "The prefix for job runs of the bundle.", "$ref": "#/$defs/string" }, "pipelines_development": { + "description": "Whether pipeline deployments should be locked in development mode.", "$ref": "#/$defs/bool" }, "source_linked_deployment": { + "description": "Whether to link the deployment to the bundle source.", "$ref": "#/$defs/bool" }, "tags": { + "description": "The tags for the bundle deployment.", "$ref": "#/$defs/map/string" }, "trigger_pause_status": { + "description": "A pause status to apply to all job triggers and schedules. 
Valid values are PAUSED or UNPAUSED.", "$ref": "#/$defs/string" } }, @@ -1149,17 +1198,20 @@ ] }, "config.PyDABs": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "enabled": { + "description": "Whether or not PyDABs (Private Preview) is enabled", "$ref": "#/$defs/bool" }, "import": { + "description": "The PyDABs project to import to discover resources, resource generator and mutators", "$ref": "#/$defs/slice/string" }, "venv_path": { + "description": "The Python virtual environment path", "$ref": "#/$defs/string" } }, @@ -1172,39 +1224,59 @@ ] }, "config.Resources": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "clusters": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster" + "description": "The cluster definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster", + "markdownDescription": "The cluster definitions for the bundle. See [cluster](https://docs.databricks.com/dev-tools/bundles/resources.html#cluster)" }, "dashboards": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard" + "description": "The dashboard definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Dashboard", + "markdownDescription": "The dashboard definitions for the bundle. See [dashboard](https://docs.databricks.com/dev-tools/bundles/resources.html#dashboard)" }, "experiments": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment" + "description": "The experiment definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment", + "markdownDescription": "The experiment definitions for the bundle. See [experiment](https://docs.databricks.com/dev-tools/bundles/resources.html#experiment)" }, "jobs": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job" + "description": "The job definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job", + "markdownDescription": "The job definitions for the bundle. See [job](https://docs.databricks.com/dev-tools/bundles/resources.html#job)" }, "model_serving_endpoints": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint" + "description": "The model serving endpoint definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint", + "markdownDescription": "The model serving endpoint definitions for the bundle. See [model_serving_endpoint](https://docs.databricks.com/dev-tools/bundles/resources.html#model_serving_endpoint)" }, "models": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowModel" + "description": "The model definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowModel", + "markdownDescription": "The model definitions for the bundle. See [model](https://docs.databricks.com/dev-tools/bundles/resources.html#model)" }, "pipelines": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Pipeline" + "description": "The pipeline definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Pipeline", + "markdownDescription": "The pipeline definitions for the bundle. 
See [pipeline](https://docs.databricks.com/dev-tools/bundles/resources.html#pipeline)" }, "quality_monitors": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.QualityMonitor" + "description": "The quality monitor definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.QualityMonitor", + "markdownDescription": "The quality monitor definitions for the bundle. See [quality_monitor](https://docs.databricks.com/dev-tools/bundles/resources.html#quality_monitor)" }, "registered_models": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.RegisteredModel" + "description": "The registered model definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.RegisteredModel", + "markdownDescription": "The registered model definitions for the bundle. See [registered_model](https://docs.databricks.com/dev-tools/bundles/resources.html#registered_model)" }, "schemas": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema" + "description": "The schema definitions for the bundle.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema", + "markdownDescription": "The schema definitions for the bundle. See [schema](https://docs.databricks.com/dev-tools/bundles/resources.html#schema)" }, "volumes": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Volume" @@ -1219,17 +1291,20 @@ ] }, "config.Sync": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "exclude": { + "description": "A list of files or folders to exclude from the bundle.", "$ref": "#/$defs/slice/string" }, "include": { + "description": "A list of files or folders to include in the bundle.", "$ref": "#/$defs/slice/string" }, "paths": { + "description": "The local folder paths, which can be outside the bundle root, to synchronize to the workspace when the bundle is deployed.", "$ref": "#/$defs/slice/string" } }, @@ -1242,51 +1317,75 @@ ] }, "config.Target": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "artifacts": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact" + "description": "The artifacts to include in the target deployment.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact", + "markdownDescription": "The artifacts to include in the target deployment. See [artifact](https://docs.databricks.com/dev-tools/bundles/reference.html#artifact)" }, "bundle": { + "description": "The name of the bundle when deploying to this target.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle" }, "cluster_id": { + "description": "The ID of the cluster to use for this target.", "$ref": "#/$defs/string" }, "compute_id": { + "description": "Deprecated. The ID of the compute to use for this target.", "$ref": "#/$defs/string" }, "default": { + "description": "Whether this target is the default target.", "$ref": "#/$defs/bool" }, "git": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git" + "description": "The Git version control settings for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Git", + "markdownDescription": "The Git version control settings for the target. See [git](https://docs.databricks.com/dev-tools/bundles/reference.html#git)." 
}, "mode": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Mode" + "description": "The deployment mode for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Mode", + "markdownDescription": "The deployment mode for the target. Valid values are `development` or `production`. See [link](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html)." }, "permissions": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + "description": "The permissions for deploying and running the bundle in the target.", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission", + "markdownDescription": "The permissions for deploying and running the bundle in the target. See [permission](https://docs.databricks.com/dev-tools/bundles/reference.html#permission)." }, "presets": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets" + "description": "The deployment presets for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets", + "markdownDescription": "The deployment presets for the target. See [preset](https://docs.databricks.com/dev-tools/bundles/reference.html#preset)." }, "resources": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources" + "description": "The resource definitions for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources", + "markdownDescription": "The resource definitions for the target. See [resources](https://docs.databricks.com/dev-tools/bundles/reference.html#resources)." }, "run_as": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs" + "description": "The identity to use to run the bundle.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs", + "markdownDescription": "The identity to use to run the bundle. See [job_run_as](https://docs.databricks.com/dev-tools/bundles/reference.html#job_run_as) and [link](https://docs.databricks.com/dev-tools/bundles/run_as.html)." }, "sync": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync" + "description": "The local paths to sync to the target workspace when a bundle is run or deployed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync", + "markdownDescription": "The local paths to sync to the target workspace when a bundle is run or deployed. See [sync](https://docs.databricks.com/dev-tools/bundles/reference.html#sync)." }, "variables": { - "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.TargetVariable" + "description": "The custom variable definitions for the target.", + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.TargetVariable", + "markdownDescription": "The custom variable definitions for the target. See [variables](https://docs.databricks.com/dev-tools/bundles/settings.html#variables) and [link](https://docs.databricks.com/dev-tools/bundles/variables.html)." }, "workspace": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace" + "description": "The Databricks workspace for the target.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace", + "markdownDescription": "The Databricks workspace for the target. 
[workspace](https://docs.databricks.com/dev-tools/bundles/reference.html#workspace)" } }, "additionalProperties": false @@ -1298,56 +1397,72 @@ ] }, "config.Workspace": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { "artifact_path": { + "description": "The artifact path to use within the workspace for both deployments and workflow runs", "$ref": "#/$defs/string" }, "auth_type": { + "description": "The authentication type.", "$ref": "#/$defs/string" }, "azure_client_id": { + "description": "The Azure client ID", "$ref": "#/$defs/string" }, "azure_environment": { + "description": "The Azure environment", "$ref": "#/$defs/string" }, "azure_login_app_id": { + "description": "The Azure login app ID", "$ref": "#/$defs/string" }, "azure_tenant_id": { + "description": "The Azure tenant ID", "$ref": "#/$defs/string" }, "azure_use_msi": { + "description": "Whether to use MSI for Azure", "$ref": "#/$defs/bool" }, "azure_workspace_resource_id": { + "description": "The Azure workspace resource ID", "$ref": "#/$defs/string" }, "client_id": { + "description": "The client ID for the workspace", "$ref": "#/$defs/string" }, "file_path": { + "description": "The file path to use within the workspace for both deployments and workflow runs", "$ref": "#/$defs/string" }, "google_service_account": { + "description": "The Google service account name", "$ref": "#/$defs/string" }, "host": { + "description": "The Databricks workspace host URL", "$ref": "#/$defs/string" }, "profile": { + "description": "The Databricks workspace profile name", "$ref": "#/$defs/string" }, "resource_path": { + "description": "The workspace resource path", "$ref": "#/$defs/string" }, "root_path": { + "description": "The Databricks workspace root path", "$ref": "#/$defs/string" }, "state_path": { + "description": "The workspace state path", "$ref": "#/$defs/string" } }, @@ -1369,7 +1484,7 @@ "databricks-sdk-go": { "service": { "catalog.MonitorCronSchedule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1407,7 +1522,7 @@ ] }, "catalog.MonitorDataClassificationConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1425,7 +1540,7 @@ ] }, "catalog.MonitorDestination": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1443,7 +1558,7 @@ ] }, "catalog.MonitorInferenceLog": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1500,7 +1615,7 @@ ] }, "catalog.MonitorMetric": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1550,7 +1665,7 @@ ] }, "catalog.MonitorNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1572,7 +1687,7 @@ ] }, "catalog.MonitorSnapshot": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": false @@ -1584,7 +1699,7 @@ ] }, "catalog.MonitorTimeSeries": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1617,7 +1732,7 @@ ] }, "compute.Adlsgen2Info": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1638,7 +1753,7 @@ ] }, "compute.AutoScale": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1660,7 +1775,7 @@ ] }, "compute.AwsAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1721,7 +1836,7 @@ ] }, "compute.AzureAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1759,7 +1874,7 @@ ] }, "compute.ClientsTypes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1781,7 +1896,7 @@ ] }, "compute.ClusterLogConf": { - "anyOf": [ + "oneOf": [ { 
"type": "object", "properties": { @@ -1803,7 +1918,7 @@ ] }, "compute.ClusterSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1934,7 +2049,7 @@ ] }, "compute.DbfsStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1955,7 +2070,7 @@ ] }, "compute.DockerBasicAuth": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -1977,7 +2092,7 @@ ] }, "compute.DockerImage": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2006,7 +2121,7 @@ ] }, "compute.Environment": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "The environment entity used to preserve serverless environment side panel and jobs' environment for non-notebook task.\nIn this minimal environment spec, only pip dependencies are supported.", @@ -2032,7 +2147,7 @@ ] }, "compute.GcpAttributes": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2078,7 +2193,7 @@ ] }, "compute.GcsStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2099,7 +2214,7 @@ ] }, "compute.InitScriptInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2141,7 +2256,7 @@ ] }, "compute.Library": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2183,7 +2298,7 @@ ] }, "compute.LocalFileInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2204,7 +2319,7 @@ ] }, "compute.LogAnalyticsInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2226,7 +2341,7 @@ ] }, "compute.MavenLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2255,7 +2370,7 @@ ] }, "compute.PythonPyPiLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2280,7 +2395,7 @@ ] }, "compute.RCranLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2314,7 +2429,7 @@ ] }, "compute.S3StorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2359,7 +2474,7 @@ ] }, "compute.VolumesStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2380,7 +2495,7 @@ ] }, "compute.WorkloadType": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2401,7 +2516,7 @@ ] }, "compute.WorkspaceStorageInfo": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2436,7 +2551,7 @@ ] }, "jobs.ConditionTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2479,7 +2594,7 @@ ] }, "jobs.Continuous": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2497,7 +2612,7 @@ ] }, "jobs.CronSchedule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2527,7 +2642,7 @@ ] }, "jobs.DbtTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2572,7 +2687,7 @@ ] }, "jobs.FileArrivalTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2601,7 +2716,7 @@ ] }, "jobs.ForEachTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2651,7 +2766,7 @@ ] }, "jobs.GitSnapshot": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", @@ -2670,7 +2785,7 @@ ] }, "jobs.GitSource": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "An optional specification for a remote Git repository containing the source code used by tasks. 
Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", @@ -2709,7 +2824,7 @@ ] }, "jobs.JobCluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2735,7 +2850,7 @@ ] }, "jobs.JobDeployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2775,7 +2890,7 @@ ] }, "jobs.JobEmailNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2813,7 +2928,7 @@ ] }, "jobs.JobEnvironment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2837,7 +2952,7 @@ ] }, "jobs.JobNotificationSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2859,7 +2974,7 @@ ] }, "jobs.JobParameterDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2885,7 +3000,7 @@ ] }, "jobs.JobRunAs": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", @@ -2908,7 +3023,7 @@ ] }, "jobs.JobSource": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "The source of the job specification in the remote repository when the job is source controlled.", @@ -2965,7 +3080,7 @@ ] }, "jobs.JobsHealthRule": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -2994,7 +3109,7 @@ ] }, "jobs.JobsHealthRules": { - "anyOf": [ + "oneOf": [ { "type": "object", "description": "An optional set of health rules that can be defined for this job.", @@ -3012,7 +3127,7 @@ ] }, "jobs.NotebookTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3052,7 +3167,7 @@ ] }, "jobs.PeriodicTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3086,7 +3201,7 @@ ] }, "jobs.PipelineParams": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3104,7 +3219,7 @@ ] }, "jobs.PipelineTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3129,7 +3244,7 @@ ] }, "jobs.PythonWheelTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3163,7 +3278,7 @@ ] }, "jobs.QueueSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3196,7 +3311,7 @@ ] }, "jobs.RunJobTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3260,7 +3375,7 @@ ] }, "jobs.SparkJarTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3286,7 +3401,7 @@ ] }, "jobs.SparkPythonTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3315,7 +3430,7 @@ ] }, "jobs.SparkSubmitTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3333,7 +3448,7 @@ ] }, "jobs.SqlTask": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3374,7 +3489,7 @@ ] }, "jobs.SqlTaskAlert": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3403,7 +3518,7 @@ ] }, "jobs.SqlTaskDashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3436,7 +3551,7 @@ ] }, "jobs.SqlTaskFile": { - "anyOf": [ + "oneOf": [ { 
"type": "object", "properties": { @@ -3461,7 +3576,7 @@ ] }, "jobs.SqlTaskQuery": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3482,7 +3597,7 @@ ] }, "jobs.SqlTaskSubscription": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3504,7 +3619,7 @@ ] }, "jobs.TableUpdateTriggerConfiguration": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3534,7 +3649,7 @@ ] }, "jobs.Task": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3666,7 +3781,7 @@ ] }, "jobs.TaskDependency": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3691,7 +3806,7 @@ ] }, "jobs.TaskEmailNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3729,7 +3844,7 @@ ] }, "jobs.TaskNotificationSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3755,7 +3870,7 @@ ] }, "jobs.TriggerSettings": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3788,7 +3903,7 @@ ] }, "jobs.Webhook": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3808,7 +3923,7 @@ ] }, "jobs.WebhookNotifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3842,7 +3957,7 @@ ] }, "ml.ExperimentTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3864,7 +3979,7 @@ ] }, "ml.ModelTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3886,7 +4001,7 @@ ] }, "ml.ModelVersion": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3961,7 +4076,7 @@ ] }, "ml.ModelVersionTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -3983,7 +4098,7 @@ ] }, "pipelines.CronTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4010,7 +4125,7 @@ ] }, "pipelines.FileLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4028,7 +4143,7 @@ ] }, "pipelines.Filters": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4050,7 +4165,7 @@ ] }, "pipelines.IngestionConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4076,7 +4191,7 @@ ] }, "pipelines.IngestionGatewayPipelineDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4110,7 +4225,7 @@ ] }, "pipelines.IngestionPipelineDefinition": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4140,7 +4255,7 @@ ] }, "pipelines.ManualTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": false @@ -4152,7 +4267,7 @@ ] }, "pipelines.NotebookLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4170,7 +4285,7 @@ ] }, "pipelines.Notifications": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4192,7 +4307,7 @@ ] }, "pipelines.PipelineCluster": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4282,7 +4397,7 @@ ] }, "pipelines.PipelineClusterAutoscale": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4320,7 +4435,7 @@ ] }, "pipelines.PipelineDeployment": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4342,7 +4457,7 @@ ] }, "pipelines.PipelineLibrary": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4376,7 +4491,7 @@ ] }, "pipelines.PipelineTrigger": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4396,7 +4511,7 @@ ] }, "pipelines.ReportSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4430,7 +4545,7 @@ ] }, "pipelines.RestartWindow": { - "anyOf": [ + "oneOf": 
[ { "type": "object", "properties": { @@ -4472,7 +4587,7 @@ ] }, "pipelines.SchemaSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4506,7 +4621,7 @@ ] }, "pipelines.TableSpec": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4548,7 +4663,7 @@ ] }, "pipelines.TableSpecificConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4586,7 +4701,7 @@ ] }, "serving.Ai21LabsConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4606,7 +4721,7 @@ ] }, "serving.AiGatewayConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4636,7 +4751,7 @@ ] }, "serving.AiGatewayGuardrailParameters": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4666,7 +4781,7 @@ ] }, "serving.AiGatewayGuardrailPiiBehavior": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4695,7 +4810,7 @@ ] }, "serving.AiGatewayGuardrails": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4717,7 +4832,7 @@ ] }, "serving.AiGatewayInferenceTableConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4747,7 +4862,7 @@ ] }, "serving.AiGatewayRateLimit": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4792,7 +4907,7 @@ ] }, "serving.AiGatewayUsageTrackingConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4810,7 +4925,7 @@ ] }, "serving.AmazonBedrockConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4862,7 +4977,7 @@ ] }, "serving.AnthropicConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4884,7 +4999,7 @@ ] }, "serving.AutoCaptureConfigInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4914,7 +5029,7 @@ ] }, "serving.CohereConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4940,7 +5055,7 @@ ] }, "serving.DatabricksModelServingConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4969,7 +5084,7 @@ ] }, "serving.EndpointCoreConfigInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -4999,7 +5114,7 @@ ] }, "serving.EndpointTag": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5024,7 +5139,7 @@ ] }, "serving.ExternalModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5101,7 +5216,7 @@ ] }, "serving.GoogleCloudVertexAiConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5127,7 +5242,7 @@ ] }, "serving.OpenAiConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5174,7 +5289,7 @@ ] }, "serving.PaLmConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5194,7 +5309,7 @@ ] }, "serving.RateLimit": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5239,7 +5354,7 @@ ] }, "serving.Route": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5265,7 +5380,7 @@ ] }, "serving.ServedEntityInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5323,7 +5438,7 @@ ] }, "serving.ServedModelInput": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5402,7 +5517,7 @@ ] }, "serving.TrafficConfig": { - "anyOf": [ + "oneOf": [ { "type": "object", "properties": { @@ -5424,7 +5539,7 @@ } }, "int": { - "anyOf": [ + "oneOf": [ { "type": "integer" }, @@ -5451,7 +5566,7 @@ ] }, "int64": { - "anyOf": [ + "oneOf": [ { "type": "integer" }, @@ -5485,7 +5600,7 @@ "bundle": { "config": { "resources.Cluster": { - "anyOf": [ + 
"oneOf": [ { "type": "object", "additionalProperties": { @@ -5499,7 +5614,7 @@ ] }, "resources.Dashboard": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5513,7 +5628,7 @@ ] }, "resources.Job": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5527,7 +5642,7 @@ ] }, "resources.MlflowExperiment": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5541,7 +5656,7 @@ ] }, "resources.MlflowModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5555,7 +5670,7 @@ ] }, "resources.ModelServingEndpoint": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5569,7 +5684,7 @@ ] }, "resources.Pipeline": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5583,7 +5698,7 @@ ] }, "resources.QualityMonitor": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5597,7 +5712,7 @@ ] }, "resources.RegisteredModel": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5611,7 +5726,7 @@ ] }, "resources.Schema": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5625,7 +5740,7 @@ ] }, "resources.Volume": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5639,7 +5754,7 @@ ] }, "variable.TargetVariable": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5653,7 +5768,7 @@ ] }, "variable.Variable": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5668,7 +5783,7 @@ } }, "config.Artifact": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5682,7 +5797,7 @@ ] }, "config.Command": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5696,7 +5811,7 @@ ] }, "config.Target": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5714,7 +5829,7 @@ } }, "string": { - "anyOf": [ + "oneOf": [ { "type": "object", "additionalProperties": { @@ -5735,7 +5850,7 @@ "bundle": { "config": { "resources.Grant": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5749,7 +5864,7 @@ ] }, "resources.Permission": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5764,7 +5879,7 @@ } }, "config.ArtifactFile": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5782,7 +5897,7 @@ "databricks-sdk-go": { "service": { "catalog.MonitorMetric": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5796,7 +5911,7 @@ ] }, "compute.InitScriptInfo": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5810,7 +5925,7 @@ ] }, "compute.Library": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5824,7 +5939,7 @@ ] }, "jobs.JobCluster": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5838,7 +5953,7 @@ ] }, "jobs.JobEnvironment": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5852,7 +5967,7 @@ ] }, "jobs.JobParameterDefinition": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5866,7 +5981,7 @@ ] }, "jobs.JobsHealthRule": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5880,7 +5995,7 @@ ] }, "jobs.SqlTaskSubscription": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5894,7 +6009,7 @@ ] }, "jobs.Task": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5908,7 +6023,7 @@ ] }, "jobs.TaskDependency": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5922,7 +6037,7 @@ ] }, "jobs.Webhook": { - "anyOf": [ + "oneOf": [ 
{ "type": "array", "items": { @@ -5936,7 +6051,7 @@ ] }, "ml.ExperimentTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5950,7 +6065,7 @@ ] }, "ml.ModelTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5964,7 +6079,7 @@ ] }, "ml.ModelVersion": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5978,7 +6093,7 @@ ] }, "ml.ModelVersionTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -5992,7 +6107,7 @@ ] }, "pipelines.IngestionConfig": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6006,7 +6121,7 @@ ] }, "pipelines.Notifications": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6020,7 +6135,7 @@ ] }, "pipelines.PipelineCluster": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6034,7 +6149,7 @@ ] }, "pipelines.PipelineLibrary": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6048,7 +6163,7 @@ ] }, "serving.AiGatewayRateLimit": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6062,7 +6177,7 @@ ] }, "serving.EndpointTag": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6076,7 +6191,7 @@ ] }, "serving.RateLimit": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6090,7 +6205,7 @@ ] }, "serving.Route": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6104,7 +6219,7 @@ ] }, "serving.ServedEntityInput": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6118,7 +6233,7 @@ ] }, "serving.ServedModelInput": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6136,7 +6251,7 @@ } }, "string": { - "anyOf": [ + "oneOf": [ { "type": "array", "items": { @@ -6157,39 +6272,57 @@ "type": "object", "properties": { "artifacts": { + "description": "Defines the attributes to build an artifact", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Artifact" }, "bundle": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle" + "description": "The attributes of the bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle", + "markdownDescription": "The attributes of the bundle. See [bundle](https://docs.databricks.com/dev-tools/bundles/settings.html#bundle)" }, "experimental": { + "description": "Defines attributes for experimental features.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Experimental" }, "include": { - "$ref": "#/$defs/slice/string" + "description": "Specifies a list of path globs that contain configuration files to include within the bundle.", + "$ref": "#/$defs/slice/string", + "markdownDescription": "Specifies a list of path globs that contain configuration files to include within the bundle. See [include](https://docs.databricks.com/dev-tools/bundles/settings.html#include)" }, "permissions": { - "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" + "description": "Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle", + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission", + "markdownDescription": "Defines the permissions to apply to experiments, jobs, pipelines, and models defined in the bundle. See [permissions](https://docs.databricks.com/dev-tools/bundles/settings.html#permissions) and [link](https://docs.databricks.com/dev-tools/bundles/permissions.html)." 
}, "presets": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets" + "description": "Defines bundle deployment presets.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Presets", + "markdownDescription": "Defines bundle deployment presets. See [presets](https://docs.databricks.com/dev-tools/bundles/deployment-modes.html#presets)." }, "resources": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources" + "description": "Specifies information about the Databricks resources used by the bundle", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Resources", + "markdownDescription": "Specifies information about the Databricks resources used by the bundle. See [link](https://docs.databricks.com/dev-tools/bundles/resources.html)." }, "run_as": { + "description": "The identity to use to run the bundle.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs" }, "sync": { - "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync" + "description": "The files and file paths to include or exclude in the bundle.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Sync", + "markdownDescription": "The files and file paths to include or exclude in the bundle. See [link](https://docs.databricks.com/dev-tools/bundles/)" }, "targets": { + "description": "Defines deployment targets for the bundle.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Target" }, "variables": { + "description": "A Map that defines the custom variables for the bundle, where each key is the name of the variable, and the value is a Map that defines the variable.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/variable.Variable" }, "workspace": { + "description": "Defines the Databricks workspace for the bundle.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Workspace" } }, diff --git a/libs/jsonschema/extension.go b/libs/jsonschema/extension.go index 3e32caf1ab..9badf86a5a 100644 --- a/libs/jsonschema/extension.go +++ b/libs/jsonschema/extension.go @@ -34,4 +34,10 @@ type Extension struct { // Version of the schema. This is used to determine if the schema is // compatible with the current CLI version. Version *int `json:"version,omitempty"` + + // This field is not in JSON schema spec, but it is supported in VSCode and in the Databricks Workspace + // It is used to provide a rich description of the field in the hover tooltip. + // https://code.visualstudio.com/docs/languages/json#_use-rich-formatting-in-hovers + // Also it can be used in documentation generation. + MarkdownDescription string `json:"markdownDescription,omitempty"` } diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index b9c3fb08cd..e63dde359e 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -69,6 +69,13 @@ type Schema struct { // Schema that must match any of the schemas in the array AnyOf []Schema `json:"anyOf,omitempty"` + + // Schema that must match one of the schemas in the array + OneOf []Schema `json:"oneOf,omitempty"` + + // Title of the object, rendered as inline documentation in the IDE. + // https://json-schema.org/understanding-json-schema/reference/annotations + Title string `json:"title,omitempty"` } // Default value defined in a JSON Schema, represented as a string. 
From 59f0859e0047a4304a6b7f30ef58543b72e54780 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 18 Dec 2024 13:43:27 +0100 Subject: [PATCH 58/63] Upgrade Go SDK to 0.54.0 (#2029) ## Changes * Added [a.AccountFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#AccountFederationPolicyAPI) account-level service and [a.ServicePrincipalFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#ServicePrincipalFederationPolicyAPI) account-level service. * Added `IsSingleNode`, `Kind` and `UseMlRuntime` fields for Cluster commands. * Added `UpdateParameterSyntax` field for [dashboards.MigrateDashboardRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MigrateDashboardRequest). --- .codegen/_openapi_sha | 2 +- .gitattributes | 2 + .../internal/schema/annotations_openapi.yml | 54 ++- .../schema/annotations_openapi_overrides.yml | 6 + bundle/schema/jsonschema.json | 86 +++- cmd/account/cmd.go | 4 + .../federation-policy/federation-policy.go | 402 ++++++++++++++++ .../service-principal-federation-policy.go | 445 ++++++++++++++++++ cmd/workspace/clusters/clusters.go | 12 + cmd/workspace/lakeview/lakeview.go | 1 + go.mod | 2 +- go.sum | 4 +- 12 files changed, 1009 insertions(+), 11 deletions(-) create mode 100755 cmd/account/federation-policy/federation-policy.go create mode 100755 cmd/account/service-principal-federation-policy/service-principal-federation-policy.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 68cd2f4be8..8622b29cac 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -7016dcbf2e011459416cf408ce21143bcc4b3a25 \ No newline at end of file +a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 3bc3c73c32..0a8ddf3cb7 100755 --- a/.gitattributes +++ b/.gitattributes @@ -8,6 +8,7 @@ cmd/account/custom-app-integration/custom-app-integration.go linguist-generated= cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true cmd/account/encryption-keys/encryption-keys.go linguist-generated=true cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true +cmd/account/federation-policy/federation-policy.go linguist-generated=true cmd/account/groups/groups.go linguist-generated=true cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true @@ -19,6 +20,7 @@ cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=tr cmd/account/personal-compute/personal-compute.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true cmd/account/published-app-integration/published-app-integration.go linguist-generated=true +cmd/account/service-principal-federation-policy/service-principal-federation-policy.go linguist-generated=true cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true cmd/account/service-principals/service-principals.go linguist-generated=true cmd/account/settings/settings.go linguist-generated=true diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 8cedada545..d8f681a282 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -70,6 +70,12 @@ github.com/databricks/cli/bundle/config/resources.Cluster: If `cluster_log_conf` is 
specified, init script logs are sent to `//init_scripts`. instance_pool_id: description: The optional ID of the instance pool to which the cluster belongs. + is_single_node: + description: | + This field can only be used with `kind`. + + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` + kind: {} node_type_id: description: | This field encodes, through a single value, the resources available to each of @@ -119,6 +125,11 @@ github.com/databricks/cli/bundle/config/resources.Cluster: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + use_ml_runtime: + description: | + This field can only be used with `kind`. + + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. workload_type: {} github.com/databricks/cli/bundle/config/resources.Dashboard: create_time: @@ -759,6 +770,12 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`. instance_pool_id: description: The optional ID of the instance pool to which the cluster belongs. + is_single_node: + description: | + This field can only be used with `kind`. + + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` + kind: {} node_type_id: description: | This field encodes, through a single value, the resources available to each of @@ -808,6 +825,11 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + use_ml_runtime: + description: | + This field can only be used with `kind`. + + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. workload_type: {} github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode: _: @@ -815,6 +837,12 @@ github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode: Data security mode decides what data governance model to use when accessing data from a cluster. + The following modes can only be used with `kind`. + * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration. + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. + * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + + The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited. 
@@ -827,6 +855,9 @@ github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode: * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled. enum: + - DATA_SECURITY_MODE_AUTO + - DATA_SECURITY_MODE_STANDARD + - DATA_SECURITY_MODE_DEDICATED - NONE - SINGLE_USER - USER_ISOLATION @@ -1068,6 +1099,17 @@ github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState: enum: - ACTIVE - TRASHED +github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask: + clean_room_name: + description: The clean room that the notebook belongs to. + etag: + description: |- + Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version). + It can be fetched by calling the :method:cleanroomassets/get API. + notebook_base_parameters: + description: Base parameters to be used for the clean room notebook job. + notebook_name: + description: Name of the notebook being run. github.com/databricks/databricks-sdk-go/service/jobs.Condition: _: enum: @@ -1346,10 +1388,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.JobsHealthMetric: Specifies the health metric that is being evaluated for a particular health rule. * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. - * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview. - * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview. - * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview. - * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview. + * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview. + * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview. enum: - RUN_DURATION_SECONDS - STREAMING_BACKLOG_BYTES @@ -1651,6 +1693,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfigura and can be used to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds. github.com/databricks/databricks-sdk-go/service/jobs.Task: + clean_rooms_notebook_task: + description: |- + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + when the `clean_rooms_notebook_task` field is present. condition_task: description: |- The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. 
diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index e9e659e687..ef602d6efe 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -5,6 +5,9 @@ github.com/databricks/cli/bundle/config/resources.Cluster: "docker_image": "description": |- PLACEHOLDER + "kind": + "description": |- + PLACEHOLDER "permissions": "description": |- PLACEHOLDER @@ -90,6 +93,9 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: "docker_image": "description": |- PLACEHOLDER + "kind": + "description": |- + PLACEHOLDER "runtime_engine": "description": |- PLACEHOLDER diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 8ff7ff4d46..8e8efa7fc1 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -130,6 +130,13 @@ "description": "The optional ID of the instance pool to which the cluster belongs.", "$ref": "#/$defs/string" }, + "is_single_node": { + "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n", + "$ref": "#/$defs/bool" + }, + "kind": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n", "$ref": "#/$defs/string" @@ -168,6 +175,10 @@ "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", "$ref": "#/$defs/slice/string" }, + "use_ml_runtime": { + "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n", + "$ref": "#/$defs/bool" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -1988,6 +1999,13 @@ "description": "The optional ID of the instance pool to which the cluster belongs.", "$ref": "#/$defs/string" }, + "is_single_node": { + "description": "This field can only be used with `kind`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`\n", + "$ref": "#/$defs/bool" + }, + "kind": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n", "$ref": "#/$defs/string" @@ -2023,6 +2041,10 @@ "description": "SSH public key contents that will be added to each Spark node in this cluster. 
The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", "$ref": "#/$defs/slice/string" }, + "use_ml_runtime": { + "description": "This field can only be used with `kind`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.\n", + "$ref": "#/$defs/bool" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -2037,8 +2059,11 @@ }, "compute.DataSecurityMode": { "type": "string", - "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used with `kind`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. 
But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.\n", "enum": [ + "DATA_SECURITY_MODE_AUTO", + "DATA_SECURITY_MODE_STANDARD", + "DATA_SECURITY_MODE_DEDICATED", "NONE", "SINGLE_USER", "USER_ISOLATION", @@ -2255,6 +2280,9 @@ } ] }, + "compute.Kind": { + "type": "string" + }, "compute.Library": { "oneOf": [ { @@ -2543,6 +2571,40 @@ "TRASHED" ] }, + "jobs.CleanRoomsNotebookTask": { + "oneOf": [ + { + "type": "object", + "properties": { + "clean_room_name": { + "description": "The clean room that the notebook belongs to.", + "$ref": "#/$defs/string" + }, + "etag": { + "description": "Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).\nIt can be fetched by calling the :method:cleanroomassets/get API.", + "$ref": "#/$defs/string" + }, + "notebook_base_parameters": { + "description": "Base parameters to be used for the clean room notebook job.", + "$ref": "#/$defs/map/string" + }, + "notebook_name": { + "description": "Name of the notebook being run.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "clean_room_name", + "notebook_name" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.Condition": { "type": "string", "enum": [ @@ -3063,7 +3125,7 @@ }, "jobs.JobsHealthMetric": { "type": "string", - "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview.", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. 
This metric is in Public Preview.", "enum": [ "RUN_DURATION_SECONDS", "STREAMING_BACKLOG_BYTES", @@ -3653,6 +3715,10 @@ { "type": "object", "properties": { + "clean_rooms_notebook_task": { + "description": "The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask" + }, "condition_task": { "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask" @@ -4551,7 +4617,7 @@ "properties": { "days_of_week": { "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" }, "start_hour": { "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", @@ -6162,6 +6228,20 @@ } ] }, + "pipelines.RestartWindowDaysOfWeek": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.AiGatewayRateLimit": { "oneOf": [ { diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 9b4bb8139f..f34966fd9e 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -11,6 +11,7 @@ import ( credentials "github.com/databricks/cli/cmd/account/credentials" custom_app_integration "github.com/databricks/cli/cmd/account/custom-app-integration" encryption_keys "github.com/databricks/cli/cmd/account/encryption-keys" + account_federation_policy "github.com/databricks/cli/cmd/account/federation-policy" account_groups "github.com/databricks/cli/cmd/account/groups" account_ip_access_lists "github.com/databricks/cli/cmd/account/ip-access-lists" log_delivery "github.com/databricks/cli/cmd/account/log-delivery" @@ -21,6 +22,7 @@ import ( o_auth_published_apps "github.com/databricks/cli/cmd/account/o-auth-published-apps" private_access "github.com/databricks/cli/cmd/account/private-access" published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration" + service_principal_federation_policy "github.com/databricks/cli/cmd/account/service-principal-federation-policy" service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets" account_service_principals "github.com/databricks/cli/cmd/account/service-principals" account_settings "github.com/databricks/cli/cmd/account/settings" @@ -44,6 +46,7 @@ func New() *cobra.Command { cmd.AddCommand(credentials.New()) cmd.AddCommand(custom_app_integration.New()) cmd.AddCommand(encryption_keys.New()) + cmd.AddCommand(account_federation_policy.New()) cmd.AddCommand(account_groups.New()) cmd.AddCommand(account_ip_access_lists.New()) cmd.AddCommand(log_delivery.New()) @@ -54,6 +57,7 @@ func 
New() *cobra.Command { cmd.AddCommand(o_auth_published_apps.New()) cmd.AddCommand(private_access.New()) cmd.AddCommand(published_app_integration.New()) + cmd.AddCommand(service_principal_federation_policy.New()) cmd.AddCommand(service_principal_secrets.New()) cmd.AddCommand(account_service_principals.New()) cmd.AddCommand(account_settings.New()) diff --git a/cmd/account/federation-policy/federation-policy.go b/cmd/account/federation-policy/federation-policy.go new file mode 100755 index 0000000000..d78ac709a9 --- /dev/null +++ b/cmd/account/federation-policy/federation-policy.go @@ -0,0 +1,402 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package federation_policy + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "federation-policy", + Short: `These APIs manage account federation policies.`, + Long: `These APIs manage account federation policies. + + Account federation policies allow users and service principals in your + Databricks account to securely access Databricks APIs using tokens from your + trusted identity providers (IdPs). + + With token federation, your users and service principals can exchange tokens + from your IdP for Databricks OAuth tokens, which can be used to access + Databricks APIs. Token federation eliminates the need to manage Databricks + secrets, and allows you to centralize management of token issuance policies in + your IdP. Databricks token federation is typically used in combination with + [SCIM], so users in your IdP are synchronized into your Databricks account. + + Token federation is configured in your Databricks account using an account + federation policy. An account federation policy specifies: * which IdP, or + issuer, your Databricks account should accept tokens from * how to determine + which Databricks user, or subject, a token is issued for + + To configure a federation policy, you provide the following: * The required + token __issuer__, as specified in the “iss” claim of your tokens. The + issuer is an https URL that identifies your IdP. * The allowed token + __audiences__, as specified in the “aud” claim of your tokens. This + identifier is intended to represent the recipient of the token. As long as the + audience in the token matches at least one audience in the policy, the token + is considered a match. If unspecified, the default value is your Databricks + account id. * The __subject claim__, which indicates which token claim + contains the Databricks username of the user the token was issued for. If + unspecified, the default value is “sub”. * Optionally, the public keys + used to validate the signature of your tokens, in JWKS format. If unspecified + (recommended), Databricks automatically fetches the public keys from your + issuer’s well known endpoint. Databricks strongly recommends relying on your + issuer’s well known endpoint for discovering public keys. 
+ + An example federation policy is: issuer: "https://idp.mycompany.com/oidc" + audiences: ["databricks"] subject_claim: "sub" + + An example JWT token body that matches this policy and could be used to + authenticate to Databricks as user username@mycompany.com is: { "iss": + "https://idp.mycompany.com/oidc", "aud": "databricks", "sub": + "username@mycompany.com" } + + You may also need to configure your IdP to generate tokens for your users to + exchange with Databricks, if your users do not already have the ability to + generate tokens that are compatible with your federation policy. + + You do not need to configure an OAuth application in Databricks to use token + federation. + + [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *oauth2.CreateAccountFederationPolicyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq oauth2.CreateAccountFederationPolicyRequest + createReq.Policy = &oauth2.FederationPolicy{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "create" + cmd.Short = `Create account federation policy.` + cmd.Long = `Create account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := a.FederationPolicy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
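For illustration, here is a minimal sketch of what the generated `create` command does through the Go SDK account client. The client constructor and the policy name/description are assumptions made for the example; the issuer, audiences and subject claim from the policy shown above travel in the `oidc_policy` argument, whose exact Go shape is not part of this diff, so it is left out here. The same payload can also be supplied to the CLI command via the `--json` flag.

```go
// Hedged sketch: create an account federation policy via the SDK call that the
// generated command wraps. Name and description are hypothetical values.
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

func main() {
	ctx := context.Background()

	// Account-level client; host, account ID and credentials are resolved
	// from the environment or a configuration profile.
	a, err := databricks.NewAccountClient()
	if err != nil {
		log.Fatal(err)
	}

	policy, err := a.FederationPolicy.Create(ctx, oauth2.CreateAccountFederationPolicyRequest{
		Policy: &oauth2.FederationPolicy{
			Name:        "idp-mycompany",                        // hypothetical policy name
			Description: "Accept tokens from idp.mycompany.com", // hypothetical description
			// oidc_policy (issuer, audiences, subject_claim) omitted; see note above.
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created federation policy: %+v", policy)
}
```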
+var deleteOverrides []func( + *cobra.Command, + *oauth2.DeleteAccountFederationPolicyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq oauth2.DeleteAccountFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "delete POLICY_ID" + cmd.Short = `Delete account federation policy.` + cmd.Long = `Delete account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + deleteReq.PolicyId = args[0] + + err = a.FederationPolicy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *oauth2.GetAccountFederationPolicyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq oauth2.GetAccountFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "get POLICY_ID" + cmd.Short = `Get account federation policy.` + cmd.Long = `Get account federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + getReq.PolicyId = args[0] + + response, err := a.FederationPolicy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
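Before the paginated `list` command defined below, a short sketch of consuming the same listing from Go, continuing from the account client `a` and context `ctx` of the previous sketch. The `listing.ToSlice` helper and its import path (`github.com/databricks/databricks-sdk-go/listing`) are assumptions here; pagination, exposed on the command as `--page-size` and `--page-token`, is handled by the returned iterator.

```go
// Hedged sketch: enumerate account federation policies. Assumes the SDK's
// listing helper package for draining the iterator into a slice.
it := a.FederationPolicy.List(ctx, oauth2.ListAccountFederationPoliciesRequest{
	PageSize: 50, // optional; mirrors the --page-size flag
})
policies, err := listing.ToSlice(ctx, it)
if err != nil {
	log.Fatal(err)
}
for _, p := range policies {
	log.Printf("policy: %s", p.Name)
}
```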
+var listOverrides []func( + *cobra.Command, + *oauth2.ListAccountFederationPoliciesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListAccountFederationPoliciesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List account federation policies.` + cmd.Long = `List account federation policies.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response := a.FederationPolicy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *oauth2.UpdateAccountFederationPolicyRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq oauth2.UpdateAccountFederationPolicyRequest + updateReq.Policy = &oauth2.FederationPolicy{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "update POLICY_ID UPDATE_MASK" + cmd.Short = `Update account federation policy.` + cmd.Long = `Update account federation policy. + + Arguments: + POLICY_ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.PolicyId = args[0] + updateReq.UpdateMask = args[1] + + response, err := a.FederationPolicy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
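The `update` command above performs a PATCH, so only the fields named in the field mask are changed. Below is a sketch of the equivalent SDK call, again reusing `a` and `ctx` from the earlier sketch; the policy ID and the mask value are placeholders, and per the command help multiple fields would be comma-separated with no spaces.

```go
// Hedged sketch: patch only the description of an existing policy.
updated, err := a.FederationPolicy.Update(ctx, oauth2.UpdateAccountFederationPolicyRequest{
	PolicyId:   "my-policy-id", // hypothetical ID
	UpdateMask: "description",  // hypothetical mask; e.g. "description,name" for two fields
	Policy: &oauth2.FederationPolicy{
		Description: "Rotated issuer signing keys",
	},
})
if err != nil {
	log.Fatal(err)
}
log.Printf("updated federation policy: %+v", updated)
```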
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AccountFederationPolicy diff --git a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go new file mode 100755 index 0000000000..77f73bcd07 --- /dev/null +++ b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go @@ -0,0 +1,445 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package service_principal_federation_policy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "service-principal-federation-policy", + Short: `These APIs manage service principal federation policies.`, + Long: `These APIs manage service principal federation policies. + + Service principal federation, also known as Workload Identity Federation, + allows your automated workloads running outside of Databricks to securely + access Databricks APIs without the need for Databricks secrets. With Workload + Identity Federation, your application (or workload) authenticates to + Databricks as a Databricks service principal, using tokens provided by the + workload runtime. + + Databricks strongly recommends using Workload Identity Federation to + authenticate to Databricks from automated workloads, over alternatives such as + OAuth client secrets or Personal Access Tokens, whenever possible. Workload + Identity Federation is supported by many popular services, including Github + Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes clusters, among + others. + + Workload identity federation is configured in your Databricks account using a + service principal federation policy. A service principal federation policy + specifies: * which IdP, or issuer, the service principal is allowed to + authenticate from * which workload identity, or subject, is allowed to + authenticate as the Databricks service principal + + To configure a federation policy, you provide the following: * The required + token __issuer__, as specified in the “iss” claim of workload identity + tokens. The issuer is an https URL that identifies the workload identity + provider. * The required token __subject__, as specified in the “sub” + claim of workload identity tokens. The subject uniquely identifies the + workload in the workload runtime environment. * The allowed token + __audiences__, as specified in the “aud” claim of workload identity + tokens. The audience is intended to represent the recipient of the token. As + long as the audience in the token matches at least one audience in the policy, + the token is considered a match. If unspecified, the default value is your + Databricks account id. * Optionally, the public keys used to validate the + signature of the workload identity tokens, in JWKS format. 
If unspecified + (recommended), Databricks automatically fetches the public keys from the + issuer’s well known endpoint. Databricks strongly recommends relying on the + issuer’s well known endpoint for discovering public keys. + + An example service principal federation policy, for a Github Actions workload, + is: issuer: "https://token.actions.githubusercontent.com" audiences: + ["https://github.com/my-github-org"] subject: + "repo:my-github-org/my-repo:environment:prod" + + An example JWT token body that matches this policy and could be used to + authenticate to Databricks is: { "iss": + "https://token.actions.githubusercontent.com", "aud": + "https://github.com/my-github-org", "sub": + "repo:my-github-org/my-repo:environment:prod" } + + You may also need to configure the workload runtime to generate tokens for + your workloads. + + You do not need to configure an OAuth application in Databricks to use token + federation.`, + GroupID: "oauth2", + Annotations: map[string]string{ + "package": "oauth2", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *oauth2.CreateServicePrincipalFederationPolicyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq oauth2.CreateServicePrincipalFederationPolicyRequest + createReq.Policy = &oauth2.FederationPolicy{} + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "create SERVICE_PRINCIPAL_ID" + cmd.Short = `Create service principal federation policy.` + cmd.Long = `Create service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + + response, err := a.ServicePrincipalFederationPolicy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
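For the GitHub Actions example above, a hedged sketch of the SDK call behind this `create` command, reusing the account client `a` and context `ctx` from the earlier account-policy sketch. The numeric service principal ID and the policy name/description are placeholders; as before, the issuer, audiences and subject values belong in the `oidc_policy` argument, which this diff does not spell out in Go, so it is omitted.

```go
// Hedged sketch: create a service principal federation policy for a workload
// running in GitHub Actions. ServicePrincipalId is a hypothetical numeric ID.
spPolicy, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{
	ServicePrincipalId: 1234567890, // hypothetical service principal ID
	Policy: &oauth2.FederationPolicy{
		Name:        "github-actions-prod", // hypothetical policy name
		Description: "Workload identity federation for repo:my-github-org/my-repo",
		// oidc_policy (issuer, audiences, subject) omitted; see note above.
	},
})
if err != nil {
	log.Fatal(err)
}
log.Printf("created service principal federation policy: %+v", spPolicy)
```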
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *oauth2.DeleteServicePrincipalFederationPolicyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq oauth2.DeleteServicePrincipalFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "delete SERVICE_PRINCIPAL_ID POLICY_ID" + cmd.Short = `Delete service principal federation policy.` + cmd.Long = `Delete service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: ` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &deleteReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + deleteReq.PolicyId = args[1] + + err = a.ServicePrincipalFederationPolicy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *oauth2.GetServicePrincipalFederationPolicyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq oauth2.GetServicePrincipalFederationPolicyRequest + + // TODO: short flags + + cmd.Use = "get SERVICE_PRINCIPAL_ID POLICY_ID" + cmd.Short = `Get service principal federation policy.` + cmd.Long = `Get service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: ` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &getReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + getReq.PolicyId = args[1] + + response, err := a.ServicePrincipalFederationPolicy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *oauth2.ListServicePrincipalFederationPoliciesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq oauth2.ListServicePrincipalFederationPoliciesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list SERVICE_PRINCIPAL_ID" + cmd.Short = `List service principal federation policies.` + cmd.Long = `List service principal federation policies. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + _, err = fmt.Sscan(args[0], &listReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + + response := a.ServicePrincipalFederationPolicy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *oauth2.UpdateServicePrincipalFederationPolicyRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq oauth2.UpdateServicePrincipalFederationPolicyRequest + updateReq.Policy = &oauth2.FederationPolicy{} + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) + cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Name of the federation policy.`) + // TODO: complex arg: oidc_policy + + cmd.Use = "update SERVICE_PRINCIPAL_ID POLICY_ID UPDATE_MASK" + cmd.Short = `Update service principal federation policy.` + cmd.Long = `Update service principal federation policy. + + Arguments: + SERVICE_PRINCIPAL_ID: The service principal id for the federation policy. + POLICY_ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. 
To specify multiple fields in + the field mask, use comma as the separator (no space).` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.Policy) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + _, err = fmt.Sscan(args[0], &updateReq.ServicePrincipalId) + if err != nil { + return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) + } + updateReq.PolicyId = args[1] + updateReq.UpdateMask = args[2] + + response, err := a.ServicePrincipalFederationPolicy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ServicePrincipalFederationPolicy diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index db788753b8..bbb7c578ad 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -204,6 +204,9 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + DATA_SECURITY_MODE_AUTO, + DATA_SECURITY_MODE_DEDICATED, + DATA_SECURITY_MODE_STANDARD, LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, LEGACY_SINGLE_USER_STANDARD, @@ -220,6 +223,8 @@ func newCreate() *cobra.Command { // TODO: complex arg: gcp_attributes // TODO: array: init_scripts cmd.Flags().StringVar(&createReq.InstancePoolId, "instance-pool-id", createReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`) + cmd.Flags().BoolVar(&createReq.IsSingleNode, "is-single-node", createReq.IsSingleNode, `This field can only be used with kind.`) + cmd.Flags().Var(&createReq.Kind, "kind", `The kind of compute described by this compute specification. 
Supported values: [CLASSIC_PREVIEW]`) cmd.Flags().StringVar(&createReq.NodeTypeId, "node-type-id", createReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&createReq.NumWorkers, "num-workers", createReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) @@ -228,6 +233,7 @@ func newCreate() *cobra.Command { // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys + cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used with kind.`) // TODO: complex arg: workload_type cmd.Use = "create SPARK_VERSION" @@ -468,6 +474,9 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ + DATA_SECURITY_MODE_AUTO, + DATA_SECURITY_MODE_DEDICATED, + DATA_SECURITY_MODE_STANDARD, LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, LEGACY_SINGLE_USER_STANDARD, @@ -484,6 +493,8 @@ func newEdit() *cobra.Command { // TODO: complex arg: gcp_attributes // TODO: array: init_scripts cmd.Flags().StringVar(&editReq.InstancePoolId, "instance-pool-id", editReq.InstancePoolId, `The optional ID of the instance pool to which the cluster belongs.`) + cmd.Flags().BoolVar(&editReq.IsSingleNode, "is-single-node", editReq.IsSingleNode, `This field can only be used with kind.`) + cmd.Flags().Var(&editReq.Kind, "kind", `The kind of compute described by this compute specification. 
Supported values: [CLASSIC_PREVIEW]`) cmd.Flags().StringVar(&editReq.NodeTypeId, "node-type-id", editReq.NodeTypeId, `This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.`) cmd.Flags().IntVar(&editReq.NumWorkers, "num-workers", editReq.NumWorkers, `Number of worker nodes that this cluster should have.`) cmd.Flags().StringVar(&editReq.PolicyId, "policy-id", editReq.PolicyId, `The ID of the cluster policy used to create the cluster if applicable.`) @@ -492,6 +503,7 @@ func newEdit() *cobra.Command { // TODO: map via StringToStringVar: spark_conf // TODO: map via StringToStringVar: spark_env_vars // TODO: array: ssh_public_keys + cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used with kind.`) // TODO: complex arg: workload_type cmd.Use = "edit CLUSTER_ID SPARK_VERSION" diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index f190380622..6686f16dad 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -828,6 +828,7 @@ func newMigrate() *cobra.Command { cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`) cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`) + cmd.Flags().BoolVar(&migrateReq.UpdateParameterSyntax, "update-parameter-syntax", migrateReq.UpdateParameterSyntax, `Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax (:param) when converting datasets in the dashboard.`) cmd.Use = "migrate SOURCE_DASHBOARD_ID" cmd.Short = `Migrate dashboard.` diff --git a/go.mod b/go.mod index c9a008fb3e..03e2791d18 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.2 require ( github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.53.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.54.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 63bf2be333..be77011cd8 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.53.0 h1:rZMXaTC3HNKZt+m4C4I/dY3EdZj+kl/sVd/Kdq55Qfo= -github.com/databricks/databricks-sdk-go v0.53.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.54.0 h1:L8gsA3NXs+uYU3QtW/OUgjxMQxOH24k0MT9JhB3zLlM= +github.com/databricks/databricks-sdk-go v0.54.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From e3b256e753e1244b45a32301c05140537a356717 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 18 Dec 2024 15:03:08 
+0100 Subject: [PATCH 59/63] Upgrade TF provider to 1.62.0 (#2030) ## Changes * Added support for `IsSingleNode`, `Kind` and `UseMlRuntime` for clusters * Added support for `CleanRoomsNotebookTask` * `DaysOfWeek` for pipeline restart window is now a list --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../internal/tf/schema/data_source_cluster.go | 6 + bundle/internal/tf/schema/resource_cluster.go | 3 + bundle/internal/tf/schema/resource_job.go | 142 +++++++++++------- .../internal/tf/schema/resource_pipeline.go | 6 +- bundle/internal/tf/schema/root.go | 2 +- 6 files changed, 99 insertions(+), 62 deletions(-) diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index a3c5c62819..27c4b16cde 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.61.0" +const ProviderVersion = "1.62.0" diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index 94d67bbfab..38cb534f2e 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -317,6 +317,8 @@ type DataSourceClusterClusterInfoSpec struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -326,6 +328,7 @@ type DataSourceClusterClusterInfoSpec struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *DataSourceClusterClusterInfoSpecAutoscale `json:"autoscale,omitempty"` AwsAttributes *DataSourceClusterClusterInfoSpecAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *DataSourceClusterClusterInfoSpecAzureAttributes `json:"azure_attributes,omitempty"` @@ -369,7 +372,9 @@ type DataSourceClusterClusterInfo struct { EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` JdbcPort int `json:"jdbc_port,omitempty"` + Kind string `json:"kind,omitempty"` LastRestartedTime int `json:"last_restarted_time,omitempty"` LastStateLossTime int `json:"last_state_loss_time,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` @@ -386,6 +391,7 @@ type DataSourceClusterClusterInfo struct { State string `json:"state,omitempty"` StateMessage string `json:"state_message,omitempty"` TerminatedTime int `json:"terminated_time,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *DataSourceClusterClusterInfoAutoscale `json:"autoscale,omitempty"` AwsAttributes *DataSourceClusterClusterInfoAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *DataSourceClusterClusterInfoAzureAttributes `json:"azure_attributes,omitempty"` diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 4ae063c89d..50395add90 100644 --- 
a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -176,6 +176,8 @@ type ResourceCluster struct { IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` IsPinned bool `json:"is_pinned,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NoWait bool `json:"no_wait,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` @@ -188,6 +190,7 @@ type ResourceCluster struct { SshPublicKeys []string `json:"ssh_public_keys,omitempty"` State string `json:"state,omitempty"` Url string `json:"url,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index c89eafab91..63c8aeb7b0 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -240,6 +240,8 @@ type ResourceJobJobClusterNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -249,6 +251,7 @@ type ResourceJobJobClusterNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobJobClusterNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobJobClusterNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobJobClusterNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -462,6 +465,8 @@ type ResourceJobNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -471,6 +476,7 @@ type ResourceJobNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -548,6 +554,13 @@ type ResourceJobSparkSubmitTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskCleanRoomsNotebookTask struct { + CleanRoomName string `json:"clean_room_name"` + Etag string `json:"etag,omitempty"` + 
NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + NotebookName string `json:"notebook_name"` +} + type ResourceJobTaskConditionTask struct { Left string `json:"left"` Op string `json:"op"` @@ -578,6 +591,13 @@ type ResourceJobTaskEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask struct { + CleanRoomName string `json:"clean_room_name"` + Etag string `json:"etag,omitempty"` + NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + NotebookName string `json:"notebook_name"` +} + type ResourceJobTaskForEachTaskTaskConditionTask struct { Left string `json:"left"` Op string `json:"op"` @@ -814,6 +834,8 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -823,6 +845,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -963,34 +986,35 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { } type ResourceJobTaskForEachTaskTask struct { - Description string `json:"description,omitempty"` - DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` - EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` - Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask 
*ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + CleanRoomsNotebookTask *ResourceJobTaskForEachTaskTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTaskForEachTask struct { @@ -1205,6 +1229,8 @@ type ResourceJobTaskNewCluster struct { EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` + IsSingleNode bool `json:"is_single_node,omitempty"` + Kind string `json:"kind,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` @@ -1214,6 +1240,7 @@ type 
ResourceJobTaskNewCluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` SparkVersion string `json:"spark_version"` SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` Autoscale *ResourceJobTaskNewClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceJobTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceJobTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` @@ -1354,35 +1381,36 @@ type ResourceJobTaskWebhookNotifications struct { } type ResourceJobTask struct { - Description string `json:"description,omitempty"` - DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` - EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` - ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` - Health *ResourceJobTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + CleanRoomsNotebookTask *ResourceJobTaskCleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskDependsOn 
`json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` + Health *ResourceJobTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTriggerFileArrival struct { diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 7238d24a85..ebdb850272 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -244,9 +244,9 @@ type ResourcePipelineNotification struct { } type ResourcePipelineRestartWindow struct { - DaysOfWeek string `json:"days_of_week,omitempty"` - StartHour int `json:"start_hour"` - TimeZoneId string `json:"time_zone_id,omitempty"` + DaysOfWeek []string `json:"days_of_week,omitempty"` + StartHour int `json:"start_hour"` + TimeZoneId string `json:"time_zone_id,omitempty"` } type ResourcePipelineTriggerCron struct { diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 6befba596c..1f89dc64d4 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.61.0" +const ProviderVersion = "1.62.0" func NewRoot() *Root { return &Root{ From 6b4b908682b6cd17d4fcd447444d9abae5fd353d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 18 Dec 2024 17:17:02 +0100 Subject: [PATCH 60/63] [Release] Release v0.237.0 (#2031) Bundles: * Allow overriding compute for non-development mode targets ([#1899](https://github.com/databricks/cli/pull/1899)). * Show an error when using a cluster override with 'mode: production' ([#1994](https://github.com/databricks/cli/pull/1994)). API Changes: * Added `databricks account federation-policy` command group. * Added `databricks account service-principal-federation-policy` command group. * Added `databricks aibi-dashboard-embedding-access-policy delete` command. * Added `databricks aibi-dashboard-embedding-approved-domains delete` command. OpenAPI commit a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d (2024-12-16) Dependency updates: * Upgrade TF provider to 1.62.0 ([#2030](https://github.com/databricks/cli/pull/2030)). * Upgrade Go SDK to 0.54.0 ([#2029](https://github.com/databricks/cli/pull/2029)). * Bump TF codegen dependencies to latest ([#1961](https://github.com/databricks/cli/pull/1961)). 
* Bump golang.org/x/term from 0.26.0 to 0.27.0 ([#1983](https://github.com/databricks/cli/pull/1983)). * Bump golang.org/x/sync from 0.9.0 to 0.10.0 ([#1984](https://github.com/databricks/cli/pull/1984)). * Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 ([#1985](https://github.com/databricks/cli/pull/1985)). * Bump golang.org/x/crypto from 0.24.0 to 0.31.0 ([#2006](https://github.com/databricks/cli/pull/2006)). * Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen ([#2005](https://github.com/databricks/cli/pull/2005)). --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56207686a9..6bdb0795b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Version changelog +## [Release] Release v0.237.0 + +Bundles: + * Allow overriding compute for non-development mode targets ([#1899](https://github.com/databricks/cli/pull/1899)). + * Show an error when using a cluster override with 'mode: production' ([#1994](https://github.com/databricks/cli/pull/1994)). + +API Changes: + * Added `databricks account federation-policy` command group. + * Added `databricks account service-principal-federation-policy` command group. + * Added `databricks aibi-dashboard-embedding-access-policy delete` command. + * Added `databricks aibi-dashboard-embedding-approved-domains delete` command. + +OpenAPI commit a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d (2024-12-16) +Dependency updates: + * Upgrade TF provider to 1.62.0 ([#2030](https://github.com/databricks/cli/pull/2030)). + * Upgrade Go SDK to 0.54.0 ([#2029](https://github.com/databricks/cli/pull/2029)). + * Bump TF codegen dependencies to latest ([#1961](https://github.com/databricks/cli/pull/1961)). + * Bump golang.org/x/term from 0.26.0 to 0.27.0 ([#1983](https://github.com/databricks/cli/pull/1983)). + * Bump golang.org/x/sync from 0.9.0 to 0.10.0 ([#1984](https://github.com/databricks/cli/pull/1984)). + * Bump github.com/databricks/databricks-sdk-go from 0.52.0 to 0.53.0 ([#1985](https://github.com/databricks/cli/pull/1985)). + * Bump golang.org/x/crypto from 0.24.0 to 0.31.0 ([#2006](https://github.com/databricks/cli/pull/2006)). + * Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in /bundle/internal/tf/codegen ([#2005](https://github.com/databricks/cli/pull/2005)). + ## [Release] Release v0.236.0 **New features for Databricks Asset Bundles:** From 965a3fcd5347881ec6e0b58f1c475d5911555741 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 19 Dec 2024 09:23:05 +0100 Subject: [PATCH 61/63] Remove dependency on ghodss/yaml (#2032) ## Changes I noticed that #1957 took a dep on this library even though we no longer need it. This change removes the dep and cleans up other (unused) uses of the library. We originally relied on this library to deserialize bundle configuration and JSON payloads to non-bundle CLI commands. Relevant commits: * The YAML flag was added to support apps (very early), and is not longer used: e408b701 * First use for bundle configuration loading: e47fa619 * Switch bundle configuration loading to use `libs/dyn`: 87dd46a3 ## Tests The build works without the dependency. 
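The files below switch from `github.com/ghodss/yaml` to the upstream `gopkg.in/yaml.v3` package. For reference, a minimal, self-contained sketch of the direct `yaml.v3` usage pattern those files now rely on; the struct and its tags are illustrative only and not taken from the repository.

```go
// Illustrative only: direct gopkg.in/yaml.v3 unmarshalling, the pattern that
// replaces the removed ghodss/yaml calls. The config struct is a stand-in.
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

type config struct {
	Name    string   `yaml:"name"`
	Targets []string `yaml:"targets"`
}

func main() {
	var c config
	src := []byte("name: example\ntargets:\n  - dev\n  - prod\n")
	if err := yaml.Unmarshal(src, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}
```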
--- NOTICE | 4 --- bundle/internal/schema/main_test.go | 2 +- bundle/internal/schema/parser.go | 3 +-- go.mod | 2 -- go.sum | 4 --- libs/flags/yaml_flag.go | 42 ----------------------------- 6 files changed, 2 insertions(+), 55 deletions(-) delete mode 100644 libs/flags/yaml_flag.go diff --git a/NOTICE b/NOTICE index d8306510e1..52fc537432 100644 --- a/NOTICE +++ b/NOTICE @@ -73,10 +73,6 @@ fatih/color - https://github.com/fatih/color Copyright (c) 2013 Fatih Arslan License - https://github.com/fatih/color/blob/main/LICENSE.md -ghodss/yaml - https://github.com/ghodss/yaml -Copyright (c) 2014 Sam Ghods -License - https://github.com/ghodss/yaml/blob/master/LICENSE - Masterminds/semver - https://github.com/Masterminds/semver Copyright (C) 2014-2019, Matt Butcher and Matt Farina License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt diff --git a/bundle/internal/schema/main_test.go b/bundle/internal/schema/main_test.go index f1389e61ad..607347b6b1 100644 --- a/bundle/internal/schema/main_test.go +++ b/bundle/internal/schema/main_test.go @@ -15,8 +15,8 @@ import ( "github.com/databricks/cli/libs/dyn/merge" "github.com/databricks/cli/libs/dyn/yamlloader" "github.com/databricks/cli/libs/jsonschema" - "github.com/ghodss/yaml" "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v3" ) func copyFile(src, dst string) error { diff --git a/bundle/internal/schema/parser.go b/bundle/internal/schema/parser.go index 6fb6e0047d..3fbec05286 100644 --- a/bundle/internal/schema/parser.go +++ b/bundle/internal/schema/parser.go @@ -9,10 +9,9 @@ import ( "reflect" "strings" - "github.com/ghodss/yaml" - "github.com/databricks/cli/libs/dyn/yamlloader" "github.com/databricks/cli/libs/jsonschema" + "gopkg.in/yaml.v3" ) type Components struct { diff --git a/go.mod b/go.mod index 03e2791d18..09b10bed5c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,6 @@ require ( github.com/briandowns/spinner v1.23.1 // Apache 2.0 github.com/databricks/databricks-sdk-go v0.54.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT - github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.7.0 // MPL 2.0 github.com/hashicorp/hc-install v0.9.0 // MPL 2.0 @@ -70,5 +69,4 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index be77011cd8..79775f512a 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,6 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -263,8 +261,6 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/libs/flags/yaml_flag.go b/libs/flags/yaml_flag.go deleted file mode 100644 index 95cc9b4be0..0000000000 --- a/libs/flags/yaml_flag.go +++ /dev/null @@ -1,42 +0,0 @@ -package flags - -import ( - "fmt" - "os" - - "github.com/ghodss/yaml" -) - -type YamlFlag struct { - raw []byte -} - -func (y *YamlFlag) String() string { - return fmt.Sprintf("YAML (%d bytes)", len(y.raw)) -} - -// TODO: Command.MarkFlagFilename() -func (y *YamlFlag) Set(v string) error { - // Load request from file if it starts with '@' (like curl). - if v[0] != '@' { - y.raw = []byte(v) - return nil - } - buf, err := os.ReadFile(v[1:]) - if err != nil { - return fmt.Errorf("read %s: %w", v, err) - } - y.raw = buf - return nil -} - -func (y *YamlFlag) Unmarshal(v any) error { - if y.raw == nil { - return nil - } - return yaml.Unmarshal(y.raw, v) -} - -func (y *YamlFlag) Type() string { - return "YAML" -} From f939e57f3ad3c89f0865eb471dd03375fb237dc2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 19 Dec 2024 12:50:59 +0100 Subject: [PATCH 62/63] Trigger integration tests on push to main (#2035) ## Changes The existing workflow already had 2 trigger conditions, so instead of adding a third (and seeing more "skipped" jobs), I split them up into dedicated workflow files, each with their own trigger condition. The integration test status is reported back via commit status. ## Tests We can confirm that everything works as expected as this PR moves from here to the merge group to main. --- .github/workflows/integration-approve.yml | 32 ++++++++++ .github/workflows/integration-main.yml | 33 ++++++++++ .github/workflows/integration-pr.yml | 56 ++++++++++++++++ .github/workflows/integration-tests.yml | 78 ----------------------- 4 files changed, 121 insertions(+), 78 deletions(-) create mode 100644 .github/workflows/integration-approve.yml create mode 100644 .github/workflows/integration-main.yml create mode 100644 .github/workflows/integration-pr.yml delete mode 100644 .github/workflows/integration-tests.yml diff --git a/.github/workflows/integration-approve.yml b/.github/workflows/integration-approve.yml new file mode 100644 index 0000000000..4bdeb62a3a --- /dev/null +++ b/.github/workflows/integration-approve.yml @@ -0,0 +1,32 @@ +name: integration-approve + +on: + merge_group: + +jobs: + # Trigger for merge groups. + # + # Statuses and checks apply to specific commits (by hash). + # Enforcement of required checks is done both at the PR level and the merge queue level. + # In case of multiple commits in a single PR, the hash of the squashed commit + # will not match the one for the latest (approved) commit in the PR. + # + # We auto approve the check for the merge queue for two reasons: + # + # * Queue times out due to duration of tests. + # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. 
+ # + trigger: + runs-on: ubuntu-latest + + steps: + - name: Auto-approve squashed commit + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: bash + run: | + gh api -X POST -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/${{ github.repository }}/statuses/${{ github.sha }} \ + -f 'state=success' \ + -f 'context=Integration Tests Check' diff --git a/.github/workflows/integration-main.yml b/.github/workflows/integration-main.yml new file mode 100644 index 0000000000..064e439cf7 --- /dev/null +++ b/.github/workflows/integration-main.yml @@ -0,0 +1,33 @@ +name: integration-main + +on: + push: + branches: + - main + +jobs: + # Trigger for pushes to the main branch. + # + # This workflow triggers the integration test workflow in a different repository. + # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. + trigger: + runs-on: ubuntu-latest + environment: "test-trigger-is" + + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-nightly.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f commit_sha=${{ github.event.after }} diff --git a/.github/workflows/integration-pr.yml b/.github/workflows/integration-pr.yml new file mode 100644 index 0000000000..bf2dcd8bc3 --- /dev/null +++ b/.github/workflows/integration-pr.yml @@ -0,0 +1,56 @@ +name: integration-pr + +on: + pull_request: + types: [opened, synchronize] + +jobs: + check-token: + runs-on: ubuntu-latest + environment: "test-trigger-is" + + outputs: + has_token: ${{ steps.set-token-status.outputs.has_token }} + + steps: + - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set + id: set-token-status + run: | + if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then + echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." + echo "::set-output name=has_token::false" + else + echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." + echo "::set-output name=has_token::true" + fi + + # Trigger for pull requests. + # + # This workflow triggers the integration test workflow in a different repository. + # It requires secrets from the "test-trigger-is" environment, which are only available to authorized users. + # It depends on the "check-token" workflow to confirm access to this environment to avoid failures. 
+ trigger: + runs-on: ubuntu-latest + environment: "test-trigger-is" + + if: needs.check-token.outputs.has_token == 'true' + needs: check-token + + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f pull_request_number=${{ github.event.pull_request.number }} \ + -f commit_sha=${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml deleted file mode 100644 index d56728c286..0000000000 --- a/.github/workflows/integration-tests.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: integration - -on: - - pull_request: - types: [opened, synchronize] - - merge_group: - - -jobs: - check-token: - runs-on: ubuntu-latest - environment: "test-trigger-is" - outputs: - has_token: ${{ steps.set-token-status.outputs.has_token }} - steps: - - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set - id: set-token-status - run: | - if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then - echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." - echo "::set-output name=has_token::false" - else - echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." - echo "::set-output name=has_token::true" - fi - - trigger-tests: - runs-on: ubuntu-latest - needs: check-token - if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true' - environment: "test-trigger-is" - - steps: - - uses: actions/checkout@v4 - - - name: Generate GitHub App Token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} - private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} - owner: ${{ secrets.ORG_NAME }} - repositories: ${{secrets.REPO_NAME}} - - - name: Trigger Workflow in Another Repo - env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} - run: | - gh workflow run cli-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ - --ref main \ - -f pull_request_number=${{ github.event.pull_request.number }} \ - -f commit_sha=${{ github.event.pull_request.head.sha }} - - - - # Statuses and checks apply to specific commits (by hash). - # Enforcement of required checks is done both at the PR level and the merge queue level. - # In case of multiple commits in a single PR, the hash of the squashed commit - # will not match the one for the latest (approved) commit in the PR. - # We auto approve the check for the merge queue for two reasons: - # * Queue times out due to duration of tests. - # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. 
- auto-approve: - if: github.event_name == 'merge_group' - runs-on: ubuntu-latest - steps: - - name: Mark Check - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - shell: bash - run: | - gh api -X POST -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ github.repository }}/statuses/${{ github.sha }} \ - -f 'state=success' \ - -f 'context=Integration Tests Check' From 07fff20eff8b0023d4f76444182201e5a33f59c9 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 19 Dec 2024 16:29:51 +0100 Subject: [PATCH 63/63] Remove "Publish test coverage" step on CI (#2036) There is no token for codecov and no plans on getting one. --- .github/workflows/push.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index ea561805bf..5e5a05e6ce 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -54,9 +54,6 @@ jobs: - name: Run tests run: make testonly - - name: Publish test coverage - uses: codecov/codecov-action@v4 - golangci: name: lint runs-on: ubuntu-latest