Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support account-level provider with workspace-level resources #3188

Open
wants to merge 32 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
1ad91bd
Working!
mgyucht Jan 31, 2024
eb7097b
apply to all resources & data sources
mgyucht Jan 31, 2024
148ccaa
Merge branch 'main' into account-level-provider-for-workspace-resources
mgyucht Feb 1, 2024
8cfbb7d
extra
mgyucht Feb 1, 2024
f6cd6d0
fix
mgyucht Feb 1, 2024
703109d
extra
mgyucht Feb 1, 2024
9f35f59
fix
mgyucht Feb 1, 2024
7752299
use actual go SDK
mgyucht Feb 1, 2024
ab4c591
fix
mgyucht Feb 1, 2024
4d4edd2
comment
mgyucht Feb 2, 2024
ddf747a
Do not allow workspace_id for aws policy datasources
mgyucht Feb 2, 2024
468920b
change
mgyucht Feb 2, 2024
782b210
use main for diff-schema
mgyucht Feb 2, 2024
4ef15d3
Add fake create method to customizediff test
mgyucht Feb 5, 2024
f50673e
Merge branch 'main' into account-level-provider-for-workspace-resources
mgyucht Feb 5, 2024
20f35e2
fixes
mgyucht Feb 5, 2024
f3a0e4a
work
mgyucht Feb 6, 2024
affa5a5
Merge branch 'main' into account-level-provider-for-workspace-resources
mgyucht Feb 11, 2024
106d8cc
bump go sdk
mgyucht Feb 11, 2024
74d65f2
validate workspace ID
mgyucht Feb 11, 2024
9af9719
set original_workspace_id
mgyucht Feb 11, 2024
fe2a7cd
work
mgyucht Feb 11, 2024
ebbc581
work
mgyucht Feb 14, 2024
a4d3ca5
Merge branch 'main' into account-level-provider-for-workspace-resources
mgyucht Feb 21, 2024
2e385e8
fix merge
mgyucht Feb 21, 2024
a5b2dda
fix
mgyucht Feb 21, 2024
5874ead
fix
mgyucht Feb 21, 2024
d373d3b
fix
mgyucht Feb 21, 2024
1267f35
fix
mgyucht Feb 21, 2024
6f95828
some cleanups
mgyucht Feb 21, 2024
327d471
more cleanup
mgyucht Feb 21, 2024
10e586f
fix tests
mgyucht Feb 21, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion clusters/data_spark_version.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,15 +131,20 @@ func DataSourceSparkVersion() *schema.Resource {

s["photon"].Deprecated = "Specify runtime_engine=\"PHOTON\" in the cluster configuration"
s["graviton"].Deprecated = "Not required anymore - it's automatically enabled on the Graviton-based node types"
common.AddWorkspaceIdField(s)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this won't scale, add it on the level of common.Resource and common.DataSource

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was thinking about how to do this. We don't know which resources are workspace-only or also at the account level. We need some explicit mapping to make sure this isn't forgotten about. I will probably add this at the provider-level to make sure there is an explicit opt-in/out for each resource type.

return s
})

return &schema.Resource{
Schema: s,
ReadContext: func(ctx context.Context, d *schema.ResourceData, m any) diag.Diagnostics {
c, err := m.(*common.DatabricksClient).InConfiguredWorkspace(ctx, d)
if err != nil {
return diag.FromErr(err)
}
var this SparkVersionRequest
common.DataToStructPointer(d, s, &this)
version, err := NewClustersAPI(ctx, m).LatestSparkVersion(this)
version, err := NewClustersAPI(ctx, c).LatestSparkVersion(this)
if err != nil {
return diag.FromErr(err)
}
Expand Down
1 change: 1 addition & 0 deletions clusters/resource_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,7 @@ func (ClusterResourceProvider) CustomizeSchema(s map[string]*schema.Schema) map[
Schema: common.StructToSchema(MountInfo{}, nil),
},
})
common.AddWorkspaceIdField(s)

return s
}
Expand Down
66 changes: 62 additions & 4 deletions common/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,16 @@ type DatabricksClient struct {
cachedWorkspaceClient *databricks.WorkspaceClient
cachedAccountClient *databricks.AccountClient
mu sync.Mutex

cachedWorkspaceClients map[int64]*databricks.WorkspaceClient
cachedWorkspaceClientsMu sync.Mutex
}

// addCachedMe wraps the workspace client's CurrentUser service with the
// cachedMe decorator, so the underlying implementation is invoked once and
// subsequent lookups are served from the cache.
func addCachedMe(w *databricks.WorkspaceClient) {
	w.CurrentUser.WithImpl(&cachedMe{
		internalImpl: w.CurrentUser.Impl(),
	})
}

func (c *DatabricksClient) WorkspaceClient() (*databricks.WorkspaceClient, error) {
Expand All @@ -60,14 +70,59 @@ func (c *DatabricksClient) WorkspaceClient() (*databricks.WorkspaceClient, error
if err != nil {
return nil, err
}
internalImpl := w.CurrentUser.Impl()
w.CurrentUser.WithImpl(&cachedMe{
internalImpl: internalImpl,
})
addCachedMe(w)
c.cachedWorkspaceClient = w
return w, nil
}

// InConfiguredWorkspace returns a client scoped to the workspace named by the
// resource's "workspace_id" attribute. When the attribute is unset, the
// receiver itself is returned unchanged, so workspace-level providers keep
// working as before.
func (c *DatabricksClient) InConfiguredWorkspace(ctx context.Context, d *schema.ResourceData) (*DatabricksClient, error) {
	rawWorkspaceId, specified := d.GetOk("workspace_id")
	if !specified {
		// No override requested: use the client as originally configured.
		return c, nil
	}
	workspaceClient, err := c.getConfiguredWorkspaceClient(ctx, int64(rawWorkspaceId.(int)))
	if err != nil {
		return nil, err
	}
	restClient, err := client.New(workspaceClient.Config)
	if err != nil {
		return nil, err
	}
	return &DatabricksClient{
		DatabricksClient:      restClient,
		cachedWorkspaceClient: workspaceClient,
	}, nil
}

// getConfiguredWorkspaceClient returns a workspace client for the given
// workspace ID, creating and caching it on first use. The client is built by
// looking the workspace up through the account-level client, so the receiver
// must be configured at the account level for a cache miss to succeed.
func (c *DatabricksClient) getConfiguredWorkspaceClient(ctx context.Context, workspaceId int64) (*databricks.WorkspaceClient, error) {
	// All access to cachedWorkspaceClients must hold the mutex: the original
	// fast-path read outside the lock raced with the write below, and Go maps
	// are not safe for concurrent read/write.
	c.cachedWorkspaceClientsMu.Lock()
	defer c.cachedWorkspaceClientsMu.Unlock()
	if w, ok := c.cachedWorkspaceClients[workspaceId]; ok {
		return w, nil
	}
	a, err := c.AccountClient()
	if err != nil {
		return nil, err
	}
	ws, err := a.Workspaces.GetByWorkspaceId(ctx, workspaceId)
	if err != nil {
		return nil, err
	}
	w, err := a.GetWorkspaceClient(*ws)
	if err != nil {
		return nil, err
	}
	addCachedMe(w)
	// Lazily initialize the map; the zero-value DatabricksClient has a nil map
	// and writing to a nil map panics.
	if c.cachedWorkspaceClients == nil {
		c.cachedWorkspaceClients = make(map[int64]*databricks.WorkspaceClient)
	}
	c.cachedWorkspaceClients[workspaceId] = w
	return w, nil
}

// Set the cached workspace client.
func (c *DatabricksClient) SetWorkspaceClient(w *databricks.WorkspaceClient) {
c.mu.Lock()
Expand Down Expand Up @@ -97,6 +152,9 @@ func (c *DatabricksClient) setAccountId(accountId string) error {
}

func (c *DatabricksClient) AccountClient() (*databricks.AccountClient, error) {
if c.cachedAccountClient != nil {
return c.cachedAccountClient, nil
}
c.mu.Lock()
defer c.mu.Unlock()
if c.cachedAccountClient != nil {
Expand Down
34 changes: 29 additions & 5 deletions common/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,10 @@ func (r Resource) ToResource() *schema.Resource {
if r.Update != nil {
update = func(ctx context.Context, d *schema.ResourceData,
m any) diag.Diagnostics {
c := m.(*DatabricksClient)
c, err := m.(*DatabricksClient).InConfiguredWorkspace(ctx, d)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it's not enough, you also need to prepend workspace_id to d.Id() on a read side and on a write side - e.g. fmt.Sprintf("%d:%s", d.Get("workspace_id"), d.Id()). otherwise there's too much risk involved.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This field is marked as ForceNew, so changes to workspace ID would not trigger update, only delete/recreate. However, we do need to store the old ID somewhere in the state to be able to appropriately move from one workspace to another (as there is no way to get the "old" workspace ID during the delete call).

if err != nil {
return diag.FromErr(err)
}
if err := recoverable(r.Update)(ctx, d, c); err != nil {
err = nicerError(ctx, err, "update")
return diag.FromErr(err)
Expand Down Expand Up @@ -124,7 +127,11 @@ func (r Resource) ToResource() *schema.Resource {
m any) diag.Diagnostics {
return func(ctx context.Context, d *schema.ResourceData,
m any) diag.Diagnostics {
err := recoverable(r.Read)(ctx, d, m.(*DatabricksClient))
c, err := m.(*DatabricksClient).InConfiguredWorkspace(ctx, d)
if err != nil {
return diag.FromErr(err)
}
err = recoverable(r.Read)(ctx, d, c)
// TODO: https://github.com/databricks/terraform-provider-databricks/issues/2021
if ignoreMissing && apierr.IsMissing(err) {
log.Printf("[INFO] %s[id=%s] is removed on backend",
Expand All @@ -146,8 +153,11 @@ func (r Resource) ToResource() *schema.Resource {
CustomizeDiff: r.saferCustomizeDiff(),
CreateContext: func(ctx context.Context, d *schema.ResourceData,
m any) diag.Diagnostics {
c := m.(*DatabricksClient)
err := recoverable(r.Create)(ctx, d, c)
c, err := m.(*DatabricksClient).InConfiguredWorkspace(ctx, d)
if err != nil {
return diag.FromErr(err)
}
err = recoverable(r.Create)(ctx, d, c)
if err != nil {
err = nicerError(ctx, err, "create")
return diag.FromErr(err)
Expand All @@ -162,7 +172,11 @@ func (r Resource) ToResource() *schema.Resource {
UpdateContext: update,
DeleteContext: func(ctx context.Context, d *schema.ResourceData,
m any) diag.Diagnostics {
err := recoverable(r.Delete)(ctx, d, m.(*DatabricksClient))
c, err := m.(*DatabricksClient).InConfiguredWorkspace(ctx, d)
if err != nil {
return diag.FromErr(err)
}
err = recoverable(r.Delete)(ctx, d, c)
if apierr.IsMissing(err) {
// TODO: https://github.com/databricks/terraform-provider-databricks/issues/2021
log.Printf("[INFO] %s[id=%s] is removed on backend",
Expand Down Expand Up @@ -447,3 +461,13 @@ func AddAccountIdField(s map[string]*schema.Schema) map[string]*schema.Schema {
}
return s
}

// AddWorkspaceIdField registers an optional "workspace_id" attribute on the
// given schema map and returns the same map. The field is ForceNew: changing
// it recreates the resource rather than updating it in place.
func AddWorkspaceIdField(s map[string]*schema.Schema) map[string]*schema.Schema {
	workspaceIdField := schema.Schema{
		Type:        schema.TypeInt,
		Optional:    true,
		ForceNew:    true,
		Description: "The workspace id of the workspace, for example 1234567890. This can be retrieved from `databricks_mws_workspaces.<YOUR_WORKSPACE>.workspace_id`. This attribute is required when using an account-level provider.",
	}
	s["workspace_id"] = &workspaceIdField
	return s
}
2 changes: 2 additions & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -82,3 +82,5 @@ require (
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

replace github.com/databricks/databricks-sdk-go => /Users/miles/databricks-sdk-go
24 changes: 24 additions & 0 deletions internal/acceptance/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,3 +69,27 @@ func TestAccClusterResource_CreateSingleNodeCluster(t *testing.T) {
}`,
})
}

// TestMwsAccClusterResource_CreateClusterWithAccountLevelProvider verifies
// that a cluster (and its spark-version data source) can be provisioned with
// an account-level provider by targeting a specific workspace via the
// workspace_id attribute.
func TestMwsAccClusterResource_CreateClusterWithAccountLevelProvider(t *testing.T) {
	// The target workspace is hard-coded; hoisted into a constant so the data
	// source and the resource cannot drift apart.
	// NOTE(review): this pins the test to one environment — TODO confirm
	// whether it should come from an environment variable instead.
	const workspaceId = "5549111504720597"
	accountLevel(t, step{
		Template: `
		data "databricks_spark_version" "latest" {
			workspace_id = ` + workspaceId + `
		}
		resource "databricks_cluster" "this" {
			workspace_id = ` + workspaceId + `
			cluster_name = "singlenode-{var.RANDOM}"
			spark_version = data.databricks_spark_version.latest.id
			instance_pool_id = "1127-101603-faked620-pool-c10aq68g"
			num_workers = 0
			autotermination_minutes = 10
			spark_conf = {
				"spark.databricks.cluster.profile" = "singleNode"
				"spark.master" = "local[*]"
			}
			custom_tags = {
				"ResourceClass" = "SingleNode"
			}
		}`,
	})
}
Loading