From 6e135f1747b10c2f346621fae4b72d5befb01d94 Mon Sep 17 00:00:00 2001
From: Thulasiraj Komminar <39799163+thulasirajkomminar@users.noreply.github.com>
Date: Mon, 7 Oct 2024 21:50:43 +0200
Subject: [PATCH] feat: add cluster data source & resource
---
README.md | 2 +
docs/data-sources/cluster.md | 108 ++++
docs/resources/cluster.md | 90 +++
examples/data-sources/cluster/data-source.tf | 16 +
examples/resources/cluster/main.tf | 26 +
go.mod | 4 +-
go.sum | 4 +-
internal/provider/cluster_data_source.go | 304 ++++++++++
internal/provider/cluster_model.go | 160 ++++++
internal/provider/cluster_resource.go | 554 +++++++++++++++++++
internal/provider/provider.go | 2 +
11 files changed, 1266 insertions(+), 4 deletions(-)
create mode 100644 docs/data-sources/cluster.md
create mode 100644 docs/resources/cluster.md
create mode 100644 examples/data-sources/cluster/data-source.tf
create mode 100755 examples/resources/cluster/main.tf
create mode 100644 internal/provider/cluster_data_source.go
create mode 100644 internal/provider/cluster_model.go
create mode 100644 internal/provider/cluster_resource.go
diff --git a/README.md b/README.md
index 250ba43..b43377a 100644
--- a/README.md
+++ b/README.md
@@ -58,11 +58,13 @@ provider "cratedb" {
### Data Sources
+* `cratedb_cluster`
* `cratedb_organization`
* `cratedb_organizations`
### Resources
+* `cratedb_cluster`
* `cratedb_organization`
## Developing the Provider
diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md
new file mode 100644
index 0000000..d427ed4
--- /dev/null
+++ b/docs/data-sources/cluster.md
@@ -0,0 +1,108 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "cratedb_cluster Data Source - terraform-provider-cratedb"
+subcategory: ""
+description: |-
+ To retrieve a cluster.
+---
+
+# cratedb_cluster (Data Source)
+
+To retrieve a cluster.
+
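+## Example Usage
+
+A minimal configuration, adapted from `examples/data-sources/cluster/data-source.tf` in this change; the `id` shown is a placeholder UUID:
+
+```terraform
+data "cratedb_cluster" "default" {
+  id = "156e7f96-0f6e-4fcc-8940-6e2a52efcee3"
+}
+
+output "default_cluster" {
+  value     = data.cratedb_cluster.default
+  sensitive = true
+}
+```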
+
+
+
+## Schema
+
+### Required
+
+- `id` (String) The id of the cluster.
+
+### Read-Only
+
+- `allow_custom_storage` (Boolean) The allow custom storage flag.
+- `allow_suspend` (Boolean) The allow suspend flag.
+- `backup_schedule` (String) The backup schedule.
+- `channel` (String) The channel of the cluster.
+- `crate_version` (String) The CrateDB version of the cluster.
+- `dc` (Attributes) The DublinCore of the cluster. (see [below for nested schema](#nestedatt--dc))
+- `deletion_protected` (Boolean) The deletion protected flag.
+- `external_ip` (String) The external IP address.
+- `fqdn` (String) The Fully Qualified Domain Name.
+- `gc_available` (Boolean) The garbage collection available flag.
+- `hardware_specs` (Attributes) The hardware specs of the cluster. (see [below for nested schema](#nestedatt--hardware_specs))
+- `health` (Attributes) The health of the cluster. (see [below for nested schema](#nestedatt--health))
+- `ip_whitelist` (Attributes List) The IP whitelist of the cluster. (see [below for nested schema](#nestedatt--ip_whitelist))
+- `last_async_operation` (Attributes) The last async operation of the cluster. (see [below for nested schema](#nestedatt--last_async_operation))
+- `name` (String) The name of the cluster.
+- `num_nodes` (Number) The number of nodes in the cluster.
+- `origin` (String) The origin of the cluster.
+- `password` (String, Sensitive) The password of the cluster.
+- `product_name` (String) The product name of the cluster.
+- `product_tier` (String) The product tier of the cluster.
+- `product_unit` (Number) The product unit of the cluster.
+- `project_id` (String) The project id of the cluster.
+- `subscription_id` (String) The subscription id of the cluster.
+- `suspended` (Boolean) The suspended flag.
+- `url` (String) The URL of the cluster.
+- `username` (String) The username of the cluster.
+
+
+### Nested Schema for `dc`
+
+Read-Only:
+
+- `created` (String) The created time.
+- `modified` (String) The modified time.
+
+
+
+### Nested Schema for `hardware_specs`
+
+Read-Only:
+
+- `cpus_per_node` (Number) The number of CPUs per node.
+- `disk_size_per_node_bytes` (Number) The disk size per node in bytes.
+- `disk_type` (String) The disk type.
+- `disks_per_node` (Number) The disks per node.
+- `heap_size_bytes` (Number) The heap size in bytes.
+- `memory_per_node_bytes` (Number) The memory per node in bytes.
+
+
+
+### Nested Schema for `health`
+
+Read-Only:
+
+- `last_seen` (String) The last seen time.
+- `running_operation` (String) The type of the currently running operation. Returns an empty string if there is no operation in progress.
+- `status` (String) The health status of the cluster.
+
+
+
+### Nested Schema for `ip_whitelist`
+
+Read-Only:
+
+- `cidr` (String) The CIDR.
+- `description` (String) The description.
+
+
+
+### Nested Schema for `last_async_operation`
+
+Read-Only:
+
+- `dc` (Attributes) The DublinCore of the last async operation. (see [below for nested schema](#nestedatt--last_async_operation--dc))
+- `id` (String) The id of the last async operation.
+- `status` (String) The status of the last async operation.
+- `type` (String) The type of the last async operation.
+
+
+### Nested Schema for `last_async_operation.dc`
+
+Read-Only:
+
+- `created` (String) The created time.
+- `modified` (String) The modified time.
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
new file mode 100644
index 0000000..1c0147e
--- /dev/null
+++ b/docs/resources/cluster.md
@@ -0,0 +1,90 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "cratedb_cluster Resource - terraform-provider-cratedb"
+subcategory: ""
+description: |-
+ Creates and manages a cluster.
+---
+
+# cratedb_cluster (Resource)
+
+Creates and manages a cluster.
+
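+## Example Usage
+
+A minimal configuration, adapted from `examples/resources/cluster/main.tf` in this change; the IDs and password shown are placeholders:
+
+```terraform
+resource "cratedb_cluster" "default" {
+  organization_id = "667796de-3c06-4503-bc3c-a9adc2a849cc"
+  crate_version   = "5.8.2"
+  name            = "default-cluster"
+  product_name    = "cr4"
+  product_tier    = "default"
+  project_id      = "a99eb2a8-bcf5-418c-866f-67e65a8ada40"
+  subscription_id = "7c156ae9-9c07-4106-8f42-df93855876c1"
+  username        = "admin"
+  password        = "zyTChd9mfcGBFLb72nJkNeVj6"
+}
+```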
+
+
+
+## Schema
+
+### Required
+
+- `crate_version` (String) The CrateDB version of the cluster.
+- `name` (String) The name of the cluster.
+- `organization_id` (String) The organization id of the cluster.
+- `password` (String, Sensitive) The password of the cluster.
+- `product_name` (String) The product name of the cluster.
+- `product_tier` (String) The product tier of the cluster.
+- `project_id` (String) The project id of the cluster.
+- `subscription_id` (String) The subscription id of the cluster.
+- `username` (String) The username of the cluster.
+
+### Optional
+
+- `channel` (String) The channel of the cluster. Default is `stable`.
+- `hardware_specs` (Attributes) The hardware specs of the cluster. (see [below for nested schema](#nestedatt--hardware_specs))
+- `product_unit` (Number) The product unit of the cluster. Default is `0`.
+
+### Read-Only
+
+- `allow_custom_storage` (Boolean) The allow custom storage flag.
+- `allow_suspend` (Boolean) The allow suspend flag.
+- `backup_schedule` (String) The backup schedule.
+- `dc` (Attributes) The DublinCore of the cluster. (see [below for nested schema](#nestedatt--dc))
+- `deletion_protected` (Boolean) The deletion protected flag.
+- `external_ip` (String) The external IP address.
+- `fqdn` (String) The Fully Qualified Domain Name.
+- `gc_available` (Boolean) The garbage collection available flag.
+- `health` (Attributes) The health of the cluster. (see [below for nested schema](#nestedatt--health))
+- `id` (String) The id of the cluster.
+- `ip_whitelist` (Attributes Set) The IP whitelist of the cluster. (see [below for nested schema](#nestedatt--ip_whitelist))
+- `num_nodes` (Number) The number of nodes in the cluster.
+- `origin` (String) The origin of the cluster.
+- `suspended` (Boolean) The suspended flag.
+- `url` (String) The URL of the cluster.
+
+
+### Nested Schema for `hardware_specs`
+
+Optional:
+
+- `cpus_per_node` (Number) The number of CPUs per node.
+- `disk_size_per_node_bytes` (Number) The disk size per node in bytes.
+- `disk_type` (String) The disk type.
+- `disks_per_node` (Number) The disks per node.
+- `heap_size_bytes` (Number) The heap size in bytes.
+- `memory_per_node_bytes` (Number) The memory per node in bytes.
+
+
+
+### Nested Schema for `dc`
+
+Read-Only:
+
+- `created` (String) The created time.
+- `modified` (String) The modified time.
+
+
+
+### Nested Schema for `health`
+
+Read-Only:
+
+- `status` (String) The health status of the cluster.
+
+
+
+### Nested Schema for `ip_whitelist`
+
+Read-Only:
+
+- `cidr` (String) The CIDR.
+- `description` (String) The description.
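+
+## Import
+
+A sketch of importing an existing cluster via a Terraform `import` block (requires Terraform 1.5 or later), based on the `ImportState` implementation in `internal/provider/cluster_resource.go`, which passes the import identifier through to the cluster `id`; the resource address and UUID are placeholders:
+
+```terraform
+import {
+  to = cratedb_cluster.default
+  id = "156e7f96-0f6e-4fcc-8940-6e2a52efcee3"
+}
+```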
diff --git a/examples/data-sources/cluster/data-source.tf b/examples/data-sources/cluster/data-source.tf
new file mode 100644
index 0000000..f0e8929
--- /dev/null
+++ b/examples/data-sources/cluster/data-source.tf
@@ -0,0 +1,16 @@
+terraform {
+ required_providers {
+ cratedb = {
+ source = "komminarlabs/cratedb"
+ }
+ }
+}
+
+data "cratedb_cluster" "default" {
+ id = "156e7f96-0f6e-4fcc-8940-6e2a52efcee3"
+}
+
+output "default_cluster" {
+ value = data.cratedb_cluster.default
+ sensitive = true
+}
diff --git a/examples/resources/cluster/main.tf b/examples/resources/cluster/main.tf
new file mode 100755
index 0000000..7f6d10e
--- /dev/null
+++ b/examples/resources/cluster/main.tf
@@ -0,0 +1,26 @@
+terraform {
+ required_providers {
+ cratedb = {
+ source = "komminarlabs/cratedb"
+ }
+ }
+}
+
+provider "cratedb" {}
+
+resource "cratedb_cluster" "default" {
+ organization_id = "667796de-3c06-4503-bc3c-a9adc2a849cc"
+ crate_version = "5.8.2"
+ name = "default-cluster"
+ product_name = "cr4"
+ product_tier = "default"
+ project_id = "a99eb2a8-bcf5-418c-866f-67e65a8ada40"
+ subscription_id = "7c156ae9-9c07-4106-8f42-df93855876c1"
+ username = "admin"
+ password = "zyTChd9mfcGBFLb72nJkNeVj6"
+}
+
+output "default_cluster" {
+ value = cratedb_cluster.default.health
+ sensitive = true
+}
diff --git a/go.mod b/go.mod
index cb0a5e0..f270cbb 100644
--- a/go.mod
+++ b/go.mod
@@ -9,8 +9,7 @@ require (
github.com/hashicorp/terraform-plugin-framework-validators v0.13.0
github.com/hashicorp/terraform-plugin-go v0.23.0
github.com/hashicorp/terraform-plugin-log v0.9.0
- github.com/komminarlabs/cratedb v0.1.0
- github.com/oapi-codegen/runtime v1.1.1
+ github.com/komminarlabs/cratedb v0.2.0
)
require (
@@ -52,6 +51,7 @@ require (
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
+ github.com/oapi-codegen/runtime v1.1.1 // indirect
github.com/oklog/run v1.0.0 // indirect
github.com/posener/complete v1.2.3 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
diff --git a/go.sum b/go.sum
index 65c624b..f191532 100644
--- a/go.sum
+++ b/go.sum
@@ -115,8 +115,8 @@ github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gav
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/komminarlabs/cratedb v0.1.0 h1:F0hf7zAaCU2MvF21AJiJIe0DI+98l5QD8xFkCIUpyKA=
-github.com/komminarlabs/cratedb v0.1.0/go.mod h1:U6rR2Y2uwDcasah14nfr/yA0iEmHzIJ5pHBq2Sczn2c=
+github.com/komminarlabs/cratedb v0.2.0 h1:6OrP2t7R9+EY4jo0lH4/5c3rQ3/zNL9drRndqcmZZYQ=
+github.com/komminarlabs/cratedb v0.2.0/go.mod h1:U6rR2Y2uwDcasah14nfr/yA0iEmHzIJ5pHBq2Sczn2c=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
diff --git a/internal/provider/cluster_data_source.go b/internal/provider/cluster_data_source.go
new file mode 100644
index 0000000..3383a07
--- /dev/null
+++ b/internal/provider/cluster_data_source.go
@@ -0,0 +1,304 @@
+package provider
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/komminarlabs/cratedb"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ datasource.DataSource = &ClusterDataSource{}
+ _ datasource.DataSourceWithConfigure = &ClusterDataSource{}
+)
+
+// NewClusterDataSource is a helper function to simplify the provider implementation.
+func NewClusterDataSource() datasource.DataSource {
+ return &ClusterDataSource{}
+}
+
+// ClusterDataSource is the data source implementation.
+type ClusterDataSource struct {
+ client *cratedb.ClientWithResponses
+}
+
+// Metadata returns the data source type name.
+func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_cluster"
+}
+
+// Schema defines the schema for the data source.
+func (d *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ Description: "To retrieve a cluster.",
+
+ Attributes: map[string]schema.Attribute{
+ "allow_custom_storage": schema.BoolAttribute{
+ Computed: true,
+ Description: "The allow custom storage flag.",
+ },
+ "allow_suspend": schema.BoolAttribute{
+ Computed: true,
+ Description: "The allow suspend flag.",
+ },
+ "backup_schedule": schema.StringAttribute{
+ Computed: true,
+ Description: "The backup schedule.",
+ },
+ "channel": schema.StringAttribute{
+ Computed: true,
+ Description: "The channel of the cluster.",
+ },
+ "crate_version": schema.StringAttribute{
+ Computed: true,
+ Description: "The CrateDB version of the cluster.",
+ },
+ "dc": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The DublinCore of the cluster.",
+ Attributes: map[string]schema.Attribute{
+ "created": schema.StringAttribute{
+ Computed: true,
+ Description: "The created time.",
+ },
+ "modified": schema.StringAttribute{
+ Computed: true,
+ Description: "The modified time.",
+ },
+ },
+ },
+ "deletion_protected": schema.BoolAttribute{
+ Computed: true,
+ Description: "The deletion protected flag.",
+ },
+ "external_ip": schema.StringAttribute{
+ Computed: true,
+ Description: "The external IP address.",
+ },
+ "fqdn": schema.StringAttribute{
+ Computed: true,
+ Description: "The Fully Qualified Domain Name.",
+ },
+ "gc_available": schema.BoolAttribute{
+ Computed: true,
+ Description: "The garbage collection available flag.",
+ },
+ "hardware_specs": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The hardware specs of the cluster.",
+ Attributes: map[string]schema.Attribute{
+ "cpus_per_node": schema.Int32Attribute{
+ Computed: true,
+ Description: "The cpus per node.",
+ },
+ "disk_size_per_node_bytes": schema.Int64Attribute{
+ Computed: true,
+ Description: "The disk size per node in bytes.",
+ },
+ "disk_type": schema.StringAttribute{
+ Computed: true,
+ Description: "The disk type.",
+ },
+ "disks_per_node": schema.Int32Attribute{
+ Computed: true,
+ Description: "The disks per node.",
+ },
+ "heap_size_bytes": schema.Int64Attribute{
+ Computed: true,
+ Description: "The heap size in bytes.",
+ },
+ "memory_per_node_bytes": schema.Int64Attribute{
+ Computed: true,
+ Description: "The memory per node in bytes.",
+ },
+ },
+ },
+ "health": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The health of the cluster.",
+ Attributes: map[string]schema.Attribute{
+ "last_seen": schema.StringAttribute{
+ Computed: true,
+ Description: "The last seen time.",
+ },
+ "running_operation": schema.StringAttribute{
+ Computed: true,
+ Description: "The type of the currently running operation. Returns an empty string if there is no operation in progress.",
+ },
+ "status": schema.StringAttribute{
+ Computed: true,
+ Description: "The health status of the cluster.",
+ },
+ },
+ },
+ "id": schema.StringAttribute{
+ Required: true,
+ Description: "The id of the cluster.",
+ },
+ "ip_whitelist": schema.ListNestedAttribute{
+ Computed: true,
+ Description: "The IP whitelist of the cluster.",
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "cidr": schema.StringAttribute{
+ Computed: true,
+ Description: "The CIDR.",
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ Description: "The description.",
+ },
+ },
+ },
+ },
+ "last_async_operation": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The last async operation of the cluster.",
+ Attributes: map[string]schema.Attribute{
+ "dc": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The DublinCore of the cluster.",
+ Attributes: map[string]schema.Attribute{
+ "created": schema.StringAttribute{
+ Computed: true,
+ Description: "The created time.",
+ },
+ "modified": schema.StringAttribute{
+ Computed: true,
+ Description: "The modified time.",
+ },
+ },
+ },
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The id of the last async operation.",
+ },
+ "status": schema.StringAttribute{
+ Computed: true,
+ Description: "The status of the last async operation.",
+ },
+ "type": schema.StringAttribute{
+ Computed: true,
+ Description: "The type of the last async operation.",
+ },
+ },
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ Description: "The name of the cluster.",
+ },
+ "num_nodes": schema.Int32Attribute{
+ Computed: true,
+ Description: "The number of nodes in the cluster.",
+ },
+ "origin": schema.StringAttribute{
+ Computed: true,
+ Description: "The origin of the cluster.",
+ },
+ "product_name": schema.StringAttribute{
+ Computed: true,
+ Description: "The product name of the cluster.",
+ },
+ "product_tier": schema.StringAttribute{
+ Computed: true,
+ Description: "The product tier of the cluster.",
+ },
+ "product_unit": schema.Int32Attribute{
+ Computed: true,
+ Description: "The product unit of the cluster.",
+ },
+ "project_id": schema.StringAttribute{
+ Computed: true,
+ Description: "The project id of the cluster.",
+ },
+ "subscription_id": schema.StringAttribute{
+ Computed: true,
+ Description: "The subscription id of the cluster.",
+ },
+ "suspended": schema.BoolAttribute{
+ Computed: true,
+ Description: "The suspended flag.",
+ },
+ "url": schema.StringAttribute{
+ Computed: true,
+ Description: "The URL of the cluster.",
+ },
+ "username": schema.StringAttribute{
+ Computed: true,
+ Description: "The username of the cluster.",
+ },
+ "password": schema.StringAttribute{
+ Computed: true,
+ Sensitive: true,
+ Description: "The password of the cluster.",
+ },
+ },
+ }
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *ClusterDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ client, ok := req.ProviderData.(*cratedb.ClientWithResponses)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Data Source Configure Type",
+ fmt.Sprintf("Expected cratedb.ClientWithResponses, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+ return
+ }
+ d.client = client
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+ var state ClusterModel
+
+ resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ readClusterResponse, err := d.client.GetApiV2ClustersClusterIdWithResponse(ctx, state.Id.ValueString())
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error getting cluster",
+ err.Error(),
+ )
+ return
+ }
+
+ if readClusterResponse.StatusCode() != 200 {
+ resp.Diagnostics.AddError(
+ "Error getting cluster",
+ fmt.Sprintf("HTTP Status Code: %d\nStatus: %v", readClusterResponse.StatusCode(), readClusterResponse.Status()),
+ )
+ return
+ }
+
+ // Map response body to model
+ clusterState, err := getClusterModel(ctx, *readClusterResponse.JSON200)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error getting cluster model",
+ err.Error(),
+ )
+ return
+ }
+ state = *clusterState
+
+ // Set state
+ diags := resp.State.Set(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
diff --git a/internal/provider/cluster_model.go b/internal/provider/cluster_model.go
new file mode 100644
index 0000000..516c559
--- /dev/null
+++ b/internal/provider/cluster_model.go
@@ -0,0 +1,160 @@
+package provider
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/komminarlabs/cratedb"
+)
+
+// ClusterModel maps CrateDB cluster schema data.
+type ClusterModel struct {
+ OrganizationId types.String `tfsdk:"organization_id"`
+ AllowCustomStorage types.Bool `tfsdk:"allow_custom_storage"`
+ AllowSuspend types.Bool `tfsdk:"allow_suspend"`
+ BackupSchedule types.String `tfsdk:"backup_schedule"`
+ Channel types.String `tfsdk:"channel"`
+ CrateVersion types.String `tfsdk:"crate_version"`
+ Dc types.Object `tfsdk:"dc"`
+ DeletionProtected types.Bool `tfsdk:"deletion_protected"`
+ ExternalIp types.String `tfsdk:"external_ip"`
+ Fqdn types.String `tfsdk:"fqdn"`
+ GcAvailable types.Bool `tfsdk:"gc_available"`
+ HardwareSpecs types.Object `tfsdk:"hardware_specs"`
+ Health types.Object `tfsdk:"health"`
+ Id types.String `tfsdk:"id"`
+ IpWhitelist []ClusterIpWhitelistModel `tfsdk:"ip_whitelist"`
+ Name types.String `tfsdk:"name"`
+ NumNodes types.Int32 `tfsdk:"num_nodes"`
+ Origin types.String `tfsdk:"origin"`
+ ProductName types.String `tfsdk:"product_name"`
+ ProductTier types.String `tfsdk:"product_tier"`
+ ProductUnit types.Int32 `tfsdk:"product_unit"`
+ ProjectId types.String `tfsdk:"project_id"`
+ SubscriptionId types.String `tfsdk:"subscription_id"`
+ Suspended types.Bool `tfsdk:"suspended"`
+ Url types.String `tfsdk:"url"`
+ Username types.String `tfsdk:"username"`
+ Password types.String `tfsdk:"password"`
+}
+
+// ClusterHardwareSpecsModel maps CrateDB cluster HardwareSpecs schema data.
+type ClusterHardwareSpecsModel struct {
+ CpusPerNode types.Int32 `tfsdk:"cpus_per_node"`
+ DiskSizePerNodeBytes types.Int64 `tfsdk:"disk_size_per_node_bytes"`
+ DiskType types.String `tfsdk:"disk_type"`
+ DisksPerNode types.Int32 `tfsdk:"disks_per_node"`
+ HeapSizeBytes types.Int64 `tfsdk:"heap_size_bytes"`
+ MemoryPerNodeBytes types.Int64 `tfsdk:"memory_per_node_bytes"`
+}
+
+func (c ClusterHardwareSpecsModel) GetAttrType() map[string]attr.Type {
+ return map[string]attr.Type{
+ "cpus_per_node": types.Int32Type,
+ "disk_size_per_node_bytes": types.Int64Type,
+ "disk_type": types.StringType,
+ "disks_per_node": types.Int32Type,
+ "heap_size_bytes": types.Int64Type,
+ "memory_per_node_bytes": types.Int64Type,
+ }
+}
+
+// ClusterHealthModel maps CrateDB cluster Health schema data.
+type ClusterHealthModel struct {
+ Status types.String `tfsdk:"status"`
+}
+
+func (c ClusterHealthModel) GetAttrType() map[string]attr.Type {
+ return map[string]attr.Type{
+ "status": types.StringType,
+ }
+}
+
+// ClusterIpWhitelistModel maps CrateDB cluster IpWhitelist schema data.
+type ClusterIpWhitelistModel struct {
+ Cidr types.String `tfsdk:"cidr"`
+ Description types.String `tfsdk:"description"`
+}
+
+func (c ClusterIpWhitelistModel) GetAttrType() attr.Type {
+ return types.ObjectType{AttrTypes: map[string]attr.Type{
+ "cidr": types.StringType,
+ "description": types.StringType,
+ }}
+}
+
+func getClusterModel(ctx context.Context, cluster cratedb.Cluster) (*ClusterModel, error) {
+ dcValue := DCModel{
+ Created: types.StringValue(cluster.Dc.Created.String()),
+ Modified: types.StringValue(cluster.Dc.Modified.String()),
+ }
+
+ dcObjectValue, diags := types.ObjectValueFrom(ctx, dcValue.GetAttrType(), dcValue)
+ if diags.HasError() {
+ return nil, fmt.Errorf("error getting cluster DC value")
+ }
+
+ hardwareSpecsValue := ClusterHardwareSpecsModel{
+ CpusPerNode: types.Int32Value(int32(*cluster.HardwareSpecs.CpusPerNode)),
+ DiskSizePerNodeBytes: types.Int64Value(int64(*cluster.HardwareSpecs.DiskSizePerNodeBytes)),
+ DiskType: types.StringPointerValue(cluster.HardwareSpecs.DiskType),
+ DisksPerNode: types.Int32Value(int32(*cluster.HardwareSpecs.DisksPerNode)),
+ HeapSizeBytes: types.Int64Value(int64(*cluster.HardwareSpecs.HeapSizeBytes)),
+ MemoryPerNodeBytes: types.Int64Value(int64(*cluster.HardwareSpecs.MemoryPerNodeBytes)),
+ }
+
+ hardwareSpecsObjectValue, diags := types.ObjectValueFrom(ctx, hardwareSpecsValue.GetAttrType(), hardwareSpecsValue)
+ if diags.HasError() {
+ return nil, fmt.Errorf("error getting cluster hardware specs value")
+ }
+
+ healthValue := ClusterHealthModel{
+ Status: types.StringValue(string(*cluster.Health.Status)),
+ }
+
+ healthObjectValue, diags := types.ObjectValueFrom(ctx, healthValue.GetAttrType(), healthValue)
+ if diags.HasError() {
+ return nil, fmt.Errorf("error getting cluster health value")
+ }
+
+ var ipWhitelistValues []ClusterIpWhitelistModel
+ if cluster.IpWhitelist != nil {
+ for _, ipWhitelist := range *cluster.IpWhitelist {
+ ipWhitelistValues = append(ipWhitelistValues, ClusterIpWhitelistModel{
+ Cidr: types.StringValue(ipWhitelist.Cidr),
+ Description: types.StringValue(string(*ipWhitelist.Description)),
+ })
+ }
+ }
+
+ clusterModel := ClusterModel{
+ Dc: dcObjectValue,
+ HardwareSpecs: hardwareSpecsObjectValue,
+ Health: healthObjectValue,
+ Id: types.StringPointerValue(cluster.Id),
+ IpWhitelist: ipWhitelistValues,
+ AllowCustomStorage: types.BoolPointerValue(cluster.AllowCustomStorage),
+ AllowSuspend: types.BoolPointerValue(cluster.AllowSuspend),
+ BackupSchedule: types.StringPointerValue(cluster.BackupSchedule),
+ Channel: types.StringPointerValue(cluster.Channel),
+ CrateVersion: types.StringValue(cluster.CrateVersion),
+ DeletionProtected: types.BoolPointerValue(cluster.DeletionProtected),
+ ExternalIp: types.StringPointerValue(cluster.ExternalIp),
+ Fqdn: types.StringPointerValue(cluster.Fqdn),
+ GcAvailable: types.BoolPointerValue(cluster.GcAvailable),
+ Name: types.StringValue(cluster.Name),
+ NumNodes: types.Int32Value(int32(*cluster.NumNodes)),
+ Origin: types.StringPointerValue(cluster.Origin),
+ ProductName: types.StringValue(cluster.ProductName),
+ ProductTier: types.StringValue(cluster.ProductTier),
+ ProductUnit: types.Int32Value(int32(*cluster.ProductUnit)),
+ ProjectId: types.StringValue(cluster.ProjectId),
+ SubscriptionId: types.StringPointerValue(cluster.SubscriptionId),
+ Suspended: types.BoolPointerValue(cluster.Suspended),
+ Url: types.StringPointerValue(cluster.Url),
+ Username: types.StringValue(cluster.Username),
+ }
+ return &clusterModel, nil
+}
diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go
new file mode 100644
index 0000000..89d5890
--- /dev/null
+++ b/internal/provider/cluster_resource.go
@@ -0,0 +1,554 @@
+package provider
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32default"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/setdefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/komminarlabs/cratedb"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var (
+ _ resource.Resource = &ClusterResource{}
+ _ resource.ResourceWithConfigure = &ClusterResource{}
+ _ resource.ResourceWithImportState = &ClusterResource{}
+)
+
+// NewClusterResource is a helper function to simplify the provider implementation.
+func NewClusterResource() resource.Resource {
+ return &ClusterResource{}
+}
+
+// ClusterResource defines the resource implementation.
+type ClusterResource struct {
+ client *cratedb.ClientWithResponses
+}
+
+// Metadata returns the resource type name.
+func (r *ClusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_cluster"
+}
+
+// Schema defines the schema for the resource.
+func (r *ClusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Creates and manages a cluster.",
+
+ Attributes: map[string]schema.Attribute{
+ "organization_id": schema.StringAttribute{
+ Required: true,
+ Description: "The organization id of the cluster.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "allow_custom_storage": schema.BoolAttribute{
+ Computed: true,
+ Description: "The allow custom storage flag.",
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "allow_suspend": schema.BoolAttribute{
+ Computed: true,
+ Description: "The allow suspend flag.",
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "backup_schedule": schema.StringAttribute{
+ Computed: true,
+ Description: "The backup schedule.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "channel": schema.StringAttribute{
+ Computed: true,
+ Optional: true,
+ Default: stringdefault.StaticString("stable"),
+ Description: "The channel of the cluster. Default is 'stable'.",
+ },
+ "crate_version": schema.StringAttribute{
+ Required: true,
+ Description: "The CrateDB version of the cluster.",
+ },
+ "dc": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The DublinCore of the cluster.",
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "created": schema.StringAttribute{
+ Computed: true,
+ Description: "The created time.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "modified": schema.StringAttribute{
+ Computed: true,
+ Description: "The modified time.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ },
+ "deletion_protected": schema.BoolAttribute{
+ Computed: true,
+ Description: "The deletion protected flag.",
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "external_ip": schema.StringAttribute{
+ Computed: true,
+ Description: "The external IP address.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "fqdn": schema.StringAttribute{
+ Computed: true,
+ Description: "The Fully Qualified Domain Name.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "gc_available": schema.BoolAttribute{
+ Computed: true,
+ Description: "The garbage collection available flag.",
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "hardware_specs": schema.SingleNestedAttribute{
+ Computed: true,
+ Optional: true,
+ Description: "The hardware specs of the cluster.",
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "cpus_per_node": schema.Int32Attribute{
+ Computed: true,
+ Optional: true,
+ Description: "The cpus per node.",
+ },
+ "disk_size_per_node_bytes": schema.Int64Attribute{
+ Computed: true,
+ Optional: true,
+ Description: "The disk size per node in bytes.",
+ },
+ "disk_type": schema.StringAttribute{
+ Computed: true,
+ Optional: true,
+ Description: "The disk type.",
+ },
+ "disks_per_node": schema.Int32Attribute{
+ Computed: true,
+ Optional: true,
+ Description: "The disks per node.",
+ },
+ "heap_size_bytes": schema.Int64Attribute{
+ Computed: true,
+ Optional: true,
+ Description: "The heap size in bytes.",
+ },
+ "memory_per_node_bytes": schema.Int64Attribute{
+ Computed: true,
+ Optional: true,
+ Description: "The memory per node in bytes.",
+ },
+ },
+ },
+ "health": schema.SingleNestedAttribute{
+ Computed: true,
+ Description: "The health of the cluster.",
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "status": schema.StringAttribute{
+ Computed: true,
+ Description: "The health status of the cluster.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ },
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The id of the cluster.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "ip_whitelist": schema.SetNestedAttribute{
+ Computed: true,
+ Description: "The IP whitelist of the cluster.",
+ Default: setdefault.StaticValue(types.SetNull(ClusterIpWhitelistModel{}.GetAttrType())),
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "cidr": schema.StringAttribute{
+ Computed: true,
+ Description: "The CIDR.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ Description: "The description.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ },
+ },
+ "name": schema.StringAttribute{
+ Required: true,
+ Description: "The name of the cluster.",
+ },
+ "num_nodes": schema.Int32Attribute{
+ Computed: true,
+ Description: "The number of nodes in the cluster.",
+ PlanModifiers: []planmodifier.Int32{
+ int32planmodifier.UseStateForUnknown(),
+ },
+ },
+ "origin": schema.StringAttribute{
+ Computed: true,
+ Description: "The origin of the cluster.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "product_name": schema.StringAttribute{
+ Required: true,
+ Description: "The product name of the cluster.",
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(1),
+ stringvalidator.LengthAtMost(512),
+ stringvalidator.RegexMatches(
+ regexp.MustCompile(`^\w[\w\-\. ]*$`),
+ "Product name must start with a letter and contain only letters, numbers, hyphens, underscores, and periods.",
+ ),
+ },
+ },
+ "product_tier": schema.StringAttribute{
+ Required: true,
+ Description: "The product tier of the cluster.",
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(1),
+ stringvalidator.LengthAtMost(512),
+ stringvalidator.RegexMatches(
+ regexp.MustCompile(`^\w[\w\-\. ]*$`),
+ "Product name must start with a letter and contain only letters, numbers, hyphens, underscores, and periods.",
+ ),
+ },
+ },
+ "product_unit": schema.Int32Attribute{
+ Computed: true,
+ Optional: true,
+ Default: int32default.StaticInt32(0),
+ Description: "The product unit of the cluster. Default is `0`.",
+ },
+ "project_id": schema.StringAttribute{
+ Required: true,
+ Description: "The project id of the cluster.",
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(36),
+ stringvalidator.LengthAtMost(36),
+ stringvalidator.RegexMatches(
+ regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`),
+ "Project ID must be a valid UUID.",
+ ),
+ },
+ },
+ "subscription_id": schema.StringAttribute{
+ Required: true,
+ Description: "The subscription id of the cluster.",
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(1),
+ stringvalidator.LengthAtMost(512),
+ },
+ },
+ "suspended": schema.BoolAttribute{
+ Computed: true,
+ Description: "The suspended flag.",
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "url": schema.StringAttribute{
+ Computed: true,
+ Description: "The URL of the cluster.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "username": schema.StringAttribute{
+ Required: true,
+ Description: "The username of the cluster.",
+ },
+ "password": schema.StringAttribute{
+ Required: true,
+ Sensitive: true,
+ Description: "The password of the cluster.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(24),
+ },
+ },
+ },
+ }
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *ClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ var plan ClusterModel
+
+ // Read Terraform plan data into the model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Generate API request body from plan
+ productUnit := int(plan.ProductUnit.ValueInt32())
+ password := plan.Password
+ organizationId := plan.OrganizationId
+ createPartialClusterRequest := cratedb.PartialCluster{
+ Channel: plan.Channel.ValueStringPointer(),
+ CrateVersion: plan.CrateVersion.ValueString(),
+ Name: plan.Name.ValueString(),
+ ProductName: plan.ProductName.ValueString(),
+ ProductTier: plan.ProductTier.ValueString(),
+ ProductUnit: &productUnit,
+ Username: plan.Username.ValueString(),
+ Password: password.ValueStringPointer(),
+ }
+
+ createClusterRequest := cratedb.ClusterProvision{
+ Cluster: createPartialClusterRequest,
+ ProjectId: plan.ProjectId.ValueStringPointer(),
+ SubscriptionId: plan.SubscriptionId.ValueString(),
+ }
+
+ createClusterResponse, err := r.client.PostApiV2OrganizationsOrganizationIdClustersWithResponse(ctx, organizationId.ValueString(), createClusterRequest)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error creating cluster",
+ "Could not create cluster, unexpected error: "+err.Error(),
+ )
+ return
+ }
+
+ if createClusterResponse.StatusCode() != 201 {
+ resp.Diagnostics.AddError(
+ "Error creating cluster",
+ fmt.Sprintf("HTTP Status Code: %d\nStatus: %v", createClusterResponse.StatusCode(), createClusterResponse.Status()),
+ )
+ return
+ }
+
+ // Map response body to schema and populate Computed attribute values
+ clusterPlan, err := getClusterModel(ctx, *createClusterResponse.JSON201)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error getting cluster model",
+ err.Error(),
+ )
+ return
+ }
+ plan = *clusterPlan
+ plan.OrganizationId = organizationId
+ plan.Password = password
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *ClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ // Get current state
+ var state ClusterModel
+
+ // Read Terraform prior state data into the model
+ resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Get refreshed cluster value from CrateDB
+ password := state.Password
+ organizationId := state.OrganizationId
+ readClusterResponse, err := r.client.GetApiV2ClustersClusterIdWithResponse(ctx, state.Id.ValueString())
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error getting cluster",
+ err.Error(),
+ )
+ return
+ }
+
+ if readClusterResponse.StatusCode() != 200 {
+ resp.Diagnostics.AddError(
+ "Error getting cluster",
+ fmt.Sprintf("HTTP Status Code: %d\nStatus: %v", readClusterResponse.StatusCode(), readClusterResponse.Status()),
+ )
+ return
+ }
+
+ // Map response body to model
+ clusterState, err := getClusterModel(ctx, *readClusterResponse.JSON200)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error getting cluster model",
+ err.Error(),
+ )
+ return
+ }
+ // Overwrite items with refreshed state
+ state = *clusterState
+ state.OrganizationId = organizationId
+ state.Password = password
+
+ // Save updated data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *ClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ var plan ClusterModel
+
+ // Read Terraform plan data into the model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Generate API request body from plan
+ password := plan.Password
+ organizationId := plan.OrganizationId
+ updateClusterRequest := cratedb.ClusterEdit{
+ Password: plan.Password.ValueStringPointer(),
+ }
+
+ // Update existing cluster
+ updateClusterResponse, err := r.client.PatchApiV2ClustersClusterIdWithResponse(ctx, plan.Id.ValueString(), updateClusterRequest)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error updating cluster",
+ "Could not update cluster, unexpected error: "+err.Error(),
+ )
+ return
+ }
+
+ if updateClusterResponse.StatusCode() != 200 {
+ resp.Diagnostics.AddError(
+ "Error updating cluster",
+ fmt.Sprintf("HTTP Status Code: %d\nStatus: %v", updateClusterResponse.StatusCode(), updateClusterResponse.Status()),
+ )
+ return
+ }
+
+ // Map response body to schema and populate Computed attribute values
+ clusterPlan, err := getClusterModel(ctx, *updateClusterResponse.JSON200)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error getting cluster model",
+ err.Error(),
+ )
+ return
+ }
+ plan = *clusterPlan
+ plan.OrganizationId = organizationId
+ plan.Password = password
+
+ // Save updated data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *ClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ var state ClusterModel
+
+ // Read Terraform prior state data into the model
+ resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Delete existing cluster
+ deleteClustersResponse, err := r.client.DeleteApiV2ClustersClusterIdWithResponse(ctx, state.Id.ValueString())
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Error deleting cluster",
+ "Could not delete cluster, unexpected error: "+err.Error(),
+ )
+ return
+ }
+
+ if deleteClustersResponse.StatusCode() != 204 {
+ resp.Diagnostics.AddError(
+ "Error deleting cluster",
+ fmt.Sprintf("HTTP Status Code: %d\nStatus: %v", deleteClustersResponse.StatusCode(), deleteClustersResponse.Status()),
+ )
+ return
+ }
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *ClusterResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ client, ok := req.ProviderData.(*cratedb.ClientWithResponses)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Data Source Configure Type",
+ fmt.Sprintf("Expected cratedb.ClientWithResponses, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+ return
+ }
+ r.client = client
+}
+
+func (r *ClusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 3710a0c..529a1c4 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -206,6 +206,7 @@ func (p *CrateDBProvider) Configure(ctx context.Context, req provider.ConfigureR
// Resources defines the resources implemented in the provider.
func (p *CrateDBProvider) Resources(ctx context.Context) []func() resource.Resource {
return []func() resource.Resource{
+ NewClusterResource,
NewOrganizationResource,
}
}
@@ -213,6 +214,7 @@ func (p *CrateDBProvider) Resources(ctx context.Context) []func() resource.Resou
// DataSources defines the data sources implemented in the provider.
func (p *CrateDBProvider) DataSources(ctx context.Context) []func() datasource.DataSource {
return []func() datasource.DataSource{
+ NewClusterDataSource,
NewOrganizationDataSource,
NewOrganizationsDataSource,
}