diff --git a/README.md b/README.md
index 9f4e4c5d..e2c658e2 100644
--- a/README.md
+++ b/README.md
@@ -852,7 +852,7 @@ module "cluster_pattern" {
 | Name | Source | Version |
 |------|--------|---------|
 | [bastion\_host](#module\_bastion\_host) | terraform-ibm-modules/landing-zone-vsi/ibm | 4.2.0 |
-| [cluster](#module\_cluster) | terraform-ibm-modules/base-ocp-vpc/ibm | 3.30.1 |
+| [cluster](#module\_cluster) | terraform-ibm-modules/base-ocp-vpc/ibm | 3.31.0 |
 | [dynamic\_values](#module\_dynamic\_values) | ./dynamic_values | n/a |
 | [f5\_vsi](#module\_f5\_vsi) | terraform-ibm-modules/landing-zone-vsi/ibm | 4.2.0 |
 | [key\_management](#module\_key\_management) | ./kms | n/a |
@@ -911,7 +911,7 @@ module "cluster_pattern" {
 |------|-------------|------|---------|:--------:|
| [appid](#input\_appid) | The App ID instance to be used for the teleport vsi deployments |
object({
name = optional(string)
resource_group = optional(string)
use_data = optional(bool)
keys = optional(list(string))
use_appid = bool
})
|
{
"use_appid": false
}
| no |
| [atracker](#input\_atracker) | atracker variables |
object({
resource_group = string
receive_global_events = bool
collector_bucket_name = string
add_route = bool
})
| n/a | yes |
-| [clusters](#input\_clusters) | A list describing clusters workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_worker_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_private_endpoint = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = optional(string) # The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
})
)
)
})
)
| n/a | yes |
+| [clusters](#input\_clusters) | A list describing cluster workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance. Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable the cluster's public endpoint, leaving only the private endpoint
disable_outbound_traffic_protection = optional(bool, false) # allow public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) # The operating system of the workers in the default worker pool. If no value is specified, the current default OS version is used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make Terraform wait until the KMS configuration is applied to the master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, set this to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster-related API calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Set to true to import the default worker pool as a separate resource, or set to false to manage it as part of the cluster resource; only choose false if you understand the implications.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate the default worker pool. Use this only when apply fails with an error that the default worker pool cannot be replaced. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, set this variable to true before making any change to the default worker pool that requires re-creating it.
labels = optional(map(string)) # A map of labels to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true', this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = optional(string) # The operating system of the workers in the worker pool. If no value is specified, the current default OS version is used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A map of labels to add to all the worker nodes in the worker pool.
})
)
)
})
)
| n/a | yes |
| [cos](#input\_cos) | Object describing the cloud object storage instance, buckets, and keys. Set `use_data` to false to create an instance |
list(
object({
name = string
use_data = optional(bool)
resource_group = string
plan = optional(string)
random_suffix = optional(bool) # Use a random suffix for COS instance
access_tags = optional(list(string), [])
skip_kms_s2s_auth_policy = optional(bool, false) # skip auth policy between this instance and the KMS instance; useful if existing resources are used
skip_flowlogs_s2s_auth_policy = optional(bool, false) # skip auth policy between the flow logs service and this instance; set to true if this policy is already in place on the account
skip_atracker_s2s_auth_policy = optional(bool, false) # skip auth policy between the Atracker service and this instance; set to true if this instance is already a recipient of Atracker
buckets = list(object({
name = string
storage_class = string
endpoint_type = string
force_delete = bool
single_site_location = optional(string)
region_location = optional(string)
cross_region_location = optional(string)
kms_key = optional(string)
access_tags = optional(list(string), [])
allowed_ip = optional(list(string), [])
hard_quota = optional(number)
archive_rule = optional(object({
days = number
enable = bool
rule_id = optional(string)
type = string
}))
expire_rule = optional(object({
days = optional(number)
date = optional(string)
enable = bool
expired_object_delete_marker = optional(string)
prefix = optional(string)
rule_id = optional(string)
}))
activity_tracking = optional(object({
activity_tracker_crn = string
read_data_events = bool
write_data_events = bool
management_events = bool
}))
metrics_monitoring = optional(object({
metrics_monitoring_crn = string
request_metrics_enabled = optional(bool)
usage_metrics_enabled = optional(bool)
}))
}))
keys = optional(
list(object({
name = string
role = string
enable_HMAC = bool
}))
)
})
)
| n/a | yes |
| [enable\_transit\_gateway](#input\_enable\_transit\_gateway) | Create transit gateway | `bool` | `true` | no |
| [existing\_vpc\_cbr\_zone\_id](#input\_existing\_vpc\_cbr\_zone\_id) | ID of the existing CBR (Context-based restrictions) network zone, with context set to the VPC. This zone is used in a CBR rule, which allows traffic to flow only from the landing zone VPCs to specific cloud services. | `string` | `null` | no |
diff --git a/cluster.tf b/cluster.tf
index 5a0a76cf..13672435 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -30,6 +30,7 @@ locals {
       cluster.name => {
         crn                 = cluster.crn
         id                  = cluster.id
+        cluster_name        = cluster.name
         resource_group_name = cluster.resource_group_name
         resource_group_id   = cluster.resource_group_id
         vpc_id              = cluster.vpc_id
@@ -45,6 +46,7 @@ locals {
       cluster.cluster_name => {
         crn               = cluster.cluster_crn
         id                = cluster.cluster_id
+        cluster_name      = cluster.cluster_name
         resource_group_id = cluster.resource_group_id
         vpc_id            = cluster.vpc_id
         region            = var.region
@@ -241,24 +243,26 @@ module "cluster" {
     for index, cluster in local.clusters_map : index => cluster
     if cluster.kube_type == "openshift"
   }
-  source            = "terraform-ibm-modules/base-ocp-vpc/ibm"
-  version           = "3.30.1"
-  resource_group_id = local.resource_groups[each.value.resource_group]
-  region            = var.region
-  cluster_name      = each.value.cluster_name
-  vpc_id            = each.value.vpc_id
-  ocp_entitlement   = each.value.entitlement
-  vpc_subnets       = each.value.vpc_subnets
-  access_tags       = each.value.access_tags
+  source             = "terraform-ibm-modules/base-ocp-vpc/ibm"
+  version            = "3.31.0"
+  resource_group_id  = local.resource_groups[each.value.resource_group]
+  region             = var.region
+  cluster_name       = each.value.cluster_name
+  vpc_id             = each.value.vpc_id
+  ocp_entitlement    = each.value.entitlement
+  vpc_subnets        = each.value.vpc_subnets
+  cluster_ready_when = var.wait_till
+  access_tags        = each.value.access_tags
   worker_pools = concat(
     [
       {
-        subnet_prefix    = each.value.subnet_names[0]
-        pool_name        = "default"
-        machine_type     = each.value.machine_type
-        workers_per_zone = each.value.workers_per_subnet
-        operating_system = each.value.operating_system
-        labels           = each.value.labels
+        subnet_prefix     = each.value.subnet_names[0]
+        pool_name         = "default"
+        machine_type      = each.value.machine_type
+        workers_per_zone  = each.value.workers_per_subnet
+        operating_system  = each.value.operating_system
+        labels            = each.value.labels
+        secondary_storage = each.value.secondary_storage
         boot_volume_encryption_kms_config = {
           crk             = each.value.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
           kms_instance_id = each.value.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
@@ -269,12 +273,13 @@ module "cluster" {
     each.value.worker != null ? [
       for pool in each.value.worker :
       {
-        vpc_subnets      = pool.vpc_subnets
-        pool_name        = pool.name
-        machine_type     = pool.flavor
-        workers_per_zone = pool.workers_per_subnet
-        operating_system = pool.operating_system
-        labels           = pool.labels
+        vpc_subnets       = pool.vpc_subnets
+        pool_name         = pool.name
+        machine_type      = pool.flavor
+        workers_per_zone  = pool.workers_per_subnet
+        operating_system  = pool.operating_system
+        labels            = pool.labels
+        secondary_storage = pool.secondary_storage
         boot_volume_encryption_kms_config = {
           crk             = pool.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
           kms_instance_id = pool.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
@@ -292,8 +297,8 @@ module "cluster" {
   use_existing_cos                = true
   existing_cos_id                 = each.value.cos_instance_crn
   disable_public_endpoint         = coalesce(each.value.disable_public_endpoint, true) # disable if not set or null
-  verify_worker_network_readiness = each.value.verify_worker_network_readiness
-  use_private_endpoint            = each.value.use_private_endpoint
+  verify_worker_network_readiness = each.value.verify_cluster_network_readiness
+  use_private_endpoint            = each.value.use_ibm_cloud_private_api_endpoints
   addons                          = each.value.addons
   manage_all_addons               = each.value.manage_all_addons
   disable_outbound_traffic_protection = each.value.disable_outbound_traffic_protection
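For orientation, here is a minimal sketch of a `clusters` entry that exercises the two renamed inputs above. The name, VPC, subnet, flavor, and resource group values are illustrative placeholders (borrowed from the override example below), not part of this change:

```hcl
clusters = [
  {
    name               = "workload-cluster"
    vpc_name           = "workload"
    subnet_names       = ["vsi-zone-1"]
    workers_per_subnet = 1
    machine_type       = "bx2.16x64"
    kube_type          = "openshift"
    resource_group     = "slz-workload-rg"
    cos_name           = "cos"

    # Renamed in this change (previously verify_worker_network_readiness
    # and use_private_endpoint); both still default to true.
    verify_cluster_network_readiness    = false
    use_ibm_cloud_private_api_endpoints = false
  }
]
```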
diff --git a/examples/override-example/override.json b/examples/override-example/override.json
index ef49db38..5865f58c 100644
--- a/examples/override-example/override.json
+++ b/examples/override-example/override.json
@@ -18,6 +18,8 @@
         "name": "workload-cluster",
         "secondary_storage": "300gb.5iops-tier",
         "resource_group": "slz-workload-rg",
+        "use_ibm_cloud_private_api_endpoints": false,
+        "verify_cluster_network_readiness": false,
         "kms_config": {
           "crk_name": "slz-key",
           "private_endpoint": true
diff --git a/outputs.tf b/outputs.tf
index f4d4647d..773a6bbd 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -87,7 +87,7 @@ output "workload_cluster_id" {

 output "workload_cluster_name" {
   description = "The name of the workload cluster. If the cluster name does not exactly match the prefix-workload-cluster pattern it will be null."
-  value       = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-workload-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-workload-cluster"].name : null
+  value       = lookup(local.cluster_data, "${var.prefix}-workload-cluster", null) != null ? local.cluster_data["${var.prefix}-workload-cluster"].cluster_name : null
 }

 output "management_cluster_id" {
@@ -97,7 +97,7 @@ output "management_cluster_id" {

 output "management_cluster_name" {
   description = "The name of the management cluster. If the cluster name does not exactly match the prefix-management-cluster pattern it will be null."
-  value       = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-management-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-management-cluster"].name : null
+  value       = lookup(local.cluster_data, "${var.prefix}-management-cluster", null) != null ? local.cluster_data["${var.prefix}-management-cluster"].cluster_name : null
 }

 output "cluster_data" {
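The output rewrite reads from the consolidated `local.cluster_data` map (which now carries `cluster_name`, per the cluster.tf change above) instead of the `ibm_container_vpc_cluster.cluster` resource map, so the lookup also covers clusters created through the base-ocp-vpc module. A functionally equivalent, more compact form using `try()` would also work; this is a sketch of an alternative, not what the PR uses:

```hcl
output "workload_cluster_name" {
  description = "The name of the workload cluster, or null if no cluster matches the prefix-workload-cluster pattern."
  # try() returns null when the key is absent, replacing the lookup()-then-index pattern.
  value       = try(local.cluster_data["${var.prefix}-workload-cluster"].cluster_name, null)
}
```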
diff --git a/patterns/roks-quickstart/main.tf b/patterns/roks-quickstart/main.tf
index 9f80b9b2..00a8b162 100644
--- a/patterns/roks-quickstart/main.tf
+++ b/patterns/roks-quickstart/main.tf
@@ -49,7 +49,8 @@ locals {
       "workers_per_subnet": 1,
       "entitlement": ${local.entitlement_val},
       "disable_public_endpoint": false,
-      "import_default_worker_pool_on_create" : false
+      "import_default_worker_pool_on_create" : false,
+      "use_ibm_cloud_private_api_endpoints": false
     }
   ],
   "cos": [
diff --git a/patterns/roks/main.tf b/patterns/roks/main.tf
index 96170203..209e4d7c 100644
--- a/patterns/roks/main.tf
+++ b/patterns/roks/main.tf
@@ -33,6 +33,7 @@ module "roks_landing_zone" {
   ssh_public_key        = var.ssh_public_key
   existing_ssh_key_name = var.existing_ssh_key_name
   entitlement           = var.entitlement
+  secondary_storage     = var.secondary_storage
   workers_per_zone      = var.workers_per_zone
   flavor                = var.flavor
   kube_version          = var.kube_version
diff --git a/patterns/roks/module/config.tf b/patterns/roks/module/config.tf
index bf05bddd..d30aa1a4 100644
--- a/patterns/roks/module/config.tf
+++ b/patterns/roks/module/config.tf
@@ -106,8 +106,8 @@ locals {
       cluster_force_delete_storage = var.cluster_force_delete_storage
       operating_system             = var.operating_system
       kms_wait_for_apply           = var.kms_wait_for_apply
-      use_private_endpoint            = var.use_ibm_cloud_private_api_endpoints
-      verify_worker_network_readiness = var.verify_cluster_network_readiness
+      use_ibm_cloud_private_api_endpoints = var.use_ibm_cloud_private_api_endpoints
+      verify_cluster_network_readiness    = var.verify_cluster_network_readiness
       import_default_worker_pool_on_create = false
       # By default, create dedicated pool for logging
       worker_pools = [
diff --git a/patterns/roks/variables.tf b/patterns/roks/variables.tf
index cfd67053..7f2a2cdf 100644
--- a/patterns/roks/variables.tf
+++ b/patterns/roks/variables.tf
@@ -247,6 +247,12 @@ variable "entitlement" {
   default     = null
 }

+variable "secondary_storage" {
+  description = "Optionally specify a secondary storage option to attach to all cluster worker nodes. This value is immutable and can't be changed after provisioning. Use the IBM Cloud CLI command ibmcloud ks flavors to find valid options, e.g. ibmcloud ks flavor get --flavor bx2.16x64 --provider vpc-gen2 --zone us-south-1."
+  type        = string
+  default     = null
+}
+
 variable "cluster_addons" {
   type = object({
     debug-tool = optional(string)
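As a usage sketch for the new input, a `terraform.tfvars` for the roks pattern might look like the following; all values are illustrative (the storage flavor is the one used in the override example), and `prefix`/`region` are existing pattern variables:

```hcl
prefix = "slz"
region = "us-south"

# Immutable after provisioning; list valid options for your zone with
# `ibmcloud ks flavors --zone us-south-1 --provider vpc-gen2`.
secondary_storage = "300gb.5iops-tier"
```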
diff --git a/variables.tf b/variables.tf
index 45039d7b..534a634f 100644
--- a/variables.tf
+++ b/variables.tf
@@ -856,8 +856,8 @@ variable "clusters" {
       cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
       operating_system = optional(string, null) # The operating system of the workers in the default worker pool. If no value is specified, the current default OS version is used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
       kms_wait_for_apply = optional(bool, true) # make Terraform wait until the KMS configuration is applied to the master and it is ready and deployed
-      verify_worker_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
-      use_private_endpoint = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
+      verify_cluster_network_readiness = optional(bool, true) # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, set this to false.
+      use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster-related API calls to use the IBM Cloud private endpoints.
       import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Set to true to import the default worker pool as a separate resource, or set to false to manage it as part of the cluster resource; only choose false if you understand the implications.
       allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate the default worker pool. Use this only when apply fails with an error that the default worker pool cannot be replaced. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, set this variable to true before making any change to the default worker pool that requires re-creating it.
       labels = optional(map(string)) # A map of labels to add to the default worker pool.
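Finally, since cluster.tf now forwards `secondary_storage` for additional worker pools as well (not just the default pool), a pool entry inside a `clusters` element can opt in independently. A hypothetical sketch; the pool name and values are illustrative only (the name echoes the dedicated logging pool mentioned in patterns/roks/module/config.tf):

```hcl
worker_pools = [
  {
    name               = "logging-worker-pool"
    vpc_name           = "workload"
    subnet_names       = ["vsi-zone-1"]
    workers_per_subnet = 1
    flavor             = "bx2.16x64"
    secondary_storage  = "300gb.5iops-tier" # immutable; must be set when the pool is created
  }
]
```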