
Commit

fix: added a fix where secondary storage was not being provisioned
- added a fix where `workload_cluster_name` and `management_cluster_name` outputs were not working (#889)
Aashiq-J authored Sep 26, 2024
1 parent 51b4d11 commit 6e70225
Showing 9 changed files with 47 additions and 32 deletions.
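As a minimal consumer-side sketch of the two fixes (the module source address, version constraint, and the omitted inputs below are assumptions, not taken from this repository's docs):

    # Hypothetical usage of the ROKS pattern; only the pieces touched by this commit are shown.
    module "landing_zone" {
      source  = "terraform-ibm-modules/landing-zone/ibm//patterns/roks" # assumed registry address
      version = ">= x.y.z"                                              # a release that contains this commit
      # ...required inputs such as prefix, region and ssh_public_key omitted...

      # Now propagated to the default and additional worker pools of every cluster:
      secondary_storage = "300gb.5iops-tier"
    }

    # Previously evaluated to null because the lookup keyed into the raw
    # ibm_container_vpc_cluster resources; it now resolves through local.cluster_data:
    output "workload_cluster_name" {
      value = module.landing_zone.workload_cluster_name
    }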
4 changes: 2 additions & 2 deletions README.md

Large diffs are not rendered by default.

51 changes: 28 additions & 23 deletions cluster.tf
@@ -30,6 +30,7 @@ locals {
cluster.name => {
crn = cluster.crn
id = cluster.id
+ cluster_name = cluster.name
resource_group_name = cluster.resource_group_name
resource_group_id = cluster.resource_group_id
vpc_id = cluster.vpc_id
@@ -45,6 +46,7 @@ locals {
cluster.cluster_name => {
crn = cluster.cluster_crn
id = cluster.cluster_id
+ cluster_name = cluster.cluster_name
resource_group_id = cluster.resource_group_id
vpc_id = cluster.vpc_id
region = var.region
@@ -241,24 +243,26 @@ module "cluster" {
for index, cluster in local.clusters_map : index => cluster
if cluster.kube_type == "openshift"
}
source = "terraform-ibm-modules/base-ocp-vpc/ibm"
version = "3.30.1"
resource_group_id = local.resource_groups[each.value.resource_group]
region = var.region
cluster_name = each.value.cluster_name
vpc_id = each.value.vpc_id
ocp_entitlement = each.value.entitlement
vpc_subnets = each.value.vpc_subnets
access_tags = each.value.access_tags
source = "terraform-ibm-modules/base-ocp-vpc/ibm"
version = "3.31.0"
resource_group_id = local.resource_groups[each.value.resource_group]
region = var.region
cluster_name = each.value.cluster_name
vpc_id = each.value.vpc_id
ocp_entitlement = each.value.entitlement
vpc_subnets = each.value.vpc_subnets
cluster_ready_when = var.wait_till
access_tags = each.value.access_tags
worker_pools = concat(
[
{
- subnet_prefix = each.value.subnet_names[0]
- pool_name = "default"
- machine_type = each.value.machine_type
- workers_per_zone = each.value.workers_per_subnet
- operating_system = each.value.operating_system
- labels = each.value.labels
+ subnet_prefix = each.value.subnet_names[0]
+ pool_name = "default"
+ machine_type = each.value.machine_type
+ workers_per_zone = each.value.workers_per_subnet
+ operating_system = each.value.operating_system
+ labels = each.value.labels
+ secondary_storage = each.value.secondary_storage
boot_volume_encryption_kms_config = {
crk = each.value.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
kms_instance_id = each.value.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
@@ -269,12 +273,13 @@
each.value.worker != null ? [
for pool in each.value.worker :
{
- vpc_subnets = pool.vpc_subnets
- pool_name = pool.name
- machine_type = pool.flavor
- workers_per_zone = pool.workers_per_subnet
- operating_system = pool.operating_system
- labels = pool.labels
+ vpc_subnets = pool.vpc_subnets
+ pool_name = pool.name
+ machine_type = pool.flavor
+ workers_per_zone = pool.workers_per_subnet
+ operating_system = pool.operating_system
+ labels = pool.labels
+ secondary_storage = pool.secondary_storage
boot_volume_encryption_kms_config = {
crk = pool.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
kms_instance_id = pool.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
@@ -292,8 +297,8 @@ module "cluster" {
use_existing_cos = true
existing_cos_id = each.value.cos_instance_crn
disable_public_endpoint = coalesce(each.value.disable_public_endpoint, true) # disable if not set or null
- verify_worker_network_readiness = each.value.verify_worker_network_readiness
- use_private_endpoint = each.value.use_private_endpoint
+ verify_worker_network_readiness = each.value.verify_cluster_network_readiness
+ use_private_endpoint = each.value.use_ibm_cloud_private_api_endpoints
addons = each.value.addons
manage_all_addons = each.value.manage_all_addons
disable_outbound_traffic_protection = each.value.disable_outbound_traffic_protection
2 changes: 2 additions & 0 deletions examples/override-example/override.json
@@ -18,6 +18,8 @@
"name": "workload-cluster",
"secondary_storage": "300gb.5iops-tier",
"resource_group": "slz-workload-rg",
"use_ibm_cloud_private_api_endpoints": false,
"verify_cluster_network_readiness": false,
"kms_config": {
"crk_name": "slz-key",
"private_endpoint": true
4 changes: 2 additions & 2 deletions outputs.tf
@@ -87,7 +87,7 @@ output "workload_cluster_id" {
output "workload_cluster_name" {
description = "The name of the workload cluster. If the cluster name does not exactly match the prefix-workload-cluster pattern it will be null."
- value = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-workload-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-workload-cluster"].name : null
+ value = lookup(local.cluster_data, "${var.prefix}-workload-cluster", null) != null ? local.cluster_data["${var.prefix}-workload-cluster"].cluster_name : null
}
output "management_cluster_id" {
@@ -97,7 +97,7 @@ output "management_cluster_id" {
output "management_cluster_name" {
description = "The name of the management cluster. If the cluster name does not exactly match the prefix-management-cluster pattern it will be null."
- value = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-management-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-management-cluster"].name : null
+ value = lookup(local.cluster_data, "${var.prefix}-management-cluster", null) != null ? local.cluster_data["${var.prefix}-management-cluster"].cluster_name : null
}
output "cluster_data" {
3 changes: 2 additions & 1 deletion patterns/roks-quickstart/main.tf
@@ -49,7 +49,8 @@ locals {
"workers_per_subnet": 1,
"entitlement": ${local.entitlement_val},
"disable_public_endpoint": false,
"import_default_worker_pool_on_create" : false
"import_default_worker_pool_on_create" : false,
"use_ibm_cloud_private_api_endpoints": false
}
],
"cos": [
1 change: 1 addition & 0 deletions patterns/roks/main.tf
@@ -33,6 +33,7 @@ module "roks_landing_zone" {
ssh_public_key = var.ssh_public_key
existing_ssh_key_name = var.existing_ssh_key_name
entitlement = var.entitlement
+ secondary_storage = var.secondary_storage
workers_per_zone = var.workers_per_zone
flavor = var.flavor
kube_version = var.kube_version
4 changes: 2 additions & 2 deletions patterns/roks/module/config.tf
@@ -106,8 +106,8 @@ locals {
cluster_force_delete_storage = var.cluster_force_delete_storage
operating_system = var.operating_system
kms_wait_for_apply = var.kms_wait_for_apply
- use_private_endpoint = var.use_ibm_cloud_private_api_endpoints
- verify_worker_network_readiness = var.verify_cluster_network_readiness
+ use_ibm_cloud_private_api_endpoints = var.use_ibm_cloud_private_api_endpoints
+ verify_cluster_network_readiness = var.verify_cluster_network_readiness
import_default_worker_pool_on_create = false
# By default, create dedicated pool for logging
worker_pools = [
6 changes: 6 additions & 0 deletions patterns/roks/variables.tf
@@ -247,6 +247,12 @@ variable "entitlement" {
default = null
}

variable "secondary_storage" {
description = "Optionally specify a secondary storage option to attach to all cluster worker nodes. This value is immutable and can't be changed after provisioning. Use the IBM Cloud CLI command ibmcloud ks flavors to find valid options, e.g ibmcloud ks flavor get --flavor bx2.16x64 --provider vpc-gen2 --zone us-south-1."
type = string
default = null
}

variable "cluster_addons" {
type = object({
debug-tool = optional(string)
4 changes: 2 additions & 2 deletions variables.tf
@@ -856,8 +856,8 @@ variable "clusters" {
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
- verify_worker_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
- use_private_endpoint = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
+ verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
+ use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
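For reference, a hypothetical cluster entry in the root module's `clusters` variable would now use the renamed keys (the name, resource group, and storage values mirror the override example above; other required attributes are omitted):

    clusters = [
      {
        name              = "workload-cluster"
        resource_group    = "slz-workload-rg"
        secondary_storage = "300gb.5iops-tier"
        # ...other required cluster attributes omitted...

        # Renamed in this commit (previously use_private_endpoint and
        # verify_worker_network_readiness):
        use_ibm_cloud_private_api_endpoints = false
        verify_cluster_network_readiness    = false
      }
    ]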

