From 801fc9834a9d82df4893058675be697e29f1442e Mon Sep 17 00:00:00 2001
From: Bikash Roy Choudhury
Date: Wed, 20 Jan 2021 11:24:09 -0800
Subject: [PATCH 01/12] add install_portworx

Signed-off-by: Bikash Roy Choudhury
---
 main.tf                  | 40 +++++++++++++++++++++++++++++++++++++++-
 templates/device-name.sh | 38 ++++++++++++++++++++++++++++++++++++++
 variables.tf             |  5 +++++
 3 files changed, 82 insertions(+), 1 deletion(-)
 create mode 100755 templates/device-name.sh

diff --git a/main.tf b/main.tf
index 6fbe7f1..39fcf23 100644
--- a/main.tf
+++ b/main.tf
@@ -394,4 +394,42 @@ resource "null_resource" "install_kube_vip_daemonset" {
     ]
   }
 }
-
+resource "null_resource" "worker_disks" {
+  count = var.worker_count
+  depends_on = [
+    null_resource.add_kubelet_flags_to_workers
+  ]
+  connection {
+    type        = "ssh"
+    user        = "root"
+    private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
+    host        = element(metal_device.worker_nodes.*.access_public_ipv4, count.index)
+  }
+  provisioner "file" {
+    source      = "${path.module}/templates/device-name.sh"
+    destination = "/root/bootstrap/device-name.sh"
+  }
+  provisioner "remote-exec" {
+    inline = [
+      "bash /root/bootstrap/device-name.sh"
+    ]
+  }
+}
+resource "null_resource" "install_portworx" {
+  depends_on = [
+    null_resource.worker_disks
+  ]
+  connection {
+    type        = "ssh"
+    user        = "root"
+    private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
+    host        = metal_device.control_plane.0.access_public_ipv4
+  }
+  provisioner "remote-exec" {
+    inline = [
+      "VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')",
+      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver=$VER&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${local.cluster_name}&stork=true&st=k8s",
+      "kubectl --kubeconfig /root/baremetal/bmctl-workspace/${local.cluster_name}/${local.cluster_name}-kubeconfig apply -f $URL"
+    ]
+  }
+}
diff --git a/templates/device-name.sh b/templates/device-name.sh
new file mode 100755
index 0000000..dfa59be
--- /dev/null
+++ b/templates/device-name.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+createlvm=true
+deletelvm=false
+dsksize="0"
+disks=$(lsblk -f -d -b -n -oNAME,SIZE,FSTYPE| egrep -v "xfs|ext3|ext4|btrfs|sr0")
+while IFS= read -r line
+do
+tmpsize=$(echo $line|awk {'print $2'})
+tmpname=$(echo $line|awk {'print $1'})
+  if [[ "$dsksize" = "0" ]]
+  then
+    dsksize=$tmpsize
+    dskname=$tmpname
+  elif [[ "$dsksize" -gt "$tmpsize" ]]
+  then
+    dsksize=$tmpsize
+    dskname=$tmpname
+  fi
+done <<< "$disks"
+echo "Will use $dskname for Portworx KVDB LVM by running the following commands(will only run if createlvm=true)"
+dev="/dev/$dskname"
+echo "pvcreate $dev"
+echo "vgcreate pwx_vg $dev"
+echo "lvcreate -l 100%FREE -n pwxkvdb pwx_vg"
+if $createlvm
+then
+  pvcreate $dev
+  vgcreate pwx_vg $dev
+  lvcreate -l 100%FREE -n pwxkvdb pwx_vg
+fi
+if $deletelvm
+  then
+  lvremove /dev/pwx_vg/pwxkvdb
+  vgremove pwx_vg
+  pvremove /dev/sdf
+  wipefs -a /dev/sdf
+fi
+
diff --git a/variables.tf b/variables.tf
index 4fb6c0f..77db75b 100644
--- a/variables.tf
+++ b/variables.tf
@@ -131,3 +131,8 @@ variable "kube_vip_daemonset_url" {
   description = "The deploy url for the Kube-VIP Daemonset"
 }
 
+variable "portworx_version" {
+  type        = string
+  description = "Portworx Version to install"
+  default     = "2.6"
+}
\ No newline at end of file

From 5c4de203f9a2c1724935c8fdab374d8d9463f41a Mon Sep 17 00:00:00 2001
From: Bikash Roy Choudhury
Date: Wed, 27 Jan 2021 10:08:01 -0800
Subject: [PATCH 02/12] install prereqs for worker disk management

Signed-off-by: Bikash Roy Choudhury
---
 main.tf                                      | 51 ++++++++++++++++++--
 templates/{device-name.sh => device_name.sh} | 13 ++---
 templates/pre_reqs.sh                        |  4 +-
 templates/pre_reqs_worker.sh                 | 27 +++++++++++
 4 files changed, 80 insertions(+), 15 deletions(-)
 rename templates/{device-name.sh => device_name.sh} (90%)
 create mode 100644 templates/pre_reqs_worker.sh

diff --git a/main.tf b/main.tf
index 39fcf23..ab15011 100644
--- a/main.tf
+++ b/main.tf
@@ -140,6 +140,13 @@ data "template_file" "deploy_anthos_cluster" {
   }
 }
 
+data "template_file" "pre_reqs_worker" {
+  template = file("templates/pre_reqs_worker.sh")
+  vars = {
+    operating_system = var.operating_system
+  }
+}
+
 resource "null_resource" "prep_anthos_cluster" {
   depends_on = [
     google_project_service.enabled-apis
@@ -394,29 +401,62 @@ resource "null_resource" "install_kube_vip_daemonset" {
     ]
   }
 }
+
+resource "null_resource" "worker_pre_reqs" {
+  count = var.worker_count
+
+  connection {
+    type        = "ssh"
+    user        = "root"
+    private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
+    host        = element(metal_device.worker_nodes.*.access_public_ipv4, count.index)
+  }
+
+  provisioner "remote-exec" {
+    inline = ["mkdir -p /root/bootstrap/"]
+  }
+
+  # Unless /root/bootstrap/ is created in advance, this will be
+  # copied to /root/bootstrap (file)
+  # https://github.com/hashicorp/terraform/issues/16330
+  provisioner "file" {
+    content     = data.template_file.pre_reqs_worker.rendered
+    destination = "/root/bootstrap/pre_reqs_worker.sh"
+  }
+
+  provisioner "remote-exec" {
+    inline = ["bash /root/bootstrap/pre_reqs_worker.sh"]
+  }
+}
+
 resource "null_resource" "worker_disks" {
   count = var.worker_count
   depends_on = [
-    null_resource.add_kubelet_flags_to_workers
+    null_resource.worker_pre_reqs
   ]
+
   connection {
     type        = "ssh"
     user        = "root"
     private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
     host        = element(metal_device.worker_nodes.*.access_public_ipv4, count.index)
   }
+
   provisioner "file" {
-    source      = "${path.module}/templates/device-name.sh"
-    destination = "/root/bootstrap/device-name.sh"
+    source      = "${path.module}/templates/device_name.sh"
+    destination = "/root/bootstrap/device_name.sh"
+
   }
   provisioner "remote-exec" {
     inline = [
-      "bash /root/bootstrap/device-name.sh"
+      "bash /root/bootstrap/device_name.sh"
     ]
   }
 }
+
 resource "null_resource" "install_portworx" {
   depends_on = [
+    null_resource.add_kubelet_flags_to_workers,
     null_resource.worker_disks
   ]
   connection {
     type        = "ssh"
     user        = "root"
     private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
     host        = metal_device.control_plane.0.access_public_ipv4
   }
+
   provisioner "remote-exec" {
     inline = [
       "VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')",
-      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver=$VER&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${local.cluster_name}&stork=true&st=k8s",
+      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${local.cluster_name}&stork=true&st=k8s'",
       "kubectl --kubeconfig /root/baremetal/bmctl-workspace/${local.cluster_name}/${local.cluster_name}-kubeconfig apply -f $URL"
     ]
   }
 }
diff --git a/templates/device-name.sh b/templates/device_name.sh
similarity index 90%
rename from templates/device-name.sh
rename to templates/device_name.sh
index dfa59be..bd293f0 100755
--- a/templates/device-name.sh
+++ b/templates/device_name.sh
@@ -22,17 +22,14 @@ dev="/dev/$dskname"
 echo "pvcreate $dev"
 echo "vgcreate pwx_vg $dev"
 echo "lvcreate -l 100%FREE -n pwxkvdb pwx_vg"
-if $createlvm
-then
+if $createlvm; then
   pvcreate $dev
   vgcreate pwx_vg $dev
   lvcreate -l 100%FREE -n pwxkvdb pwx_vg
 fi
-if $deletelvm
-  then
+if $deletelvm; then
   lvremove /dev/pwx_vg/pwxkvdb
   vgremove pwx_vg
-  pvremove /dev/sdf
-  wipefs -a /dev/sdf
-fi
-
+  pvremove $dev
+  wipefs -a $dev
+fi
\ No newline at end of file
diff --git a/templates/pre_reqs.sh b/templates/pre_reqs.sh
index 3844b5b..6ae70a0 100644
--- a/templates/pre_reqs.sh
+++ b/templates/pre_reqs.sh
@@ -54,7 +54,7 @@ EOM
 
 
 function unknown_os {
-    echo "I don't konw who I am" > /root/who_am_i.txt
+    echo "I don't know who I am" > /root/who_am_i.txt
 }
 
 if [ "$${OS:0:6}" = "centos" ] || [ "$${OS:0:4}" = "rhel" ]; then
@@ -106,4 +106,4 @@ sed -i "s|# ingressVIP: 10.0.0.2|ingressVIP: $INGRESS_VIP|g" $cluster_config
 sed -i "s| - address: |$cp_string|g" $cluster_config
 sed -i "s| - address: |$worker_string|g" $cluster_config
 sed -i "s|- 10.96.0.0/12|- 172.31.0.0/16|g" $cluster_config
-sed -i "s|- 192.168.0.0/16|- 172.30.0.0/16|g" $cluster_config
+sed -i "s|- 192.168.0.0/16|- 172.30.0.0/16|g" $cluster_config
\ No newline at end of file
diff --git a/templates/pre_reqs_worker.sh b/templates/pre_reqs_worker.sh
new file mode 100644
index 0000000..df630ef
--- /dev/null
+++ b/templates/pre_reqs_worker.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+OS='${operating_system}'
+
+function ubuntu_pre_reqs {
+    # Install Docker
+    export DEBIAN_FRONTEND=noninteractive
+    sudo apt-get update -qy
+    sudo apt-get install -qy lvm2
+}
+
+
+function rhel_pre_reqs {
+    sudo dnf install lvm2 -y
+}
+
+
+function unknown_os {
+    echo "I don't know who I am" > /root/who_am_i.txt
+}
+
+if [ "$${OS:0:6}" = "centos" ] || [ "$${OS:0:4}" = "rhel" ]; then
+    rhel_pre_reqs
+elif [ "$${OS:0:6}" = "ubuntu" ]; then
+    ubuntu_pre_reqs
+else
+    unknown_os
+fi

From 9662adb23cc3df9008490ec5361e64752edc8f72 Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 28 Jan 2021 12:15:48 -0500
Subject: [PATCH 03/12] fix device_name.sh used disk detection when disks are partitioned

Signed-off-by: Marques Johansson
---
 templates/device_name.sh | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/templates/device_name.sh b/templates/device_name.sh
index bd293f0..3c58cd7 100755
--- a/templates/device_name.sh
+++ b/templates/device_name.sh
@@ -2,21 +2,18 @@
 createlvm=true
 deletelvm=false
 dsksize="0"
-disks=$(lsblk -f -d -b -n -oNAME,SIZE,FSTYPE| egrep -v "xfs|ext3|ext4|btrfs|sr0")
-while IFS= read -r line
-do
-tmpsize=$(echo $line|awk {'print $2'})
-tmpname=$(echo $line|awk {'print $1'})
-  if [[ "$dsksize" = "0" ]]
-  then
-    dsksize=$tmpsize
-    dskname=$tmpname
-  elif [[ "$dsksize" -gt "$tmpsize" ]]
-  then
-    dsksize=$tmpsize
-    dskname=$tmpname
-  fi
-done <<< "$disks"
+
+function largest_free_disk {
+  lsblk -f -d -b -n -oNAME,SIZE | while read disk size; do
+    # ignore disks with filesystems
+    if ! lsblk -f -b -n -oNAME,SIZE,FSTYPE -i /dev/$disk | egrep "xfs|ext3|ext4|btrfs|sr0" >/dev/null; then
+      echo -en "$disk $size"
+    fi
+  done | sort -n -k2 | head -n1 | cut -f1 -d" "
+}
+
+
+dskname=$(largest_free_disk)
 echo "Will use $dskname for Portworx KVDB LVM by running the following commands(will only run if createlvm=true)"
 dev="/dev/$dskname"
 echo "pvcreate $dev"
 echo "vgcreate pwx_vg $dev"
 echo "lvcreate -l 100%FREE -n pwxkvdb pwx_vg"
@@ -32,4 +29,4 @@ if $deletelvm; then
   vgremove pwx_vg
   pvremove $dev
   wipefs -a $dev
-fi
\ No newline at end of file
+fi

From ae7c04e92439574b7968f843aa10ef73cad1ff76 Mon Sep 17 00:00:00 2001
From: Bikash Roy Choudhury
Date: Wed, 3 Feb 2021 10:36:18 -0800
Subject: [PATCH 04/12] set portworx image pullpolicy to 'IfNotPresent'

Signed-off-by: Bikash Roy Choudhury
---
 main.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.tf b/main.tf
index ab15011..d19ba49 100644
--- a/main.tf
+++ b/main.tf
@@ -469,7 +469,7 @@ resource "null_resource" "install_portworx" {
   provisioner "remote-exec" {
     inline = [
       "VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')",
-      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${local.cluster_name}&stork=true&st=k8s'",
+      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${local.cluster_name}&stork=true&st=k8s&pp=IfNotPresent'",
       "kubectl --kubeconfig /root/baremetal/bmctl-workspace/${local.cluster_name}/${local.cluster_name}-kubeconfig apply -f $URL"
     ]
   }
 }

From bcb9093a0cb93b3eaa5f57ed0994a81b9183f5ee Mon Sep 17 00:00:00 2001
From: Bikash Roy Choudhury
Date: Wed, 3 Feb 2021 10:37:36 -0800
Subject: [PATCH 05/12] Add portworx details in the README.md

Signed-off-by: Bikash Roy Choudhury
---
 README.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/README.md b/README.md
index f1e542b..bae51e7 100644
--- a/README.md
+++ b/README.md
@@ -208,3 +208,12 @@ metal_project_id = "YOUR-PROJECT-ID"
 ## Google Anthos Documentation
 
 Once Anthos is deployed on Equinix Metal, all of the documentation for using Google Anthos is located on the [Anthos Documentation Page](https://cloud.google.com/anthos/docs).
+## Pure Storage Portworx installation
+
+Portworx by Pure Storage is a distributed and high available data storage that takes advantage of the local and attached storage provided on each Equinix Metal device. Portworx includes a [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) driver.
+
+Portworx differentiates between device disks using priority labels that can be applied to create distinct `StorageClasses`. See [Portworx: Dynamic Provisioning](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-pvcs/dynamic-provisioning/) for more details.
+
+Login to any one of the Anthos cluster nodes and run `pxctl status` to check the portworx state or can run `kubectl get pods -lapp=portworx -n kube-system` to check if the portworx pods are running. Portworx logs can be viewed by running: `kubectl logs -lapp=portworx -n kube-system --all-containers`.
+
+By default, Portworx 2.6 is installed in the Anthos Cluster. The version of Portworx can be changed using the `portworx_version` variable.
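At this point in the series the Portworx install is driven entirely by the `install_portworx` provisioner above. As a rough, non-authoritative sketch of what that remote-exec amounts to on the first control plane node — the parameters mirror the provisioner, the cluster name `my-cluster` is a placeholder standing in for `local.cluster_name`, and `2.6` is the default `portworx_version`:

```sh
# Illustrative only; values come from the Terraform variables above.
VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')
URL="https://install.portworx.com/2.6?mc=false&kbver=${VER}&b=true&j=auto&kd=%2Fdev%2Fpwx_vg%2Fpwxkvdb&c=my-cluster&stork=true&st=k8s&pp=IfNotPresent"
kubectl --kubeconfig /root/baremetal/bmctl-workspace/my-cluster/my-cluster-kubeconfig apply -f "$URL"
```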
From 3fa26e57f5bb741a9afcbd06975348b33ab270b6 Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 11 Feb 2021 00:18:05 -0500
Subject: [PATCH 06/12] modularize storage providers (Portworx)

Signed-off-by: Marques Johansson
---
 .terraform.lock.hcl                         |  0
 README.md                                   | 15 ++++--
 main.tf                                     | 50 +++++--------------
 modules/portworx/README.md                  |  9 ++++
 .../portworx/assets/portworx_disk_setup.sh  |  0
 modules/portworx/main.tf                    | 46 +++++++++++++++++
 modules/portworx/variables.tf               | 22 ++++++++
 modules/storage/main.tf                     |  7 +++
 modules/storage/variables.tf                | 27 ++++++++++
 variables.tf                                | 14 ++++--
 10 files changed, 143 insertions(+), 47 deletions(-)
 mode change 100755 => 100644 .terraform.lock.hcl
 create mode 100644 modules/portworx/README.md
 rename templates/device_name.sh => modules/portworx/assets/portworx_disk_setup.sh (100%)
 create mode 100644 modules/portworx/main.tf
 create mode 100644 modules/portworx/variables.tf
 create mode 100644 modules/storage/main.tf
 create mode 100644 modules/storage/variables.tf

diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
old mode 100755
new mode 100644
diff --git a/README.md b/README.md
index bae51e7..0fdf595 100644
--- a/README.md
+++ b/README.md
@@ -208,12 +208,17 @@ metal_project_id = "YOUR-PROJECT-ID"
 ## Google Anthos Documentation
 
 Once Anthos is deployed on Equinix Metal, all of the documentation for using Google Anthos is located on the [Anthos Documentation Page](https://cloud.google.com/anthos/docs).
-## Pure Storage Portworx installation
+## Storage Providers
 
-Portworx by Pure Storage is a distributed and high available data storage that takes advantage of the local and attached storage provided on each Equinix Metal device. Portworx includes a [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) driver.
+Storage providers are made available through optional storage modules. These storage providers include CSI (Container Native Storage) `StorageClasses`. To enable a storage module, set the `storage_module` variable to the name of the name of the included module.
 
-Portworx differentiates between device disks using priority labels that can be applied to create distinct `StorageClasses`. See [Portworx: Dynamic Provisioning](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-pvcs/dynamic-provisioning/) for more details.
+* `portworx`: To enable the [Pure Storage Portworx installation](modules/portworx/README), use the following settings in `terraform.tfvars`:
 
-Login to any one of the Anthos cluster nodes and run `pxctl status` to check the portworx state or can run `kubectl get pods -lapp=portworx -n kube-system` to check if the portworx pods are running. Portworx logs can be viewed by running: `kubectl logs -lapp=portworx -n kube-system --all-containers`.
+  ```hcl
+  storage_module = "portworx"
+  storage_options = {
+    portworx_version = "2.6"
+  }
+  ```
 
-By default, Portworx 2.6 is installed in the Anthos Cluster. The version of Portworx can be changed using the `portworx_version` variable.
+  When enabled, Portworx will manage the local disks attached to each worker node, providing a fault tolerant distributed storage solution.
diff --git a/main.tf b/main.tf
index d19ba49..b91beca 100644
--- a/main.tf
+++ b/main.tf
@@ -429,48 +429,22 @@ resource "null_resource" "worker_pre_reqs" {
   }
 }
 
-resource "null_resource" "worker_disks" {
-  count = var.worker_count
-  depends_on = [
-    null_resource.worker_pre_reqs
-  ]
+module "storage" {
+  source = "./modules/storage"
 
-  connection {
-    type        = "ssh"
-    user        = "root"
-    private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
-    host        = element(metal_device.worker_nodes.*.access_public_ipv4, count.index)
-  }
-
-  provisioner "file" {
-    source      = "${path.module}/templates/device_name.sh"
-    destination = "/root/bootstrap/device_name.sh"
-
-  }
-  provisioner "remote-exec" {
-    inline = [
-      "bash /root/bootstrap/device_name.sh"
-    ]
-  }
-}
-
-resource "null_resource" "install_portworx" {
   depends_on = [
     null_resource.add_kubelet_flags_to_workers,
-    null_resource.worker_disks
   ]
-  connection {
-    type        = "ssh"
-    user        = "root"
-    private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem)
-    host        = metal_device.control_plane.0.access_public_ipv4
-  }
-  provisioner "remote-exec" {
-    inline = [
-      "VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')",
-      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${local.cluster_name}&stork=true&st=k8s&pp=IfNotPresent'",
-      "kubectl --kubeconfig /root/baremetal/bmctl-workspace/${local.cluster_name}/${local.cluster_name}-kubeconfig apply -f $URL"
-    ]
+  ssh = {
+    host             = metal_device.control_plane.0.access_public_ipv4
+    private_key      = chomp(tls_private_key.ssh_key_pair.private_key_pem)
+    user             = "root"
+    kubeconfig       = "/root/baremetal/bmctl-workspace/${local.cluster_name}/${local.cluster_name}-kubeconfig"
+    worker_addresses = metal_device.worker_nodes.*.access_public_ipv4
   }
+
+  cluster_name    = local.cluster_name
+  storage_module  = var.storage_module
+  storage_options = var.storage_options
 }
diff --git a/modules/portworx/README.md b/modules/portworx/README.md
new file mode 100644
index 0000000..5fdf23b
--- /dev/null
+++ b/modules/portworx/README.md
@@ -0,0 +1,9 @@
+# Pure Storage Portworx installation
+
+Portworx by Pure Storage is a distributed and high available data storage that takes advantage of the local and attached storage provided on each Equinix Metal device. Portworx includes a [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) driver.
+
+Portworx differentiates between device disks using priority labels that can be applied to create distinct `StorageClasses`. See [Portworx: Dynamic Provisioning](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-pvcs/dynamic-provisioning/) for more details.
+
+Login to any one of the Anthos cluster nodes and run `pxctl status` to check the portworx state or run `kubectl get pods -lapp=portworx -n kube-system` to check if the portworx pods are running. Portworx logs can be viewed by running: `kubectl logs -lapp=portworx -n kube-system --all-containers`.
+
+By default, Portworx 2.6 is installed in the Anthos Cluster. The version of Portworx can be changed using the `portworx_version` variable.
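The module README added above names the health checks (`pxctl status` plus the `kube-system` pod and log queries) but not where the kubectl variants run from. A minimal sketch, assuming the bmctl workspace kubeconfig path used elsewhere in this series and a placeholder cluster name:

```sh
# Run from the first control plane node; <cluster_name> is a placeholder.
export KUBECONFIG=/root/baremetal/bmctl-workspace/<cluster_name>/<cluster_name>-kubeconfig
kubectl get pods -lapp=portworx -n kube-system
kubectl logs -lapp=portworx -n kube-system --all-containers
```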
diff --git a/templates/device_name.sh b/modules/portworx/assets/portworx_disk_setup.sh
similarity index 100%
rename from templates/device_name.sh
rename to modules/portworx/assets/portworx_disk_setup.sh
diff --git a/modules/portworx/main.tf b/modules/portworx/main.tf
new file mode 100644
index 0000000..703a985
--- /dev/null
+++ b/modules/portworx/main.tf
@@ -0,0 +1,46 @@
+resource "null_resource" "worker_disks" {
+  for_each = toset(var.ssh.worker_addresses)
+
+  connection {
+    type        = "ssh"
+    user        = var.ssh.user
+    private_key = var.ssh.private_key
+    host        = each.key
+  }
+
+  provisioner "remote-exec" {
+    inline = ["mkdir -p /root/bootstrap/"]
+  }
+
+  provisioner "file" {
+    source      = "${path.module}/assets/portworx_disk_setup.sh"
+    destination = "/root/bootstrap/portworx_disk_setup.sh"
+
+  }
+  provisioner "remote-exec" {
+    inline = [
+      "bash /root/bootstrap/portworx_disk_setup.sh"
+    ]
+  }
+}
+
+
+resource "null_resource" "install_portworx" {
+  depends_on = [
+    null_resource.worker_disks
+  ]
+  connection {
+    type        = "ssh"
+    user        = var.ssh.user
+    private_key = var.ssh.private_key
+    host        = var.ssh.host
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')",
+      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${var.cluster_name}&stork=true&st=k8s&pp=IfNotPresent'",
+      "kubectl --kubeconfig ${var.ssh.kubeconfig} apply -f $URL"
+    ]
+  }
+}
diff --git a/modules/portworx/variables.tf b/modules/portworx/variables.tf
new file mode 100644
index 0000000..85a44f8
--- /dev/null
+++ b/modules/portworx/variables.tf
@@ -0,0 +1,22 @@
+variable "portworx_version" {
+  type        = string
+  description = "The version of Portworx to install"
+  default     = "2.6"
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "Name of the cluster"
+}
+
+variable "ssh" {
+  description = "SSH options for the storage provider including SSH details to access the control plane including the remote path to the kubeconfig file and a list of worker addresses"
+
+  type = object({
+    host             = string
+    private_key      = string
+    user             = string
+    kubeconfig       = string
+    worker_addresses = list(string)
+  })
+}
diff --git a/modules/storage/main.tf b/modules/storage/main.tf
new file mode 100644
index 0000000..ad1c112
--- /dev/null
+++ b/modules/storage/main.tf
@@ -0,0 +1,7 @@
+module "portworx" {
+  count            = var.storage_module == "portworx" ? 1 : 0
+  source           = "../portworx"
+  portworx_version = try(var.storage_options.version, null)
+  ssh              = var.ssh
+  cluster_name     = var.cluster_name
+}
diff --git a/modules/storage/variables.tf b/modules/storage/variables.tf
new file mode 100644
index 0000000..9a8241e
--- /dev/null
+++ b/modules/storage/variables.tf
@@ -0,0 +1,27 @@
+variable "storage_module" {
+  description = "The name of the Storage provider module (ex. \"portworx\")"
+  default     = ""
+}
+
+variable "storage_options" {
+  type        = any
+  description = "Options for the Storage provider module"
+  default     = {}
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "Name of the cluster"
+}
+
+variable "ssh" {
+  description = "SSH options for the storage provider including SSH details to access the control plane including the remote path to the kubeconfig file and a list of worker addresses."
+
+  type = object({
+    host             = string
+    private_key      = string
+    user             = string
+    kubeconfig       = string
+    worker_addresses = list(string)
+  })
+}
diff --git a/variables.tf b/variables.tf
index 77db75b..83adc42 100644
--- a/variables.tf
+++ b/variables.tf
@@ -131,8 +131,14 @@ variable "kube_vip_daemonset_url" {
   description = "The deploy url for the Kube-VIP Daemonset"
 }
 
-variable "portworx_version" {
-  type        = string
-  description = "Portworx Version to install"
-  default     = "2.6"
-}
\ No newline at end of file
+variable "storage_module" {
+  type        = string
+  description = "The name of the storage module to enable. If set, use storage_options."
+  default     = ""
+}
+
+variable "storage_options" {
+  type        = any
+  description = "Options specific to the storage module. Check the documentation for the storage module for details."
+  default     = null
+}

From ede379eccc3cadccc8e2e84960a7fed7ab4a6c75 Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 11 Feb 2021 08:30:25 -0500
Subject: [PATCH 07/12] replace portworx worker_disks for_each with count/index

Signed-off-by: Marques Johansson
---
 modules/portworx/main.tf | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/portworx/main.tf b/modules/portworx/main.tf
index 703a985..72ce073 100644
--- a/modules/portworx/main.tf
+++ b/modules/portworx/main.tf
@@ -1,11 +1,11 @@
 resource "null_resource" "worker_disks" {
-  for_each = toset(var.ssh.worker_addresses)
+  count = length(var.ssh.worker_addresses)
 
   connection {
     type        = "ssh"
     user        = var.ssh.user
     private_key = var.ssh.private_key
-    host        = each.key
+    host        = var.ssh.worker_addresses[count.index]
   }
 
   provisioner "remote-exec" {

From aaa8e052683a565965d67e7ca1a77b00c8b1d38c Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 11 Feb 2021 09:35:15 -0500
Subject: [PATCH 08/12] use latest_portworx_version when portworx_version is null or empty

Signed-off-by: Marques Johansson
---
 modules/portworx/main.tf      | 6 +++++-
 modules/portworx/variables.tf | 6 ++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/modules/portworx/main.tf b/modules/portworx/main.tf
index 72ce073..f79b217 100644
--- a/modules/portworx/main.tf
+++ b/modules/portworx/main.tf
@@ -24,6 +24,10 @@ resource "null_resource" "worker_disks" {
   }
 }
 
+locals {
+  portworx_version = try(length(var.portworx_version) ? var.portworx_version
+  : var.latest_portworx_version, var.latest_portworx_version)
+}
 
 resource "null_resource" "install_portworx" {
   depends_on = [
@@ -39,7 +43,7 @@ resource "null_resource" "install_portworx" {
   provisioner "remote-exec" {
     inline = [
       "VER=$(kubectl version --short | awk -Fv '/Server Version: / {print $3}')",
-      "URL='https://install.portworx.com/${var.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${var.cluster_name}&stork=true&st=k8s&pp=IfNotPresent'",
+      "URL='https://install.portworx.com/${local.portworx_version}?mc=false&kbver='$VER'&b=true&j=auto&kd=${urlencode("/dev/pwx_vg/pwxkvdb")}&c=${var.cluster_name}&stork=true&st=k8s&pp=IfNotPresent'",
       "kubectl --kubeconfig ${var.ssh.kubeconfig} apply -f $URL"
     ]
   }
diff --git a/modules/portworx/variables.tf b/modules/portworx/variables.tf
index 85a44f8..65e7c40 100644
--- a/modules/portworx/variables.tf
+++ b/modules/portworx/variables.tf
@@ -1,4 +1,10 @@
 variable "portworx_version" {
+  type        = string
+  description = "The version of Portworx to install (latest_portworx_version will be used if not set)"
+  default     = ""
+}
+
+variable "latest_portworx_version" {
   type        = string
   description = "The version of Portworx to install"
   default     = "2.6"

From 1fc26be9c20d3d2f70aa25bd98bf1bf66548699c Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 11 Feb 2021 09:54:56 -0500
Subject: [PATCH 09/12] add README note warning about changing storage providers

Signed-off-by: Marques Johansson
---
 README.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0fdf595..1ffe7e1 100644
--- a/README.md
+++ b/README.md
@@ -210,7 +210,11 @@ Once Anthos is deployed on Equinix Metal, all of the documentation for using Goo
 
 ## Storage Providers
 
-Storage providers are made available through optional storage modules. These storage providers include CSI (Container Native Storage) `StorageClasses`. To enable a storage module, set the `storage_module` variable to the name of the name of the included module.
+Storage providers are made available through optional storage modules. These storage providers include CSI (Container Native Storage) `StorageClasses`.
+
+Changing or disabling a storage provider is not currently supported.
+
+To enable a storage module, set the `storage_module` variable to the name of the name of the included module.

From 5ac069f9bfb8f2367520216b68cf64653aec037e Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 11 Feb 2021 15:09:34 -0500
Subject: [PATCH 10/12] add portworx_license option

Signed-off-by: Marques Johansson
---
 README.md                     |  7 +++++--
 modules/portworx/README.md    | 27 +++++++++++++++++++++++++--
 modules/portworx/main.tf      | 20 ++++++++++++++++++++
 modules/portworx/variables.tf |  6 ++++++
 modules/storage/main.tf       |  3 ++-
 modules/storage/variables.tf  |  2 +-
 6 files changed, 59 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 1ffe7e1..19aee5f 100644
--- a/README.md
+++ b/README.md
@@ -216,13 +216,16 @@ Changing or disabling a storage provider is not currently supported.
 
 To enable a storage module, set the `storage_module` variable to the name of the name of the included module.
 
-* `portworx`: To enable the [Pure Storage Portworx installation](modules/portworx/README), use the following settings in `terraform.tfvars`:
+* `portworx`: To enable the Pure Storage Portworx installation, use the following settings in `terraform.tfvars`:
 
   ```hcl
   storage_module = "portworx"
   storage_options = {
-    portworx_version = "2.6"
+    # portworx_version = "2.6"
+    # portworx_license = "c0ffe-fefe-activation-123"
   }
   ```
 
   When enabled, Portworx will manage the local disks attached to each worker node, providing a fault tolerant distributed storage solution.
+
+  [Read more about the Portworx module](modules/portworx/README).
diff --git a/modules/portworx/README.md b/modules/portworx/README.md
index 5fdf23b..d662089 100644
--- a/modules/portworx/README.md
+++ b/modules/portworx/README.md
@@ -4,6 +4,29 @@ Portworx by Pure Storage is a distributed and high available data storage that t
 
 Portworx differentiates between device disks using priority labels that can be applied to create distinct `StorageClasses`. See [Portworx: Dynamic Provisioning](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-pvcs/dynamic-provisioning/) for more details.
 
-Login to any one of the Anthos cluster nodes and run `pxctl status` to check the portworx state or run `kubectl get pods -lapp=portworx -n kube-system` to check if the portworx pods are running. Portworx logs can be viewed by running: `kubectl logs -lapp=portworx -n kube-system --all-containers`.
+Login to any one of the Anthos cluster nodes and run `pxctl status` to check the state of the Portworx services.
 
-By default, Portworx 2.6 is installed in the Anthos Cluster. The version of Portworx can be changed using the `portworx_version` variable.
+You can also use the Kubernetes API to check the status:
+
+```sh
+kubectl get pods -lapp=portworx -n kube-system
+```
+
+Portworx logs can be viewed by running:
+
+```sh
+kubectl logs -lapp=portworx -n kube-system --all-containers
+```
+
+By default, Portworx 2.6 is installed in the Anthos Cluster. The version of Portworx can be changed using the `portworx_version` variable.
+
+## Licensing
+
+Portworx is installed with a Trial license. To continue use at the end of the trial period, you will need a Portworx Enterprise Metal license.
+
+More information about these licenses, their restrictions and enablement can be found at .
+
+To active the Portworx license through this module:
+
+* Set the `portworx_license` variable to your license key
+* Run `terraform apply`
diff --git a/modules/portworx/main.tf b/modules/portworx/main.tf
index f79b217..bc76471 100644
--- a/modules/portworx/main.tf
+++ b/modules/portworx/main.tf
@@ -48,3 +48,23 @@ resource "null_resource" "install_portworx" {
     ]
   }
 }
+
+resource "null_resource" "license_portworx" {
+  count = length(var.portworx_license) > 0 ? 1 : 0
+
+  depends_on = [
+    null_resource.install_portworx
+  ]
+  connection {
+    type        = "ssh"
+    user        = var.ssh.user
+    private_key = var.ssh.private_key
+    host        = var.ssh.worker_addresses[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "/opt/pwx/bin/pxctl license activate ${var.portworx_license}"
+    ]
+  }
+}
diff --git a/modules/portworx/variables.tf b/modules/portworx/variables.tf
index 65e7c40..32f2945 100644
--- a/modules/portworx/variables.tf
+++ b/modules/portworx/variables.tf
@@ -4,6 +4,12 @@ variable "portworx_version" {
   default     = ""
 }
 
+variable "portworx_license" {
+  type        = string
+  description = "License key for Portworx (a Trial license is used by default)"
+  default     = ""
+}
+
 variable "latest_portworx_version" {
   type        = string
   description = "The version of Portworx to install"
diff --git a/modules/storage/main.tf b/modules/storage/main.tf
index ad1c112..cd39cf7 100644
--- a/modules/storage/main.tf
+++ b/modules/storage/main.tf
@@ -1,7 +1,8 @@
 module "portworx" {
   count            = var.storage_module == "portworx" ? 1 : 0
   source           = "../portworx"
-  portworx_version = try(var.storage_options.version, null)
+  portworx_version = try(var.storage_options.portworx_version, "")
+  portworx_license = try(var.storage_options.portworx_license, "")
   ssh              = var.ssh
   cluster_name     = var.cluster_name
 }
diff --git a/modules/storage/variables.tf b/modules/storage/variables.tf
index 9a8241e..024eedf 100644
--- a/modules/storage/variables.tf
+++ b/modules/storage/variables.tf
@@ -5,7 +5,7 @@ variable "storage_module" {
 
 variable "storage_options" {
   type        = any
-  description = "Options for the Storage provider module"
+  description = "Options for the Storage provider module. Option names can be found in the documentation for each module and are prefixed with the vendor name (\"portworx_version\")"
   default     = {}
 }
 

From 90ba37548836b807f8958d57f33d9b2590c4ee22 Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Thu, 18 Feb 2021 14:34:46 -0500
Subject: [PATCH 11/12] note Portworx licensing behavior

The Portworx licensing step can fail if applied too soon. The
documentation is updated to reflect that, providing remediations.
---
 modules/portworx/README.md    | 4 ++++
 modules/portworx/variables.tf | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/modules/portworx/README.md b/modules/portworx/README.md
index d662089..c7acad0 100644
--- a/modules/portworx/README.md
+++ b/modules/portworx/README.md
@@ -30,3 +30,7 @@ To active the Portworx license through this module:
 
 * Set the `portworx_license` variable to your license key
 * Run `terraform apply`
+
+**Note**: The `portworx_license` variable can not be set and defined before the Portworx installation is ready. This takes about 15 minutes today. If you attempt to provide the license too early the `terraform apply` will fail, affecting only this licensing task. A subsequent and successful `terraform apply` will be needed to correct the licensing.
+
+Alternatively, `ssh` into any worker node and run `/opt/pwx/bin/pxctl license activate _key_`.
diff --git a/modules/portworx/variables.tf b/modules/portworx/variables.tf
index 32f2945..ec85d86 100644
--- a/modules/portworx/variables.tf
+++ b/modules/portworx/variables.tf
@@ -6,7 +6,7 @@ variable "portworx_version" {
 
 variable "portworx_license" {
   type        = string
-  description = "License key for Portworx (a Trial license is used by default)"
+  description = "License key for Portworx. A Trial license is used by default. Setting this value before Portworx is installed and ready will result in a failed `apply` that can be corrected by applying again after the Portworx install has completed."
   default     = ""
 }

From c02b703e0554c52d68adcaa928bbf7e76dec8acd Mon Sep 17 00:00:00 2001
From: Marques Johansson
Date: Wed, 24 Feb 2021 13:21:10 -0500
Subject: [PATCH 12/12] avoid race by having worker_pre_reqs depend on deploy_anthos_cluster

deploy_anthos_cluster is run on the first control plane node, but it
triggers ssh provisioning from that node to the workers outside of what
Terraform is doing.

Signed-off-by: Marques Johansson
---
 main.tf               | 6 ++++++
 templates/pre_reqs.sh | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/main.tf b/main.tf
index b91beca..afe13bb 100644
--- a/main.tf
+++ b/main.tf
@@ -188,6 +188,9 @@ data "template_file" "create_cluster" {
   }
 }
 
+// Initialize Anthos on the first control plane node.
+// This will also trigger installs (including apt)
+// on the worker nodes.
 resource "null_resource" "deploy_anthos_cluster" {
   depends_on = [
     null_resource.prep_anthos_cluster,
@@ -404,6 +407,9 @@ resource "null_resource" "install_kube_vip_daemonset" {
 
 resource "null_resource" "worker_pre_reqs" {
   count = var.worker_count
+  depends_on = [
+    null_resource.deploy_anthos_cluster,
+  ]
 
   connection {
     type        = "ssh"
diff --git a/templates/pre_reqs.sh b/templates/pre_reqs.sh
index 6ae70a0..b99cda4 100644
--- a/templates/pre_reqs.sh
+++ b/templates/pre_reqs.sh
@@ -106,4 +106,4 @@ sed -i "s|# ingressVIP: 10.0.0.2|ingressVIP: $INGRESS_VIP|g" $cluster_config
 sed -i "s| - address: |$cp_string|g" $cluster_config
 sed -i "s| - address: |$worker_string|g" $cluster_config
 sed -i "s|- 10.96.0.0/12|- 172.31.0.0/16|g" $cluster_config
-sed -i "s|- 192.168.0.0/16|- 172.30.0.0/16|g" $cluster_config
\ No newline at end of file
+sed -i "s|- 192.168.0.0/16|- 172.30.0.0/16|g" $cluster_config
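Taken together, the series leaves the root module with two storage-related inputs. A hedged sketch of a `terraform.tfvars` that opts in to the Portworx module as it stands after these patches — the version and license values below are placeholders, and per the note added in PATCH 11 the license can also be applied in a later `terraform apply`:

```hcl
storage_module = "portworx"

storage_options = {
  # Optional; the module falls back to its latest_portworx_version ("2.6") when unset.
  portworx_version = "2.6"

  # Optional placeholder; omit to keep the Trial license and activate later.
  # portworx_license = "XXXX-XXXX-XXXX-XXXX"
}
```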