diff --git a/aws.go b/aws.go
index e383c44..62d102c 100644
--- a/aws.go
+++ b/aws.go
@@ -24,6 +24,7 @@ func aws_create_variables(config *Config) []string {
 	var pxduser string
 	var tf_variables []string
 	var tf_variables_ocp4 []string
+	var tf_variables_rancher []string
 	var tf_variables_eks []string
 	var tf_cluster_instance_type string
 	var tf_cluster_nodes string
@@ -66,6 +67,11 @@ func aws_create_variables(config *Config) []string {
 			tf_variables = append(tf_variables, "ocp4_nodes = \""+config.Nodes+"\"")
 			config.Nodes = "0"
 		}
+	case "rancher":
+		{
+			tf_variables = append(tf_variables, "rancher_nodes = \""+config.Nodes+"\"")
+			config.Nodes = "0"
+		}
 	case "eks":
 		{
 			tf_variables = append(tf_variables, "eks_nodes = \""+config.Nodes+"\"")
@@ -98,6 +104,14 @@ func aws_create_variables(config *Config) []string {
 		{
 			tf_variables_eks = append(tf_variables_eks, "eksclusters = {")
 		}
+	case "rancher":
+		{
+			tf_variables = append(tf_variables, "rancher_domain = \""+config.Ocp4_Domain+"\"")
+			tf_variables = append(tf_variables, "rancher_k3s_version = \""+config.Rancher_K3s_Version+"\"")
+			tf_variables = append(tf_variables, "rancher_k8s_version = \""+config.Rancher_K8s_Version+"\"")
+			tf_variables = append(tf_variables, "rancher_version = \""+config.Rancher_Version+"\"")
+			tf_variables_rancher = append(tf_variables_rancher, "rancherclusters = {")
+		}
 	}
 
 	tf_variables = append(tf_variables, "nodeconfig = [")
@@ -146,6 +160,10 @@ func aws_create_variables(config *Config) []string {
 			{
 				tf_variables_ocp4 = append(tf_variables_ocp4, " \""+masternum+"\" = \""+tf_cluster_instance_type+"\",")
 			}
+		case "rancher":
+			{
+				tf_variables_rancher = append(tf_variables_rancher, " \""+masternum+"\" = \""+tf_cluster_instance_type+"\",")
+			}
 		case "eks":
 			{
 				tf_variables_eks = append(tf_variables_eks, " \""+masternum+"\" = \""+tf_cluster_instance_type+"\",")
@@ -160,6 +178,11 @@ func aws_create_variables(config *Config) []string {
 			tf_variables_ocp4 = append(tf_variables_ocp4, "}")
 			tf_variables = append(tf_variables, tf_variables_ocp4...)
 		}
+	case "rancher":
+		{
+			tf_variables_rancher = append(tf_variables_rancher, "}")
+			tf_variables = append(tf_variables, tf_variables_rancher...)
+ } case "eks": { tf_variables_eks = append(tf_variables_eks, "}") diff --git a/defaults.yml b/defaults.yml index 331e200..33cb64a 100644 --- a/defaults.yml +++ b/defaults.yml @@ -37,6 +37,10 @@ azure_tenant_id: "" azure_subscription_id: "" aks_version: "1.28" +rancher_version: "2.9.3" +rancher_k3s_version: "1.30.6+k3s1" +rancher_k8s_version: "1.30.6+rke2r1" + vsphere_host: "" vsphere_compute_resource: "" vsphere_resource_pool: "" diff --git a/px-deploy.go b/px-deploy.go index 0ed7b52..9e12b8c 100644 --- a/px-deploy.go +++ b/px-deploy.go @@ -72,6 +72,9 @@ type Config struct { Azure_Subscription_Id string Azure_Tenant_Id string Aks_Version string + Rancher_K3s_Version string + Rancher_K8s_Version string + Rancher_Version string Vsphere_Host string Vsphere_Compute_Resource string Vsphere_Resource_Pool string @@ -410,7 +413,7 @@ func main() { defaults := parse_yaml("defaults.yml") cmdCreate.Flags().StringVarP(&createName, "name", "n", "", "name of deployment to be created (if blank, generate UUID)") - cmdCreate.Flags().StringVarP(&flags.Platform, "platform", "p", "", "k8s | dockeree | none | k3s | ocp4 | eks | gke | aks | nomad (default "+defaults.Platform+")") + cmdCreate.Flags().StringVarP(&flags.Platform, "platform", "p", "", "k8s | dockeree | none | k3s | ocp4 | rancher | eks | gke | aks | nomad (default "+defaults.Platform+")") cmdCreate.Flags().StringVarP(&flags.Clusters, "clusters", "c", "", "number of clusters to be deployed (default "+defaults.Clusters+")") cmdCreate.Flags().StringVarP(&flags.Nodes, "nodes", "N", "", "number of nodes to be deployed in each cluster (default "+defaults.Nodes+")") cmdCreate.Flags().StringVarP(&flags.K8s_Version, "k8s_version", "k", "", "Kubernetes version to be deployed (default "+defaults.K8s_Version+")") @@ -577,7 +580,7 @@ func validate_config(config *Config) []string { config.Vsphere_Folder = strings.TrimRight(config.Vsphere_Folder, "/") } - if config.Platform != "k8s" && config.Platform != "k3s" && config.Platform != "none" && config.Platform != "dockeree" && config.Platform != "ocp4" && config.Platform != "eks" && config.Platform != "gke" && config.Platform != "aks" && config.Platform != "nomad" { + if config.Platform != "k8s" && config.Platform != "k3s" && config.Platform != "none" && config.Platform != "dockeree" && config.Platform != "ocp4" && config.Platform != "rancher" && config.Platform != "eks" && config.Platform != "gke" && config.Platform != "aks" && config.Platform != "nomad" { errormsg = append(errormsg, "Invalid platform '"+config.Platform+"'") } @@ -643,7 +646,17 @@ func validate_config(config *Config) []string { emptyVars := isEmpty(config.Ocp4_Domain, config.Ocp4_Pull_Secret) if len(emptyVars) > 0 { for _, i := range emptyVars { - errormsg = append(errormsg, "please set \"%s\" in defaults.yml", checkvar[i]) + errormsg = append(errormsg, fmt.Sprintf("please set \"%s\" in defaults.yml", checkvar[i])) + } + } + } + + if config.Platform == "rancher" { + checkvar := []string{"rancher_k3s_version", "rancher_k8s_version", "rancher_version"} + emptyVars := isEmpty(config.Rancher_K3s_Version, config.Rancher_K8s_Version, config.Rancher_Version) + if len(emptyVars) > 0 { + for _, i := range emptyVars { + errormsg = append(errormsg, fmt.Sprintf("please set \"%s\" in defaults.yml", checkvar[i])) } } } @@ -654,6 +667,9 @@ func validate_config(config *Config) []string { if config.Platform == "ocp4" && config.Cloud != "aws" { errormsg = append(errormsg, "Openshift 4 only supported on AWS (not "+config.Cloud+")") } + if config.Platform == "rancher" 
+		errormsg = append(errormsg, "Rancher only supported on AWS (not "+config.Cloud+")")
+	}
 	if config.Platform == "gke" && config.Cloud != "gcp" {
 		errormsg = append(errormsg, "GKE only makes sense with GCP (not "+config.Cloud+")")
 	}
@@ -749,7 +765,7 @@ func get_deployment_status(config *Config, cluster int, c chan Deployment_Status
 	var Nodes int
 	var returnvalue string
-	if (config.Platform == "ocp4") || (config.Platform == "eks") || (config.Platform == "aks") || (config.Platform == "gke") {
+	if (config.Platform == "ocp4") || (config.Platform == "rancher") || (config.Platform == "eks") || (config.Platform == "aks") || (config.Platform == "gke") {
 		Nodes = 0
 	} else {
 		Nodes, _ = strconv.Atoi(config.Nodes)
 	}
@@ -801,6 +817,19 @@
 			returnvalue = fmt.Sprintf("%v OCP4 credentials not yet available\n", returnvalue)
 		}
 	}
+
+	if config.Platform == "rancher" {
+		if ready_nodes["url"] != "" {
+			returnvalue = fmt.Sprintf("%v URL: %v \n", returnvalue, ready_nodes["url"])
+		} else {
+			returnvalue = fmt.Sprintf("%v Rancher Server URL not yet available\n", returnvalue)
+		}
+		if ready_nodes["cred"] != "" {
+			returnvalue = fmt.Sprintf("%v Credentials: admin / %v \n", returnvalue, ready_nodes["cred"])
+		} else {
+			returnvalue = fmt.Sprintf("%v Rancher Server credentials not yet available\n", returnvalue)
+		}
+	}
 	for n := 1; n <= Nodes; n++ {
 		if ready_nodes[fmt.Sprintf("node-%v-%v", cluster, n)] != "" {
 			returnvalue = fmt.Sprintf("%vReady\t node-%v-%v\n", returnvalue, cluster, n)
@@ -847,6 +876,11 @@ func create_deployment(config Config) bool {
 				exec.Command("cp", "-a", `/px-deploy/terraform/aws/eks/eks.tf`, `/px-deploy/.px-deploy/tf-deployments/`+config.Name).Run()
 				exec.Command("cp", "-a", `/px-deploy/terraform/aws/eks/eks_run_everywhere.tpl`, `/px-deploy/.px-deploy/tf-deployments/`+config.Name).Run()
 			}
+		case "rancher":
+			{
+				exec.Command("cp", "-a", `/px-deploy/terraform/aws/rancher/rancher-server.tf`, `/px-deploy/.px-deploy/tf-deployments/`+config.Name).Run()
+				exec.Command("cp", "-a", `/px-deploy/terraform/aws/rancher/rancher-variables.tf`, `/px-deploy/.px-deploy/tf-deployments/`+config.Name).Run()
+			}
 		}
 		write_nodescripts(config)
 		write_tf_file(config.Name, ".tfvars", aws_create_variables(&config))
@@ -984,6 +1018,9 @@ func run_terraform_apply(config *Config) string {
 	case "aws":
 		cloud_auth = append(cloud_auth, fmt.Sprintf("AWS_ACCESS_KEY_ID=%s", config.Aws_Access_Key_Id))
 		cloud_auth = append(cloud_auth, fmt.Sprintf("AWS_SECRET_ACCESS_KEY=%s", config.Aws_Secret_Access_Key))
+		// make aws keys consumable within the terraform scripts
+		cloud_auth = append(cloud_auth, fmt.Sprintf("TF_VAR_AWS_ACCESS_KEY_ID=%s", config.Aws_Access_Key_Id))
+		cloud_auth = append(cloud_auth, fmt.Sprintf("TF_VAR_AWS_SECRET_ACCESS_KEY=%s", config.Aws_Secret_Access_Key))
 	}
 	cmd.Env = append(cmd.Env, cloud_auth...)
 	err := cmd.Run()
@@ -1163,6 +1200,30 @@ func destroy_deployment(name string, destroyForce bool) {
 				prepare_predelete(&config, "script", destroyForce)
 				prepare_predelete(&config, "platform", destroyForce)
 			}
+		case "rancher":
+			{
+				var cloud_auth []string
+				cloud_auth = append(cloud_auth, fmt.Sprintf("AWS_ACCESS_KEY_ID=%s", config.Aws_Access_Key_Id))
+				cloud_auth = append(cloud_auth, fmt.Sprintf("AWS_SECRET_ACCESS_KEY=%s", config.Aws_Secret_Access_Key))
+				fmt.Println(Red + "FIXME: removing helm deployments from rancher state." + Reset)
+				cmd1 := exec.Command("terraform", "-chdir=/px-deploy/.px-deploy/tf-deployments/"+config.Name, "state", "rm", "helm_release.rancher_server")
+				cmd1.Stdout = os.Stdout
+				cmd1.Stderr = os.Stderr
+				cmd1.Env = append(cmd1.Env, cloud_auth...)
+				errstate1 := cmd1.Run()
+				if errstate1 != nil {
+					fmt.Println(Yellow + "ERROR: Terraform state rm helm_release.rancher_server failed. Check validity of terraform scripts" + Reset)
+				}
+				cmd2 := exec.Command("terraform", "-chdir=/px-deploy/.px-deploy/tf-deployments/"+config.Name, "state", "rm", "helm_release.cert_manager")
+				cmd2.Stdout = os.Stdout
+				cmd2.Stderr = os.Stderr
+				cmd2.Env = append(cmd2.Env, cloud_auth...)
+				errstate2 := cmd2.Run()
+				if errstate2 != nil {
+					fmt.Println(Yellow + "ERROR: Terraform state rm helm_release.cert_manager failed. Check validity of terraform scripts" + Reset)
+				}
+
+			}
 		case "eks":
 			{
 				prepare_predelete(&config, "script", destroyForce)
@@ -1199,9 +1260,16 @@
 			}
 		}
 
-		// delete elb instances & attached SGs (+referncing rules) from VPC
+		// delete elb instances & attached SGs (+referencing rules) from VPC
 		delete_elb_instances(config.Aws__Vpc, cfg)
 
+		// remove all terraform based infra
+		tf_error := run_terraform_destroy(&config)
+		if tf_error != "" {
+			fmt.Printf("%s\n", tf_error)
+			return
+		}
+
 		// at this point px clouddrive volumes must no longer be attached
 		// as instances are terminated
 		if len(aws_volumes) > 0 {
@@ -1220,11 +1288,6 @@
 			}
 		}
 
-		tf_error := run_terraform_destroy(&config)
-		if tf_error != "" {
-			fmt.Printf("%s\n", tf_error)
-			return
-		}
 		aws_show_iamkey_age(&config)
 
 	} else if config.Cloud == "gcp" {
diff --git a/scripts/install-px b/scripts/install-px
index 6890e5f..bfbea31 100644
--- a/scripts/install-px
+++ b/scripts/install-px
@@ -146,7 +146,7 @@ if [ "$operator" != false ]; then
   kubectl -n portworx create configmap grafana-source-config --from-file=grafana-datasource.yaml=<(curl -s https://docs.portworx.com/samples/k8s/pxc/grafana-datasource.yaml)
   kubectl -n portworx create configmap grafana-dashboards --from-file=portworx-cluster-dashboard.json=<(curl -s https://docs.portworx.com/samples/k8s/pxc/portworx-cluster-dashboard.json) --from-file=portworx-node-dashboard.json=<(curl -s https://docs.portworx.com/samples/k8s/pxc/portworx-node-dashboard.json) --from-file=portworx-volume-dashboard.json=<(curl -s https://docs.portworx.com/samples/k8s/pxc/portworx-volume-dashboard.json) --from-file=portworx-etcd-dashboard.json=<(curl -s https://docs.portworx.com/samples/k8s/pxc/portworx-etcd-dashboard.json) --from-file=portworx-performance-dashboard.json=<(curl -s https://docs.portworx.com/samples/k8s/pxc/portworx-performance-dashboard.json)
   kubectl apply -f <(curl -s https://docs.portworx.com/samples/k8s/pxc/grafana.yaml | sed s/kube-system/portworx/)
-  if [ $platform != eks ] && [ $platform != gke ] && [ $platform != ocp4 ] && [ $platform != aks ]; then
+  if [ $platform != eks ] && [ $platform != gke ] && [ $platform != ocp4 ] && [ $platform != rancher ] && [ $platform != aks ]; then
     kubectl patch svc grafana -n portworx -p '{"spec": { "type": "NodePort", "ports": [ { "nodePort": 30112, "port": 3000, "protocol": "TCP", "targetPort": 3000 } ] } }'
     while ! curl -m 1 -s -X POST -H "Content-Type: application/json" -d '{"Name":"portworx","type":"prometheus","access":"server","url":"http://px-prometheus:9090"}' http://admin:admin@localhost:30112/api/datasources; do
       echo waiting for grafana
diff --git a/terraform/aws/main.tf b/terraform/aws/main.tf
index 71aa7da..2fa945d 100644
--- a/terraform/aws/main.tf
+++ b/terraform/aws/main.tf
@@ -12,6 +12,21 @@ terraform {
     }
     tls = {
       source = "hashicorp/tls"
+    }
+    helm = {
+      source = "hashicorp/helm"
+      version = "2.16.1"
+    }
+    rancher2 = {
+      source = "rancher/rancher2"
+      version = "5.1.0"
+    }
+    random = {
+      source = "hashicorp/random"
+    }
+    ssh = {
+      source = "loafoe/ssh"
+      version = "2.7.0"
     }
   }
 }
@@ -171,6 +186,13 @@ resource "aws_security_group" "sg_px-deploy" {
     protocol = "tcp"
     cidr_blocks = ["0.0.0.0/0"]
   }
+  ingress {
+    description = "tcp 6443"
+    from_port = 6443
+    to_port = 6443
+    protocol = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
   ingress {
     description = "k8s nodeport"
     from_port = 30000
diff --git a/terraform/aws/rancher/TODO.txt b/terraform/aws/rancher/TODO.txt
new file mode 100644
index 0000000..c4e3235
--- /dev/null
+++ b/terraform/aws/rancher/TODO.txt
@@ -0,0 +1,26 @@
+large-scale test needed as the clouddrive destroy function has been changed (now running after terraform destroy) (all platforms/clouds)
+
+OK use data source for rancher ami id
+-> test on other regions
+
+? create option for rancher_domain or merge with ocp4_domain
+
+multiple clusters, handle exceptions for clusters (nodes, types, ...)
+
+? route53 for workload clusters
+? aws elb for l4
+? no external IP for cluster nodes
+
+TODO:
+implement run_everywhere
+secure rancher cluster PW handling
+test AWS key change during runtime (new key on deletion)
+
+KNOWN ISSUES:
+cloud-init check sometimes shows errors
+creation of downstream clusters sometimes fails because the amazonec2 node driver is not yet cloudInitReady (unknown schema error)
+
+terraform destroy fails on the helm releases because they throw an error.
+current workaround: remove the helm releases from terraform state
+terraform -chdir=/px-deploy/.px-deploy/tf-deployments/dpaul-rancher/ state rm helm_release.cert_manager
+terraform -chdir=/px-deploy/.px-deploy/tf-deployments/dpaul-rancher/ state rm helm_release.rancher_server
diff --git a/terraform/aws/rancher/rancher-server.tf b/terraform/aws/rancher/rancher-server.tf
new file mode 100644
index 0000000..0d1f058
--- /dev/null
+++ b/terraform/aws/rancher/rancher-server.tf
@@ -0,0 +1,352 @@
+data "aws_route53_zone" "rancher" {
+  name = "${var.rancher_domain}."
+}
+
+resource "aws_lb" "rancher" {
+  name = format("px-deploy-rancher-%s",var.config_name)
+  security_groups = [aws_security_group.sg_px-deploy.id]
+  internal = false
+  load_balancer_type = "network"
+  subnets = [aws_subnet.subnet[0].id]
+}
+
+resource "aws_lb_listener" "rancher-ui" {
+  load_balancer_arn = aws_lb.rancher.arn
+  port = "443"
+  protocol = "TCP"
+  default_action {
+    type = "forward"
+    target_group_arn = aws_lb_target_group.rancher-ui.arn
+  }
+}
+
+resource "aws_lb_listener" "rancher-api" {
+  load_balancer_arn = aws_lb.rancher.arn
+  port = "6443"
+  protocol = "TCP"
+  default_action {
+    type = "forward"
+    target_group_arn = aws_lb_target_group.rancher-api.arn
+  }
+}
+
+resource "aws_lb_target_group" "rancher-ui" {
+  name = format("pxd-r-%s-ui",var.config_name)
+  port = 443
+  protocol = "TCP"
+  target_type = "ip"
+  vpc_id = aws_vpc.vpc.id
+
+  health_check {
+    port = 443
+    interval = 10
+    protocol = "TCP"
+  }
+}
+
+resource "aws_lb_target_group" "rancher-api" {
+  name = format("pxd-r-%s-api",var.config_name)
+  port = 6443
+  protocol = "TCP"
+  target_type = "ip"
+  vpc_id = aws_vpc.vpc.id
+
+  health_check {
+    port = 6443
+    interval = 10
+    protocol = "TCP"
+  }
+}
+
+resource "aws_lb_target_group_attachment" "rancher-ui" {
+  target_group_arn = aws_lb_target_group.rancher-ui.arn
+  //target_id = aws_instance.node["master-1-1"].id
+  target_id = "192.168.101.90"
+  port = 443
+}
+
+resource "aws_lb_target_group_attachment" "rancher-api" {
+  target_group_arn = aws_lb_target_group.rancher-api.arn
+  target_id = "192.168.101.90"
+  port = 6443
+}
+
+resource "aws_route53_record" "rancher-server" {
+  zone_id = data.aws_route53_zone.rancher.zone_id
+  name = "rancher.${var.config_name}.${data.aws_route53_zone.rancher.name}"
+  type = "A"
+  alias {
+    name = aws_lb.rancher.dns_name
+    zone_id = aws_lb.rancher.zone_id
+    evaluate_target_health = true
+  }
+}
+
+data "aws_availability_zone" "rancher" {
+  for_each = var.rancherclusters
+  name = aws_subnet.subnet[each.key - 1].availability_zone
+}
+
+data "aws_ami" "ubuntu" {
+  owners = ["099720109477"]
+  include_deprecated = true
+  most_recent = true
+  filter {
+    name = "name"
+    values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20240720"]
+  }
+
+  filter {
+    name = "architecture"
+    values = ["x86_64"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+}
+
+resource "random_password" "bootstrap" {
+  length = 16
+  special = false
+}
+
+resource "null_resource" "cloudInitReady" {
+
+  provisioner "remote-exec" {
+    inline = [
+      "echo 'Waiting for cloud-init to complete...'",
+      "cloud-init status --wait > /dev/null",
+      "echo 'Completed cloud-init!'",
+    ]
+
+    connection {
+      type = "ssh"
+      user = "rocky"
+      host = aws_instance.node["master-1-1"].public_ip
+      private_key = tls_private_key.ssh.private_key_openssh
+    }
+  }
+}
+
+# K3s cluster for Rancher
+
+resource "ssh_resource" "install_k3s" {
+  depends_on = [ null_resource.cloudInitReady, ]
+  host = aws_instance.node["master-1-1"].public_ip
+  commands = [
+    "curl https://get.k3s.io > /tmp/k3s.sh",
+    "chmod +x /tmp/k3s.sh",
+    "INSTALL_K3S_VERSION=v${var.rancher_k3s_version} INSTALL_K3S_EXEC=\"server --tls-san ${aws_route53_record.rancher-server.name} --node-external-ip ${aws_instance.node["master-1-1"].public_ip} --node-ip ${aws_instance.node["master-1-1"].private_ip}\" /tmp/k3s.sh",
+    "while [ ! -f /etc/rancher/k3s/k3s.yaml ]; do sleep 2; done"
+  ]
+  user = "root"
+  private_key = tls_private_key.ssh.private_key_openssh
+}
+
+resource "ssh_resource" "retrieve_config" {
+  depends_on = [
+    ssh_resource.install_k3s
+  ]
+  host = aws_instance.node["master-1-1"].public_ip
+  commands = [
+    "sudo sed \"s/127.0.0.1/${aws_route53_record.rancher-server.name}/g\" /etc/rancher/k3s/k3s.yaml"
+  ]
+  user = "rocky"
+  private_key = tls_private_key.ssh.private_key_openssh
+}
+
+# Save kubeconfig file for interacting with the RKE cluster on your local machine
+resource "local_file" "kube_config_server_yaml" {
+  filename = format("%s/%s", path.root, "kube_config_server.yaml")
+  content = ssh_resource.retrieve_config.result
+}
+
+provider "helm" {
+  kubernetes {
+    insecure = true
+    config_path = local_file.kube_config_server_yaml.filename
+  }
+}
+
+# Helm resources
+
+# Install cert-manager helm chart
+resource "helm_release" "cert_manager" {
+  depends_on = [
+    aws_route_table_association.rt,
+    aws_iam_role_policy_attachment.px-pol-attach,
+  ]
+  name = "cert-manager"
+  chart = "https://charts.jetstack.io/charts/cert-manager-v${var.cert_manager_version}.tgz"
+  namespace = "cert-manager"
+  create_namespace = true
+  wait = true
+
+  set {
+    name = "installCRDs"
+    value = "true"
+  }
+}
+
+# Install Rancher helm chart
+resource "helm_release" "rancher_server" {
+  depends_on = [
+    helm_release.cert_manager,
+    aws_route_table_association.rt,
+    aws_iam_role_policy_attachment.px-pol-attach,
+  ]
+
+  name = "rancher"
+  chart = "${var.rancher_helm_repository}/rancher-${var.rancher_version}.tgz"
+  namespace = "cattle-system"
+  create_namespace = true
+  wait = true
+
+  set {
+    name = "hostname"
+    value = aws_route53_record.rancher-server.name
+  }
+
+  set {
+    name = "replicas"
+    value = "1"
+  }
+
+  set {
+    name = "bootstrapPassword"
+    value = "admin" //random_password.bootstrap.result
+  }
+}
+
+provider "rancher2" {
+  alias = "bootstrap"
+  api_url = format("https://%s",aws_route53_record.rancher-server.name)
+  insecure = true
+  bootstrap = true
+}
+
+# Initialize Rancher server
+resource "rancher2_bootstrap" "admin" {
+  depends_on = [
+    helm_release.rancher_server
+  ]
+  provider = rancher2.bootstrap
+  initial_password = "admin" //random_password.bootstrap.result
+  password = "portworx1!portworx1!"
+  telemetry = false
+}
+
+provider "rancher2" {
+  alias = "admin"
+  api_url = rancher2_bootstrap.admin.url
+  token_key = rancher2_bootstrap.admin.token
+  insecure = true
+}
+
+# Create a new rancher2 Cloud Credential
+resource "rancher2_cloud_credential" "aws" {
+  provider = rancher2.admin
+  name = "AWS"
+  description = "AWS Credentials"
+  amazonec2_credential_config {
+    access_key = var.AWS_ACCESS_KEY_ID
+    secret_key = var.AWS_SECRET_ACCESS_KEY
+  }
+}
+
+resource "rancher2_machine_config_v2" "node" {
+  for_each = var.rancherclusters
+  depends_on = [
+    helm_release.rancher_server,
+    rancher2_cloud_credential.aws
+  ]
+  provider = rancher2.admin
+  generate_name = format("node-templ-%s",each.key)
+  amazonec2_config {
+    ami = data.aws_ami.ubuntu.id
+    root_size = "50"
+    region = var.aws_region
+    instance_type = each.value
+    iam_instance_profile = aws_iam_instance_profile.ec2_profile.name
+    security_group = [aws_security_group.sg_px-deploy.name]
+    subnet_id = aws_subnet.subnet[each.key - 1].id
+    vpc_id = aws_vpc.vpc.id
+    zone = data.aws_availability_zone.rancher[each.key].name_suffix
+    tags = join(",", formatlist("%s,%s", keys(var.aws_tags), values(var.aws_tags)))
+    userdata = format("#cloud-config\nssh_authorized_keys:\n - %s\n", tls_private_key.ssh.public_key_openssh)
+  }
+}
+// add use_private_address = true later
+
+resource "rancher2_cluster_v2" "rancher-cluster" {
+  for_each = var.rancherclusters
+  depends_on = [
+    helm_release.rancher_server,
+    aws_lb_listener.rancher-api,
+    aws_lb_target_group_attachment.rancher-api,
+    aws_lb_listener.rancher-ui,
+    aws_lb_target_group_attachment.rancher-ui
+  ]
+  provider = rancher2.admin
+  name = format("%s-%s",var.config_name,each.key)
+  kubernetes_version = format("v%s",var.rancher_k8s_version)
+  enable_network_policy = false
+  rke_config {
+    machine_global_config = <<EOF
[...]
+cat <<EOF >/etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://pkgs.k8s.io/core:/stable:/v$repo/rpm/
+enabled=1
+gpgcheck=1
+gpgkey=https://pkgs.k8s.io/core:/stable:/v$repo/rpm/repodata/repomd.xml.key
+EOF
+
+k8sversion=$(echo $rancher_k8s_version | grep -o "^[0-9]*\.[0-9]*\.[0-9]*")
+while ! dnf install -y kubectl-$k8sversion; do sleep 1; done
+
+# install awscli2
+curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
+unzip -o /tmp/awscliv2.zip -d /tmp >/dev/null
+/tmp/aws/install
+ln -s /usr/local/bin/aws /usr/bin/aws
+
+mkdir /root/.kube
+echo "waiting for /root/.kube/config to be created"
+while [ ! -f "/root/.kube/config" ]; do sleep 5; done
+echo "/root/.kube/config found"
+
+# remove k3s implementation of kubectl
+rm /usr/local/bin/kubectl
+
+echo "url https://rancher.$name.$ocp4_domain" >> /var/log/px-deploy/completed/tracking
+echo "cred portworx1!portworx1!" >> /var/log/px-deploy/completed/tracking
+
+cat <<EOF >> /etc/motd
++================================================+
+Rancher Web UI: https://rancher.$name.$ocp4_domain
+Admin User Name: admin
+Password: portworx1!portworx1!
++================================================+
+EOF
\ No newline at end of file
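
Usage sketch (not part of the patch): with the rancher_* defaults above set in defaults.yml and AWS credentials configured, the new platform could be exercised roughly as follows. The deployment name rancher-demo and the cluster/node counts are illustrative only, and the status/destroy invocations assume px-deploy's existing subcommands.

px-deploy create -n rancher-demo -p rancher -c 1 -N 3
px-deploy status -n rancher-demo     # prints the Rancher Server URL and admin credentials once they appear in the tracking file
px-deploy destroy -n rancher-demo    # removes the helm releases from terraform state, then runs terraform destroy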