From d2292324c7800e5fe5f932c9365efd15a5d440a2 Mon Sep 17 00:00:00 2001
From: Pavel Nikolov
Date: Fri, 26 Jul 2024 00:08:25 +0300
Subject: [PATCH] update terraform and add k8s files

---
 README.md              | 17 ++++++++-
 k8s/client.yaml        | 41 ++++++++++++++++++++++
 k8s/kustomization.yaml | 15 ++++++++
 k8s/server.yaml        | 34 ++++++++++++++++++
 terraform/main.tf      | 80 ++++++++++++++++++++++++++++++++++++++----
 terraform/output.tf    |  6 ++--
 terraform/variables.tf | 22 ++++++++++++
 7 files changed, 205 insertions(+), 10 deletions(-)
 create mode 100644 k8s/client.yaml
 create mode 100644 k8s/kustomization.yaml
 create mode 100644 k8s/server.yaml

diff --git a/README.md b/README.md
index f032f7c..ceb3cb5 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,22 @@ Some functional tests have been added which test the process of registration, at
 
 ### Client and Server setup
 
-Assuming that Docker is present on your machine, the client and the server can be started by running `docker compose up`. Alternatively, if Docker is not available, one can always run the binaries using `cargo` like this:
+Assuming that Docker is present on your machine, the client and the server can be started by running `docker compose up`, which uses the `docker-compose.yaml` file:
+
+```bash
+$ docker compose up
+[+] Running 2/0
+ ✔ Container zkp-auth-server-1  Created 0.0s
+ ✔ Container zkp-auth-client-1  Created 0.0s
+Attaching to client-1, server-1
+server-1  | Listening for connections on 0.0.0.0:50051
+client-1  | Registration successful.
+client-1  | Received challenge from server.
+client-1  | Successfully logged in! Session ID: OooJ8n7FOOU1ZyhxOqfBhsvK5x4mwdP7
+client-1 exited with code 0
+```
+
+Alternatively, if Docker is not available, one can always run the binaries using `cargo` like this:
 
 * Run `cargo run --bin zkpauth-server` in one terminal; and then
 * Run `cargo run --bin zkpauth-client` in another terminal
diff --git a/k8s/client.yaml b/k8s/client.yaml
new file mode 100644
index 0000000..d46fb2b
--- /dev/null
+++ b/k8s/client.yaml
@@ -0,0 +1,41 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: app
+  namespace: zkpauth
+  labels:
+    app: app
+spec:
+  template:
+    metadata:
+      labels:
+        app: app
+    spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - server
+              namespaces:
+                - zkpauth
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+        - name: app
+          image: ghcr.io/pavelnikolov/zkpauth-client:overridden-later
+          env:
+            - name: SERVER_ADDR
+              value: "http://server.zkpauth:50051"
+            - name: CLIENT_ID
+              value: "client"
+          resources:
+            requests:
+              cpu: 100m
+              memory: 100Mi
+            limits:
+              cpu: 100m
+              memory: 100Mi
+      restartPolicy: Never
diff --git a/k8s/kustomization.yaml b/k8s/kustomization.yaml
new file mode 100644
index 0000000..0b45a09
--- /dev/null
+++ b/k8s/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: zkpauth
+
+resources:
+  - server.yaml
+  - client.yaml
+
+images:
+  - name: ghcr.io/pavelnikolov/zkpauth-server
+    newName: ghcr.io/pavelnikolov/zkpauth-server
+    newTag: latest
+  - name: ghcr.io/pavelnikolov/zkpauth-client
+    newName: ghcr.io/pavelnikolov/zkpauth-client
+    newTag: latest
diff --git a/k8s/server.yaml b/k8s/server.yaml
new file mode 100644
index 0000000..7d9e28b
--- /dev/null
+++ b/k8s/server.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: server
+  namespace: zkpauth
+  labels:
+    app: server
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: server
+  template:
+    metadata:
+      labels:
+        app: server
+    spec:
+      restartPolicy: Always
+      containers:
+        - name: server
+          image: ghcr.io/pavelnikolov/zkpauth-server:overridden-later
+          ports:
+            - name: grpc
+              containerPort: 50051
+          env:
+            - name: LISTEN_ADDR
+              value: "0.0.0.0:50051"
+          resources:
+            requests:
+              cpu: 100m
+              memory: 100Mi
+            limits:
+              cpu: 200m
+              memory: 200Mi
diff --git a/terraform/main.tf b/terraform/main.tf
index 116f0dc..41ed926 100644
--- a/terraform/main.tf
+++ b/terraform/main.tf
@@ -16,14 +16,24 @@ provider "aws" {
   region = var.aws_region
 }
 
-
 resource "aws_vpc" "main" {
   cidr_block = "10.0.0.0/16"
 }
 
-resource "aws_subnet" "main" {
-  vpc_id     = aws_vpc.main.id
-  cidr_block = "10.0.1.0/24"
+resource "aws_subnet" "az_a" {
+  vpc_id            = aws_vpc.main.id
+  cidr_block        = "10.0.1.0/24"
+  availability_zone = format("%s%s", var.aws_region, "a")
+
+  tags = {
+    Name = "main"
+  }
+}
+
+resource "aws_subnet" "az_b" {
+  vpc_id            = aws_vpc.main.id
+  cidr_block        = "10.0.2.0/24"
+  availability_zone = format("%s%s", var.aws_region, "b")
 
   tags = {
     Name = "main"
@@ -53,8 +63,32 @@ resource "aws_iam_role_policy_attachment" "cluster_policy" {
   role       = aws_iam_role.cluster_role.name
 }
 
-# Optionally, enable Security Groups for Pods
-# Reference: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html
+resource "aws_iam_role" "NodeGroupRole" {
+  name               = "EKSNodeGroupRole"
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = "sts:AssumeRole"
+        Effect = "Allow"
+        Principal = {
+          Service = "ec2.amazonaws.com"
+        }
+      },
+    ]
+  })
+}
+
+resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+  role       = aws_iam_role.NodeGroupRole.name
+}
+
+resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+  role       = aws_iam_role.NodeGroupRole.name
+}
+
 resource "aws_iam_role_policy_attachment" "vpc_resource_controller_policy" {
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
   role       = aws_iam_role.cluster_role.name
@@ -65,7 +99,7 @@ resource "aws_eks_cluster" "default" {
   role_arn = aws_iam_role.cluster_role.arn
 
   vpc_config {
-    subnet_ids = [aws_subnet.main.id]
+    subnet_ids = [aws_subnet.az_a.id, aws_subnet.az_b.id]
   }
 
   # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
@@ -75,3 +109,35 @@ resource "aws_eks_cluster" "default" {
     aws_iam_role_policy_attachment.vpc_resource_controller_policy,
   ]
 }
+
+resource "aws_eks_node_group" "cluster_node_group" {
+  cluster_name    = aws_eks_cluster.default.name
+  node_group_name = "${terraform.workspace}-cluster-node_group"
+  node_role_arn   = aws_iam_role.NodeGroupRole.arn
+  subnet_ids      = [aws_subnet.az_a.id, aws_subnet.az_b.id]
+
+  scaling_config {
+    desired_size = 2
+    max_size     = 2
+    min_size     = 2
+  }
+
+  ami_type       = "AL2_x86_64"
+  instance_types = ["t3.micro"]
+  capacity_type  = "ON_DEMAND"
+  disk_size      = 20
+
+  depends_on = [
+    aws_iam_role_policy_attachment.AmazonEKSWorkerNodePolicy,
+    aws_iam_role_policy_attachment.AmazonEKS_CNI_Policy
+  ]
+}
+
+# use managed addons in order to make it easier to upgrade the Kubernetes version in future
+resource "aws_eks_addon" "addons" {
+  for_each                 = { for addon in var.cluster_addons : addon.name => addon }
+  cluster_name             = aws_eks_cluster.default.name
+  addon_name               = each.value.name
+  addon_version            = each.value.version
+  service_account_role_arn = aws_iam_role.cluster_role.arn
+}
diff --git a/terraform/output.tf b/terraform/output.tf
index c182771..6c4e1ac 100644
--- a/terraform/output.tf
+++ b/terraform/output.tf
@@ -1,7 +1,9 @@
 output "endpoint" {
-  value = aws_eks_cluster.default.endpoint
+  value     = aws_eks_cluster.default.endpoint
+  sensitive = true
 }
 
 output "kubeconfig-certificate-authority-data" {
-  value = aws_eks_cluster.default.certificate_authority[0].data
+  value     = aws_eks_cluster.default.certificate_authority[0].data
+  sensitive = true
 }
diff --git a/terraform/variables.tf b/terraform/variables.tf
index 58bc288..3628f0e 100644
--- a/terraform/variables.tf
+++ b/terraform/variables.tf
@@ -3,3 +3,25 @@ variable "aws_region" {
   default = "eu-central-1"
   type    = string
 }
+
+variable "cluster_addons" {
+  type = list(object({
+    name    = string
+    version = string
+  }))
+
+  default = [
+    {
+      name    = "kube-proxy"
+      version = "v1.30.0-eksbuild.3"
+    },
+    {
+      name    = "vpc-cni"
+      version = "v1.18.2-eksbuild.1"
+    },
+    {
+      name    = "coredns"
+      version = "v1.11.1-eksbuild.9"
+    }
+  ]
+}
\ No newline at end of file
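
For reference, a minimal end-to-end sketch of using the pieces this patch adds, assuming authenticated AWS credentials, Terraform >= 0.14 (for `-chdir`) and a `kubectl` recent enough to bundle kustomize; `<cluster-name>` is a placeholder, since the `name` argument of `aws_eks_cluster.default` sits outside the hunks above:

```bash
# Provision the VPC, EKS cluster, node group and managed addons.
terraform -chdir=terraform init
terraform -chdir=terraform apply

# Point kubectl at the new cluster; replace <cluster-name> with the
# name configured on aws_eks_cluster.default.
aws eks update-kubeconfig --region eu-central-1 --name <cluster-name>

# The kustomization stamps the zkpauth namespace onto the manifests
# but does not create it, so create it before applying.
kubectl create namespace zkpauth
kubectl apply -k k8s/
```

The `-k` flag renders `k8s/kustomization.yaml`, which pins both images to the `:latest` tag and sets the `zkpauth` namespace on the server Deployment and the client Job.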