node_group.tf
forked from niallthomson/eks-graviton-terraform
resource "aws_eks_node_group" "managed_workers_arm" {
cluster_name = aws_eks_cluster.cluster.name
node_group_name = "${var.environment_name}-workers-${var.availability_zones[count.index]}-ondemand"
node_role_arn = aws_iam_role.managed_workers.arn
subnet_ids = [module.vpc.private_subnets[count.index]]
scaling_config {
desired_size = 2
max_size = 2
min_size = 1
}
instance_types = [var.node_pool_instance_type]
ami_type = "AL2_ARM_64"
labels = {
lifecycle = "OnDemand"
az = var.availability_zones[count.index]
}
remote_access {
ec2_ssh_key = aws_key_pair.generated_key.key_name
source_security_group_ids = [aws_security_group.dummy.id]
}
tags = var.tags
depends_on = [
aws_iam_role_policy_attachment.eks_worker_node_policy,
aws_iam_role_policy_attachment.eks_cni_policy,
aws_iam_role_policy_attachment.eks_ecr_policy,
]
lifecycle {
create_before_destroy = true
}
count = length(var.availability_zones)
}
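# The node role and the policy attachments referenced in depends_on are defined
# elsewhere in the repository. The sketch below shows what they typically look like
# for EKS managed nodes; the resource names match the references above, but the
# role name and exact wording are assumptions, not the repo's actual definitions.
resource "aws_iam_role" "managed_workers" {
  name = "${var.environment_name}-managed-workers" # illustrative name

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "ec2.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy_attachment" "eks_worker_node_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.managed_workers.name
}

resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.managed_workers.name
}

resource "aws_iam_role_policy_attachment" "eks_ecr_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.managed_workers.name
}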
## spot arm
resource "aws_eks_node_group" "managed_workers_arm_spot" {
  cluster_name    = aws_eks_cluster.cluster.name
  node_group_name = "${var.environment_name}-workers-${var.availability_zones[count.index]}-spot-arm"
  node_role_arn   = aws_iam_role.managed_workers.arn
  subnet_ids      = [module.vpc.private_subnets[count.index]]

  scaling_config {
    desired_size = 3
    max_size     = 5
    min_size     = 1
  }

  instance_types = [var.node_pool_instance_type_spot_arm]
  ami_type       = "AL2_ARM_64"

  labels = {
    lifecycle = "spot"
    az        = var.availability_zones[count.index]
  }

  remote_access {
    ec2_ssh_key               = aws_key_pair.generated_key.key_name
    source_security_group_ids = [aws_security_group.dummy.id]
  }

  tags = var.tags

  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_ecr_policy,
  ]

  lifecycle {
    create_before_destroy = true
  }

  count = length(var.availability_zones)
}
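# NOTE: the "spot" groups label their nodes with lifecycle = "spot" but do not set
# capacity_type, so the AWS provider provisions on-demand capacity by default.
# Assuming a provider version recent enough to support the argument, actually
# requesting Spot capacity would be a one-line addition inside each spot node group:
#
#   capacity_type = "SPOT"
#
# The same applies to the x86 spot group below.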
## spot x86
resource "aws_eks_node_group" "managed_workers_x86_spot" {
  cluster_name    = aws_eks_cluster.cluster.name
  node_group_name = "${var.environment_name}-workers-${var.availability_zones[count.index]}-spot-x86"
  node_role_arn   = aws_iam_role.managed_workers.arn
  subnet_ids      = [module.vpc.private_subnets[count.index]]

  scaling_config {
    desired_size = 3
    max_size     = 5
    min_size     = 1
  }

  instance_types = [var.node_pool_instance_type_spot_x86]
  ami_type       = "AL2_x86_64"

  labels = {
    lifecycle = "spot"
    az        = var.availability_zones[count.index]
  }

  remote_access {
    ec2_ssh_key               = aws_key_pair.generated_key.key_name
    source_security_group_ids = [aws_security_group.dummy.id]
  }

  tags = var.tags

  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_ecr_policy,
  ]

  lifecycle {
    create_before_destroy = true
  }

  count = length(var.availability_zones)
}
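# The node groups above consume input variables declared elsewhere in the repository.
# The sketch below is illustrative only: the names match the references in this file,
# but the types and defaults are assumptions, not the repo's actual declarations.
variable "environment_name" {
  type        = string
  description = "Prefix used for node group names"
}

variable "availability_zones" {
  type        = list(string)
  description = "One node group (and one private subnet) is created per entry"
}

variable "node_pool_instance_type" {
  type    = string
  default = "m6g.large" # illustrative Graviton (ARM) size
}

variable "node_pool_instance_type_spot_arm" {
  type    = string
  default = "m6g.large" # illustrative
}

variable "node_pool_instance_type_spot_x86" {
  type    = string
  default = "m5.large" # illustrative
}

variable "tags" {
  type    = map(string)
  default = {}
}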