# cluster name
cluster_name: web-banking
# AWS region
region: eu-central-1
# list of availability zones for your region
availability_zones: # default: 3 random availability zones in your region, e.g. [us-east-1a, us-east-1b, us-east-1c]
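# for example, to pin three zones in the region configured above (any zones in your region work):
# availability_zones: [eu-central-1a, eu-central-1b, eu-central-1c]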
# list of cluster node groups
node_groups:
  - name: ng-cpu # name of the node group
    instance_type: m5.large # instance type
    min_instances: 1 # minimum number of instances
    max_instances: 4 # maximum number of instances
    priority: 1 # priority of the node group; the higher the value, the higher the priority [1-100]
    instance_volume_size: 50 # disk storage size per instance (GB)
    instance_volume_type: gp3 # instance volume type [gp2 | gp3 | io1 | st1 | sc1]
    # instance_volume_iops: 3000 # instance volume IOPS (only applicable to io1/gp3)
    # instance_volume_throughput: 125 # instance volume throughput (only applicable to gp3)
    spot: false # whether to use spot instances
  # - name: ng-gpu
  #   instance_type: g4dn.xlarge
  #   min_instances: 1
  #   max_instances: 5
  #   instance_volume_size: 50
  #   instance_volume_type: gp3
  #   spot: false
  #   # ...
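  # a sketch of a spot-backed node group, using only the fields shown above (name and sizing are hypothetical):
  # - name: ng-cpu-spot
  #   instance_type: m5.large
  #   min_instances: 0
  #   max_instances: 4
  #   instance_volume_size: 50
  #   instance_volume_type: gp3
  #   spot: true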
# subnet visibility for instances [public (instances will have public IPs) | private (instances will not have public IPs)]
subnet_visibility: public
# NAT gateway (required when using private subnets) [none | single | highly_available (a NAT gateway per availability zone)]
nat_gateway: none
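# for example, a private cluster would pair these two settings as follows (a sketch, not applied here):
# subnet_visibility: private
# nat_gateway: single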
# API load balancer type [nlb | elb]
api_load_balancer_type: nlb
# API load balancer scheme [internet-facing | internal]
api_load_balancer_scheme: internet-facing
# operator load balancer scheme [internet-facing | internal]
# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator
operator_load_balancer_scheme: internet-facing
# to install Cortex in an existing VPC, you can provide a list of subnets for your cluster to use
# subnet_visibility (specified above in this file) must match your subnets' visibility
# this is an advanced feature (not recommended for first-time users) and requires your VPC to be configured correctly; see https://eksctl.io/usage/vpc-networking/#use-existing-vpc-other-custom-configuration
# here is an example:
# subnets:
#   - availability_zone: us-west-2a
#     subnet_id: subnet-060f3961c876872ae
#   - availability_zone: us-west-2b
#     subnet_id: subnet-0faed05adf6042ab7
# restrict access to APIs by CIDR blocks/IP address ranges
api_load_balancer_cidr_white_list: [0.0.0.0/0]
# restrict access to the Operator by CIDR blocks/IP address ranges
operator_load_balancer_cidr_white_list: [0.0.0.0/0]
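# for example, to restrict both to a single office/VPN range (hypothetical addresses):
# api_load_balancer_cidr_white_list: [203.0.113.0/24]
# operator_load_balancer_cidr_white_list: [203.0.113.0/24]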
# additional tags to assign to AWS resources (all resources will automatically be tagged with cortex.dev/cluster-name: <cluster_name>)
tags: # <string>: <string> map of key/value pairs
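# for example (hypothetical keys and values):
# tags:
#   team: ml-platform
#   environment: staging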
# SSL certificate ARN (only necessary when using a custom domain)
ssl_certificate_arn:
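# for example, an ACM certificate in the cluster's region (hypothetical account ID and certificate ID):
# ssl_certificate_arn: arn:aws:acm:eu-central-1:123456789012:certificate/11112222-3333-4444-5555-666677778888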
# list of IAM policies to attach to your Cortex APIs
iam_policy_arns: ["arn:aws:iam::aws:policy/AmazonS3FullAccess"]
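# for example, adding a customer-managed policy alongside S3 access (hypothetical policy ARN):
# iam_policy_arns: ["arn:aws:iam::aws:policy/AmazonS3FullAccess", "arn:aws:iam::123456789012:policy/my-apis-policy"]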
# primary CIDR block for the cluster's VPC
vpc_cidr: 192.168.0.0/16
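# if 192.168.0.0/16 overlaps a network you plan to peer with, any other RFC 1918 block works, e.g.:
# vpc_cidr: 10.0.0.0/16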
# instance type for Prometheus (use an instance with more memory for clusters exceeding 300 nodes or 300 pods)
prometheus_instance_type: "t3.medium"