#
# Install Kubernetes and Ceph using Ansible and Kubespray
#
# Ansible inventory values have been overridden to install helm tiller, ceph and the k8s dashboard.
# Optionally, also override the apt and docker registries to pull from local sources rather than the internet.
# See the offline folder for an (outdated) offline, no-internet setup.
#
# SSH into the seba-control-repo virtual machine; this will be the ansible control host. If installing offline, packages
# should already have been extracted and local repos set up. If online, packages are pulled from the internet.
# TODO: convert ansible ad-hoc commands into custom seba playbooks
# Copy and modify seba-pod specific ansible/kubespray inventory and configuration
# Set the 3 k8s hosts in inventory.ini, replacing the 10.242.11.X addresses with your own.
# Copy the already-prepared kubespray config; use the defaults or create your own config if needed.
cd ~/seba-control-repo/kubespray-inventory/
cp -a seba-pod-v2 ~/kubespray-2.10.3/inventory
cd ~/kubespray-2.10.3/
vi inventory/seba-pod-v2/inventory.ini
# replace the IPs with your 3 target kube/ceph hosts
# example IP entries to change
lab-kube-01 ansible_host=10.242.11.1 etcd_member_name=etcd1
lab-kube-02 ansible_host=10.242.11.2 etcd_member_name=etcd2
lab-kube-03 ansible_host=10.242.11.3 etcd_member_name=etcd3
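# For reference, a minimal 3-node layout where every host acts as master, etcd member and
# worker might look like the sketch below. Group names follow kubespray 2.10 conventions;
# the shipped seba-pod-v2 inventory may already define these groups, so only adjust what differs.
[all]
lab-kube-01 ansible_host=10.242.11.1 etcd_member_name=etcd1
lab-kube-02 ansible_host=10.242.11.2 etcd_member_name=etcd2
lab-kube-03 ansible_host=10.242.11.3 etcd_member_name=etcd3

[kube-master]
lab-kube-01
lab-kube-02
lab-kube-03

[etcd]
lab-kube-01
lab-kube-02
lab-kube-03

[kube-node]
lab-kube-01
lab-kube-02
lab-kube-03

[k8s-cluster:children]
kube-master
kube-node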
# Preinstallation and verification
cd ~/kubespray-2.10.3/
# Verify ansible can reach 3 worker hosts
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "whoami; uname -a" --become --become-user=root
##
## Use ansible to install and set up any manual prerequisites
##
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "apt install -y ntp ntpdate ntp-doc" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "systemctl start ntp" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "systemctl enable ntp" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "ntpq -p" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "date" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "lsblk" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "sysctl -w vm.max_map_count=262144" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "echo 'vm.max_map_count=262144' >> /etc/sysctl.conf" --become --become-user=root
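# Optionally confirm the sysctl setting is active on all workers (extra verification step,
# not part of the original flow)
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "sysctl vm.max_map_count" --become --become-user=root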
##
## Use ceph-deploy from the ansible host to install ceph daemons and provision disks
##
# Create local directory on ansible host to store generated config.
cd ~/
mkdir ceph-cluster
cd ceph-cluster/
# create the new cluster config; replace the hostnames with the /etc/hosts names given to the 3 workers
ceph-deploy new lab-kube-01 lab-kube-02 lab-kube-03
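# ceph-deploy new writes ceph.conf and a monitor keyring into the current directory.
# If the workers have more than one network interface, you may need to pin the ceph public
# network in the generated ceph.conf before running create-initial; a sketch assuming the
# example 10.242.11.0/24 subnet:
# echo "public network = 10.242.11.0/24" >> ceph.conf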
# install software on workers
ceph-deploy install lab-kube-01 lab-kube-02 lab-kube-03
# set up mon, admin and mgr on all 3
ceph-deploy mon create-initial
ceph-deploy admin lab-kube-01 lab-kube-02 lab-kube-03
ceph-deploy mgr create lab-kube-01 lab-kube-02 lab-kube-03
# create OSD disks, providing an available device or partition for each host.
ceph-deploy osd create --data /dev/vdb lab-kube-01
ceph-deploy osd create --data /dev/vdb lab-kube-02
ceph-deploy osd create --data /dev/vdb lab-kube-03
# create mds and rgw
ceph-deploy mds create lab-kube-01 lab-kube-02 lab-kube-03
ceph-deploy rgw create lab-kube-01 lab-kube-02 lab-kube-03
# Ad-hoc commands to verify the ceph cluster before installing k8s
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "ceph -s" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "ceph health" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "ceph quorum_status --format json-pretty" --become --become-user=root
# Optionally install the POD CA cert if you set up a local docker registry.
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m copy -a "src=~/seba-pod-ca/ca/ca.crt dest=/usr/local/share/ca-certificates/seba-pod-ca.crt" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "update-ca-certificates" --become --become-user=root
# Inform kubespray where to find ceph. Last chance to adjust configuration before the main installation below!
# Installing k8s with ceph as storage
# modify the kubespray ceph config to use the newly created cluster
# Get the ceph client.admin key from any one of the workers/nodes.
# sudo ceph auth get-key client.admin
ansible -i inventory/seba-pod-v2/inventory.ini lab-kube-01 -m shell -a "ceph auth get-key client.admin" --become --become-user=root
# Edit the ceph configuration that will be used by kubespray in setting up the k8s storageclass
vi inventory/seba-pod-v2/group_vars/k8s-cluster/addons.yml
# replace rbd_provisioner_user_secret and rbd_provisioner_secret with the client.admin base64 key copied above
# replace the ip:port entries for the ceph mons, one for each host defined as a ceph worker (see the sketch below)
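# A sketch of the relevant addons.yml entries (values are examples; variable names other than
# the two secrets follow kubespray 2.10 defaults and may differ slightly in your copy):
#   rbd_provisioner_enabled: true
#   rbd_provisioner_monitors: "10.242.11.1:6789,10.242.11.2:6789,10.242.11.3:6789"
#   rbd_provisioner_pool: kube
#   rbd_provisioner_admin_id: admin
#   rbd_provisioner_secret: <base64 client.admin key from above>
#   rbd_provisioner_user_id: kube
#   rbd_provisioner_user_secret: <base64 client.admin key from above>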
#
# Run the ansible kubespray kubernetes installation! Takes about 12 minutes.
#
cd ~/kubespray-2.10.3/
ansible-playbook -i inventory/seba-pod-v2/inventory.ini --become --become-user=root cluster.yml
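# When the playbook finishes, the admin kubeconfig used below should be available on the
# control host (assuming kubeconfig_localhost is enabled in the prepared kubespray config):
ls -l inventory/seba-pod-v2/artifacts/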
# Ad-hoc commands for environment post-setup and verification
# Verify k8s DNS. All 3 hosts must be able to look up services (10.233.0.3 is the kubespray default cluster DNS service IP)
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "dig +short kubernetes.default.svc.cluster.local @10.233.0.3"
# set up kubectl for each host
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "mkdir -p ~/.kube"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m copy -a "src=inventory/seba-pod-v2/artifacts/admin.conf dest=~/.kube/config"
# verify kubectl works on each host
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "kubectl get nodes -o wide"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "kubectl get pods --all-namespaces -o wide"
# Initialize helm repos, add ONF helm repo
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm init --client-only"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm repo list"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm repo add onf https://charts.opencord.org/"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm repo update"
# Verify helm
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm list"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm search voltha"
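# Optionally confirm helm can reach the tiller installed by kubespray; with helm v2 this
# prints both the client and server versions when tiller is reachable
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -a "helm version"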
# Allow a non-root user to run docker commands. Replace 'foundry' with your user.
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "usermod -aG docker foundry" --become --become-user=root
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "docker ps" --become --become-user=root
# Optionally install any bashrc shortcuts
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m copy -a "src=~/seba-control-repo/scripts/bashrc dest=~/voltha-bashrc"
ansible -i inventory/seba-pod-v2/inventory.ini kube-node -m shell -a "cat ~/voltha-bashrc >> ~/.bashrc"
# From here, helm is used to install SEBA/VOLTHA on the k8s nodes themselves. See helm-seba-voltha-install.txt