- kubectl drain <nodename> --ignore-daemonsets #first try without --ignore-daemonsets
- once maintenance is done you have to uncordon the node: kubectl uncordon <nodename>
Note: every drained node is also cordoned by default (unschedulable)
kubectl cordon <nodename> #this doesn't remove any running pods
#if the node has any pods that are not part of a replicaset you have to force it
kubectl drain <nodename> --ignore-daemonsets --force #those pods will be deleted and not rescheduled though
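A minimal end-to-end maintenance flow, assuming a hypothetical node named node01:
kubectl drain node01 --ignore-daemonsets #evict pods and mark the node unschedulable
#...perform the maintenance (patching, reboot, etc.)...
kubectl uncordon node01 #make the node schedulable again
kubectl get nodes #confirm node01 is back to Ready and schedulable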
kubectl get nodes #check the version of every node
kubeadm upgrade plan #shows current version and upgrade options
apt-get update && apt-get install -y kubeadm=1.17.0-00 #the kubeadm version should match the upgrade target below
kubeadm upgrade apply v1.17.0 #you shouldn't stop kubelet while doing this, it has to be running
kubeadm upgrade apply v1.17.0 --ignore-preflight-errors=ControlPlaneNodesReady #may be required to ignore errors for a drained node
apt install kubelet=1.17.0-00
service kubelet restart
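The steps above upgrade the control plane; a sketch of the matching worker-node upgrade, assuming a hypothetical node named node01 and the same 1.17.0 target:
kubectl drain node01 --ignore-daemonsets #run from the control plane
apt-get install -y kubeadm=1.17.0-00 #run on node01
kubeadm upgrade node #upgrades the local kubelet configuration
apt-get install -y kubelet=1.17.0-00
service kubelet restart
kubectl uncordon node01 #run from the control plane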
kubectl describe pod etcd-master -n kube-system #look for --listen-client-urls to get the cluster url
export ETCDCTL_API=3 #alternatively you can pass this as a prefix to each command
etcdctl --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  snapshot save /tmp/snapshot-pre-boot.db
ETCDCTL_API=3 etcdctl snapshot status <snapshotname.db> #check the status of a snapshot
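For example, to verify the snapshot taken above in a readable form (table output is a standard etcdctl write-out option):
ETCDCTL_API=3 etcdctl --write-out=table snapshot status /tmp/snapshot-pre-boot.db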
- first stop the kube-apiserver: service kube-apiserver stop
- restore the snapshot (creates a new etcd cluster): etcdctl snapshot restore <snapshot.db> --data-dir /var/lib/etcd-from-backup --initial-cluster <clusterurl> --initial-cluster-token etcd-cluster-1
- update the data directory in the etcd yaml (see the sketch after this list)
- if etcd is running as a system process:
  - systemctl daemon-reload
  - service etcd start
- service kube-apiserver start
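If etcd runs as a static pod instead (the kubeadm default), the data-directory change from the step above goes into /etc/kubernetes/manifests/etcd.yaml; a minimal sketch of the volume edit, assuming the restore path /var/lib/etcd-from-backup used above:
  volumes:
  - hostPath:
      path: /var/lib/etcd-from-backup #was /var/lib/etcd before the restore
      type: DirectoryOrCreate
    name: etcd-data
The container still mounts this volume at its usual path, so the pod's --data-dir argument can stay as-is; kubelet recreates the static pod on its own once the manifest is saved.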