
# Kubernetes-cluster

# Install kubelet, kubeadm, and kubectl on all nodes

curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -

cat << EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF

sudo apt-get update

sudo apt-get install -y kubelet=1.12.7-00 kubeadm=1.12.7-00 kubectl=1.12.7-00

sudo apt-mark hold kubelet kubeadm kubectl
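## A quick sanity check (not in the original notes): all three packages should be listed as held
apt-mark showhold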
# Setup
## Initialize the cluster on the master node
sudo kubeadm init --pod-network-cidr=10.244.0.0/16
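## The init output prints the full "kubeadm join" command (token and CA cert hash); save it for joining the workers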
## Set up the local kubeconfig on the master node
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
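## Verify that kubectl can reach the cluster with the new kubeconfig
kubectl cluster-info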
# Check the client and server versions on the master node
kubectl version
# Join a Worker node
sudo kubeadm join $some_ip:6443 --token $some_token --discovery-token-ca-cert-hash $some_hash
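## If the original token has expired, a fresh join command can be printed on the master (a convenience step, not part of the original notes)
sudo kubeadm token create --print-join-command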
# Check on the master node that the worker successfully joined the cluster
kubectl get nodes
# On all nodes, add the bridge netfilter sysctl rule:
echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee -a /etc/sysctl.conf
# Reload the sysctl settings so the rule takes effect
sudo sysctl -p
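## Verify the setting is active
sysctl net.bridge.bridge-nf-call-iptables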
# Install flannel on the master node
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
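## Note: the 10.244.0.0/16 CIDR passed to kubeadm init above matches flannel's default pod network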
# Check on the master node that all the nodes are Ready
kubectl get nodes
# Check if the flannel pods are ready
kubectl get pods -n kube-system
# Backing up the cluster
## Get "ETCD"
wget https://github-production-release-asset-2e65be.s3.amazonaws.com/11225014/bbf59d80-03c8-11ea-8295-29234860f87e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20191118%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20191118T062757Z&X-Amz-Expires=300&X-Amz-Signature=202affdba7a29d7780a057a0dad9aa4395b86bb6a7bac0874b8f190c8f80638f&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Detcd-v3.2.28-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream
## Extract the archive
tar xvf etcd-v3.2.28-linux-amd64.tar.gz
## Move the etcd binaries to /usr/local/bin
sudo mv etcd-v3.2.28-linux-amd64/etcd* /usr/local/bin
## Take a snapshot of the etcd datastore (snapshot.db file)
sudo ETCDCTL_API=3 etcdctl snapshot save snapshot.db --endpoints https://127.0.0.1:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key
## Note: the etcd certificates are stored under /etc/kubernetes/pki/etcd/

## Check that the snapshot is successful
ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshot.db
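## To restore from this snapshot later (a sketch; the data directory below is an example path, not from the original notes)
sudo ETCDCTL_API=3 etcdctl snapshot restore snapshot.db --data-dir /var/lib/etcd-restore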
## Archive the etcd certificate directory
sudo tar -zcvf etcd.tar.gz /etc/kubernetes/pki/etcd
## Copy the archive to another server
scp etcd.tar.gz cloud_user@18.219.235.42:~/
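## The snapshot file itself should be copied off the host as well (assuming the same backup destination)
scp snapshot.db cloud_user@18.219.235.42:~/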
# Tear down a node
## Check the running pods on the node
kubectl get pods -o wide
## Evict pods on a node
kubectl drain <node_name> --ignore-daemonsets
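## If pods with emptyDir volumes block the drain, this kubectl version also needs --delete-local-data (later renamed --delete-emptydir-data)
kubectl drain <node_name> --ignore-daemonsets --delete-local-data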
## Check if the node's status is "SchedulingDisabled"
kubectl get nodes -w
## At this point we can perform maintenance on the node.

## Put the node back into service
kubectl uncordon <node_name>
## Completely remove the node from the cluster
kubectl delete node <node_name>
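## On the removed node itself, kubeadm reset cleans up the local cluster state (a follow-up step not in the original notes)
sudo kubeadm reset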
# Update the cluster
## Release the hold on kubeadm and kubelet
sudo apt-mark unhold kubeadm kubelet

## Install the new version of kubeadm
sudo apt install -y kubeadm=1.16.1-00
## Hold kubeadm version
sudo apt-mark hold kubeadm
## Test the kubeadm version
kubeadm version
## Plan the upgrade of all the control plane components
sudo kubeadm upgrade plan
## Upgrade the control plane components
sudo kubeadm upgrade apply v1.16.1
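## On each worker node (after installing the new kubeadm there), the node configuration is upgraded with the command below; this worker-side step is not covered in the original notes
sudo kubeadm upgrade node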

## Release the hold version of kubectl
sudo apt-mark unhold kubectl
## Upgrade kubectl
sudo apt install -y kubectl=1.16.1-00
## Hold kubectl version
sudo apt-mark hold kubectl

## Upgrade kubelet version
sudo apt install -y kubelet=1.16.1-00
## Hold the kubelet version
sudo apt-mark hold kubelet
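## Restart kubelet on every upgraded node so the new version takes effect
sudo systemctl daemon-reload
sudo systemctl restart kubelet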