
Kubernetes on GKE with Nginx

Kubernetes Configuration

I have included a .env, ingress-nginx.yaml, controller-nginx.yaml, and a bash script that builds your cluster and deploys everything from the folder you specify as the script's first parameter.

To deploy from your cloud-config/staging folder and create the cluster, do this:

./deploy.sh staging y

If you want to upgrade an existing app deployment, simply omit the y:

./deploy.sh staging

Here is an explanation of each step in a more readable format:

Set Project

gcloud config set project kube-tutorial

Get Compute Zones

gcloud compute zones list

Set Compute Zones and Region

gcloud config set compute/zone us-east1-b
gcloud config set compute/region us-east1

Create Clusters

gcloud container --project "kube-tutorial" clusters create "my-app-staging-cluster" --zone "us-east1-b" --username "admin" --cluster-version "1.11.7-gke.4" --machine-type "custom-2-8192" --image-type "COS" --disk-type "pd-standard" --disk-size "100" --scopes "https://www.googleapis.com/auth/cloud-platform" --num-nodes "3" --enable-cloud-logging --enable-cloud-monitoring --no-enable-ip-alias --network "projects/kube-tutorial/global/networks/default" --subnetwork "projects/kube-tutorial/regions/us-east1/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autoupgrade --enable-autorepair
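
Note that clusters create also writes kubectl credentials for the new cluster; if you are deploying from another machine you can fetch them explicitly:

gcloud container clusters get-credentials my-app-staging-cluster --zone us-east1-b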

Install Helm

Helm is a package manager for Kubernetes; its packages are called "charts". We use it to install the nginx ingress controller.

curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
helm init
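
To confirm the install, check the version (the Server line appears once Tiller is reachable):

helm version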

Configure Tiller for Helm

kubectl create namespace staging
helm init --tiller-namespace staging
kubectl create serviceaccount --namespace staging tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=staging:tiller
helm init --service-account tiller --tiller-namespace staging --upgrade

Verify Tiller Deployment Ready

kubectl get deployments -n staging
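
If you would rather block until Tiller is ready than eyeball the list, the deployment Helm creates is named tiller-deploy, so this works too:

kubectl rollout status deployment/tiller-deploy -n staging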

Install an App and Expose it (Cluster IP only)

In this example I'm using an app-deploy.yaml, but you can use any image you want (see app-deploy.yaml below).

kubectl create -f app-deploy.yaml            
kubectl expose deployment my-app-staging --port=8087 --namespace staging
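
To confirm the pods came up, you can filter on the app label from app-deploy.yaml (shown below):

kubectl get pods -n staging -l app=my-app-staging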

Install the Nginx ingress controller (load balancer) using Helm, then update its settings with a config yaml

helm install --name nginx-ingress-my-app-staging stable/nginx-ingress --set rbac.create=true --tiller-namespace staging
kubectl apply -f cloud-config/staging/controller-nginx.yaml

Verify Controller is Live (external ip)

kubectl get service nginx-ingress-my-app-staging-controller
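
The EXTERNAL-IP column shows <pending> until GCP finishes provisioning the load balancer; you can also pull just the IP with jsonpath:

kubectl get service nginx-ingress-my-app-staging-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}'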

Install a Certificate (if you want SSL)

kubectl create secret tls tls-secret-staging --key my-app.com.key --cert my-app.com.crt -n staging   
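
If you don't have a certificate yet, a self-signed pair is fine for testing; here the CN matches the staging host from ingress-nginx.yaml (swap in your own domain):

openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout my-app.com.key -out my-app.com.crt -subj "/CN=staging.my-app.com"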

Create Ingress-Resource for Traffic

Create an ingress-nginx.yaml file that has your paths (the full file is at the bottom of this document):

kubectl apply -f ingress-nginx.yaml
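
Verify the ingress resource exists and lists your host (the name comes from ingress-nginx.yaml):

kubectl get ingress ingress-resource-staging -n staging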

Browse to: http://external-ip-of-ingress-controller/
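
Because the ingress routes on host name, hitting the raw IP before DNS is set up needs a Host header, e.g.:

curl -H "Host: staging.my-app.com" http://external-ip-of-ingress-controller/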

TADA


If the cluster is already created and you are ONLY updating your image, then do this:

gcloud container clusters get-credentials my-app-staging-cluster
kubectl set image deployment/my-app-staging  my-app-staging=gcr.io/my-repo-01/my-app-staging:<TAG HERE/> --namespace staging
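
To watch the rollout finish after the image swap:

kubectl rollout status deployment/my-app-staging --namespace staging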

app-deploy.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: my-app-staging
  name: my-app-staging
  namespace: staging
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: my-app-staging
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: my-app-staging
    spec:
      containers:
      - image: gcr.io/my-repo-01/my-app-staging:latest
        imagePullPolicy: IfNotPresent
        name: my-app-staging
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

controller-nginx.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.3.1
    component: controller
    heritage: Tiller
    release: nginx-ingress-my-app-staging
  name: nginx-ingress-my-app-staging-controller
  namespace: default
data:
  enable-vts-status: "false"
  hsts: "true"
  hsts-include-subdomains: "false"
  hsts-max-age: "31536000"
  hsts-preload: "false"
  proxy-body-size: "25m"

deploy.sh

#!/bin/bash

#Setup a cloud-config directory with all of my yamls for each environment
source cloud-config/$1/.env

#A simple way to increment my version number
increment_version ()
{
  declare -a part=( ${1//\-/ } )
  declare    new
  declare -i carry=1

  for (( CNTR=${#part[@]}-1; CNTR>=0; CNTR-=1 )); do
    len=${#part[CNTR]}
    new=$((part[CNTR]+carry))
    [ ${#new} -gt $len ] && carry=1 || carry=0
    [ $CNTR -gt 0 ] && part[CNTR]=${new: -len} || part[CNTR]=${new}
  done
  new="${part[*]}"
  newversion="${new// /-}"
} 
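
# Example (assuming the dash-separated version scheme used in version.txt):
#   increment_version "1-0-3"  ->  newversion="1-0-4"
#   increment_version "1-0-9"  ->  newversion="1-1-0" (the carry rolls over)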

#My version
newversion=''
version=$(cat "version.txt") 
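# version.txt holds the current version string, e.g. "1-0-0"; seed it once before the first run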
increment_version $version

#Echo each command and exit on the first failure - important so a broken docker build stops the deploy
set -xe

#Determine if I need to create the cluster or update the deployment image
export createcluster=${2:-"N"}
export TAG=$newversion

#Writes out where it is about to go
echo Project: $PROJECTID
echo Environment: $ENV
echo TAG: $TAG

# Are YOU SURE?!
read -p "Do you wish to continue? [y/N]" -n 1 -r
echo    # (optional) move to a new line

if [[ $REPLY =~ ^[Yy]$ ]]
then
	#Docker Build  - I don't use cloud build
    echo Docker Build
	docker build --build-arg docker_env=$ENV --build-arg tag=$TAG-$ENV -t my-app .

	#Docker Tag latest and new version number
	docker tag my-app:latest gcr.io/$PROJECTID/my-app-$ENV:$TAG
	docker tag my-app:latest gcr.io/$PROJECTID/my-app-$ENV:latest
	
	#Docker Push to gcr repo
	docker push gcr.io/$PROJECTID/my-app-$ENV:$TAG
	docker push gcr.io/$PROJECTID/my-app-$ENV:latest
	
	#Make sure the project and zone and region are configured correctly
	gcloud config set project $PROJECTID
	gcloud config set compute/zone $ZONE
	gcloud config set compute/region $REGION
	
	#If you need to create a new cluster, then TADA
	if [[ $createcluster =~ ^[Yy]$ ]]
	then
		if [[ $ENV == "staging" ]]
		then
			gcloud container --project "$PROJECTID" clusters create "my-app-cluster-$ENV" --zone "$ZONE" --username "admin" --cluster-version "1.11.7-gke.12" --machine-type "n1-standard-2" --image-type "COS" --disk-type "pd-standard" --disk-size "100" --scopes "https://www.googleapis.com/auth/cloud-platform" --num-nodes "3" --enable-cloud-logging --enable-cloud-monitoring --no-enable-ip-alias --network "projects/$PROJECTID/global/networks/default" --subnetwork "projects/$PROJECTID/regions/$REGION/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autoupgrade --enable-autorepair --enable-autoscaling --min-nodes 3 --max-nodes 6
		else
			gcloud container --project "$PROJECTID" clusters create "my-app-cluster-$ENV" --zone "$ZONE" --username "admin" --cluster-version "1.11.7-gke.12" --machine-type "n1-standard-1" --image-type "COS" --disk-type "pd-standard" --disk-size "100" --scopes "https://www.googleapis.com/auth/cloud-platform" --num-nodes "2" --enable-cloud-logging --enable-cloud-monitoring --no-enable-ip-alias --network "projects/$PROJECTID/global/networks/default" --subnetwork "projects/$PROJECTID/regions/$REGION/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autorepair --enable-autoscaling --min-nodes 1 --max-nodes 4
		fi
		curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
		chmod 700 get_helm.sh
		./get_helm.sh
		kubectl create namespace $ENV
		helm init --tiller-namespace $ENV
		kubectl create serviceaccount --namespace $ENV tiller
		kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=$ENV:tiller
		helm init --service-account tiller --tiller-namespace $ENV --upgrade
		kubectl create -f cloud-config/$ENV/app-deploy.yaml  
		kubectl expose deployment my-app-app-$ENV --port=8087 --namespace $ENV
		kubectl autoscale deployment my-app-app-$ENV --max 6 --min 2 --cpu-percent 50 -n $ENV
		kubectl create secret tls tls-secret-$ENV --key your-domain.com.key --cert your-domain.com.crt -n $ENV   
		kubectl apply -f cloud-config/$ENV/ingress-nginx.yaml
		echo $newversion > version.txt
		echo waiting for tiller to become live
		kubectl get pods -n $ENV
		sleep 20
		helm install --name nginx-ingress-my-app-$ENV stable/nginx-ingress --set rbac.create=true --tiller-namespace $ENV
		kubectl apply -f cloud-config/$ENV/controller-nginx.yaml
	else # You are only updating with the new deployment
		gcloud container clusters get-credentials my-app-cluster-$ENV
		kubectl set image deployment/my-app-app-$ENV my-app-app-$ENV=gcr.io/$PROJECTID/my-app-$ENV:$TAG --namespace $ENV
		echo $newversion > version.txt
	fi
fi

.env

#You should put this in a folder cloud-config/YourEnvironmentHere/.env
export ENV=staging
export PROJECTID=my-project-id
export REGION=us-east1
export ZONE=us-east1-b

ingress-nginx.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-resource-staging
  namespace: staging
  annotations:
    kubernetes.io/ingress.class: nginx
    ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: staging.my-app.com
    http:
      paths:
      - backend:
          serviceName: my-app-staging
          servicePort: 8087
        path: /
  tls:
  - hosts:
    - staging.my-app.com
    secretName: tls-secret-staging