kubernetes

   1 minikube version
   2 # minikube version: v1.2.0
   3 minikube start
   4 # * minikube v1.2.0 on linux (amd64)
   5 # * Creating none VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
   6 # * Configuring environment for Kubernetes v1.15.0 on Docker 18.09.5
   7 #   - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
   8 # * Pulling images ...
   9 # * Launching Kubernetes ...
  10 # 
  11 # * Configuring local host environment ...
  12 # * Verifying: apiserver proxy etcd scheduler controller dns
  13 # * Done! kubectl is now configured to use "minikube"
  14 

cluster details and health status

   1 kubectl cluster-info
   2 # Kubernetes master is running at https://172.17.0.30:8443
   3 # KubeDNS is running at https://172.17.0.30:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
   4 # 
   5 # To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
   6 

get cluster nodes

   1 kubectl get nodes
   2 # NAME       STATUS   ROLES    AGE    VERSION
   3 # minikube   Ready    master   3m1s   v1.15.0
   4 

Deploy containers

   1 # deploy container
   2 kubectl create deployment first-deployment --image=katacoda/docker-http-server
   3 # deployment.apps/first-deployment created
   4 # the deployment schedules a pod in the cluster
   5 # check pods
   6 kubectl get pods
   7 # NAME                               READY   STATUS    RESTARTS   AGE
   8 # first-deployment-8cbf74484-s2fkl   1/1     Running   0          25s
   9 # expose deployment
  10 kubectl expose deployment first-deployment --port=80 --type=NodePort
  11 # service/first-deployment exposed
  12 
  13 kubectl get svc first-deployment
  14 # NAME               TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
  15 # first-deployment   NodePort   10.98.246.87   <none>        80:31219/TCP   105s
  16 # make a request to port 80 on the cluster IP
  17 curl 10.98.246.87:80
  18 # <h1>This request was processed by host: first-deployment-8cbf74484-s2fkl</h1>
  19 # 
  20 curl host01:31219
  21 # <h1>This request was processed by host: first-deployment-8cbf74484-s2fkl</h1>
  22 
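A small aside (not in the original notes): the allocated node port (31219 above) can be read straight from the service with jsonpath, which is handy in scripts:

    kubectl get svc first-deployment -o jsonpath='{.spec.ports[0].nodePort}'
    # 31219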

dashboard

   1 minikube addons enable dashboard 
   2 # The Kubernetes dashboard allows you to view your applications
   3 # in a UI.
   4 # * dashboard was successfully enabled
   5 kubectl apply -f /opt/kubernetes-dashboard.yaml 
   6 # only in katacoda
   7 # service/kubernetes-dashboard-katacoda created
   8 # check progress
   9 kubectl get pods -n kube-system -w
  10 # NAME                                    READY   STATUS    RESTARTS   AGE
  11 # coredns-5c98db65d4-b2kxm                1/1     Running   0          17m
  12 # coredns-5c98db65d4-mm567                1/1     Running   1          17m
  13 # etcd-minikube                           1/1     Running   0          16m
  14 # kube-addon-manager-minikube             1/1     Running   0          16m
  15 # kube-apiserver-minikube                 1/1     Running   0          16m
  16 # kube-controller-manager-minikube        1/1     Running   0          16m
  17 # kube-proxy-pngm9                        1/1     Running   0          17m
  18 # kube-scheduler-minikube                 1/1     Running   0          16m
  19 # kubernetes-dashboard-7b8ddcb5d6-xt5nt   1/1     Running   0          76s
  20 # storage-provisioner                     1/1     Running   0          17m
  21 
  22 # dashboard url https://2886795294-30000-kitek05.environments.katacoda.com/
  24 
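Outside Katacoda, minikube can print or open the dashboard URL itself; a minimal sketch:

    minikube dashboard --url
    # prints a URL proxied by kubectl, e.g. http://127.0.0.1:<port>/...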

Init master

   1 master $ kubeadm init --kubernetes-version $(kubeadm version -o short)
   2 [init] Using Kubernetes version: v1.14.0
   3 [preflight] Running pre-flight checks
   4 [preflight] Pulling images required for setting up a Kubernetes cluster
   5 [preflight] This might take a minute or two, depending on the speed of your internet connection
   6 [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
   7 [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
   8 [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
   9 [kubelet-start] Activating the kubelet service
  10 [certs] Using certificateDir folder "/etc/kubernetes/pki"
  11 [certs] Generating "ca" certificate and key
  12 [certs] Generating "apiserver" certificate and key
  13 [certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.17.0.69]
  14 [certs] Generating "apiserver-kubelet-client" certificate and key
  15 [certs] Generating "front-proxy-ca" certificate and key
  16 [certs] Generating "front-proxy-client" certificate and key
  17 [certs] Generating "etcd/ca" certificate and key
  18 [certs] Generating "etcd/healthcheck-client" certificate and key
  19 [certs] Generating "apiserver-etcd-client" certificate and key
  20 [certs] Generating "etcd/server" certificate and key
  21 [certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [172.17.0.69 127.0.0.1 ::1]
  22 [certs] Generating "etcd/peer" certificate and key
  23 [certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [172.17.0.69 127.0.0.1 ::1]
  24 [certs] Generating "sa" key and public key
  25 [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
  26 [kubeconfig] Writing "admin.conf" kubeconfig file
  27 [kubeconfig] Writing "kubelet.conf" kubeconfig file
  28 [kubeconfig] Writing "controller-manager.conf" kubeconfig file
  29 [kubeconfig] Writing "scheduler.conf" kubeconfig file
  30 [control-plane] Using manifest folder "/etc/kubernetes/manifests"
  31 [control-plane] Creating static Pod manifest for "kube-apiserver"
  32 [control-plane] Creating static Pod manifest for "kube-controller-manager"
  33 [control-plane] Creating static Pod manifest for "kube-scheduler"
  34 [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
  35 [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
  36 [apiclient] All control plane components are healthy after 16.503433 seconds
  37 [upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
  38 [kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
  39 [upload-certs] Skipping phase. Please see --experimental-upload-certs
  40 [mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
  41 [mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
  42 [bootstrap-token] Using token: xfvno5.q2xfb2m3nw7grdjm
  43 [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
  44 [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
  45 [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
  46 [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
  47 [bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
  48 [addons] Applied essential addon: CoreDNS
  49 [addons] Applied essential addon: kube-proxy
  50 
  51 Your Kubernetes control-plane has initialized successfully!
  52 
  53 To start using your cluster, you need to run the following as a regular user:
  54 
  55   mkdir -p $HOME/.kube
  56   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  57   sudo chown $(id -u):$(id -g) $HOME/.kube/config
  58 
  59 You should now deploy a pod network to the cluster.
  60 Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  61   https://kubernetes.io/docs/concepts/cluster-administration/addons/
  62 
  63 Then you can join any number of worker nodes by running the following on each as root:
  64 
  65 kubeadm join 172.17.0.69:6443 --token xfvno5.q2xfb2m3nw7grdjm \
  66     --discovery-token-ca-cert-hash sha256:26d11c038d236967630d401747f210af9e3679fb1638e8b599a2da4cb98ab159

   1 # In master 
   2 mkdir -p $HOME/.kube
   3 pwd
   4 # /root
   5 sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
   6 sudo chown $(id -u):$(id -g) $HOME/.kube/config
   7 export KUBECONFIG=$HOME/.kube/config
   8 echo $KUBECONFIG
   9 # /root/.kube/config

Deploy CNI (Weave Net) - deploy a pod network to the cluster

The Container Network Interface (CNI) defines how the different nodes and their workloads should communicate. Weave Net provides a network that connects all pods together, implementing the Kubernetes model. Kubernetes uses CNI to join pods onto Weave Net.

   1 # In master
   2 kubectl apply -f /opt/weave-kube
   3 # serviceaccount/weave-net created
   4 # clusterrole.rbac.authorization.k8s.io/weave-net created
   5 # clusterrolebinding.rbac.authorization.k8s.io/weave-net created
   6 # role.rbac.authorization.k8s.io/weave-net created
   7 # rolebinding.rbac.authorization.k8s.io/weave-net created
   8 # daemonset.extensions/weave-net created
   9 kubectl get pod -n kube-system
  10 # NAME                             READY   STATUS    RESTARTS   AGE
  11 # coredns-fb8b8dccf-b9rd7          1/1     Running   0          11m
  12 # coredns-fb8b8dccf-sfgbn          1/1     Running   0          11m
  13 # etcd-master                      1/1     Running   0          10m
  14 # kube-apiserver-master            1/1     Running   0          10m
  15 # kube-controller-manager-master   1/1     Running   0          10m
  16 # kube-proxy-l42wp                 1/1     Running   0          11m
  17 # kube-scheduler-master            1/1     Running   1          10m
  18 # weave-net-mcxml                  2/2     Running   0          84s
  19 
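Weave Net runs as a DaemonSet, so one weave-net pod should appear per node as workers join. A quick check (the name=weave-net label comes from the standard Weave manifest and may differ in other versions):

    kubectl get pods -n kube-system -l name=weave-net -o wide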

Join cluster

   1 # In master
   2 kubeadm token list # check tokens
   3 # TOKEN                     TTL       EXPIRES                USAGES                   DESCRIPTION                                                 EXTRA GROUPS
   4 # xfvno5.q2xfb2m3nw7grdjm   23h       2019-07-28T16:19:18Z   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
   6 
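Bootstrap tokens expire (TTL 23h above); a fresh token plus the full join command can be generated on the master - a standard kubeadm one-liner, not part of the original scenario:

    kubeadm token create --print-join-command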

   1 # in node01
   2 # join cluster
   3 kubeadm join --discovery-token-unsafe-skip-ca-verification --token=xfvno5.q2xfb2m3nw7grdjm 172.17.0.69:6443
   4 # [preflight] Running pre-flight checks
   5 # [preflight] Reading configuration from the cluster...
   6 # [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
   7 # [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
   8 # [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
   9 # [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
  10 # [kubelet-start] Activating the kubelet service
  11 # [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
  12 # 
  13 # This node has joined the cluster:
  14 # * Certificate signing request was sent to apiserver and a response was received.
  15 # * The Kubelet was informed of the new secure connection details.
  16 # 
  17 # Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
  18 # The --discovery-token-unsafe-skip-ca-verification flag bypasses verification of the discovery token (insecure; acceptable in a lab).
  19 
  20 # in master
  21 kubectl get nodes
  22 # NAME     STATUS   ROLES    AGE    VERSION
  23 # master   Ready    master   17m    v1.14.0
  24 # node01   Ready    <none>   107s   v1.14.0                                                       
  26 
  27 # in node01
  28 kubectl get nodes 
  29 # The connection to the server localhost:8080 was refused - did you specify the right host or port?
  30 

Deploy container in cluster

   1 # In master
   2 kubectl create deployment http --image=katacoda/docker-http-server:latest
   3 # deployment.apps/http created
   4 kubectl get pods
   5 # NAME                    READY   STATUS    RESTARTS   AGE
   6 # http-7f8cbdf584-74pd9   1/1     Running   0          11s
   7 docker ps | grep http-server
   8 
   9 # In node01
  10 docker ps | grep http-server
  11 # adb3cde7f861        katacoda/docker-http-server   "/app"
  12 # About a minute ago
  13 # Up About a minute                       k8s_docker-http-server_http-7f8cbdf584-74pd9_default_04a
  14 # 17065-b08d-11e9-bff1-0242ac110045_0
  15 
  16 # expose deployment in master
  17 kubectl get pods
  18 # NAME                    READY   STATUS    RESTARTS   AGE
  19 # http-7f8cbdf584-74pd9   1/1     Running   0          17m
  20 kubectl expose deployment http  --port=80 --type=NodePort
  21 # service/http exposed
  22 
  23 kubectl get service http
  24 # NAME   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
  25 # http   NodePort   10.101.65.149   <none>        80:30982/TCP   49s
  26 
  27 curl 10.101.65.149:80
  28 # <h1>This request was processed by host: http-7f8cbdf584-74pd9</h1>
  29 
  30 curl http://10.101.65.149
  31 # <h1>This request was processed by host: http-7f8cbdf584-74pd9</h1>
  32 
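Since the service is of type NodePort, it is also reachable on any node at the allocated port (30982 above). A quick check, assuming the node01 hostname resolves as in the Katacoda environment:

    curl http://node01:30982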

Apply dashboard in cluster

   1 #  In master 
   2 kubectl apply -f dashboard.yaml
   3 # secret/kubernetes-dashboard-certs created
   4 # serviceaccount/kubernetes-dashboard created
   5 # role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
   6 # rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
   7 # deployment.apps/kubernetes-dashboard created
   8 # service/kubernetes-dashboard created
   9 kubectl get pods -n kube-system
  10 # NAME                                    READY   STATUS              RESTARTS   AGE
     # coredns-fb8b8dccf-b9rd7                 1/1     Running             0          42m
     # coredns-fb8b8dccf-sfgbn                 1/1     Running             0          42m
  11 # etcd-master                             1/1     Running             0          41m
  12 # kube-apiserver-master                   1/1     Running             0          40m
  13 # kube-controller-manager-master          1/1     Running             0          40m
  14 # kube-proxy-gwrps                        1/1     Running             0          26m
  15 # kube-proxy-l42wp                        1/1     Running             0          42m
  16 # kube-scheduler-master                   1/1     Running             1          40m
  17 # kubernetes-dashboard-5f57845f9d-ls7q2   0/1     ContainerCreating   0          2s
  18 # weave-net-gww8b                         2/2     Running             0          26m
  19 # weave-net-mcxml                         2/2     Running             0          31m
  20 

Create service account for dashboard

   1 cat <<EOF | kubectl create -f - 
   2 apiVersion: v1
   3 kind: ServiceAccount
   4 metadata:
   5   name: admin-user
   6   namespace: kube-system
   7 ---
   8 apiVersion: rbac.authorization.k8s.io/v1beta1 # removed in k8s 1.22; use rbac.authorization.k8s.io/v1 on newer clusters
   9 kind: ClusterRoleBinding
  10 metadata:
  11   name: admin-user
  12 roleRef:
  13   apiGroup: rbac.authorization.k8s.io
  14   kind: ClusterRole
  15   name: cluster-admin
  16 subjects:
  17 - kind: ServiceAccount
  18   name: admin-user
  19   namespace: kube-system
  20 EOF
  21 
  22 # Get login token
  23 kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
  24 
  25 # When the dashboard was deployed, it used externalIPs to bind the service to port 8443, making the dashboard reachable from outside the cluster at https://2886795335-8443-kitek05.environments.katacoda.com/
  26 
  27 # Use the admin-user token to access the dashboard.
  28 https://2886795335-8443-kitek05.environments.katacoda.com/#!/login
  29 # sign in using token
  30 eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXNzcTl4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI2Y2RiNGZmMy1iMDkwLTExZTktYmZmMS0wMjQyYWMxMTAwNDUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.R2OtDYxXaR0Pgluzq1m8FMZflF2tdYtJdG5XhkVC28vf1WkJu-Zo51I5ONUiK2WdBEMPw-N2PW_R9l6lak1clvlxfUSn777nThYSxhmR5pfxi6GmDlFo928KJvWVPDen1jrzAaQOEUZ1maOzPcnjKGpR-CRTgmYDnxZY84rqi68y0vfdn16ER8HeW-wkJ-hfGyUAhryk_ob1CUBjjbs-vefpaLcHLdrWNaKaFi1j5fCc_eJi10FpSTmuBsb04xgN0I17hkTlSw2fyOAj7LtC3pBDrK0nOdHCJkBEtsg89rkvLufYph5AFeoWQVKdW9JZH8BYS91BFla7pZnTwdBVeA
  31 
  32 https://2886795335-8443-kitek05.environments.katacoda.com/#!/overview?namespace=default
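
On clusters v1.24 and newer, ServiceAccount token Secrets are no longer created automatically, so the grep above would come up empty; a token can instead be requested directly - a hedged alternative, not part of the original flow:

    kubectl -n kube-system create token admin-user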

List services

   1 # In master
   2 kubectl get service
   3 # NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
   4 # http         NodePort    10.101.65.149   <none>        80:30982/TCP   17m
   5 # kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        56m
   6 

Start containers using Kubectl

   1 minikube start # start kubernetes cluster and its components
   2 # * minikube v1.2.0 on linux (amd64)
   3 # * Creating none VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
   4 # * Configuring environment for Kubernetes v1.15.0 on Docker 18.09.5
   5 #   - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
   6 # * Pulling images ...
   7 # * Launching Kubernetes ...
   8 # * Configuring local host environment ...
   9 # * Verifying: apiserver proxy etcd scheduler controller dns
  10 # 
  12 # * Done! kubectl is now configured to use "minikube"
  13 kubectl get nodes
  14 # NAME       STATUS   ROLES    AGE    VERSION
  15 # minikube   Ready    master   2m2s   v1.15.0
  16 
  17 kubectl get service
  18 # NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
  19 # kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   2m18s
  20 # This deployment is issued to the Kubernetes master, which launches the Pods and containers required. kubectl run is similar to docker run but at a cluster level.
  21 # Launch a deployment called http which will start a container based on the Docker image katacoda/docker-http-server:latest.
  23 kubectl run http --image=katacoda/docker-http-server:latest --replicas=1
  24 # kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
  25 # deployment.apps/http created
  26 kubectl get deployments
  27 # NAME   READY   UP-TO-DATE   AVAILABLE   AGE
  28 # http   1/1     1            1           6s
  29 
  30 # you can describe the deployment process.
  31 kubectl describe deployment http
  32 
  33 # expose container port 80 on host port 8000, bound to the host's external IP
  34 kubectl expose deployment http --external-ip="172.17.0.13" --port=8000 --target-port=80
  35 # service/http exposed
  36 
  37 curl http://172.17.0.13:8000
  38 # <h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>
  39 
  40 kubectl get pods
  41 # NAME                    READY   STATUS    RESTARTS   AGE
  42 # http-5fcf9dd9cb-zfkkz   1/1     Running   0          3m26s
  43 
  44 kubectl get service
  45 # NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
  46 # http         ClusterIP   10.100.157.159   172.17.0.13   8000/TCP   57s
  47 # kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    7m41s
  48 
  49 curl http://10.100.157.159:8000
  50 # <h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>
  51 
  52 kubectl run httpexposed --image=katacoda/docker-http-server:latest --replicas=1 --port=80 --hostport=8001
  54 # kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version.
  55 # Use kubectl run --generator=run-pod/v1 or kubectl create instead.
  56 # deployment.apps/httpexposed created
  57 curl http://172.17.0.13:8001
  58 # <h1>This request was processed by host: httpexposed-569df5d86-rzzhb</h1>
  59 kubectl get svc
  60 # NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
  61 # http         ClusterIP   10.100.157.159   172.17.0.13   8000/TCP   3m50s
  62 # kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    10m
  63 
  64 kubectl get pods
  65 # NAME                          READY   STATUS    RESTARTS   AGE
  66 # http-5fcf9dd9cb-zfkkz         1/1     Running   0          7m9s
  67 # httpexposed-569df5d86-rzzhb   1/1     Running   0          36s
  68 
  69 # Scaling the deployment will request Kubernetes to launch additional Pods.
  70 kubectl scale --replicas=3 deployment http
  71 # deployment.extensions/http scaled
  72 
  73 kubectl get pods # amount of pods for service http increased to 3
  74 # NAME                          READY   STATUS    RESTARTS   AGE
  75 # http-5fcf9dd9cb-fhljh         1/1     Running   0          31s
  76 # http-5fcf9dd9cb-wb2dh         1/1     Running   0          31s
  77 # http-5fcf9dd9cb-zfkkz         1/1     Running   0          9m27s
  78 # httpexposed-569df5d86-rzzhb   1/1     Running   0          2m54s
  79 
  80 # Once each Pod starts it will be added to the load balancer service.
  81 kubectl get service
  82 # NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
  83 # http         ClusterIP   10.100.157.159   172.17.0.13   8000/TCP   7m28s
  84 # kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    14m
  85 
  86 kubectl describe svc http
  87 # Name:              http
  88 # Namespace:         default
     # Labels:            run=http
     # Annotations:       <none>
  89 # Selector:          run=http
     # Type:              ClusterIP
     # IP:                10.100.157.159
  90 # External IPs:      172.17.0.13
  91 # Port:              <unset>  8000/TCP
  92 # TargetPort:        80/TCP
  93 # Endpoints:         172.18.0.4:80,172.18.0.6:80,172.18.0.7:80
  94 # Session Affinity:  None
  95 # Events:            <none>
  96 
  97 curl http://172.17.0.13:8000
  98 # <h1>This request was processed by host: http-5fcf9dd9cb-wb2dh</h1>
  99 curl http://172.17.0.13:8000
 100 # <h1>This request was processed by host: http-5fcf9dd9cb-fhljh</h1>
 101 curl http://172.17.0.13:8000
 102 # <h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>
 103 

Certified Kubernetes Application Developer

   1 # In master 
   2 launch.sh
   3 # Waiting for Kubernetes to start...
   4 # Kubernetes started
   5 kubectl get nodes
   6 # NAME     STATUS   ROLES    AGE   VERSION
   7 # master   Ready    master   85m   v1.14.0
   8 # node01   Ready    <none>   85m   v1.14.0
   9 
  10 # deploy app
  11 kubectl create deployment examplehttpapp --image=katacoda/docker-http-server
  12 # deployment.apps/examplehttpapp created
  13 
  14 # view all deployments
  15 kubectl get deployments
  16 # NAME             READY   UP-TO-DATE   AVAILABLE   AGE
  17 # examplehttpapp   1/1     1            1           25s
  18 
  19 # A deployment will launch a set of Pods. A pod is a group of one or more containers deployed across the cluster. 
  20 kubectl get pods
  21 # NAME                            READY   STATUS    RESTARTS   AGE
  22 # examplehttpapp-58f66848-n7wn7   1/1     Running   0          71s
  23 
  24 # show pod ip and node where it is
  25 kubectl get pods -o wide
  26 # NAME                            READY   STATUS    RESTARTS   AGE    IP          NODE     NOMINATED NODE   READINESS GATES
  27 # examplehttpapp-58f66848-n7wn7   1/1     Running   0          113s   10.44.0.2   node01   <none>           <none>
  28 
  29 # describe pod
  30 kubectl describe pod examplehttpapp-58f66848-n7wn7
  31 # Name:               examplehttpapp-58f66848-n7wn7
  32 # Namespace:          default
  33 # Priority:           0
  34 # PriorityClassName:  <none>
  35 # Node:               node01/172.17.0.24
  36 # Start Time:         Sat, 27 Jul 2019 17:59:35 +0000
  37 # Labels:             app=examplehttpapp
  38 #                     pod-template-hash=58f66848
  39 # Annotations:        <none>
  40 # Status:             Running
  41 # IP:                 10.44.0.2
  42 
  43 # List all namespaces in the cluster
  44 kubectl get namespaces
  45 kubectl get ns
  46 
  47 # The namespaces can be used to filter queries to the available objects. 
  48 kubectl get pods -n kube-system
  49 
  50 kubectl create ns testns
  51 # namespace/testns created
  52 kubectl create deployment namespacedeg -n testns --image=katacoda/docker-http-server
  53 # deployment.apps/namespacedeg created
  54 kubectl get pods -n testns
  55 # NAME                            READY   STATUS    RESTARTS   AGE
  56 # namespacedeg-74dcc7dc64-wcxnj   1/1     Running   0          3s
  57 kubectl get pods -n testns -o wide
  58 # NAME                            READY   STATUS    RESTARTS   AGE   IP          NODE     NOMINATED NODE   READINESS GATES
  59 # namespacedeg-74dcc7dc64-wcxnj   1/1     Running   0          18s   10.44.0.3   node01   <none>           <none>
  60 
  61 # Kubectl can help scale the number of Pods running for a deployment, referred to as replicas.
  62 kubectl scale deployment examplehttpapp --replicas=5
  63 # deployment.extensions/examplehttpapp scaled
  64 kubectl get deployments -o wide
  65 # NAME             READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS           IMAGES                        SELECTOR
  66 # examplehttpapp   5/5     5            5           9m6s   docker-http-server   katacoda/docker-http-server   app=examplehttpapp
  67 kubectl get pods -o wide
  68 # NAME                            READY   STATUS    RESTARTS   AGE     IP          NODE     NOMINATED NODE   READINESS GATES
  69 # examplehttpapp-58f66848-cf6pl   1/1     Running   0          65s     10.44.0.6   node01   <none>           <none>
  70 # examplehttpapp-58f66848-lfrq4   1/1     Running   0          65s     10.44.0.5   node01   <none>           <none>
  71 # examplehttpapp-58f66848-n7wn7   1/1     Running   0          9m26s   10.44.0.2   node01   <none>           <none>
  72 # examplehttpapp-58f66848-snwl7   1/1     Running   0          65s     10.44.0.7   node01   <none>           <none>
  73 # examplehttpapp-58f66848-vd8db   1/1     Running   0          65s     10.44.0.4   node01   <none>           <none>
  74 
  75 # Everything within Kubernetes is controllable as YAML.
  76 kubectl edit deployment examplehttpapp # opens vi/vim; changing spec.replicas to 10 and saving the file increases the number of pods
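      # a non-interactive alternative (not in the original notes): patch the replica count directly
      kubectl patch deployment examplehttpapp -p '{"spec":{"replicas":10}}'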
  77 
  78 kubectl get nodes -o wide
  79 # NAME     STATUS   ROLES    AGE    VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
  80 # master   Ready    master   102m   v1.14.0   172.17.0.19   <none>        Ubuntu 16.04.6 LTS   4.4.0-150-generic   docker://18.9.5
  81 # node01   Ready    <none>   102m   v1.14.0   172.17.0.24   <none>        Ubuntu 16.04.6 LTS   4.4.0-150-generic   docker://18.9.5
  82 
  83 # The image can be changed using the set image command and rolled out
  84 # apply the new image to the pods/containers
  85 kubectl --record=true set image deployment examplehttpapp docker-http-server=katacoda/docker-http-server:v2
  86 # deployment.extensions/examplehttpapp image updated
  87 kubectl rollout status deployment examplehttpapp
  88 # Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
  89 # Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
  90 # Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
  91 # Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
  92 # Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
  93 # Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
  94 # Waiting for deployment "examplehttpapp" rollout to finish: 1 old replicas are pending termination...
  95 # Waiting for deployment "examplehttpapp" rollout to finish: 1 old replicas are pending termination...
  96 # deployment "examplehttpapp" successfully rolled out
  97 
  98 # rollback deployment
  99 kubectl rollout undo deployment examplehttpapp
 100 # deployment.extensions/examplehttpapp rolled back
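      # an added aside: inspect the revision history behind undo; CHANGE-CAUSE is filled by the --record flag used above
      kubectl rollout history deployment examplehttpapp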
 101 
 102 # The expose command will create a new service for a deployment. The port specifies the port of the application we want to make available
 103 kubectl expose deployment examplehttpapp --port 80
 104 # service/examplehttpapp exposed
 105 kubectl get svc -o wide
 106 # NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE    SELECTOR
 107 # examplehttpapp   ClusterIP   10.103.93.196   <none>        80/TCP    13s    app=examplehttpapp
 108 # kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP   105m   <none>
 109 kubectl describe svc examplehttpapp
 110 # Name:              examplehttpapp
 111 # Namespace:         default
 112 # Labels:            app=examplehttpapp
 113 # Annotations:       <none>
 114 # Selector:          app=examplehttpapp
 115 # Type:              ClusterIP
 116 # IP:                10.103.93.196
 117 # Port:              <unset>  80/TCP
 118 # TargetPort:        80/TCP
 119 # Endpoints:         10.44.0.2:80,10.44.0.4:80,10.44.0.5:80
 120 # Session Affinity:  None
 121 # Events:            <none>
 122 
 123 # But how does Kubernetes know where to send traffic? That is managed by Labels. 
 124 # Each Object within Kubernetes can have a label attached, allowing Kubernetes to discover and use the configuration. 
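      # a quick check (not in the original notes): a service's endpoints are exactly the pods matched by its label selector
      kubectl get endpoints examplehttpapp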
 125 
 126 kubectl get services -l app=examplehttpapp -o go-template='{{(index .items 0).spec.clusterIP}}'
 127 kubectl get services
 128 # NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
 129 # examplehttpapp   ClusterIP   10.103.93.196   <none>        80/TCP    2m32s
 130 # kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP   107m
 131 
 132 kubectl get services -o wide
 133 # NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE     SELECTOR
 134 # examplehttpapp   ClusterIP   10.103.93.196   <none>        80/TCP    2m36s   app=examplehttpapp
 135 # kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP   107m    <none>
 136 
 137 # kubectl logs views the logs for Pods
 138 kubectl logs $(kubectl get pods -l app=examplehttpapp -o go-template='{{(index .items 0).metadata.name}}')
 139 # Web Server started. Listening on 0.0.0.0:80
 140 
 141 # view the CPU or Memory usage of a node or Pod
 142 kubectl top node
 143 # NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
 144 # master   133m         3%     1012Mi          53%
 145 # node01   49m          1%     673Mi           17%
 146 kubectl top pod
 147 # NAME                            CPU(cores)   MEMORY(bytes)
 148 # examplehttpapp-58f66848-ctnml   0m           0Mi
 149 # examplehttpapp-58f66848-gljk9   1m           0Mi
 150 # examplehttpapp-58f66848-hfqts   1m           0Mi
 151 

k3s - Lightweight Kubernetes

k3s works great on anything from a Raspberry Pi to an AWS a1.4xlarge 32GiB server, targeting situations where a PhD in k8s clusterology is infeasible. Download the latest release; x86_64, ARMv7, and ARM64 are supported.

   1 # run as root, or pipe the script to 'sudo sh -'
   2 curl -sfL https://get.k3s.io | sh -
   3 # [INFO]  Finding latest release
   4 # [INFO]  Using v1.17.0+k3s.1 as release
   5 # [INFO]  Downloading hash https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/sha256sum-amd64.txt
   6 # [INFO]  Downloading binary https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/k3s
   7 # [INFO]  Verifying binary download
   8 # [INFO]  Installing k3s to /usr/local/bin/k3s
   9 # [INFO]  Creating /usr/local/bin/kubectl symlink to k3s
  10 # [INFO]  Creating /usr/local/bin/crictl symlink to k3s
  11 # [INFO]  Creating /usr/local/bin/ctr symlink to k3s
  12 # [INFO]  Creating killall script /usr/local/bin/k3s-killall.sh
  13 # [INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh
  14 # [INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env
  15 # [INFO]  systemd: Creating service file /etc/systemd/system/k3s.service
  16 # [INFO]  systemd: Enabling k3s unit
  17 # Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.
  18 # [INFO]  systemd: Starting k3s
  19 # as root
  20 k3s kubectl cluster-info
  21 kubectl create deployment springboot-test --image=vbodocker/springboot-test:latest
  22 kubectl expose deployment springboot-test --port=8000 --target-port=8080 --type=NodePort
  23 kubectl get services
  24 IP_SPRINGBOOT=$(kubectl get services | grep springboot | awk '//{print $3}')
  25 curl http://$IP_SPRINGBOOT:8000/dummy
  26 # list containerd images and containers
  27 k3s crictl images
  28 k3s crictl ps
  29 # connect to container id
  30 crictl exec -it 997a2ad8c763a  sh
  31 # connect to container/pod
  32 kubectl get pods
  33 kubectl exec -it springboot-test-6bb5fdfc48-phh8k  sh
  34 cat /etc/os-release # alpine linux in container
  35 # give sudo rights to user
  36 /sbin/usermod -aG sudo user
  37 # scale pods
  38 sudo kubectl scale deployment springboot-test --replicas=3
  39 sudo kubectl get pods -o wide
  40 # add mariadb pod/service
  41 sudo kubectl create deployment mariadb-test --image=mariadb:latest
  42 sudo kubectl get pods -o wide
  43 sudo kubectl delete deployment mariadb-test
  44 # https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/
  45 sudo kubectl apply -f mariadb-pv.yaml
  46 #persistentvolume/mariadb-pv-volume created
  47 #persistentvolumeclaim/mariadb-pv-claim created
  48 sudo kubectl apply -f mariadb-deployment.yaml 
  49 #service/mariadb created
  50 #deployment.apps/mariadb created
  51 sudo kubectl describe deployment mariadb
  52 sudo kubectl get svc -o wide
  53 # connect to mariabdb pod
  54 sudo kubectl exec -it mariadb-8578f4dc8c-r4ftv /bin/bash
  55 ss -atn # show ports tcp listening
  56 ip address # show ip addresses
  57 mysql -h localhost -p
  58 mysql -u root -h 10.42.0.12 -p 
  59 
  60 # delete service, persistent volume claim and persistent volume
  61 sudo kubectl delete deployment,svc mariadb
  62 sudo kubectl delete pvc mariadb-pv-claim
  63 sudo kubectl delete pv mariadb-pv-volume

mariadb-pv.yaml

   1 apiVersion: v1
   2 kind: PersistentVolume
   3 metadata:
   4   name: mariadb-pv-volume
   5   labels:
   6     type: local
   7 spec:
   8   storageClassName: manual
   9   capacity:
  10     storage: 1Gi
  11   accessModes:
  12     - ReadWriteOnce
  13   hostPath:
  14     path: "/mnt/data"
  15 ---
  16 apiVersion: v1
  17 kind: PersistentVolumeClaim
  18 metadata:
  19   name: mariadb-pv-claim
  20 spec:
  21   storageClassName: manual
  22   accessModes:
  23     - ReadWriteOnce
  24   resources:
  25     requests:
  26       storage: 1Gi

mariadb-deployment.yaml

   1 apiVersion: v1
   2 kind: Service
   3 metadata:
   4   name: mariadb
   5 spec:
   6   ports:
   7   - port: 3306
   8   selector:
   9     app: mariadb
  10   clusterIP: None
  11 ---
  12 apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
  13 kind: Deployment
  14 metadata:
  15   name: mariadb
  16 spec:
  17   selector:
  18     matchLabels:
  19       app: mariadb
  20   strategy:
  21     type: Recreate
  22   template:
  23     metadata:
  24       labels:
  25         app: mariadb
  26     spec:
  27       containers:
  28       - image: mariadb:latest
  29         name: mariadb
  30         env:
  31           # Use secret in real usage
  32         - name: MYSQL_ROOT_PASSWORD
  33           value: password
  34         ports:
  35         - containerPort: 3306
  36           name: mariadb
  37         volumeMounts:
  38         - name: mariadb-persistent-storage
  39           mountPath: /var/lib/mariadb # NB: MariaDB stores data in /var/lib/mysql; mount there to persist the database
  40       volumes:
  41       - name: mariadb-persistent-storage
  42         persistentVolumeClaim:
  43           claimName: mariadb-pv-claim

Init containers

Init containers can contain utilities or setup scripts not present in an app image.
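
A minimal sketch (names are illustrative, not from the original notes): an init container fills a shared emptyDir volume before the app container starts, applied with the same heredoc style used earlier on this page:

    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Pod
    metadata:
      name: init-demo
    spec:
      volumes:
      - name: workdir
        emptyDir: {}
      initContainers:
      - name: setup
        image: alpine:latest
        command: ["sh", "-c", "echo ready > /work/ready.txt"]
        volumeMounts:
        - name: workdir
          mountPath: /work
      containers:
      - name: app
        image: alpine:latest
        command: ["/bin/sleep", "3650d"]
        volumeMounts:
        - name: workdir
          mountPath: /work
    EOF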

systemctl commands

   1 systemctl start k3s
   2 systemctl stop k3s
   3 systemctl status k3s
   4 systemctl disable k3s.service
   5 systemctl enable k3s

Ubuntu pod

ubuntu.yaml

   1 apiVersion: v1
   2 kind: Pod
   3 metadata:
   4   name: ubuntu
   5   labels:
   6     app: ubuntu
   7 spec:
   8   containers:
   9   - name: ubuntu
  10     image: ubuntu:latest
  11     command: ["/bin/sleep", "3650d"]
  12     imagePullPolicy: IfNotPresent
  13   restartPolicy: Always

   1 sudo kubectl apply -f ubuntu.yaml
   2 sudo kubectl get pods
   3 sudo kubectl exec -it ubuntu -- bash
   4 sudo kubectl delete pod ubuntu

Alpine pod

alpine.yaml

   1 apiVersion: v1
   2 kind: Pod
   3 metadata:
   4   name: alpine
   5   labels:
   6     app: alpine
   7 spec:
   8   containers:
   9   - name: alpine
  10     image: alpine:latest
  11     command: ["/bin/sleep", "3650d"]
  12     imagePullPolicy: IfNotPresent
  13   restartPolicy: Always

   1 # Pods use PersistentVolumeClaims to request physical storage.
   2 sudo kubectl apply -f alpine.yaml
   3 sudo kubectl exec -it alpine -- sh

Nginx with persistent volume

   1 cd /tmp
   2 mkdir -p /tmp/data
   3 echo 'Hello from Kubernetes storage' > /tmp/data/index.html

pv-volume.yaml

   1 apiVersion: v1
   2 kind: PersistentVolume
   3 metadata:
   4   name: task-pv-volume
   5   labels:
   6     type: local
   7 spec:
   8   storageClassName: manual
   9   capacity:
  10     storage: 0.2Gi
  11   accessModes:
  12     - ReadWriteOnce
  13   hostPath:
  14     path: "/tmp/data"

pv-claim.yaml

   1 apiVersion: v1
   2 kind: PersistentVolumeClaim
   3 metadata:
   4   name: task-pv-claim
   5 spec:
   6   storageClassName: manual
   7   accessModes:
   8     - ReadWriteOnce
   9   resources:
  10     requests:
  11       storage: 0.2Gi

pv-pod.yaml

   1 apiVersion: v1
   2 kind: Pod
   3 metadata:
   4   name: task-pv-pod
   5 spec:
   6   volumes:
   7     - name: task-pv-storage
   8       persistentVolumeClaim:
   9         claimName: task-pv-claim
  10   containers:
  11     - name: task-pv-container
  12       image: nginx
  13       ports:
  14         - containerPort: 80
  15           name: "http-server"
  16       volumeMounts:
  17         - mountPath: "/usr/share/nginx/html"
  18           name: task-pv-storage

   1 sudo kubectl apply -f pv-volume.yaml
   2 sudo kubectl apply -f pv-claim.yaml
   3 sudo kubectl apply -f pv-pod.yaml
   4 sudo kubectl get pods -o wide 
   5 curl http://10.42.0.28/
   6 sudo kubectl exec -it task-pv-pod -- bash
   7 cd /usr/share/nginx/html
   8 echo "Hey from Kubernetes storage" > index.html
   9 cat /etc/os-release # debian buster 
  10 kubectl delete pod task-pv-pod
  11 kubectl delete pvc task-pv-claim
  12 kubectl delete pv task-pv-volume
  13 cat /tmp/data/index.html 

Generate yaml

   1 sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test --dry-run=client --output=yaml
   2 sudo kubectl expose deployment cherrypy-test --port=8080 --type=NodePort --dry-run=client --output=yaml
   3 sudo kubectl scale deployment cherrypy-test --replicas=3 --dry-run=client --output=yaml
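
The generated YAML can be redirected to a file, edited, and applied; a small usage sketch (the filename is illustrative):

    sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test \
      --dry-run=client --output=yaml > cherrypy-test.yaml
    sudo kubectl apply -f cherrypy-test.yaml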

Alpine persistent volume

alpine-shared.yaml

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: alpine-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 0.2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/alpine-data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: alpine-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 0.2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: alpine-pod
  labels:
    app: alpine-pod
spec:
  volumes:
    - name: alpine-pv-storage
      persistentVolumeClaim:
        claimName: alpine-pv-claim
  containers:
  - name: alpine
    image: alpine:latest
    command: ["/bin/sleep", "3650d"]
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - mountPath: "/mnt/alpine/data"
        name: alpine-pv-storage
  restartPolicy: Always

   1 sudo kubectl apply -f alpine-shared.yaml 
   2 sudo kubectl exec -it alpine-pod -- sh
   3 /mnt/alpine/data # echo "teste" > x.txt # inside pod
   4 cat /tmp/alpine-data/x.txt # k8s host
   5 

MariaDB + NFS

   1 # /etc/exports entry for the NFS share
   2 /vol *(rw,sync,insecure,fsid=0,no_subtree_check,no_root_squash)
   3 exportfs -rav
   4 # exporting *:/vol
   5 mkdir -p /vol/mariadb-0

   6 kubectl apply -f mariadb-nfs.yaml
   7 kubectl exec -it mariadb-79847f5d97-smbdx -- bash
   8 touch  /var/lib/mariadb/b
   9 mount | grep nfs
  10 kubectl delete -f mariadb-nfs.yaml
  11 kubectl get pods
  12 kubectl get pvc
  13 kubectl get pv

mariadb-nfs.yaml

   1 ---
   2 apiVersion: v1
   3 kind: PersistentVolume
   4 metadata:
   5   name: mdb-vol-0
   6   labels:
   7     volume: mdb-volume
   8 spec:
   9   storageClassName: manual
  10   capacity:
  11     storage: 1Gi
  12   accessModes:
  13     - ReadWriteOnce
  14   nfs:
  15     server: 127.0.0.1
  16     path: "/vol/mariadb-0"
  17 ---
  18 apiVersion: v1
  19 kind: PersistentVolumeClaim
  20 metadata:
  21   name: mdb-pv-claim
  22 spec:
  23   storageClassName: manual
  24   accessModes:
  25     - ReadWriteOnce
  26   resources:
  27     requests:
  28       storage: 1Gi
  29 ---
  30 apiVersion: v1
  31 kind: Service
  32 metadata:
  33   name: mariadb
  34 spec:
  35   ports:
  36   - port: 3306
  37   selector:
  38     app: mariadb
  39   clusterIP: None
  40 ---
  41 apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
  42 kind: Deployment
  43 metadata:
  44   name: mariadb
  45 spec:
  46   selector:
  47     matchLabels:
  48       app: mariadb
  49   strategy:
  50     type: Recreate
  51   template:
  52     metadata:
  53       labels:
  54         app: mariadb
  55     spec:
  56       containers:
  57       - image: mariadb:latest
  58         name: mariadb
  59         env:
  60           # Use secret in real usage
  61         - name: MYSQL_ROOT_PASSWORD
  62           value: password
  63         ports:
  64         - containerPort: 3306
  65           name: mariadb
  66         volumeMounts:
  67         - name: mdb-persistent-storage
  68           mountPath: /var/lib/mariadb # NB: MariaDB stores data in /var/lib/mysql; mount there to persist the database
  69       volumes:
  70       - name: mdb-persistent-storage
  71         persistentVolumeClaim:
  72           claimName: mdb-pv-claim

Persistent volumes

A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.

A PersistentVolumeClaim (PVC) is a request for storage by a user.

Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany, see AccessModes).

Types of Persistent Volumes include, among others, hostPath and nfs (both used on this page), local, and CSI-backed volumes.
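
A claim binds once a volume with matching storageClassName, access mode and sufficient size exists; a quick check using the objects created earlier on this page:

    sudo kubectl get pv,pvc
    # STATUS shows Bound for a matched pair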

Ingress controller nginx example

ingress-cherrypy-test.yml

   1 apiVersion: networking.k8s.io/v1
   2 kind: Ingress
   3 metadata:
   4   name: ingress-cherrypy-test
   5 spec:
   6   rules:
   7   - host: cp.info
   8     http:
   9       paths:
  10       - path: /
  11         pathType: Prefix
  12         backend:
  13           service:
  14             name: cherrypy-test
  15             port:
  16               number: 8000
  17   ingressClassName: nginx

Steps

   1 # install k3s
   2 curl -sfL https://get.k3s.io | sh -
   3 KUBECONFIG=~/.kube/config
   4 mkdir ~/.kube 2> /dev/null
   5 sudo k3s kubectl config view --raw > "$KUBECONFIG"
   6 chmod 600 "$KUBECONFIG"
   7 nano ~/.bashrc  # add the following line to ~/.bashrc
   8 export KUBECONFIG=~/.kube/config
   9 source ~/.bashrc
  10 
  11 sudo nano /etc/systemd/system/k3s.service
  12 ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644
  13 sudo systemctl daemon-reload
  14 sudo service k3s start
  15 sudo service k3s status
  16 kubectl get pods 
  17 
  18 k3s kubectl cluster-info 
  19 
  20 kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
  21 sudo service k3s stop
  22 sudo nano /etc/systemd/system/k3s.service
  23 # ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644 --no-deploy traefik 
  24 sudo systemctl daemon-reload
  25 sudo rm /var/lib/rancher/k3s/server/manifests/traefik.yaml
  26 sudo service k3s start
  27 kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
  28 sudo systemctl restart k3s
  29 
  30 kubectl get nodes 
  31 kubectl delete node localhost 
  32 kubectl get pods --all-namespaces 
  33 kubectl get services --all-namespaces
  34 kubectl get deployment --all-namespaces
  35 
  36 # install nginx ingress controller 
  37 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
  38 kubectl get pods --namespace=ingress-nginx
  39 
  40 kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
  41 kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP # cluster ip port 8000
  42 kubectl get services
  43 
  44 kubectl apply -f ingress-cherrypy-test.yml
  45 
  46 EXTERNAL_IP=$(ip addr show | grep wlp | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}')
  47 echo $EXTERNAL_IP
  48 sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts  "
  49 kubectl get ingress 
  50 curl cp.info 
  51 
  52 kubectl scale deployment cherrypy-test --replicas=5
  53 curl http://cp.info/ -vvv
  54 sudo apt install apache2-utils
  55 ab -n 10 -c 10 http://cp.info/
  56 
  57 # Push image to docker hub
  58 docker build -t vbodocker/cherrypy-test . 
  59 docker run -p 8080:8080 vbodocker/cherrypy-test 
  60 docker login # login to docker hub
  61 docker push vbodocker/cherrypy-test 
  62 docker pull vbodocker/cherrypy-test:latest
  63 
  64 # Rollout, deploy new image
  65 kubectl get deployments -o wide # shows image urls 
  66 kubectl rollout restart deployment cherrypy-test # redeploy image url for cherrypy-test
  67 kubectl rollout status deployment cherrypy-test
  68 kubectl get deployments -o wide 
  69 kubectl get pods -o wide # age should be low for the newly deployed pods 
  70 

Install k3s static binary in Slack64

   1 sudo mv ~/Downloads/k3s /usr/bin/
   2 sudo chmod 744 /usr/bin/k3s

/etc/rc.d/rc.k3s

   1 #!/bin/sh
   2 PATH=$PATH:/usr/sbin
   3 
   4 k3s_start() {
   5   /usr/bin/k3s server --write-kubeconfig-mode=644 \
   6   --disable traefik > /var/log/k3s.log 2>&1 &
   7 }
   8 
   9 k3s_stop() {
  10   kill $(ps uax | grep "/usr/bin/k3s" | head -1 | awk '//{print $2}')
  11   ps uax | grep containerd | awk '//{print $2}' | xargs -i kill {}
  12 }
  13 
  14 k3s_restart() {
  15   k3s_stop
  16   k3s_start
  17 }
  18 
  19 case "$1" in
  20 'start')
  21   k3s_start
  22   ;;
  23 'stop')
  24   k3s_stop
  25   ;;
  26 'restart')
  27   k3s_restart
  28   ;;
  29 *)
  30   echo "usage $0 start|stop|restart"
  31 esac

ingress-cherrypy-test.yml

   1 apiVersion: networking.k8s.io/v1
   2 kind: Ingress
   3 metadata:
   4   name: ingress-cherrypy-test
   5 spec:
   6   rules:
   7   - host: cp.info
   8     http:
   9       paths:
  10       - path: /
  11         pathType: Prefix
  12         backend:
  13           service:
  14             name: cherrypy-test
  15             port:
  16               number: 8000
  17   ingressClassName: nginx

Steps

   1 echo "alias kubectl='/usr/bin/k3s kubectl'" >> ~/.bashrc
   2 source ~/.bashrc
   3 sudo sh /etc/rc.d/rc.k3s start
   4 kubectl get nodes
   5 kubectl get deployments --all-namespaces
   6 kubectl get services --all-namespaces
   7 kubectl get pods --all-namespaces
   8 kubectl cluster-info 
   9 # install nginx ingress controller
  10 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml 
  11 # wait for nginx ingress controller to finish 
  12 sleep 120
  13 kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
  14 kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP
  15 kubectl get pods --all-namespaces
  16 kubectl get services --all-namespaces
  17 kubectl apply -f ingress-cherrypy-test.yml 
  18 EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}')
  19 echo $EXTERNAL_IP
  20 sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts  "
  21 cat /etc/hosts
  22 kubectl get ingress 
  23 curl cp.info 
