kubernetes
- Deploys docker images in containers. A cluster has nodes; each node runs pods, and pods run containers. Each cluster might correspond to a service.
- Ingress: an API object that manages external access to the services in a cluster, typically HTTP. Conceptually similar to a reverse proxy plus load balancer.
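A minimal Ingress manifest for illustration (not from the original notes; host and service names are placeholders, and an ingress controller such as nginx must already be installed):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
spec:
  rules:
  - host: example.local        # requests for this host...
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: example-service   # ...are routed to this service
            port:
              number: 80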
minikube version # check version
# minikube version: v1.2.0
minikube start
# * minikube v1.2.0 on linux (amd64)
# * Creating none VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
# * Configuring environment for Kubernetes v1.15.0 on Docker 18.09.5
#   - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
# * Pulling images ...
# * Launching Kubernetes ...
#
# * Configuring local host environment ...
# * Verifying: apiserver proxy etcd scheduler controller dns
# * Done! kubectl is now configured to use "minikube"
kubectl cluster-info # cluster details and health status
kubectl get nodes    # get cluster nodes
Deploy containers
# deploy container in cluster
kubectl create deployment first-deployment --image=katacoda/docker-http-server
# deployment.apps/first-deployment created
# check pods
kubectl get pods
# NAME                               READY   STATUS    RESTARTS   AGE
# first-deployment-8cbf74484-s2fkl   1/1     Running   0          25s
# expose deployment
kubectl expose deployment first-deployment --port=80 --type=NodePort
# service/first-deployment exposed

kubectl get svc first-deployment
# NAME               TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
# first-deployment   NodePort   10.98.246.87   <none>        80:31219/TCP   105s
# request port 80 on the cluster IP
curl 10.98.246.87:80
# <h1>This request was processed by host: first-deployment-8cbf74484-s2fkl</h1>
#
# request the NodePort on the node itself
curl host01:31219
# <h1>This request was processed by host: first-deployment-8cbf74484-s2fkl</h1>
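In PORT(S), 80:31219/TCP means service port 80 was mapped to NodePort 31219 on every node. For reference (not in the original notes), the assigned NodePort can be read back with jsonpath:

kubectl get svc first-deployment -o jsonpath='{.spec.ports[0].nodePort}'
# 31219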
dashboard
minikube addons enable dashboard
# The Kubernetes dashboard allows you to view your applications in a UI.
# * dashboard was successfully enabled
kubectl apply -f /opt/kubernetes-dashboard.yaml # only in katacoda
# service/kubernetes-dashboard-katacoda created
kubectl get pods -n kube-system -w # check progress
# NAME                                    READY   STATUS    RESTARTS   AGE
# coredns-5c98db65d4-b2kxm                1/1     Running   0          17m
# coredns-5c98db65d4-mm567                1/1     Running   1          17m
# etcd-minikube                           1/1     Running   0          16m
# kube-addon-manager-minikube             1/1     Running   0          16m
# kube-apiserver-minikube                 1/1     Running   0          16m
# kube-controller-manager-minikube        1/1     Running   0          16m
# kube-proxy-pngm9                        1/1     Running   0          17m
# kube-scheduler-minikube                 1/1     Running   0          16m
# kubernetes-dashboard-7b8ddcb5d6-xt5nt   1/1     Running   0          76s
# storage-provisioner                     1/1     Running   0          17m

# dashboard url: https://2886795294-30000-kitek05.environments.katacoda.com/
# this scenario showed how to launch a single-node Kubernetes cluster
Init master
master $ kubeadm init --kubernetes-version $(kubeadm version -o short)
[init] Using Kubernetes version: v1.14.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.17.0.69]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [172.17.0.69 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [172.17.0.69 127.0.0.1 ::1]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.503433 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xfvno5.q2xfb2m3nw7grdjm
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.17.0.69:6443 --token xfvno5.q2xfb2m3nw7grdjm \
    --discovery-token-ca-cert-hash sha256:26d11c038d236967630d401747f210af9e3679fb1638e8b599a2da4cb98ab159
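If the bootstrap token is lost or has expired, a fresh join command can be printed on the master with a standard kubeadm command (not captured in the original run):

kubeadm token create --print-join-command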
Deploy CNI (Weave Net) - deploy a pod network to the cluster
The Container Network Interface (CNI) defines how the different nodes and their workloads should communicate. Weave Net provides a network that connects all pods together, implementing the Kubernetes networking model; Kubernetes uses CNI to join pods onto Weave Net.
# in master
kubectl apply -f /opt/weave-kube
# serviceaccount/weave-net created
# clusterrole.rbac.authorization.k8s.io/weave-net created
# clusterrolebinding.rbac.authorization.k8s.io/weave-net created
# role.rbac.authorization.k8s.io/weave-net created
# rolebinding.rbac.authorization.k8s.io/weave-net created
# daemonset.extensions/weave-net created
kubectl get pod -n kube-system
# NAME                             READY   STATUS    RESTARTS   AGE
# coredns-fb8b8dccf-b9rd7          1/1     Running   0          11m
# coredns-fb8b8dccf-sfgbn          1/1     Running   0          11m
# etcd-master                      1/1     Running   0          10m
# kube-apiserver-master            1/1     Running   0          10m
# kube-controller-manager-master   1/1     Running   0          10m
# kube-proxy-l42wp                 1/1     Running   0          11m
# kube-scheduler-master            1/1     Running   1          10m
# weave-net-mcxml                  2/2     Running   0          84s
Join cluster
# in node01: join cluster
kubeadm join --discovery-token-unsafe-skip-ca-verification --token=xfvno5.q2xfb2m3nw7grdjm 172.17.0.69:6443
# [preflight] Running pre-flight checks
# [preflight] Reading configuration from the cluster...
# [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
# [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
# [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
# [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
# [kubelet-start] Activating the kubelet service
# [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
#
# This node has joined the cluster:
# * Certificate signing request was sent to apiserver and a response was received.
# * The Kubelet was informed of the new secure connection details.
#
# Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# The --discovery-token-unsafe-skip-ca-verification flag bypasses the discovery token CA verification.

# in master
kubectl get nodes
# NAME     STATUS   ROLES    AGE    VERSION
# master   Ready    master   17m    v1.14.0
# node01   Ready    <none>   107s   v1.14.0

# in node01
kubectl get nodes
# The connection to the server localhost:8080 was refused - did you specify the right host or port?
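kubectl fails on node01 because no kubeconfig is present there, so it falls back to localhost:8080. One way to fix it, sketched under the assumption that scp works between the hosts (admin.conf grants full cluster-admin rights, so treat it carefully):

# on master
scp /etc/kubernetes/admin.conf node01:/root/admin.conf
# on node01
export KUBECONFIG=/root/admin.conf
kubectl get nodes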
Deploy container in cluster
master $ kubectl create deployment http --image=katacoda/docker-http-server:latest
deployment.apps/http created
master $ kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
http-7f8cbdf584-74pd9   1/1     Running   0          11s

# the container runs on node01, not on the master
master $ docker ps | grep http-server
master $

node01 $ docker ps | grep http-server
adb3cde7f861  katacoda/docker-http-server  "/app"  About a minute ago  Up About a minute  k8s_docker-http-server_http-7f8cbdf584-74pd9_default_04a17065-b08d-11e9-bff1-0242ac110045_0

# expose deployment
master $ kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
http-7f8cbdf584-74pd9   1/1     Running   0          17m
master $ kubectl expose deployment http --port=80 --type=NodePort
service/http exposed

master $ kubectl get service http
NAME   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
http   NodePort   10.101.65.149   <none>        80:30982/TCP   49s

master $ curl 10.101.65.149:80
<h1>This request was processed by host: http-7f8cbdf584-74pd9</h1>

master $ curl http://10.101.65.149
<h1>This request was processed by host: http-7f8cbdf584-74pd9</h1>
apply dashboard in cluster
- Dashboard: general-purpose web UI for Kubernetes clusters. Dashboard version: v1.10.0
master $ kubectl apply -f dashboard.yaml
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
master $ kubectl get pods -n kube-system
NAME                                    READY   STATUS              RESTARTS   AGE
coredns-fb8b8dccf-b9rd7                 1/1     Running             0          42m
coredns-fb8b8dccf-sfgbn                 1/1     Running             0          42m
etcd-master                             1/1     Running             0          41m
kube-apiserver-master                   1/1     Running             0          40m
kube-controller-manager-master          1/1     Running             0          40m
kube-proxy-gwrps                        1/1     Running             0          26m
kube-proxy-l42wp                        1/1     Running             0          42m
kube-scheduler-master                   1/1     Running             1          40m
kubernetes-dashboard-5f57845f9d-ls7q2   0/1     ContainerCreating   0          2s
weave-net-gww8b                         2/2     Running             0          26m
weave-net-mcxml                         2/2     Running             0          31m
Create service account for dashboard
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

# Get login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

When the dashboard was deployed, it used externalIPs to bind the service to port 8443. This makes the dashboard available outside of the cluster, viewable at https://2886795335-8443-kitek05.environments.katacoda.com/

# Use the admin-user token to access the dashboard.
# Sign in at https://2886795335-8443-kitek05.environments.katacoda.com/#!/login using the token:
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXNzcTl4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI2Y2RiNGZmMy1iMDkwLTExZTktYmZmMS0wMjQyYWMxMTAwNDUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.R2OtDYxXaR0Pgluzq1m8FMZflF2tdYtJdG5XhkVC28vf1WkJu-Zo51I5ONUiK2WdBEMPw-N2PW_R9l6lak1clvlxfUSn777nThYSxhmR5pfxi6GmDlFo928KJvWVPDen1jrzAaQOEUZ1maOzPcnjKGpR-CRTgmYDnxZY84rqi68y0vfdn16ER8HeW-wkJ-hfGyUAhryk_ob1CUBjjbs-vefpaLcHLdrWNaKaFi1j5fCc_eJi10FpSTmuBsb04xgN0I17hkTlSw2fyOAj7LtC3pBDrK0nOdHCJkBEtsg89rkvLufYph5AFeoWQVKdW9JZH8BYS91BFla7pZnTwdBVeA
# then browse to https://2886795335-8443-kitek05.environments.katacoda.com/#!/overview?namespace=default
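Two notes for newer clusters (not part of the original Katacoda run): the ClusterRoleBinding must use apiVersion rbac.authorization.k8s.io/v1, since v1beta1 was removed in Kubernetes 1.22, and from 1.24 on service-account token secrets are no longer auto-created; a login token can be requested directly:

kubectl -n kube-system create token admin-user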
services
Start containers using Kubectl
minikube start # start kubernetes cluster and its components
* minikube v1.2.0 on linux (amd64)
* Creating none VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
* Configuring environment for Kubernetes v1.15.0 on Docker 18.09.5
  - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
* Pulling images ...
* Launching Kubernetes ...
* Configuring local host environment ...
* Verifying: apiserver proxy etcd scheduler controller dns
* Done! kubectl is now configured to use "minikube"

$ kubectl get nodes
NAME       STATUS   ROLES    AGE    VERSION
minikube   Ready    master   2m2s   v1.15.0

$ kubectl get service
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   2m18s

# A deployment is issued to the Kubernetes master, which launches the Pods and containers required.
# "kubectl run" is similar to "docker run", but at a cluster level.

# launch a deployment called http which starts a container based on the Docker image katacoda/docker-http-server:latest
$ kubectl run http --image=katacoda/docker-http-server:latest --replicas=1
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/http created
$ kubectl get deployments
NAME   READY   UP-TO-DATE   AVAILABLE   AGE
http   1/1     1            1           6s

# describe the deployment process
kubectl describe deployment http

# expose container port 80 on host port 8000, binding to the external IP of the host
$ kubectl expose deployment http --external-ip="172.17.0.13" --port=8000 --target-port=80
service/http exposed

$ curl http://172.17.0.13:8000
<h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>

$ kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
http-5fcf9dd9cb-zfkkz   1/1     Running   0          3m26s

$ kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
http         ClusterIP   10.100.157.159   172.17.0.13   8000/TCP   57s
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    7m41s

$ curl http://10.100.157.159:8000
<h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>

# run and expose in one step: container port 80, host port 8001
$ kubectl run httpexposed --image=katacoda/docker-http-server:latest --replicas=1 --port=80 --hostport=8001
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version.
Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/httpexposed created
$ curl http://172.17.0.13:8001
<h1>This request was processed by host: httpexposed-569df5d86-rzzhb</h1>
$ kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
http         ClusterIP   10.100.157.159   172.17.0.13   8000/TCP   3m50s
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    10m

$ kubectl get pods
NAME                          READY   STATUS    RESTARTS   AGE
http-5fcf9dd9cb-zfkkz         1/1     Running   0          7m9s
httpexposed-569df5d86-rzzhb   1/1     Running   0          36s

# Scaling the deployment requests Kubernetes to launch additional Pods.
$ kubectl scale --replicas=3 deployment http
deployment.extensions/http scaled

$ kubectl get pods # number of pods for deployment http increased to 3
NAME                          READY   STATUS    RESTARTS   AGE
http-5fcf9dd9cb-fhljh         1/1     Running   0          31s
http-5fcf9dd9cb-wb2dh         1/1     Running   0          31s
http-5fcf9dd9cb-zfkkz         1/1     Running   0          9m27s
httpexposed-569df5d86-rzzhb   1/1     Running   0          2m54s

# Once each Pod starts, it is added to the load balancer service.
$ kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
http         ClusterIP   10.100.157.159   172.17.0.13   8000/TCP   7m28s
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    14m

$ kubectl describe svc http
Name:              http
Namespace:         default
Labels:            run=http
Annotations:       <none>
Selector:          run=http
Type:              ClusterIP
IP:                10.100.157.159
External IPs:      172.17.0.13
Port:              <unset>  8000/TCP
TargetPort:        80/TCP
Endpoints:         172.18.0.4:80,172.18.0.6:80,172.18.0.7:80
Session Affinity:  None
Events:            <none>

$ curl http://172.17.0.13:8000
<h1>This request was processed by host: http-5fcf9dd9cb-wb2dh</h1>
$ curl http://172.17.0.13:8000
<h1>This request was processed by host: http-5fcf9dd9cb-fhljh</h1>
$ curl http://172.17.0.13:8000
<h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>
Certified Kubernetes Application Developer (CKAD)
master $ launch.sh
Waiting for Kubernetes to start...
Kubernetes started
master $ kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   85m   v1.14.0
node01   Ready    <none>   85m   v1.14.0

# deploy app
master $ kubectl create deployment examplehttpapp --image=katacoda/docker-http-server
deployment.apps/examplehttpapp created

# view all deployments
kubectl get deployments
NAME             READY   UP-TO-DATE   AVAILABLE   AGE
examplehttpapp   1/1     1            1           25s

# A deployment launches a set of Pods. A pod is a group of one or more containers deployed across the cluster.
kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
examplehttpapp-58f66848-n7wn7   1/1     Running   0          71s

# show pod IP and the node where it runs
master $ kubectl get pods -o wide
NAME                            READY   STATUS    RESTARTS   AGE    IP          NODE     NOMINATED NODE   READINESS GATES
examplehttpapp-58f66848-n7wn7   1/1     Running   0          113s   10.44.0.2   node01   <none>           <none>

# describe pod
master $ kubectl describe pod examplehttpapp-58f66848-n7wn7
Name:               examplehttpapp-58f66848-n7wn7
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node01/172.17.0.24
Start Time:         Sat, 27 Jul 2019 17:59:35 +0000
Labels:             app=examplehttpapp
                    pod-template-hash=58f66848
Annotations:        <none>
Status:             Running
IP:                 10.44.0.2

# list all namespaces within the cluster
kubectl get namespaces
kubectl get ns

# namespaces can be used to filter queries for the available objects
kubectl get pods -n kube-system

master $ kubectl create ns testns
namespace/testns created
master $ kubectl create deployment namespacedeg -n testns --image=katacoda/docker-http-server
deployment.apps/namespacedeg created
master $ kubectl get pods -n testns
NAME                            READY   STATUS    RESTARTS   AGE
namespacedeg-74dcc7dc64-wcxnj   1/1     Running   0          3s
master $ kubectl get pods -n testns -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP          NODE     NOMINATED NODE   READINESS GATES
namespacedeg-74dcc7dc64-wcxnj   1/1     Running   0          18s   10.44.0.3   node01   <none>           <none>

# Kubectl can scale the number of Pods running for a deployment, referred to as replicas.
master $ kubectl scale deployment examplehttpapp --replicas=5
deployment.extensions/examplehttpapp scaled
master $ kubectl get deployments -o wide
NAME             READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS           IMAGES                        SELECTOR
examplehttpapp   5/5     5            5           9m6s   docker-http-server   katacoda/docker-http-server   app=examplehttpapp
master $ kubectl get pods -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP          NODE     NOMINATED NODE   READINESS GATES
examplehttpapp-58f66848-cf6pl   1/1     Running   0          65s     10.44.0.6   node01   <none>           <none>
examplehttpapp-58f66848-lfrq4   1/1     Running   0          65s     10.44.0.5   node01   <none>           <none>
examplehttpapp-58f66848-n7wn7   1/1     Running   0          9m26s   10.44.0.2   node01   <none>           <none>
examplehttpapp-58f66848-snwl7   1/1     Running   0          65s     10.44.0.7   node01   <none>           <none>
examplehttpapp-58f66848-vd8db   1/1     Running   0          65s     10.44.0.4   node01   <none>           <none>

# everything within Kubernetes is controllable as YAML
kubectl edit deployment examplehttpapp # opens vi/vim; changing spec.replicas to 10 and saving the file increases the number of pods

master $ kubectl get nodes -o wide
NAME     STATUS   ROLES    AGE    VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
master   Ready    master   102m   v1.14.0   172.17.0.19   <none>        Ubuntu 16.04.6 LTS   4.4.0-150-generic   docker://18.9.5
node01   Ready    <none>   102m   v1.14.0   172.17.0.24   <none>        Ubuntu 16.04.6 LTS   4.4.0-150-generic   docker://18.9.5

# The image can be changed using the set image command, then rolled out.
# apply new image to the pods/containers
master $ kubectl --record=true set image deployment examplehttpapp docker-http-server=katacoda/docker-http-server:v2
deployment.extensions/examplehttpapp image updated
master $ kubectl rollout status deployment examplehttpapp
Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "examplehttpapp" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "examplehttpapp" rollout to finish: 1 old replicas are pending termination...
deployment "examplehttpapp" successfully rolled out

# rollback deployment
master $ kubectl rollout undo deployment examplehttpapp
deployment.extensions/examplehttpapp rolled back

# The expose command creates a new service for a deployment. --port specifies the port of the application we want to make available.
master $ kubectl expose deployment examplehttpapp --port 80
service/examplehttpapp exposed
master $ kubectl get svc -o wide
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE    SELECTOR
examplehttpapp   ClusterIP   10.103.93.196   <none>        80/TCP    13s    app=examplehttpapp
kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP   105m   <none>
master $ kubectl describe svc examplehttpapp
Name:              examplehttpapp
Namespace:         default
Labels:            app=examplehttpapp
Annotations:       <none>
Selector:          app=examplehttpapp
Type:              ClusterIP
IP:                10.103.93.196
Port:              <unset>  80/TCP
TargetPort:        80/TCP
Endpoints:         10.44.0.2:80,10.44.0.4:80,10.44.0.5:80
Session Affinity:  None
Events:            <none>

# How does Kubernetes know where to send traffic? That is managed by labels:
# each object within Kubernetes can have a label attached, allowing Kubernetes to discover and use the configuration.

master $ kubectl get services -l app=examplehttpapp -o go-template='{{(index .items 0).spec.clusterIP}}'
10.103.93.196
master $ kubectl get services
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
examplehttpapp   ClusterIP   10.103.93.196   <none>        80/TCP    2m32s
kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP   107m

master $ kubectl get services -o wide
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE     SELECTOR
examplehttpapp   ClusterIP   10.103.93.196   <none>        80/TCP    2m36s   app=examplehttpapp
kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP   107m    <none>

# kubectl logs to view the logs for Pods
master $ kubectl logs $(kubectl get pods -l app=examplehttpapp -o go-template='{{(index .items 0).metadata.name}}')
Web Server started. Listening on 0.0.0.0:80

# view the CPU or memory usage of a node or Pod
master $ kubectl top node
NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master   133m         3%     1012Mi          53%
node01   49m          1%     673Mi           17%
master $ kubectl top pod
NAME                            CPU(cores)   MEMORY(bytes)
examplehttpapp-58f66848-ctnml   0m           0Mi
examplehttpapp-58f66848-gljk9   1m           0Mi
examplehttpapp-58f66848-hfqts   1m           0Mi
k3s - Lightweight Kubernetes
k3s works great on anything from a Raspberry Pi to an AWS a1.4xlarge 32GiB server. Download the latest k3s release; x86_64, ARMv7, and ARM64 are supported. Aimed at situations where a PhD in k8s clusterology is infeasible.
curl -sfL https://get.k3s.io | sh -

root@debian:/home/user# curl -sfL https://get.k3s.io | sh -
[INFO] Finding latest release
[INFO] Using v1.17.0+k3s.1 as release
[INFO] Downloading hash https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/sha256sum-amd64.txt
[INFO] Downloading binary https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/k3s
[INFO] Verifying binary download
[INFO] Installing k3s to /usr/local/bin/k3s
[INFO] Creating /usr/local/bin/kubectl symlink to k3s
[INFO] Creating /usr/local/bin/crictl symlink to k3s
[INFO] Creating /usr/local/bin/ctr symlink to k3s
[INFO] Creating killall script /usr/local/bin/k3s-killall.sh
[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh
[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env
[INFO] systemd: Creating service file /etc/systemd/system/k3s.service
[INFO] systemd: Enabling k3s unit
Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.
[INFO] systemd: Starting k3s
root@debian:/home/user#

k3s kubectl cluster-info
kubectl create deployment springboot-test --image=vbodocker/springboot-test:latest
kubectl expose deployment springboot-test --port=8000 --target-port=8080 --type=NodePort
kubectl get services
IP_SPRINGBOOT=$(kubectl get services | grep springboot | awk '{print $3}')
curl http://$IP_SPRINGBOOT:8000/dummy

# list containerd images and containers
k3s crictl images
k3s crictl ps
# connect to container by id
crictl exec -it 997a2ad8c763a sh

# connect to container/pod
kubectl get pods
kubectl exec -it springboot-test-6bb5fdfc48-phh8k sh
cat /etc/os-release # alpine linux in container

# give sudo rights to user
/sbin/usermod -aG sudo user
# scale pods
sudo kubectl scale deployment springboot-test --replicas=3
sudo kubectl get pods -o wide
# add mariadb pod/service
sudo kubectl create deployment mariadb-test --image=mariadb:latest
sudo kubectl get pods -o wide
sudo kubectl delete deployment mariadb-test
# https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/
sudo kubectl apply -f mariadb-pv.yaml
# persistentvolume/mariadb-pv-volume created
# persistentvolumeclaim/mariadb-pv-claim created
sudo kubectl apply -f mariadb-deployment.yaml
# service/mariadb created
# deployment.apps/mariadb created
sudo kubectl describe deployment mariadb
sudo kubectl get svc -o wide
# connect to mariadb pod
sudo kubectl exec -it mariadb-8578f4dc8c-r4ftv /bin/bash
ss -atn # show listening TCP ports
ip address # show ip addresses
mysql -h localhost -p
mysql -u root -h 10.42.0.12 -p

# delete service, persistent volume claim and persistent volume
sudo kubectl delete deployment,svc mariadb
sudo kubectl delete pvc mariadb-pv-claim
sudo kubectl delete pv mariadb-pv-volume
mariadb-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
mariadb-deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: mariadb
spec:
  ports:
  - port: 3306
  selector:
    app: mariadb
  clusterIP: None
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: mariadb
spec:
  selector:
    matchLabels:
      app: mariadb
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
      - image: mariadb:latest
        name: mariadb
        env:
        # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mariadb
        volumeMounts:
        - name: mariadb-persistent-storage
          mountPath: /var/lib/mariadb
      volumes:
      - name: mariadb-persistent-storage
        persistentVolumeClaim:
          claimName: mariadb-pv-claim
Init containers
Init containers can contain utilities or setup scripts not present in an app image.
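A minimal sketch of the pattern (names, image, and the echoed content are illustrative, adapted from the upstream docs example): the init container prepares a file on a shared volume before the app container starts.

apiVersion: v1
kind: Pod
metadata:
  name: init-demo
spec:
  initContainers:
  - name: setup
    image: busybox:latest
    # runs to completion before the app container starts
    command: ["sh", "-c", "echo hello > /work-dir/index.html"]
    volumeMounts:
    - name: workdir
      mountPath: /work-dir
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: workdir
      mountPath: /usr/share/nginx/html
  volumes:
  - name: workdir
    emptyDir: {}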
systemctl commands
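This heading kept no commands in the original; presumably the standard systemd commands for the k3s unit installed above:

sudo systemctl status k3s    # service state and recent log lines
sudo systemctl restart k3s   # restart the cluster
sudo systemctl stop k3s      # stop the service (use k3s-killall.sh to also stop containers)
sudo systemctl enable k3s    # start at boot
journalctl -u k3s -f         # follow the k3s logs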
Ubuntu pod
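No manifest survives under this heading; a minimal long-running Ubuntu pod in the same style as the Alpine pod used later in these notes (name and sleep duration are arbitrary):

apiVersion: v1
kind: Pod
metadata:
  name: ubuntu-pod
spec:
  containers:
  - name: ubuntu
    image: ubuntu:latest
    command: ["/bin/sleep", "3650d"]   # keep the pod alive for interactive use
    imagePullPolicy: IfNotPresent
  restartPolicy: Always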
Alpine pod
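Likewise for Alpine; with a recent kubectl this also works as a one-liner without a manifest (pod name is arbitrary):

kubectl run alpine-pod --image=alpine:latest --command -- /bin/sleep 3650d
kubectl exec -it alpine-pod -- sh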
Nginx persistent volume
# pv-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
  - name: task-pv-storage
    persistentVolumeClaim:
      claimName: task-pv-claim
  containers:
  - name: task-pv-container
    image: nginx
    ports:
    - containerPort: 80
      name: "http-server"
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: task-pv-storage
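pv-volume.yaml and pv-claim.yaml are applied in the next block but were not kept in these notes; a sketch based on the kubernetes.io task this section follows (sizes are from that task; the hostPath is /tmp/data to match the final cat step below):

# pv-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/data"

# pv-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: task-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi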
sudo kubectl apply -f pv-volume.yaml
sudo kubectl apply -f pv-claim.yaml
sudo kubectl apply -f pv-pod.yaml
sudo kubectl get pods -o wide
curl http://10.42.0.28/
sudo kubectl exec -it task-pv-pod -- bash
# inside the pod:
cd /usr/share/nginx/html
echo "Hey from Kubernetes storage" > index.html
cat /etc/os-release # debian buster
# back on the host:
kubectl delete pod task-pv-pod
kubectl delete pvc task-pv-claim
kubectl delete pv task-pv-volume
cat /tmp/data/index.html # the file persists on the host after pod, PVC and PV are deleted
Generate yaml
- sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test --dry-run=client --output=yaml
- sudo kubectl expose deployment cherrypy-test --port=8080 --type=NodePort --dry-run=client --output=yaml
- sudo kubectl scale deployment cherrypy-test --replicas=3 --dry-run=client --output=yaml
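--dry-run=client prints the object without creating it, so the output can be saved as a manifest and applied later, e.g. (file name is arbitrary):

sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test \
  --dry-run=client --output=yaml > cherrypy-test-deployment.yaml
sudo kubectl apply -f cherrypy-test-deployment.yaml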
Alpine persistent volume
$ cat alpine-shared.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: alpine-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 0.2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/alpine-data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: alpine-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 0.2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: alpine-pod
  labels:
    app: alpine-pod
spec:
  volumes:
  - name: alpine-pv-storage
    persistentVolumeClaim:
      claimName: alpine-pv-claim
  containers:
  - name: alpine
    image: alpine:latest
    command: ["/bin/sleep", "3650d"]
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: "/mnt/alpine/data"
      name: alpine-pv-storage
  restartPolicy: Always
MariaDB + NFS
# /etc/exports entry on the NFS server
/vol *(rw,sync,insecure,fsid=0,no_subtree_check,no_root_squash)
exportfs -rav
# exporting *:/vol
mkdir -p /vol/mariadb-0
kubectl apply -f mariadb-nfs.yaml
kubectl exec -it mariadb-79847f5d97-smbdx -- bash
touch /var/lib/mariadb/b
mount | grep nfs
kubectl delete -f mariadb-nfs.yaml
kubectl get pods
kubectl get pvc
kubectl get pv
mariadb-nfs.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mdb-vol-0
  labels:
    volume: mdb-volume
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 127.0.0.1
    path: "/vol/mariadb-0"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mdb-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: mariadb
spec:
  ports:
  - port: 3306
  selector:
    app: mariadb
  clusterIP: None
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: mariadb
spec:
  selector:
    matchLabels:
      app: mariadb
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
      - image: mariadb:latest
        name: mariadb
        env:
        # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mariadb
        volumeMounts:
        - name: mdb-persistent-storage
          mountPath: /var/lib/mariadb
      volumes:
      - name: mdb-persistent-storage
        persistentVolumeClaim:
          claimName: mdb-pv-claim
Persistent volumes
A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.
A PersistentVolumeClaim (PVC) is a request for storage by a user.
Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany, see AccessModes).
Types of Persistent Volumes:
- local - local storage devices mounted on nodes.
- nfs - Network File System (NFS) storage
Ingress controller nginx example
ingress-cherrypy-test.yml
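The contents of ingress-cherrypy-test.yml were not kept in these notes; a sketch consistent with the steps below (host cp.info, service cherrypy-test on port 8000, nginx ingress class):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-cherrypy-test
spec:
  ingressClassName: nginx
  rules:
  - host: cp.info
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: cherrypy-test
            port:
              number: 8000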
Steps
# install k3s
curl -sfL https://get.k3s.io | sh -
KUBECONFIG=~/.kube/config
mkdir ~/.kube 2> /dev/null
sudo k3s kubectl config view --raw > "$KUBECONFIG"
chmod 600 "$KUBECONFIG"
nano ~/.bashrc # add: export KUBECONFIG=~/.kube/config
source ~/.bashrc

sudo nano /etc/systemd/system/k3s.service
# ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644
sudo systemctl daemon-reload
sudo service k3s start
sudo service k3s status
kubectl get pods

k3s kubectl cluster-info

# remove the bundled traefik ingress controller
kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
sudo service k3s stop
sudo nano /etc/systemd/system/k3s.service
# ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644 --no-deploy traefik
sudo systemctl daemon-reload
sudo rm /var/lib/rancher/k3s/server/manifests/traefik.yaml
sudo service k3s start
kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
sudo systemctl restart k3s

kubectl get nodes
kubectl delete node localhost
kubectl get pods --all-namespaces
kubectl get services --all-namespaces
kubectl get deployment --all-namespaces

# install nginx ingress controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
kubectl get pods --namespace=ingress-nginx

kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP # cluster ip, port 8000
kubectl get services

kubectl apply -f ingress-cherrypy-test.yml

EXTERNAL_IP=$(ip addr show | grep wlp | grep inet | awk '{print $2}' | sed 's/\// /g' | awk '{print $1}')
echo $EXTERNAL_IP
sudo sh -c "echo '$EXTERNAL_IP cp.info' >> /etc/hosts"
kubectl get ingress
curl cp.info

kubectl scale deployment cherrypy-test --replicas=5
curl http://cp.info/ -vvv
sudo apt install apache2-utils
ab -n 10 -c 10 http://cp.info/

# Push image to docker hub
docker build -t vbodocker/cherrypy-test .
docker run -p 8080:8080 vbodocker/cherrypy-test
docker login # login to docker hub
docker push vbodocker/cherrypy-test
docker pull vbodocker/cherrypy-test:latest

# Rollout, deploy new image
kubectl get deployments -o wide # shows image urls
kubectl rollout restart deployment cherrypy-test # redeploy image for cherrypy-test
kubectl rollout status deployment cherrypy-test
kubectl get deployments -o wide
kubectl get pods -o wide # age should be low for the newly deployed pods
Install k3s static binary in Slack64
Binaries are available at https://github.com/k3s-io/k3s#manual-download
wget https://github.com/k3s-io/k3s/releases/download/v1.25.3%2Bk3s1/k3s
/etc/rc.d/rc.k3s
#!/bin/sh
PATH=$PATH:/usr/sbin

k3s_start() {
  /usr/bin/k3s server --write-kubeconfig-mode=644 \
    --disable traefik > /var/log/k3s.log 2>&1 &
}

k3s_stop() {
  kill $(ps uax | grep "/usr/bin/k3s" | head -1 | awk '{print $2}')
  ps uax | grep containerd | awk '{print $2}' | xargs -i kill {}
}

k3s_restart() {
  k3s_stop
  k3s_start
}

case "$1" in
  'start')
    k3s_start
    ;;
  'stop')
    k3s_stop
    ;;
  'restart')
    k3s_restart
    ;;
  *)
    echo "usage $0 start|stop|restart"
esac
ingress-cherrypy-test.yml (same manifest as in the previous section)
Steps
echo "alias kubectl='/usr/bin/k3s kubectl'" >> ~/.bashrc
source ~/.bashrc
sudo sh /etc/rc.d/rc.k3s start
kubectl get nodes
kubectl get deployments --all-namespaces
kubectl get services --all-namespaces
kubectl get pods --all-namespaces
kubectl cluster-info
# install nginx ingress controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
# wait for nginx ingress controller to finish
sleep 120
kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP
kubectl get pods --all-namespaces
kubectl get services --all-namespaces
kubectl apply -f ingress-cherrypy-test.yml
EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '{print $2}' | sed 's/\// /g' | awk '{print $1}')
echo $EXTERNAL_IP
sudo sh -c "echo '$EXTERNAL_IP cp.info' >> /etc/hosts"
cat /etc/hosts
kubectl get ingress
curl cp.info