kubernetes
Contents
- kubernetes
- cluster details and health status
- get cluster nodes
- Deploy containers
- dashboard
- Init master
- Deploy cni weaveworks - deploy a pod network to the cluster
- Join cluster
- Deploy container in cluster
- Apply dashboard in cluster
- List services
- Start containers using Kubectl
- Certified kubernetes application developer
- Shows mapping between services and pods
- Show events ordered by last timestamp ascending
- Deploy docker images in containers. A cluster has nodes; nodes have pods/containers. Each cluster might correspond to a service.
- Ingress: an API object that manages external access to the services in a cluster, typically HTTP. It builds on ideas like the reverse proxy and the load balancer.
- minikube version # check version 1.2.0
- minikube start
minikube version
# minikube version: v1.2.0
minikube start
# * minikube v1.2.0 on linux (amd64)
# * Creating none VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
# * Configuring environment for Kubernetes v1.15.0 on Docker 18.09.5
#   - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
# * Pulling images ...
# * Launching Kubernetes ...
#
# * Configuring local host environment ...
# * Verifying: apiserver proxy etcd scheduler controller dns
# * Done! kubectl is now configured to use "minikube"
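If the local cluster is no longer needed, minikube can also report its state and tear the VM down again (not part of the transcript above; standard minikube commands):

minikube status
minikube stop
minikube delete   # removes the local cluster completely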
cluster details and health status
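Check the API server and DNS endpoints of the running cluster:

kubectl cluster-info
# Kubernetes master is running at https://172.17.0.30:8443
# KubeDNS is running at https://172.17.0.30:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
#
# To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.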
get cluster nodes
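List the nodes of the cluster; on minikube a single node acts as master:

kubectl get nodes
# NAME       STATUS   ROLES    AGE    VERSION
# minikube   Ready    master   3m1s   v1.15.0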
Deploy containers
# deploy container
kubectl create deployment first-deployment --image=katacoda/docker-http-server
# deployment.apps/first-deployment created
# deploy container in cluster
# check pods
kubectl get pods
# NAME                               READY   STATUS    RESTARTS   AGE
# first-deployment-8cbf74484-s2fkl   1/1     Running   0          25s
# expose deployment
kubectl expose deployment first-deployment --port=80 --type=NodePort
# service/first-deployment exposed

kubectl get svc first-deployment
# NAME               TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
# first-deployment   NodePort   10.98.246.87   <none>        80:31219/TCP   105s
# do request to port 80 in cluster ip
curl 10.98.246.87:80
# <h1>This request was processed by host: first-deployment-8cbf74484-s2fkl</h1>
#
curl host01:31219
# <h1>This request was processed by host: first-deployment-8cbf74484-s2fkl</h1>
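To clean the example up again, delete the service and the deployment (not part of the original run; standard kubectl commands):

kubectl delete service first-deployment
kubectl delete deployment first-deployment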
dashboard
minikube addons enable dashboard
# The Kubernetes dashboard allows you to view your applications
# in a UI.
# * dashboard was successfully enabled
kubectl apply -f /opt/kubernetes-dashboard.yaml
# only in katacoda
# service/kubernetes-dashboard-katacoda created
# check progress
kubectl get pods -n kube-system -w # check progress
# NAME                                    READY   STATUS    RESTARTS   AGE
# coredns-5c98db65d4-b2kxm                1/1     Running   0          17m
# coredns-5c98db65d4-mm567                1/1     Running   1          17m
# etcd-minikube                           1/1     Running   0          16m
# kube-addon-manager-minikube             1/1     Running   0          16m
# kube-apiserver-minikube                 1/1     Running   0          16m
# kube-controller-manager-minikube        1/1     Running   0          16m
# kube-proxy-pngm9                        1/1     Running   0          17m
# kube-scheduler-minikube                 1/1     Running   0          16m
# kubernetes-dashboard-7b8ddcb5d6-xt5nt   1/1     Running   0          76s
# storage-provisioner                     1/1     Running   0          17m

# dashboard url https://2886795294-30000-kitek05.environments.katacoda.com/
# how to launch a Single Node Kubernetes cluster.
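Outside of Katacoda, minikube can open its bundled dashboard (or just print its URL) directly; a minimal sketch:

minikube dashboard --url
# prints the local URL where the dashboard is being served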
Init master
master $ kubeadm init --kubernetes-version $(kubeadm version -o short)
[init] Using Kubernetes version: v1.14.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.17.0.69]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [172.17.0.69 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [172.17.0.69 127.0.0.1 ::1]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.503433 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xfvno5.q2xfb2m3nw7grdjm
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.17.0.69:6443 --token xfvno5.q2xfb2m3nw7grdjm \
    --discovery-token-ca-cert-hash sha256:26d11c038d236967630d401747f210af9e3679fb1638e8b599a2da4cb98ab159
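Configure kubectl on the master with the commands suggested in the output above, so the following steps can talk to the new cluster:

# In master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=$HOME/.kube/config
echo $KUBECONFIG
# /root/.kube/config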
Deploy cni weaveworks - deploy a pod network to the cluster
Container Network Interface (CNI) defines how the different nodes and their workloads should communicate. Weave Net provides a network to connect all pods together, implementing the Kubernetes model. Kubernetes uses the Container Network Interface (CNI) to join pods onto Weave Net.
# In master
kubectl apply -f /opt/weave-kube
# serviceaccount/weave-net created
# clusterrole.rbac.authorization.k8s.io/weave-net created
# clusterrolebinding.rbac.authorization.k8s.io/weave-net created
# role.rbac.authorization.k8s.io/weave-net created
# rolebinding.rbac.authorization.k8s.io/weave-net created
# daemonset.extensions/weave-net created
kubectl get pod -n kube-system
# NAME                             READY   STATUS    RESTARTS   AGE
# coredns-fb8b8dccf-b9rd7          1/1     Running   0          11m
# coredns-fb8b8dccf-sfgbn          1/1     Running   0          11m
# etcd-master                      1/1     Running   0          10m
# kube-apiserver-master            1/1     Running   0          10m
# kube-controller-manager-master   1/1     Running   0          10m
# kube-proxy-l42wp                 1/1     Running   0          11m
# kube-scheduler-master            1/1     Running   1          10m
# weave-net-mcxml                  2/2     Running   0          84s
Join cluster
# in node01
# join cluster
kubeadm join --discovery-token-unsafe-skip-ca-verification --token=xfvno5.q2xfb2m3nw7grdjm 172.17.0.69:6443
# [preflight] Running pre-flight checks
# [preflight] Reading configuration from the cluster...
# [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
# [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
# [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
# [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
# [kubelet-start] Activating the kubelet service
# [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
#
# This node has joined the cluster:
# * Certificate signing request was sent to apiserver and a response was received.
# * The Kubelet was informed of the new secure connection details.
#
# Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# The --discovery-token-unsafe-skip-ca-verification tag is used to bypass the Discovery Token verification.

# in master
kubectl get nodes
# NAME     STATUS   ROLES    AGE    VERSION
# master   Ready    master   17m    v1.14.0
# node01   Ready    <none>   107s   v1.14.0

# in node01 (no kubeconfig configured there)
kubectl get nodes
# The connection to the server localhost:8080 was refused - did you specify the right host or port
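If the bootstrap token has expired by the time another node wants to join, list the tokens or print a fresh join command on the master (the second command is not in the original transcript; standard kubeadm usage):

kubeadm token list                          # check existing tokens
kubeadm token create --print-join-command   # prints a ready-to-run 'kubeadm join ...' command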
Deploy container in cluster
{{{
# In master
kubectl create deployment http --image=katacoda/docker-http-server:latest
# deployment.apps/http created
kubectl get pods
# NAME READY STATUS RESTARTS AGE
# http-7f8cbdf584-74pd9 1/1 Running 0 25s
docker ps | grep http-server
# (no output on the master - the pod was scheduled on node01)

# In node01
docker ps | grep http-server
# adb3cde7f861 katacoda/docker-http-server "/app"
# About a minute ago
# Up About a minute k8s_docker-http-server_http-7f8cbdf584-74pd9_default_04a17065-b08d-11e9-bff1-0242ac110045_0

# expose deployment in master
kubectl get pods
# NAME READY STATUS RESTARTS AGE
# http-7f8cbdf584-74pd9 1/1 Running 0 17m
kubectl expose deployment http --port=80 --type=NodePort
# service/http exposed

kubectl get service http
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# http NodePort 10.101.65.149 <none> 80:30982/TCP 49s

curl 10.101.65.149:80
# <h1>This request was processed by host: http-7f8cbdf584-74pd9</h1>

curl http://10.101.65.149
# <h1>This request was processed by host: http-7f8cbdf584-74pd9</h1>
}}}
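Because the service is of type NodePort it is also reachable on the node's own address via the allocated high port (80:30982 above); a sketch for looking the port up instead of reading it off the table (the node01 address is an assumption of this environment):
{{{
# read the allocated NodePort from the service spec
NODE_PORT=$(kubectl get service http -o jsonpath='{.spec.ports[0].nodePort}')
echo $NODE_PORT
# reach the application through the node itself
curl http://node01:$NODE_PORT
}}}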
== Apply dashboard in cluster ==
- Dashboard: a general-purpose web UI for Kubernetes clusters (Dashboard version: v1.10.0)
{{{
# In master
kubectl apply -f dashboard.yaml
# secret/kubernetes-dashboard-certs created
# serviceaccount/kubernetes-dashboard created
# role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
# rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
# deployment.apps/kubernetes-dashboard created
# service/kubernetes-dashboard created
kubectl get pods -n kube-system
# NAME READY STATUS RESTARTS AGE
# coredns-fb8b8dccf-b9rd7 1/1 Running 0 42m
# coredns-fb8b8dccf-sfgbn 1/1 Running 0 42m
# etcd-master 1/1 Running 0 41m
# kube-apiserver-master 1/1 Running 0 40m
# kube-controller-manager-master 1/1 Running 0 40m
# kube-proxy-gwrps 1/1 Running 0 26m
# kube-proxy-l42wp 1/1 Running 0 42m
# kube-scheduler-master 1/1 Running 1 40m
# kubernetes-dashboard-5f57845f9d-ls7q2 0/1 ContainerCreating 0 2s
# weave-net-gww8b 2/2 Running 0 26m
# weave-net-mcxml 2/2 Running 0 31m
}}}
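The dashboard pod is still in ContainerCreating above; a quick way to block until it becomes available, sketched with the deployment name from the apply output:
{{{
# waits until the kubernetes-dashboard deployment reports all replicas available
kubectl -n kube-system rollout status deployment kubernetes-dashboard
}}}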
== Create service account for dashboard ==
{{{
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

# Get login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

# When the dashboard was deployed, it used externalIPs to bind the service to port 8443.
# This makes the dashboard reachable from outside the cluster and viewable at
# https://2886795335-8443-kitek05.environments.katacoda.com/

# Use the admin-user token to access the dashboard:
# https://2886795335-8443-kitek05.environments.katacoda.com/#!/login
# sign in using the token
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXNzcTl4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI2Y2RiNGZmMy1iMDkwLTExZTktYmZmMS0wMjQyYWMxMTAwNDUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.R2OtDYxXaR0Pgluzq1m8FMZflF2tdYtJdG5XhkVC28vf1WkJu-Zo51I5ONUiK2WdBEMPw-N2PW_R9l6lak1clvlxfUSn777nThYSxhmR5pfxi6GmDlFo928KJvWVPDen1jrzAaQOEUZ1maOzPcnjKGpR-CRTgmYDnxZY84rqi68y0vfdn16ER8HeW-wkJ-hfGyUAhryk_ob1CUBjjbs-vefpaLcHLdrWNaKaFi1j5fCc_eJi10FpSTmuBsb04xgN0I17hkTlSw2fyOAj7LtC3pBDrK0nOdHCJkBEtsg89rkvLufYph5AFeoWQVKdW9JZH8BYS91BFla7pZnTwdBVeA

# after signing in, the overview is at
# https://2886795335-8443-kitek05.environments.katacoda.com/#!/overview?namespace=default
}}}
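The describe output above requires picking the token out by eye; a sketch that extracts just the token string (this relies on the pre-1.24 behaviour where the ServiceAccount gets an auto-created token secret):
{{{
# name of the auto-created secret belonging to the admin-user service account
SECRET=$(kubectl -n kube-system get serviceaccount admin-user -o jsonpath='{.secrets[0].name}')
# decode and print only the bearer token
kubectl -n kube-system get secret $SECRET -o jsonpath='{.data.token}' | base64 --decode
}}}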
== List services ==
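Presumably the plain service listing, in the current namespace and across all namespaces:
{{{
kubectl get services
kubectl get services --all-namespaces
}}}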
== Start containers using kubectl ==
{{{
minikube start # start kubernetes cluster and its components
# * minikube v1.2.0 on linux (amd64)
# * Creating none VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
# * Configuring environment for Kubernetes v1.15.0 on Docker 18.09.5
#   - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf
# * Pulling images ...
# * Launching Kubernetes ...
# * Configuring local host environment ...
# * Verifying: apiserver proxy etcd scheduler controller dns
# * Done! kubectl is now configured to use "minikube"
kubectl get nodes
# NAME STATUS ROLES AGE VERSION
# minikube Ready master 2m2s v1.15.0

kubectl get service
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2m18s

# A deployment is issued to the Kubernetes master, which launches the Pods and containers required.
# kubectl run is similar to docker run, but at a cluster level.
# Launch a deployment called http, which starts a container based on the Docker image katacoda/docker-http-server:latest.
kubectl run http --image=katacoda/docker-http-server:latest --replicas=1
# kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version.
# Use kubectl run --generator=run-pod/v1 or kubectl create instead.
# deployment.apps/http created
kubectl get deployments
# NAME READY UP-TO-DATE AVAILABLE AGE
# http 1/1 1 1 6s

# describe the deployment process
kubectl describe deployment http

# expose container port 80 on host port 8000, binding to the external IP of the host
kubectl expose deployment http --external-ip="172.17.0.13" --port=8000 --target-port=80
# service/http exposed

curl http://172.17.0.13:8000
# <h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>

kubectl get pods
# NAME READY STATUS RESTARTS AGE
# http-5fcf9dd9cb-zfkkz 1/1 Running 0 3m26s

kubectl get service
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# http ClusterIP 10.100.157.159 172.17.0.13 8000/TCP 57s
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 7m41s

curl http://10.100.157.159:8000
# <h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>

# run a second deployment, publishing container port 80 on host port 8001 (hostPort)
kubectl run httpexposed --image=katacoda/docker-http-server:latest --replicas=1 --port=80 --hostport=8001
# kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version.
# Use kubectl run --generator=run-pod/v1 or kubectl create instead.
# deployment.apps/httpexposed created
curl http://172.17.0.13:8001
# <h1>This request was processed by host: httpexposed-569df5d86-rzzhb</h1>
kubectl get svc
# hostPort publishes the pod via a Docker port mapping, so no new service is created
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# http ClusterIP 10.100.157.159 172.17.0.13 8000/TCP 3m50s
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 10m

kubectl get pods
# NAME READY STATUS RESTARTS AGE
# http-5fcf9dd9cb-zfkkz 1/1 Running 0 7m9s
# httpexposed-569df5d86-rzzhb 1/1 Running 0 36s

# Scaling the deployment requests Kubernetes to launch additional Pods.
kubectl scale --replicas=3 deployment http
# deployment.extensions/http scaled

kubectl get pods # number of pods for deployment http increased to 3
# NAME READY STATUS RESTARTS AGE
# http-5fcf9dd9cb-fhljh 1/1 Running 0 31s
# http-5fcf9dd9cb-wb2dh 1/1 Running 0 31s
# http-5fcf9dd9cb-zfkkz 1/1 Running 0 9m27s
# httpexposed-569df5d86-rzzhb 1/1 Running 0 2m54s

# Once each Pod starts it is added to the load-balancing service.
kubectl get service
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# http ClusterIP 10.100.157.159 172.17.0.13 8000/TCP 7m28s
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14m

kubectl describe svc http
# Name: http
# Namespace: default
# Labels: run=http
# Annotations: <none>
# Selector: run=http
# Type: ClusterIP
# IP: 10.100.157.159
# External IPs: 172.17.0.13
# Port: <unset> 8000/TCP
# TargetPort: 80/TCP
# Endpoints: 172.18.0.4:80,172.18.0.6:80,172.18.0.7:80
# Session Affinity: None
# Events: <none>

# successive requests are load-balanced across the three pods
curl http://172.17.0.13:8000
# <h1>This request was processed by host: http-5fcf9dd9cb-wb2dh</h1>
curl http://172.17.0.13:8000
# <h1>This request was processed by host: http-5fcf9dd9cb-fhljh</h1>
curl http://172.17.0.13:8000
# <h1>This request was processed by host: http-5fcf9dd9cb-zfkkz</h1>
}}}
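When done experimenting, the objects created above are easy to remove again; a small cleanup sketch using the names from this section:
{{{
kubectl delete service http
kubectl delete deployment http httpexposed
}}}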
== Certified Kubernetes Application Developer (CKAD) ==
{{{
# In master
launch.sh
# Waiting for Kubernetes to start...
# Kubernetes started
kubectl get nodes
# NAME STATUS ROLES AGE VERSION
# master Ready master 85m v1.14.0
# node01 Ready <none> 85m v1.14.0

# deploy app
kubectl create deployment examplehttpapp --image=katacoda/docker-http-server
# deployment.apps/examplehttpapp created

# view all deployments
kubectl get deployments
# NAME READY UP-TO-DATE AVAILABLE AGE
# examplehttpapp 1/1 1 1 25s

# A deployment launches a set of Pods. A Pod is a group of one or more containers deployed across the cluster.
kubectl get pods
# NAME READY STATUS RESTARTS AGE
# examplehttpapp-58f66848-n7wn7 1/1 Running 0 71s

# show the pod IP and the node it is scheduled on
kubectl get pods -o wide
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# examplehttpapp-58f66848-n7wn7 1/1 Running 0 113s 10.44.0.2 node01 <none> <none>

# describe pod
kubectl describe pod examplehttpapp-58f66848-n7wn7
# Name: examplehttpapp-58f66848-n7wn7
# Namespace: default
# Priority: 0
# PriorityClassName: <none>
# Node: node01/172.17.0.24
# Start Time: Sat, 27 Jul 2019 17:59:35 +0000
# Labels: app=examplehttpapp
#         pod-template-hash=58f66848
# Annotations: <none>
# Status: Running
# IP: 10.44.0.2

# List all namespaces within the cluster
kubectl get namespaces
kubectl get ns

# Namespaces can be used to filter queries for the available objects.
kubectl get pods -n kube-system

kubectl create ns testns
# namespace/testns created
kubectl create deployment namespacedeg -n testns --image=katacoda/docker-http-server
# deployment.apps/namespacedeg created
kubectl get pods -n testns
# NAME READY STATUS RESTARTS AGE
# namespacedeg-74dcc7dc64-wcxnj 1/1 Running 0 3s
kubectl get pods -n testns -o wide
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# namespacedeg-74dcc7dc64-wcxnj 1/1 Running 0 18s 10.44.0.3 node01 <none> <none>

# kubectl can scale the number of Pods running for a deployment, referred to as replicas.
kubectl scale deployment examplehttpapp --replicas=5
# deployment.extensions/examplehttpapp scaled
kubectl get deployments -o wide
# NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
# examplehttpapp 5/5 5 5 9m6s docker-http-server katacoda/docker-http-server app=examplehttpapp
kubectl get pods -o wide
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# examplehttpapp-58f66848-cf6pl 1/1 Running 0 65s 10.44.0.6 node01 <none> <none>
# examplehttpapp-58f66848-lfrq4 1/1 Running 0 65s 10.44.0.5 node01 <none> <none>
# examplehttpapp-58f66848-n7wn7 1/1 Running 0 9m26s 10.44.0.2 node01 <none> <none>
# examplehttpapp-58f66848-snwl7 1/1 Running 0 65s 10.44.0.7 node01 <none> <none>
# examplehttpapp-58f66848-vd8db 1/1 Running 0 65s 10.44.0.4 node01 <none> <none>

# Everything within Kubernetes is controllable as YAML.
kubectl edit deployment examplehttpapp # opens vi/vim; changing spec.replicas to 10 and saving the file increases the number of pods

kubectl get nodes -o wide
# NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
# master Ready master 102m v1.14.0 172.17.0.19 <none> Ubuntu 16.04.6 LTS 4.4.0-150-generic docker://18.9.5
# node01 Ready <none> 102m v1.14.0 172.17.0.24 <none> Ubuntu 16.04.6 LTS 4.4.0-150-generic docker://18.9.5

# The image can be changed using the set image command and then rolled out.
# apply the new image to the pods/containers
kubectl --record=true set image deployment examplehttpapp docker-http-server=katacoda/docker-http-server:v2
# deployment.extensions/examplehttpapp image updated
kubectl rollout status deployment examplehttpapp
# Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
# Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
# Waiting for deployment "examplehttpapp" rollout to finish: 1 out of 3 new replicas have been updated...
# Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
# Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
# Waiting for deployment "examplehttpapp" rollout to finish: 2 out of 3 new replicas have been updated...
# Waiting for deployment "examplehttpapp" rollout to finish: 1 old replicas are pending termination...
# Waiting for deployment "examplehttpapp" rollout to finish: 1 old replicas are pending termination...
# deployment "examplehttpapp" successfully rolled out

# roll back the deployment
kubectl rollout undo deployment examplehttpapp
# deployment.extensions/examplehttpapp rolled back

# The expose command creates a new service for a deployment. --port specifies the port of the application we want to make available.
kubectl expose deployment examplehttpapp --port 80
# service/examplehttpapp exposed
kubectl get svc -o wide
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
# examplehttpapp ClusterIP 10.103.93.196 <none> 80/TCP 13s app=examplehttpapp
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 105m <none>
kubectl describe svc examplehttpapp
# Name: examplehttpapp
# Namespace: default
# Labels: app=examplehttpapp
# Annotations: <none>
# Selector: app=examplehttpapp
# Type: ClusterIP
# IP: 10.103.93.196
# Port: <unset> 80/TCP
# TargetPort: 80/TCP
# Endpoints: 10.44.0.2:80,10.44.0.4:80,10.44.0.5:80
# Session Affinity: None
# Events: <none>

# But how does Kubernetes know where to send traffic? That is managed by labels.
# Every object within Kubernetes can have labels attached, allowing Kubernetes to discover and use the configuration.

kubectl get services -l app=examplehttpapp -o go-template='{{(index .items 0).spec.clusterIP}}'
kubectl get services
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# examplehttpapp ClusterIP 10.103.93.196 <none> 80/TCP 2m32s
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 107m

kubectl get services -o wide
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
# examplehttpapp ClusterIP 10.103.93.196 <none> 80/TCP 2m36s app=examplehttpapp
# kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 107m <none>

# kubectl logs views the logs of a Pod
kubectl logs $(kubectl get pods -l app=examplehttpapp -o go-template='{{(index .items 0).metadata.name}}')
# Web Server started. Listening on 0.0.0.0:80

# view the CPU or memory usage of nodes and Pods
kubectl top node
# NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
# master 133m 3% 1012Mi 53%
# node01 49m 1% 673Mi 17%
kubectl top pod
# NAME CPU(cores) MEMORY(bytes)
# examplehttpapp-58f66848-ctnml 0m 0Mi
# examplehttpapp-58f66848-gljk9 1m 0Mi
# examplehttpapp-58f66848-hfqts 1m 0Mi
}}}
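For CKAD-style work it is often handier to generate a manifest than to create objects imperatively; a minimal sketch using the client-side dry run available in this kubectl generation (newer releases spell the flag --dry-run=client; the file name is arbitrary):
{{{
# write the deployment manifest to a file instead of creating it
kubectl create deployment examplehttpapp --image=katacoda/docker-http-server --dry-run -o yaml > examplehttpapp.yaml
# create/update it from the file after editing
kubectl apply -f examplehttpapp.yaml
}}}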
== Show the mapping between services and pods ==
{{{
kubectl get endpoints
}}}
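A sketch for inspecting the endpoints behind a single service (the service name is reused from the CKAD section above):
{{{
kubectl describe endpoints examplehttpapp
kubectl get endpoints examplehttpapp -o wide
}}}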
== Show events ordered by last timestamp, ascending ==
{{{
kubectl get events --sort-by='.lastTimestamp'
}}}
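Events can also be narrowed to a single object with a field selector; a sketch using the pod name from the CKAD section as a placeholder:
{{{
kubectl get events --field-selector involvedObject.name=examplehttpapp-58f66848-n7wn7
}}}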