= k3s = <> * k3s - Lightweight Kubernetes * https://k3s.io/ * https://github.com/rancher/k3s/releases/tag/v1.17.0+k3s.1 * https://rancher.com/docs/k3s/latest/en/ * https://rancher.com/docs/k3s/latest/en/quick-start/ K3S works great from something as small as a Raspberry Pi to an AWS a1.4xlarge 32GiB server. Download k3s - latest release, x86_64, ARMv7, and ARM64 are supported Situations where a PhD in k8s clusterology is infeasible {{{#!highlight bash curl -sfL https://get.k3s.io | sh - sudo curl -sfL https://get.k3s.io | sh - # [INFO] Finding latest release # [INFO] Using v1.17.0+k3s.1 as release # [INFO] Downloading hash https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/sha256sum-amd64.txt # [INFO] Downloading binary https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/k3s # [INFO] Verifying binary download # [INFO] Installing k3s to /usr/local/bin/k3s # [INFO] Creating /usr/local/bin/kubectl symlink to k3s # [INFO] Creating /usr/local/bin/crictl symlink to k3s # [INFO] Creating /usr/local/bin/ctr symlink to k3s # [INFO] Creating killall script /usr/local/bin/k3s-killall.sh # [INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh # [INFO] env: Creating environment file /etc/systemd/system/k3s.service.env # [INFO] systemd: Creating service file /etc/systemd/system/k3s.service # [INFO] systemd: Enabling k3s unit # Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service. 
# [INFO] systemd: Starting k3s # as root k3s kubectl cluster-info kubectl create deployment springboot-test --image=vbodocker/springboot-test:latest kubectl expose deployment springboot-test --port=8000 --target-port=8080 --type=NodePort kubectl get services IP_SPRINGBOOT=$(kubectl get services | grep springboot | awk '//{print $3}') curl http://$IP_SPRINGBOOT:8000/dummy # list containerd images and containers k3s crictl images k3s crictl ps # connect to container id crictl exec -it 997a2ad8c763a sh # connect to container/pod kubectl get pods kubectl exec -it springboot-test-6bb5fdfc48-phh8k sh cat /etc/os-release # alpine linux in container # give sudo rights to user /sbin/usermod -aG sudo user # scale pods sudo kubectl scale deployment springboot-test --replicas=3 sudo kubectl get pods -o wide # add mariadb pod/service sudo kubectl create deployment mariadb-test --image=mariadb:latest sudo kubectl get pods -o wide sudo kubectl delete deployment mariadb-test # https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/ sudo kubectl apply -f mariadb-pv.yaml #persistentvolume/mariadb-pv-volume created #persistentvolumeclaim/mariadb-pv-claim created sudo kubectl apply -f mariadb-deployment.yaml #service/mariadb created #deployment.apps/mariadb created sudo kubectl describe deployment mariadb sudo kubectl get svc -o wide # connect to mariabdb pod sudo kubectl exec -it mariadb-8578f4dc8c-r4ftv /bin/bash ss -atn # show ports tcp listening ip address # show ip addresses mysql -h localhost -p mysql -u root -h 10.42.0.12 -p # delete service, persistent volume claim and persistent volume sudo kubectl delete deployment,svc mariadb sudo kubectl delete pvc mariadb-pv-claim sudo kubectl delete pv mariadb-pv-volume }}} == mariadb-pv.yaml == {{{#!highlight yaml apiVersion: v1 kind: PersistentVolume metadata: name: mariadb-pv-volume labels: type: local spec: storageClassName: manual capacity: storage: 1Gi accessModes: - ReadWriteOnce hostPath: path: 
"/mnt/data" --- apiVersion: v1 kind: PersistentVolumeClaim metadata: name: mariadb-pv-claim spec: storageClassName: manual accessModes: - ReadWriteOnce resources: requests: storage: 1Gi }}} == mariadb-deployment.yaml == {{{#!highlight yaml apiVersion: v1 kind: Service metadata: name: mariadb spec: ports: - port: 3306 selector: app: mariadb clusterIP: None --- apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 kind: Deployment metadata: name: mariadb spec: selector: matchLabels: app: mariadb strategy: type: Recreate template: metadata: labels: app: mariadb spec: containers: - image: mariadb:latest name: mariadb env: # Use secret in real usage - name: MYSQL_ROOT_PASSWORD value: password ports: - containerPort: 3306 name: mariadb volumeMounts: - name: mariadb-persistent-storage mountPath: /var/lib/mysql # mariadb image stores data in /var/lib/mysql volumes: - name: mariadb-persistent-storage persistentVolumeClaim: claimName: mariadb-pv-claim }}} = Init containers = * https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Init containers can contain utilities or setup scripts not present in an app image. 
= systemctl commands = {{{#!highlight bash systemctl start k3s systemctl stop k3s systemctl status k3s systemctl disable k3s.service systemctl enable k3s }}} = Ubuntu pod = == ubuntu.yaml == {{{#!highlight yaml apiVersion: v1 kind: Pod metadata: name: ubuntu labels: app: ubuntu spec: containers: - name: ubuntu image: ubuntu:latest command: ["/bin/sleep", "3650d"] imagePullPolicy: IfNotPresent restartPolicy: Always }}} {{{#!highlight bash sudo kubectl apply -f ubuntu.yaml sudo kubectl get pods sudo kubectl exec -it ubuntu -- bash sudo kubectl delete pod ubuntu }}} = Alpine pod = == alpine.yaml == {{{#!highlight yaml apiVersion: v1 kind: Pod metadata: name: alpine labels: app: alpine spec: containers: - name: alpine image: alpine:latest command: ["/bin/sleep", "3650d"] imagePullPolicy: IfNotPresent restartPolicy: Always }}} {{{#!highlight bash # Pods use PersistentVolumeClaims to request physical storage. sudo kubectl apply -f alpine.yaml sudo kubectl exec -it alpine -- sh }}} = Nginx with persistent volume = {{{#!highlight bash cd /tmp mkdir -p /tmp/data echo 'Hello from Kubernetes storage' > /tmp/data/index.html }}} == pv-volume.yaml == {{{#!highlight yaml apiVersion: v1 kind: PersistentVolume metadata: name: task-pv-volume labels: type: local spec: storageClassName: manual capacity: storage: 0.2Gi accessModes: - ReadWriteOnce hostPath: path: "/tmp/data" }}} == pv-claim.yaml == {{{#!highlight yaml apiVersion: v1 kind: PersistentVolumeClaim metadata: name: task-pv-claim spec: storageClassName: manual accessModes: - ReadWriteOnce resources: requests: storage: 0.2Gi }}} == pv-pod.yaml == {{{#!highlight yaml apiVersion: v1 kind: Pod metadata: name: task-pv-pod spec: volumes: - name: task-pv-storage persistentVolumeClaim: claimName: task-pv-claim containers: - name: task-pv-container image: nginx ports: - containerPort: 80 name: "http-server" volumeMounts: - mountPath: "/usr/share/nginx/html" name: task-pv-storage }}} {{{#!highlight bash sudo kubectl apply -f 
pv-volume.yaml sudo kubectl apply -f pv-claim.yaml sudo kubectl apply -f pv-pod.yaml sudo kubectl get pods -o wide curl http://10.42.0.28/ sudo kubectl exec -it task-pv-pod -- bash cd /usr/share/nginx/html echo "Hey from Kubernetes storage" > index.html cat /etc/os-release # debian buster kubectl delete pod task-pv-pod kubectl delete pvc task-pv-claim kubectl delete pv task-pv-volume cat /tmp/data/index.html }}} = Generate yaml = {{{#!highlight sh sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test --dry-run=client --output=yaml sudo kubectl expose deployment cherrypy-test --port=8080 --type=NodePort --dry-run=client --output=yaml sudo kubectl scale deployment cherrypy-test --replicas=3 --dry-run=client --output=yaml }}} = Alpine persistent volume = == alpine-shared.yaml == {{{#!highlight yaml --- apiVersion: v1 kind: PersistentVolume metadata: name: alpine-pv-volume labels: type: local spec: storageClassName: manual capacity: storage: 0.2Gi accessModes: - ReadWriteOnce hostPath: path: "/tmp/alpine-data" --- apiVersion: v1 kind: PersistentVolumeClaim metadata: name: alpine-pv-claim spec: storageClassName: manual accessModes: - ReadWriteOnce resources: requests: storage: 0.2Gi --- apiVersion: v1 kind: Pod metadata: name: alpine-pod labels: app: alpine-pod spec: volumes: - name: alpine-pv-storage persistentVolumeClaim: claimName: alpine-pv-claim containers: - name: alpine image: alpine:latest command: ["/bin/sleep", "3650d"] imagePullPolicy: IfNotPresent volumeMounts: - mountPath: "/mnt/alpine/data" name: alpine-pv-storage restartPolicy: Always }}} {{{#!highlight bash mkdir -p /tmp/alpine-data/ sudo kubectl apply -f alpine-shared.yaml sudo kubectl exec -it alpine-pod -- sh # inside pod cd /mnt/alpine/data # inside pod echo "teste" > x.txt # inside pod # in host cat /tmp/alpine-data/x.txt # k8s host }}} = MariaDB + NFS = {{{#!highlight sh /vol *(rw,sync,insecure,fsid=0,no_subtree_check,no_root_squash) exportfs -rav exporting *:/vol mkdir -p 
/vol/mariadb-0 kubectl apply -f mariadb-nfs.yaml kubectl exec -it mariadb-79847f5d97-smbdx -- bash touch /var/lib/mysql/b mount | grep nfs kubectl delete -f mariadb-nfs.yaml kubectl get pods kubectl get pvc kubectl get pv }}} == mariadb-nfs.yaml == {{{#!highlight yaml --- apiVersion: v1 kind: PersistentVolume metadata: name: mdb-vol-0 labels: volume: mdb-volume spec: storageClassName: manual capacity: storage: 1Gi accessModes: - ReadWriteOnce nfs: server: 127.0.0.1 path: "/vol/mariadb-0" --- apiVersion: v1 kind: PersistentVolumeClaim metadata: name: mdb-pv-claim spec: storageClassName: manual accessModes: - ReadWriteOnce resources: requests: storage: 1Gi --- apiVersion: v1 kind: Service metadata: name: mariadb spec: ports: - port: 3306 selector: app: mariadb clusterIP: None --- apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 kind: Deployment metadata: name: mariadb spec: selector: matchLabels: app: mariadb strategy: type: Recreate template: metadata: labels: app: mariadb spec: containers: - image: mariadb:latest name: mariadb env: # Use secret in real usage - name: MYSQL_ROOT_PASSWORD value: password ports: - containerPort: 3306 name: mariadb volumeMounts: - name: mdb-persistent-storage mountPath: /var/lib/mysql # mariadb image stores data in /var/lib/mysql volumes: - name: mdb-persistent-storage persistentVolumeClaim: claimName: mdb-pv-claim }}} = Persistent volumes = * https://kubernetes.io/docs/concepts/storage/persistent-volumes/ A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes. A PersistentVolumeClaim (PVC) is a request for storage by a user. Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany, see AccessModes). Types of Persistent Volumes: * local - local storage devices mounted on nodes. 
* nfs - Network File System (NFS) storage = Ingress controller nginx example = == ingress-cherrypy-test.yml == {{{#!highlight yaml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: ingress-cherrypy-test spec: rules: - host: cp.info http: paths: - path: / pathType: Prefix backend: service: name: cherrypy-test port: number: 8000 ingressClassName: nginx }}} == Steps == {{{#!highlight bash # install k3s curl -sfL https://get.k3s.io | sh - KUBECONFIG=~/.kube/config mkdir ~/.kube 2> /dev/null sudo k3s kubectl config view --raw > "$KUBECONFIG" chmod 600 "$KUBECONFIG" nano ~/.bashrc export KUBECONFIG=~/.kube/config source . ~/.bashrc sudo nano /etc/systemd/system/k3s.service ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644 sudo systemctl daemon-reload sudo service k3s start sudo service k3s status kubectl get pods k3s kubectl cluster-info kubectl -n kube-system delete helmcharts.helm.cattle.io traefik sudo service k3s stop sudo nano /etc/systemd/system/k3s.service # ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644 --no-deploy traefik sudo systemctl daemon-reload sudo rm /var/lib/rancher/k3s/server/manifests/traefik.yaml sudo service k3s start kubectl -n kube-system delete helmcharts.helm.cattle.io traefik sudo systemctl restart k3s kubectl get nodes kubectl delete node localhost kubectl get pods --all-namespaces kubectl get services --all-namespaces kubectl get deployment --all-namespaces # install nginx ingress controller kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml kubectl get pods --namespace=ingress-nginx kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP # cluster ip port 8000 kubectl get services kubectl apply -f ingress-cherrypy-test.yml EXTERNAL_IP=$(ip addr show | grep wlp | grep inet | awk '//{print $2}' | sed 's/\// /g' | 
awk '//{print $1}') echo $EXTERNAL_IP sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts " kubectl get ingress curl cp.info kubectl scale deployment cherrypy-test --replicas=5 curl http://cp.info/ -vvv sudo apt install apache2-utils ab -n 10 -c 10 http://cp.info/ # Push image to docker hub docker build -t vbodocker/cherrypy-test . docker run -p 8080:8080 vbodocker/cherrypy-test docker login # login to docker hub docker push vbodocker/cherrypy-test docker pull vbodocker/cherrypy-test:latest # Rollout, deploy new image kubectl get deployments -o wide # shows image urls kubectl rollout restart deployment cherrypy-test # redeploy image url for cherrypy-test kubectl rollout status deployment cherrypy-test kubectl get deployments -o wide kubectl get pods -o wide # age should be low for the newly deployed pods }}} = Install k3s static binary in Slack64 = * https://github.com/k3s-io/k3s#k3s---lightweight-kubernetes * Binaries available in https://github.com/k3s-io/k3s#manual-download * wget https://github.com/k3s-io/k3s/releases/download/v1.25.3%2Bk3s1/k3s {{{#!highlight sh sudo mv ~/Downloads/k3s /usr/bin/ sudo chmod 744 /usr/bin/k3s }}} == /etc/rc.d/rc.k3s == {{{#!highlight sh #!/bin/sh PATH=$PATH:/usr/sbin k3s_start() { /usr/bin/k3s server --write-kubeconfig-mode=644 \ --disable traefik > /var/log/k3s.log 2>&1 & } k3s_stop() { kill $(ps uax | grep "/usr/bin/k3s" | head -1 | awk '//{print $2}') ps uax | grep containerd | awk '//{print $2}' | xargs -i kill {} } k3s_restart() { k3s_stop k3s_start } case "$1" in 'start') k3s_start ;; 'stop') k3s_stop ;; 'restart') k3s_restart ;; *) echo "usage $0 start|stop|restart" esac }}} == ingress-cherrypy-test.yml == {{{#!highlight yaml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: ingress-cherrypy-test spec: rules: - host: cp.info http: paths: - path: / pathType: Prefix backend: service: name: cherrypy-test port: number: 8000 ingressClassName: nginx }}} == Steps == {{{#!highlight sh echo "alias 
kubectl='/usr/bin/k3s kubectl'" >> ~/.bashrc source ~/.bashrc sudo sh /etc/rc.d/rc.k3s start kubectl get nodes kubectl get deployments --all-namespaces kubectl get services --all-namespaces kubectl get pods --all-namespaces kubectl cluster-info # install nginx ingress controller kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml # wait for nginx ingress controller to finish sleep 120 kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP kubectl get pods --all-namespaces kubectl get services --all-namespaces kubectl apply -f ingress-cherrypy-test.yml EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}') echo $EXTERNAL_IP sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts " cat /etc/hosts kubectl get ingress curl cp.info }}} == Local docker registry example == === Dockerfile === {{{#!highlight sh FROM alpine:3.16 RUN apk add --update --no-cache python3 nano py3-pip RUN pip install cherrypy routes --user RUN mkdir /app COPY . 
/app/ WORKDIR /app CMD ["python3","main.py"] }}} === main.py === {{{#!highlight python import cherrypy import wsgiref.handlers import time import socket class RootUrl: @cherrypy.expose def index(self): return "Hello world ola mundo %s"%(socket.gethostname()) def main(): ru = RootUrl() cherrypy.server.socket_host = '0.0.0.0' cherrypy.quickstart(ru) if __name__ == '__main__': main() }}} === test-py.yaml === {{{#!highlight yaml apiVersion: v1 kind: Service metadata: name: test-py-svc spec: ports: - port: 8080 selector: app: test-py-deploy --- apiVersion: apps/v1 kind: Deployment metadata: name: test-py-deploy spec: selector: matchLabels: app: test-py-deploy strategy: type: Recreate template: metadata: labels: app: test-py-deploy spec: containers: - image: localhost:5000/test-py-image:latest name: test-py ports: - containerPort: 8080 name: test-py imagePullSecrets: - name: regcred }}} === ingress-test-py.yaml === {{{#!highlight yaml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: ingress-test-py spec: rules: - host: testpy.info http: paths: - path: / pathType: Prefix backend: service: name: test-py-svc port: number: 8080 ingressClassName: nginx }}} === Steps === {{{#!highlight sh mkdir test-py cd test-py mkdir auth docker run --entrypoint htpasswd httpd:2 -Bbn testuser testpassword > auth/htpasswd docker run -d -p 5000:5000 --restart=always --name registry -v "$(pwd)"/auth:/auth \ -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \ registry:2 docker ps curl http://testuser:testpassword@localhost:5000/v2/_catalog docker build -t test-py-image . 
docker login localhost:5000 docker tag test-py-image localhost:5000/test-py-image docker push localhost:5000/test-py-image kubectl delete secret regcred kubectl create secret docker-registry regcred --docker-server=localhost:5000 --docker-username=testuser --docker-password=testpassword --docker-email=test@example.org kubectl apply -f test-py.yaml curl $(kubectl get services | grep test | awk '//{print $3}'):8080 # Hello world ola mundo test-py kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml # wait for nginx ingress controller to finish sleep 120 EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}') echo $EXTERNAL_IP sudo sh -c " echo '$EXTERNAL_IP testpy.info' >> /etc/hosts " kubectl apply -f ingress-test-py.yaml curl testpy.info }}} == DNS issues == k3s DNS resides in namespace kube-system in the service kube-dns {{{#!highlight sh # kube-system kube-dns ClusterIP 10.43.0.10 # tests in pods IP 10.43.0.10 nslookup ..svc.cluster.local 10.43.0.10 nslookup api.chucknorris.io 10.43.0.10 # add Google and cloudfare entries KUBE_EDITOR="nano" kubectl -n kube-system edit configmap coredns forward . 1.1.1.1 8.8.8.8 /etc/resolv.conf # Redeploy coredns kubectl -n kube-system rollout restart deployment coredns # Check logs kubectl logs --namespace=kube-system --tail=100 -l k8s-app=kube-dns }}}