k3s

K3s works great on anything from a Raspberry Pi to an AWS a1.4xlarge 32GiB server. Download the latest k3s release — x86_64, ARMv7, and ARM64 are supported. It is built for situations where a PhD in k8s clusterology is infeasible.

   1 curl -sfL https://get.k3s.io | sh -
   2 sudo curl -sfL https://get.k3s.io | sh -
   3 # [INFO]  Finding latest release
   4 # [INFO]  Using v1.17.0+k3s.1 as release
   5 # [INFO]  Downloading hash https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/sha256sum-amd64.txt
   6 # [INFO]  Downloading binary https://github.com/rancher/k3s/releases/download/v1.17.0+k3s.1/k3s
   7 # [INFO]  Verifying binary download
   8 # [INFO]  Installing k3s to /usr/local/bin/k3s
   9 # [INFO]  Creating /usr/local/bin/kubectl symlink to k3s
  10 # [INFO]  Creating /usr/local/bin/crictl symlink to k3s
  11 # [INFO]  Creating /usr/local/bin/ctr symlink to k3s
  12 # [INFO]  Creating killall script /usr/local/bin/k3s-killall.sh
  13 # [INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh
  14 # [INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env
  15 # [INFO]  systemd: Creating service file /etc/systemd/system/k3s.service
  16 # [INFO]  systemd: Enabling k3s unit
  17 # Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.
  18 # [INFO]  systemd: Starting k3s
  19 # as root
  20 k3s kubectl cluster-info
  21 kubectl create deployment springboot-test --image=vbodocker/springboot-test:latest
  22 kubectl expose deployment springboot-test --port=8000 --target-port=8080 --type=NodePort
  23 kubectl get services
  24 IP_SPRINGBOOT=$(kubectl get services | grep springboot | awk '//{print $3}')
  25 curl http://$IP_SPRINGBOOT:8000/dummy
  26 # list containerd images and containers
  27 k3s crictl images
  28 k3s crictl ps
  29 # connect to container id
  30 crictl exec -it 997a2ad8c763a  sh
  31 # connect to container/pod
  32 kubectl get pods
  33 kubectl exec -it springboot-test-6bb5fdfc48-phh8k -- sh
  34 cat /etc/os-release # alpine linux in container
  35 # give sudo rights to user
  36 /sbin/usermod -aG sudo user
  37 # scale pods
  38 sudo kubectl scale deployment springboot-test --replicas=3
  39 sudo kubectl get pods -o wide
  40 # add mariadb pod/service
  41 sudo kubectl create deployment mariadb-test --image=mariadb:latest
  42 sudo kubectl get pods -o wide
  43 sudo kubectl delete deployment mariadb-test
  44 # https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/
  45 sudo kubectl apply -f mariadb-pv.yaml
  46 #persistentvolume/mariadb-pv-volume created
  47 #persistentvolumeclaim/mariadb-pv-claim created
  48 sudo kubectl apply -f mariadb-deployment.yaml 
  49 #service/mariadb created
  50 #deployment.apps/mariadb created
  51 sudo kubectl describe deployment mariadb
  52 sudo kubectl get svc -o wide
  53 # connect to mariadb pod
  54 sudo kubectl exec -it mariadb-8578f4dc8c-r4ftv -- /bin/bash
  55 ss -atn # show ports tcp listening
  56 ip address # show ip addresses
  57 mysql -h localhost -p
  58 mysql -u root -h 10.42.0.12 -p 
  59 
  60 # delete service, persistent volume claim and persistent volume
  61 sudo kubectl delete deployment,svc mariadb
  62 sudo kubectl delete pvc mariadb-pv-claim
  63 sudo kubectl delete pv mariadb-pv-volume

mariadb-pv.yaml

   1 apiVersion: v1
   2 kind: PersistentVolume
   3 metadata:
   4   name: mariadb-pv-volume
   5   labels:
   6     type: local
   7 spec:
   8   storageClassName: manual
   9   capacity:
  10     storage: 1Gi
  11   accessModes:
  12     - ReadWriteOnce
  13   hostPath:
  14     path: "/mnt/data"
  15 ---
  16 apiVersion: v1
  17 kind: PersistentVolumeClaim
  18 metadata:
  19   name: mariadb-pv-claim
  20 spec:
  21   storageClassName: manual
  22   accessModes:
  23     - ReadWriteOnce
  24   resources:
  25     requests:
  26       storage: 1Gi

mariadb-deployment.yaml

   1 apiVersion: v1
   2 kind: Service
   3 metadata:
   4   name: mariadb
   5 spec:
   6   ports:
   7   - port: 3306
   8   selector:
   9     app: mariadb
  10   clusterIP: None
  11 ---
  12 apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
  13 kind: Deployment
  14 metadata:
  15   name: mariadb
  16 spec:
  17   selector:
  18     matchLabels:
  19       app: mariadb
  20   strategy:
  21     type: Recreate
  22   template:
  23     metadata:
  24       labels:
  25         app: mariadb
  26     spec:
  27       containers:
  28       - image: mariadb:latest
  29         name: mariadb
  30         env:
  31           # Use secret in real usage
  32         - name: MYSQL_ROOT_PASSWORD
  33           value: password
  34         ports:
  35         - containerPort: 3306
  36           name: mariadb
  37         volumeMounts:
  38         - name: mariadb-persistent-storage
  39           mountPath: /var/lib/mysql # datadir of the mariadb image
  40       volumes:
  41       - name: mariadb-persistent-storage
  42         persistentVolumeClaim:
  43           claimName: mariadb-pv-claim

Init containers

Init containers can contain utilities or setup scripts not present in an app image.

systemctl commands

   1 systemctl start k3s
   2 systemctl stop k3s
   3 systemctl status k3s
   4 systemctl disable k3s.service
   5 systemctl enable k3s

Ubuntu pod

ubuntu.yaml

   1 apiVersion: v1
   2 kind: Pod
   3 metadata:
   4   name: ubuntu
   5   labels:
   6     app: ubuntu
   7 spec:
   8   containers:
   9   - name: ubuntu
  10     image: ubuntu:latest
  11     command: ["/bin/sleep", "3650d"]
  12     imagePullPolicy: IfNotPresent
  13   restartPolicy: Always

   1 sudo kubectl apply -f ubuntu.yaml
   2 sudo kubectl get pods
   3 sudo kubectl exec -it ubuntu -- bash
   4 sudo kubectl delete pod ubuntu

Alpine pod

alpine.yaml

   1 apiVersion: v1
   2 kind: Pod
   3 metadata:
   4   name: alpine
   5   labels:
   6     app: alpine
   7 spec:
   8   containers:
   9   - name: alpine
  10     image: alpine:latest
  11     command: ["/bin/sleep", "3650d"]
  12     imagePullPolicy: IfNotPresent
  13   restartPolicy: Always

   1 # Pods use PersistentVolumeClaims to request physical storage.
   2 sudo kubectl apply -f alpine.yaml
   3 sudo kubectl exec -it alpine -- sh

Nginx with persistent volume

   1 cd /tmp
   2 mkdir -p /tmp/data
   3 echo 'Hello from Kubernetes storage' > /tmp/data/index.html

pv-volume.yaml

   1 apiVersion: v1
   2 kind: PersistentVolume
   3 metadata:
   4   name: task-pv-volume
   5   labels:
   6     type: local
   7 spec:
   8   storageClassName: manual
   9   capacity:
  10     storage: 0.2Gi
  11   accessModes:
  12     - ReadWriteOnce
  13   hostPath:
  14     path: "/tmp/data"

pv-claim.yaml

   1 apiVersion: v1
   2 kind: PersistentVolumeClaim
   3 metadata:
   4   name: task-pv-claim
   5 spec:
   6   storageClassName: manual
   7   accessModes:
   8     - ReadWriteOnce
   9   resources:
  10     requests:
  11       storage: 0.2Gi

pv-pod.yaml

   1 apiVersion: v1
   2 kind: Pod
   3 metadata:
   4   name: task-pv-pod
   5 spec:
   6   volumes:
   7     - name: task-pv-storage
   8       persistentVolumeClaim:
   9         claimName: task-pv-claim
  10   containers:
  11     - name: task-pv-container
  12       image: nginx
  13       ports:
  14         - containerPort: 80
  15           name: "http-server"
  16       volumeMounts:
  17         - mountPath: "/usr/share/nginx/html"
  18           name: task-pv-storage

   1 sudo kubectl apply -f pv-volume.yaml
   2 sudo kubectl apply -f pv-claim.yaml
   3 sudo kubectl apply -f pv-pod.yaml
   4 sudo kubectl get pods -o wide 
   5 curl http://10.42.0.28/
   6 sudo kubectl exec -it task-pv-pod -- bash
   7 cd /usr/share/nginx/html
   8 echo "Hey from Kubernetes storage" > index.html
   9 cat /etc/os-release # debian buster 
  10 kubectl delete pod task-pv-pod
  11 kubectl delete pvc task-pv-claim
  12 kubectl delete pv task-pv-volume
  13 cat /tmp/data/index.html 

Generate yaml

   1 sudo kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test --dry-run=client --output=yaml
   2 sudo kubectl expose deployment cherrypy-test --port=8080 --type=NodePort --dry-run=client --output=yaml
   3 sudo kubectl scale deployment cherrypy-test --replicas=3 --dry-run=client --output=yaml

Alpine persistent volume

alpine-shared.yaml

---
# PersistentVolume backed by a hostPath directory on the k3s node.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: alpine-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 0.2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/alpine-data"
---
# Claim that binds to the PV above through the matching storageClassName.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: alpine-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 0.2Gi
---
# Long-running Alpine pod mounting the claim at /mnt/alpine/data.
apiVersion: v1
kind: Pod
metadata:
  name: alpine-pod
  labels:
    app: alpine-pod
spec:
  restartPolicy: Always
  volumes:
    - name: alpine-pv-storage
      persistentVolumeClaim:
        claimName: alpine-pv-claim
  containers:
    - name: alpine
      image: alpine:latest
      imagePullPolicy: IfNotPresent
      # Keep the container alive so it can be exec'd into.
      command: ["/bin/sleep", "3650d"]
      volumeMounts:
        - name: alpine-pv-storage
          mountPath: "/mnt/alpine/data"

   1 mkdir -p /tmp/alpine-data/
   2 sudo kubectl apply -f alpine-shared.yaml 
   3 sudo kubectl exec -it alpine-pod -- sh
   4 # inside pod
   5 cd /mnt/alpine/data # inside pod
   6 echo "teste" > x.txt # inside pod
   7 # in host
   8 cat /tmp/alpine-data/x.txt # k8s host
   9 

MariaDB + NFS

   1 /vol *(rw,sync,insecure,fsid=0,no_subtree_check,no_root_squash)
   2 exportfs -rav
   3 exporting *:/vol
   4 mkdir -p /vol/mariadb-0
   5 
   6 kubectl apply -f mariadb-nfs.yaml
   7 kubectl exec -it mariadb-79847f5d97-smbdx -- bash
   8 touch  /var/lib/mariadb/b
   9 mount | grep nfs
  10 kubectl delete -f mariadb-nfs.yaml
  11 kubectl get pods
  12 kubectl get pvc
  13 kubectl get pv

mariadb-nfs.yaml

   1 ---
   2 apiVersion: v1
   3 kind: PersistentVolume
   4 metadata:
   5   name: mdb-vol-0
   6   labels:
   7     volume: mdb-volume
   8 spec:
   9   storageClassName: manual
  10   capacity:
  11     storage: 1Gi
  12   accessModes:
  13     - ReadWriteOnce
  14   nfs:
  15     server: 127.0.0.1
  16     path: "/vol/mariadb-0"
  17 ---
  18 apiVersion: v1
  19 kind: PersistentVolumeClaim
  20 metadata:
  21   name: mdb-pv-claim
  22 spec:
  23   storageClassName: manual
  24   accessModes:
  25     - ReadWriteOnce
  26   resources:
  27     requests:
  28       storage: 1Gi
  29 ---
  30 apiVersion: v1
  31 kind: Service
  32 metadata:
  33   name: mariadb
  34 spec:
  35   ports:
  36   - port: 3306
  37   selector:
  38     app: mariadb
  39   clusterIP: None
  40 ---
  41 apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
  42 kind: Deployment
  43 metadata:
  44   name: mariadb
  45 spec:
  46   selector:
  47     matchLabels:
  48       app: mariadb
  49   strategy:
  50     type: Recreate
  51   template:
  52     metadata:
  53       labels:
  54         app: mariadb
  55     spec:
  56       containers:
  57       - image: mariadb:latest
  58         name: mariadb
  59         env:
  60           # Use secret in real usage
  61         - name: MYSQL_ROOT_PASSWORD
  62           value: password
  63         ports:
  64         - containerPort: 3306
  65           name: mariadb
  66         volumeMounts:
  67         - name: mdb-persistent-storage
  68           mountPath: /var/lib/mariadb # NOTE: mariadb's datadir is /var/lib/mysql; mount there to persist DB data
  69       volumes:
  70       - name: mdb-persistent-storage
  71         persistentVolumeClaim:
  72           claimName: mdb-pv-claim

Persistent volumes

A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.

A PersistentVolumeClaim (PVC) is a request for storage by a user.

Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany, see AccessModes).

Types of Persistent Volumes include hostPath, local, nfs, iscsi, cephfs and cloud-provider volumes (e.g. awsElasticBlockStore, gcePersistentDisk); this page uses hostPath and nfs.

Ingress controller nginx example

ingress-cherrypy-test.yml

   1 apiVersion: networking.k8s.io/v1
   2 kind: Ingress
   3 metadata:
   4   name: ingress-cherrypy-test
   5 spec:
   6   rules:
   7   - host: cp.info
   8     http:
   9       paths:
  10       - path: /
  11         pathType: Prefix
  12         backend:
  13           service:
  14             name: cherrypy-test
  15             port:
  16               number: 8000
  17   ingressClassName: nginx

Steps

   1 # install k3s
   2 curl -sfL https://get.k3s.io | sh -
   3 KUBECONFIG=~/.kube/config
   4 mkdir ~/.kube 2> /dev/null
   5 sudo k3s kubectl config view --raw > "$KUBECONFIG"
   6 chmod 600 "$KUBECONFIG"
   7 nano ~/.bashrc 
   8 export KUBECONFIG=~/.kube/config
   9 source ~/.bashrc 
  10 
  11 sudo nano /etc/systemd/system/k3s.service
  12 ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644
  13 sudo systemctl daemon-reload
  14 sudo service k3s start
  15 sudo service k3s status
  16 kubectl get pods 
  17 
  18 k3s kubectl cluster-info 
  19 
  20 kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
  21 sudo service k3s stop
  22 sudo nano /etc/systemd/system/k3s.service
  23 # ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode=644 --disable traefik 
  24 sudo systemctl daemon-reload
  25 sudo rm /var/lib/rancher/k3s/server/manifests/traefik.yaml
  26 sudo service k3s start
  27 kubectl -n kube-system delete helmcharts.helm.cattle.io traefik
  28 sudo systemctl restart k3s
  29 
  30 kubectl get nodes 
  31 kubectl delete node localhost 
  32 kubectl get pods --all-namespaces 
  33 kubectl get services --all-namespaces
  34 kubectl get deployment --all-namespaces
  35 
  36 # install nginx ingress controller 
  37 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
  38 kubectl get pods --namespace=ingress-nginx
  39 
  40 kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
  41 kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP # cluster ip port 8000
  42 kubectl get services
  43 
  44 kubectl apply -f ingress-cherrypy-test.yml
  45 
  46 EXTERNAL_IP=$(ip addr show | grep wlp | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}')
  47 echo $EXTERNAL_IP
  48 sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts  "
  49 kubectl get ingress 
  50 curl cp.info 
  51 
  52 kubectl scale deployment cherrypy-test --replicas=5
  53 curl http://cp.info/ -vvv
  54 sudo apt install apache2-utils
  55 ab -n 10 -c 10 http://cp.info/
  56 
  57 # Push image to docker hub
  58 docker build -t vbodocker/cherrypy-test . 
  59 docker run -p 8080:8080 vbodocker/cherrypy-test 
  60 docker login # login to docker hub
  61 docker push vbodocker/cherrypy-test 
  62 docker pull vbodocker/cherrypy-test:latest
  63 
  64 # Rollout, deploy new image
  65 kubectl get deployments -o wide # shows image urls 
  66 kubectl rollout restart deployment cherrypy-test # redeploy image url for cherrypy-test
  67 kubectl rollout status deployment cherrypy-test
  68 kubectl get deployments -o wide 
  69 kubectl get pods -o wide # age should be low for the newly deployed pods 
  70 

Install k3s static binary in Slack64

   1 sudo mv ~/Downloads/k3s /usr/bin/
   2 sudo chmod 744 /usr/bin/k3s

/etc/rc.d/rc.k3s

   1 #!/bin/sh
   2 PATH=$PATH:/usr/sbin
   3 
   4 k3s_start() {
   5   /usr/bin/k3s server --write-kubeconfig-mode=644 \
   6   --disable traefik > /var/log/k3s.log 2>&1 &
   7 }
   8 
   9 k3s_stop() {
  10   kill $(ps uax | grep "/usr/bin/k3s" | grep -v grep | head -1 | awk '//{print $2}')
  11   ps uax | grep containerd | grep -v grep | awk '//{print $2}' | xargs -i kill {}
  12 }
  13 
  14 k3s_restart() {
  15   k3s_stop
  16   k3s_start
  17 }
  18 
  19 case "$1" in
  20 'start')
  21   k3s_start
  22   ;;
  23 'stop')
  24   k3s_stop
  25   ;;
  26 'restart')
  27   k3s_restart
  28   ;;
  29 *)
  30   echo "usage $0 start|stop|restart"
  31 esac

ingress-cherrypy-test.yml

   1 apiVersion: networking.k8s.io/v1
   2 kind: Ingress
   3 metadata:
   4   name: ingress-cherrypy-test
   5 spec:
   6   rules:
   7   - host: cp.info
   8     http:
   9       paths:
  10       - path: /
  11         pathType: Prefix
  12         backend:
  13           service:
  14             name: cherrypy-test
  15             port:
  16               number: 8000
  17   ingressClassName: nginx

Steps

   1 echo "alias kubectl='/usr/bin/k3s kubectl'" >> ~/.bashrc
   2 source ~/.bashrc
   3 sudo sh /etc/rc.d/rc.k3s start
   4 kubectl get nodes
   5 kubectl get deployments --all-namespaces
   6 kubectl get services --all-namespaces
   7 kubectl get pods --all-namespaces
   8 kubectl cluster-info 
   9 # install nginx ingress controller
  10 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml 
  11 # wait for nginx ingress controller to finish 
  12 sleep 120
  13 kubectl create deployment cherrypy-test --image=vbodocker/cherrypy-test
  14 kubectl expose deployment cherrypy-test --port=8000 --target-port=8080 --type=ClusterIP
  15 kubectl get pods --all-namespaces
  16 kubectl get services --all-namespaces
  17 kubectl apply -f ingress-cherrypy-test.yml 
  18 EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}')
  19 echo $EXTERNAL_IP
  20 sudo sh -c " echo '$EXTERNAL_IP cp.info' >> /etc/hosts  "
  21 cat /etc/hosts
  22 kubectl get ingress 
  23 curl cp.info 

Local docker registry example

Dockerfile

   1 FROM alpine:3.16
   2 RUN apk add --update --no-cache python3 nano py3-pip
   3 RUN pip install cherrypy routes --user
   4 RUN mkdir /app
   5 COPY . /app/
   6 WORKDIR /app
   7 CMD ["python3","main.py"]

main.py

   1 import cherrypy
   2 import wsgiref.handlers
   3 import time
   4 import socket 
   5 
   6 class RootUrl:
   7   @cherrypy.expose
   8   def index(self):
   9     return "Hello world ola mundo %s"%(socket.gethostname())
  10 
  11 def main():
  12   ru = RootUrl()
  13   cherrypy.server.socket_host = '0.0.0.0'
  14   cherrypy.quickstart(ru) 
  15 
  16 if __name__ == '__main__':
  17   main()

test-py.yaml

   1 apiVersion: v1
   2 kind: Service
   3 metadata:
   4   name: test-py-svc
   5 spec:
   6   ports:
   7   - port: 8080
   8   selector:
   9     app: test-py-deploy
  10 ---
  11 apiVersion: apps/v1
  12 kind: Deployment
  13 metadata:
  14   name: test-py-deploy
  15 spec:
  16   selector:
  17     matchLabels:
  18       app: test-py-deploy
  19   strategy:
  20     type: Recreate
  21   template:
  22     metadata:
  23       labels:
  24         app: test-py-deploy
  25     spec:
  26       containers:
  27       - image: localhost:5000/test-py-image:latest
  28         name: test-py
  29         ports:
  30         - containerPort: 8080
  31           name: test-py
  32       imagePullSecrets:
  33       - name: regcred

ingress-test-py.yaml

   1 apiVersion: networking.k8s.io/v1
   2 kind: Ingress
   3 metadata:
   4   name: ingress-test-py
   5 spec:
   6   rules:
   7   - host: testpy.info
   8     http:
   9       paths:
  10       - path: /
  11         pathType: Prefix
  12         backend:
  13           service:
  14             name: test-py-svc
  15             port:
  16               number: 8080
  17   ingressClassName: nginx

Steps

   1 mkdir test-py
   2 cd test-py
   3 mkdir auth
   4 docker run --entrypoint htpasswd httpd:2 -Bbn testuser testpassword > auth/htpasswd
   5 docker run -d -p 5000:5000 --restart=always --name registry -v "$(pwd)"/auth:/auth \
   6   -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
   7   -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
   8   registry:2
   9 docker ps
  10 curl http://testuser:testpassword@localhost:5000/v2/_catalog
  11 
  12 docker build -t test-py-image .
  13 docker login localhost:5000
  14 docker tag test-py-image localhost:5000/test-py-image
  15 docker push localhost:5000/test-py-image
  16 
  17 kubectl delete secret regcred
  18 kubectl create secret docker-registry regcred --docker-server=localhost:5000 --docker-username=testuser --docker-password=testpassword --docker-email=test@example.org
  19 
  20 kubectl apply -f test-py.yaml
  21 curl $(kubectl get services | grep test | awk '//{print $3}'):8080
  22 # Hello world ola mundo test-py
  23 
  24 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml 
  25 # wait for nginx ingress controller to finish 
  26 sleep 120
  27 
  28 EXTERNAL_IP=$(/sbin/ip addr show | grep wl | grep inet | awk '//{print $2}' | sed 's/\// /g' | awk '//{print $1}')
  29 echo $EXTERNAL_IP
  30 sudo sh -c " echo '$EXTERNAL_IP testpy.info' >> /etc/hosts  "
  31 kubectl apply -f ingress-test-py.yaml
  32 curl testpy.info

DNS issues

k3s DNS resides in namespace kube-system in the service kube-dns

   1 # kube-system kube-dns  ClusterIP 10.43.0.10
   2 # tests in pods IP 10.43.0.10
   3 nslookup <service>.<namespace>.svc.cluster.local 10.43.0.10
   4 nslookup api.chucknorris.io  10.43.0.10
   5 # add Google and cloudfare entries
   6 KUBE_EDITOR="nano" kubectl -n kube-system edit configmap coredns
   7 forward . 1.1.1.1 8.8.8.8 /etc/resolv.conf
   8 # Redeploy coredns
   9 kubectl -n kube-system rollout restart deployment coredns
  10 # Check logs 
  11 kubectl logs --namespace=kube-system --tail=100 -l k8s-app=kube-dns

kubernetes/k3s (last edited 2024-10-26 09:44:29 by vitor)