Kubernetes - jrwhetse/jrwhetse.github.io GitHub Wiki
- Kail - Kubernetes Tail
- Docker
- RKE
- Helm
- Kubernetes
- Cert Manager
- Rancher
  - Rancher 2.4.8 - RKE 1.1.10 - Cert-Manager 0.15.0
  - Rancher 2.5.7 - RKE 1.2.6 - Cert-Manager 1.0.4
  - How to Upgrade from 2.4.8 to 2.5.7
- Learning
- Documents
Table of contents generated with markdown-toc
# install kail
yum install golang -y
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$GOPATH/bin"
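Typical usage, in the same form used later on this page:
# tail logs from one or more namespaces
kail --ns=cattle-system --ns=cert-manager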
https://docs.docker.com/engine/release-notes/
# docker versions
docker --version
Docker version 19.03.6-ce, build 369ce74
# docker list containers
docker ps
# clear iptables
iptables --flush; iptables -t nat --flush; systemctl restart docker
https://github.com/rancher/rke/releases
# rke versions
rke --version
rke version v1.2.4
# generate empty config
rke config --empty --name cluster.yml
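A minimal rancher-cluster.yml might look like the sketch below; the address, user, and SSH key path are placeholders, and kubernetes_version is optional (see the upgrade notes further down this page).
# example rancher-cluster.yml (placeholder values)
nodes:
  - address: 10.0.0.1
    user: rancher
    role: [controlplane, etcd, worker]
    ssh_key_path: ~/.ssh/id_rsa
kubernetes_version: v1.18.16-rancher1-1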
# cluster up
rke up --config /home/rancher/rancher-cluster.yml
# cluster remove
rke remove --config /home/rancher/rancher-cluster.yml
# rke print system images
rke config --system-images
# rke list versions
rke config --list-version --all
# test rke dns
kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default
kubectl run -it --rm --restart=Never dnsutils --image=gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 -- /bin/sh
https://github.com/helm/helm/releases
# version
helm version
version.BuildInfo{Version:"v3.3.1", GitCommit:"249e5215cde0c3fa72e27eb7a30e8d55c9696144", GitTreeState:"clean", GoVersion:"go1.14.7"}
# add catalog
helm repo add <repo_name> <url>
# get deployed charts
helm list -A
helm list -n <namespace>
# deploy chart
helm install <release_name> <repo>/<chart> -n <namespace> --version <version>
# delete chart
helm uninstall <release_name> -n <namespace>
# get values used in deployment
helm get values <release_name> -n <namespace>
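For example, using the jetstack repo that appears later on this page (the full cert-manager install, including its CRDs, is covered in the Cert Manager section below):
helm repo add jetstack https://charts.jetstack.io
helm install cert-manager jetstack/cert-manager -n cert-manager --version v1.2.0
helm get values cert-manager -n cert-manager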
https://github.com/kubernetes/kubernetes/releases
# version
kubectl version
Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.2", GitCommit:"f5743093fd1c663cb0cbc89748f730662345d44d",
GitTreeState:"clean", BuildDate:"2020-09-16T13:41:02Z", GoVersion:"go1.15", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.6", GitCommit:"fbf646b339dc52336b55d8ec85c181981b86331a",
GitTreeState:"clean", BuildDate:"2020-12-18T12:01:36Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}
# set kubeconfig
export KUBECONFIG=/home/rancher/kube_config_rancher-cluster.yml
# optionally, kubeconfig can be passed with every command
kubectl --kubeconfig /home/rancher/kube_config_rancher-cluster.yml <command>
# view kubeconfig
kubectl config view
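Two related context commands that are often useful alongside the kubeconfig setup above:
# show the active context
kubectl config current-context
# list all contexts in the kubeconfig
kubectl config get-contexts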
# get nodes
kubectl get nodes
# get all namespaces
kubectl get namespaces
# create namespace
kubectl create namespace cert-manager
# get pods
kubectl get pods --all-namespaces
kubectl get pods -A
kubectl get pods -n <namespace>
kubectl get pods -A -o wide
# get logs
kubectl logs <pod_name> -n <namespace>
# show labels
kubectl get pods -A --show-labels
# delete multiple pods
kubectl delete pod -n cattle-system -l <label>
kubectl delete pods -n cattle-system --all
# run a pod from a docker image (this example runs nslookup)
kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default
https://github.com/jetstack/cert-manager/releases
# delete cert manager crds
kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml
# create cert manager crds
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created
# get cert-manager crds
kubectl get customresourcedefinitions | grep cert-manager
NAME CREATED AT
bgpconfigurations.crd.projectcalico.org 2021-03-18T17:29:01Z
bgppeers.crd.projectcalico.org 2021-03-18T17:29:01Z
blockaffinities.crd.projectcalico.org 2021-03-18T17:29:01Z
certificaterequests.cert-manager.io 2021-03-18T18:57:44Z
certificates.cert-manager.io 2021-03-18T18:57:44Z
challenges.acme.cert-manager.io 2021-03-18T18:57:44Z
clusterinformations.crd.projectcalico.org 2021-03-18T17:29:01Z
clusterissuers.cert-manager.io 2021-03-18T18:57:44Z
felixconfigurations.crd.projectcalico.org 2021-03-18T17:29:01Z
globalnetworkpolicies.crd.projectcalico.org 2021-03-18T17:29:01Z
globalnetworksets.crd.projectcalico.org 2021-03-18T17:29:01Z
hostendpoints.crd.projectcalico.org 2021-03-18T17:29:01Z
ipamblocks.crd.projectcalico.org 2021-03-18T17:29:01Z
ipamconfigs.crd.projectcalico.org 2021-03-18T17:29:01Z
ipamhandles.crd.projectcalico.org 2021-03-18T17:29:01Z
ippools.crd.projectcalico.org 2021-03-18T17:29:01Z
issuers.cert-manager.io 2021-03-18T18:57:44Z
kubecontrollersconfigurations.crd.projectcalico.org 2021-03-18T17:29:02Z
networkpolicies.crd.projectcalico.org 2021-03-18T17:29:02Z
networksets.crd.projectcalico.org 2021-03-18T17:29:02Z
orders.acme.cert-manager.io 2021-03-18T18:57:45Z
# get cert-manager crds details
kubectl describe customresourcedefinitions certificaterequests.cert-manager.io
kubectl describe customresourcedefinitions certificates.cert-manager.io
kubectl describe customresourcedefinitions challenges.acme.cert-manager.io
kubectl describe customresourcedefinitions clusterissuers.cert-manager.io
kubectl describe customresourcedefinitions issuers.cert-manager.io
kubectl describe customresourcedefinitions orders.acme.cert-manager.io
# check if namespace exists
kubectl get namespaces | grep cert-manager
# create cert-manager namespace
kubectl create namespace cert-manager
# sample output of kubectl get pods -A, filtered to the helm-operation jobs in cattle-system
cattle-system helm-operation-6v2md 0/2 Completed 0 22m
cattle-system helm-operation-8z6dr 0/2 Completed 0 6m50s
cattle-system helm-operation-b8jtt 0/2 Completed 0 7m3s
cattle-system helm-operation-chvs9 0/2 Completed 0 23m
cattle-system helm-operation-hkdhv 0/2 Completed 0 22m
cattle-system helm-operation-jzhhr 0/2 Completed 0 6m18s
cattle-system helm-operation-q2hkw 0/2 Completed 0 23m
cattle-system helm-operation-q4z6m 0/2 Completed 0 6m30s
cattle-system helm-operation-srbrb 0/2 Completed 0 7m14s
cattle-system helm-operation-sz4wf 0/2 Completed 0 23m
# add cert-manager repository
helm repo add jetstack https://charts.jetstack.io
# install cert-manager
helm install cert-manager jetstack/cert-manager -n cert-manager --version v1.2.0
kubectl rollout status deploy/cert-manager -n cert-manager
# get cert-manager helm charts
helm list -n cert-manager
# get cert-manager pods
kubectl get pods -n cert-manager
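To verify that the cert-manager webhook is answering, a throwaway self-signed Issuer and Certificate can be applied and then deleted. This is a sketch assuming the cert-manager.io/v1 API (cert-manager 1.x; use cert-manager.io/v1alpha2 for 0.15.x); the resource and file names are arbitrary.
# test-resources.yaml (hypothetical file name)
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: test-selfsigned
  namespace: cert-manager
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: test-cert
  namespace: cert-manager
spec:
  secretName: test-cert-tls
  dnsNames:
    - example.com
  issuerRef:
    name: test-selfsigned
# apply, check that the Certificate becomes Ready, then clean up
kubectl apply -f test-resources.yaml
kubectl describe certificate test-cert -n cert-manager
kubectl delete -f test-resources.yaml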
# get list of resources
kubectl api-resources
# remove cert-manager pods by label
kubectl delete pod -n cert-manager -l app.kubernetes.io/instance=cert-manager
# remove cert manager
helm uninstall cert-manager -n cert-manager
https://github.com/rancher/rancher/releases
# check if namespace exists
kubectl get namespaces | grep cattle-system
# create rancher namespace
kubectl create namespace cattle-system
# add rancher repository
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
# install rancher chart
helm install rancher rancher-stable/rancher --version 2.5.7 --namespace cattle-system --set auditLog.level=1 --set hostname=rancher-ha-jw-mgmt.bylightsdc.bylight.com --set ingress.tls.source=letsEncrypt --set [email protected] --set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' --set 'extraEnv[0].value=1.2'
kubectl rollout status deploy/rancher -n cattle-system
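The --set flags above can equivalently live in a values file; a sketch with a placeholder hostname and email (the file name rancher-values.yaml is arbitrary):
# rancher-values.yaml
hostname: rancher.example.com
auditLog:
  level: 1
ingress:
  tls:
    source: letsEncrypt
letsEncrypt:
  email: admin@example.com
extraEnv:
  - name: CATTLE_TLS_MIN_VERSION
    value: "1.2"
# then install with
helm install rancher rancher-stable/rancher --version 2.5.7 -n cattle-system -f rancher-values.yaml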
# get cattle-system helm charts
helm list -n cattle-system
# get cattle-system pods
kubectl get pods -n cattle-system
# look at logs
kubectl logs -f <rancher_workload_id> <rancher | rancher-audit-log> -n cattle-system
# remove rancher pods by label
kubectl delete pod -n cattle-system -l app=rancher
# delete rancher
helm delete rancher -n cattle-system
# delete namespaces
kubectl delete namespace cattle-system
kubectl get namespace cattle-system -o json > cattle-system.json
vim cattle-system.json
kubectl replace --raw "/api/v1/namespaces/cattle-system/finalize" -f ./cattle-system.json
kubectl delete namespace cattle-system-data
kubectl get namespace cattle-system-data -o json > cattle-system-data.json
vim cattle-system-data.json
kubectl replace --raw "/api/v1/namespaces/cattle-system-data/finalize" -f ./cattle-system-data.json
kubectl delete namespace cattle-system-nt
kubectl get namespace cattle-system-nt -o json > cattle-system-nt.json
vim cattle-system-nt.json
kubectl replace --raw "/api/v1/namespaces/cattle-system-nt/finalize" -f ./cattle-system-nt.json
kubectl delete namespace local
kubectl get namespace local -o json > local.json
vim local.json
kubectl replace --raw "/api/v1/namespaces/local/finalize" -f ./local.json
kubectl delete namespace p-xxxx
kubectl get namespace p-xxxx -o json > p-xxxx.json
vim p-xxxx.json
kubectl replace --raw "/api/v1/namespaces/p-xxxx/finalize" -f ./p-xxxx.json
kubectl delete namespace rancher-operator-system
kubectl get namespace rancher-operator-system -o json > rancher-operator-system.json
vim rancher-operator-system.json
kubectl replace --raw "/api/v1/namespaces/rancher-operator-system/finalize" -f ./rancher-operator-system.json
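The delete/edit/replace pattern above can be scripted; a sketch assuming jq is installed (namespace names copied from the commands above):
for ns in cattle-system cattle-system-data cattle-system-nt local p-xxxx rancher-operator-system; do
  kubectl get namespace "$ns" -o json \
    | jq '.spec.finalizers = []' \
    | kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f -
done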
# run a container (debug)
kubectl run -it --rm --restart=Never kitchensink --image=markeijsermans/debug:latest --
# delete rancher, cert-manager, rke (run on primary node)
helm delete rancher -n cattle-system
helm delete cert-manager -n cert-manager
kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v0.15.0/cert-manager.crds.yaml
rke remove --config /home/rancher/rancher-cluster.yml
# remove docker containers (run on all nodes): kill all running containers, then delete all stopped containers
docker kill $(docker ps -q); docker rm $(docker ps -a -q)
# install rke (run on all nodes)
sudo wget https://github.com/rancher/rke/releases/download/v1.1.10/rke_linux-amd64 -O /usr/local/bin/rke
sudo chmod +x /usr/local/bin/rke
# configure rke cluster (run on primary node)
rke up --config /home/rancher/rancher-cluster.yml
# install cert-manager (run on primary node)
kubectl create namespace cert-manager
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.0/cert-manager.crds.yaml
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager --version v0.15.0 -n cert-manager
kubectl rollout status deploy/cert-manager -n cert-manager
# install rancher (run on primary node)
kubectl create namespace cattle-system
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update
helm install rancher rancher-stable/rancher --version 2.4.8 --set auditLog.level=1 --set hostname=rancher-ha-jw4-mgmt.bylightsdc.bylight.com --set ingress.tls.source=letsEncrypt --set [email protected] --set letsEncrypt.environment=staging --set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' --set 'extraEnv[0].value=1.2' -n cattle-system
kubectl rollout status deploy/rancher -n cattle-system
kail --ns=cattle-system --ns=cert-manager
# delete rancher, cert-manager, rke (run on primary node)
helm delete rancher -n cattle-system
helm delete cert-manager -n cert-manager
kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml
rke remove --config /home/rancher/rancher-cluster.yml
# remove docker containers (run on all nodes): kill all running containers, then delete all stopped containers
docker kill $(docker ps -q); docker rm $(docker ps -a -q)
# install rke (run on all nodes)
sudo wget https://github.com/rancher/rke/releases/download/v1.2.6/rke_linux-amd64 -O /usr/local/bin/rke
sudo chmod +x /usr/local/bin/rke
# configure rke cluster (run on primary node)
rke up --config /home/rancher/rancher-cluster.yml
# install cert-manager (run on primary node)
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml
kubectl create namespace cert-manager
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager --version v1.0.4 -n cert-manager
kubectl rollout status deploy/cert-manager -n cert-manager
# install rancher (run on primary node)
kubectl create namespace cattle-system
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update
helm install rancher rancher-stable/rancher --version 2.5.7 --set auditLog.level=1 --set hostname=rancher-ha-jw4-mgmt.bylightsdc.bylight.com --set ingress.tls.source=letsEncrypt --set [email protected] --set letsEncrypt.environment=staging --set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' --set 'extraEnv[0].value=1.2' -n cattle-system
kubectl rollout status deploy/rancher -n cattle-system
kail --ns=cattle-system --ns=cert-manager
# update
# helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# helm upgrade --install rancher rancher-latest/rancher --version 2.5.7 --set auditLog.level=1 --set hostname=rancher-ha-jw4-mgmt.bylightsdc.bylight.com --set ingress.tls.source=letsEncrypt --set [email protected] --set letsEncrypt.environment=staging --set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' --set 'extraEnv[0].value=1.2' -n cattle-system
It's all about the Kubernetes version.
RKE 1.2.6 uses Kubernetes v1.20.x by default, which causes problems with RKE and the network. To stay on Kubernetes v1.18.16, update rancher-cluster.yml and run rke up. Instructions below.
# update the rancher-cluster.yml file and set kubernetes_version
kubernetes_version: v1.18.16-rancher1-1
# install rke (run on all nodes)
sudo wget https://github.com/rancher/rke/releases/download/v1.2.6/rke_linux-amd64 -O /usr/local/bin/rke
sudo chmod +x /usr/local/bin/rke
# configure rke cluster (run on primary node)
rke up --config /home/rancher/rancher-cluster.yml
# rke print system images
rke config --system-images
# rke list versions
rke config --list-version --all
# update
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo update
helm upgrade --install rancher rancher-latest/rancher --version 2.5.7 --set auditLog.level=1 --set hostname=rancher-ha-jw4-mgmt.bylightsdc.bylight.com --set ingress.tls.source=letsEncrypt --set [email protected] --set letsEncrypt.environment=staging --set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' --set 'extraEnv[0].value=1.2' -n cattle-system
# watch pods
watch -n 2 kubectl get pods -A
# bring up UI
https://rancher-ha-jw4-mgmt.bylightsdc.bylight.com
# update the mgmt/group_vars/rancher_ha.yml file to use 2.5.7 variables instead of 2.4.8
# update the rancher-ha/roles/templates/home/rancher/rancher-cluster.yml and set kubernetes_version: v1.18.16-rancher1-1
# execute rancher helper playbook. This playbook updates docker, rancher, rancher_cli, kubectl, helm and installs kail
ansible-playbook playbooks/rancher-ha/main.yml -i inventories/mgmt --extra-vars '{"project_id": "jw", "octet": {"id_1": "60", "id_2": "61", "id_3": "62"} }'
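The group_vars change referenced above might look like the sketch below; these variable names are hypothetical and must be matched to the actual keys in mgmt/group_vars/rancher_ha.yml.
# hypothetical variable names -- align them with the real keys in rancher_ha.yml
rancher_version: "2.5.7"
rke_version: "v1.2.6"
cert_manager_version: "v1.0.4"
kubernetes_version: "v1.18.16-rancher1-1"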
# watch pods (on primary node)
watch -n 2 kubectl get pods -A
# bring up UI
https://rancher-ha-jw4-mgmt.bylightsdc.bylight.com
kubectl get nodes
kubectl get namespaces
kubectl get pods -n default
No resources found in default namespace.
kubectl get pods -n nginx-ingress
NAME READY STATUS RESTARTS AGE
default-http-backend-65dd5949d9-h2p26 1/1 Running 0 79m
nginx-ingress-controller-6rnjx 1/1 Running 0 79m
nginx-ingress-controller-bw8b6 1/1 Running 0 79m
nginx-ingress-controller-qwmn2 1/1 Running 0 79m
kubectl get pods -n kube-node-lease
No resources found in kube-node-lease namespace.
kubectl get pods -n kube-public
No resources found in kube-public namespace.
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6c8ddcb6cd-5rt9f 1/1 Running 0 82m
canal-9zrdr 2/2 Running 0 82m
canal-c9ggq 2/2 Running 0 82m
canal-pzdgg 2/2 Running 0 82m
coredns-669b8675c5-jsddl 1/1 Running 0 81m
coredns-669b8675c5-s7wgl 1/1 Running 0 81m
coredns-autoscaler-79599b9dc6-pw9c5 1/1 Running 0 81m
metrics-server-6cc9c79d8c-b87rt 1/1 Running 2 81m
rke-coredns-addon-deploy-job-t5qz5 0/1 Completed 0 82m
rke-ingress-controller-deploy-job-f67mx 0/1 Completed 0 81m
rke-metrics-addon-deploy-job-mshld 0/1 Completed 0 81m
rke-network-plugin-deploy-job-tkpmz 0/1 Completed 0 82m
kubectl api-resources
NAME SHORTNAMES APIVERSION NAMESPACED KIND
bindings v1 true Binding
componentstatuses cs v1 false ComponentStatus
configmaps cm v1 true ConfigMap
endpoints ep v1 true Endpoints
events ev v1 true Event
limitranges limits v1 true LimitRange
namespaces ns v1 false Namespace
nodes no v1 false Node
persistentvolumeclaims pvc v1 true PersistentVolumeClaim
persistentvolumes pv v1 false PersistentVolume
pods po v1 true Pod
podtemplates v1 true PodTemplate
replicationcontrollers rc v1 true ReplicationController
resourcequotas quota v1 true ResourceQuota
secrets v1 true Secret
serviceaccounts sa v1 true ServiceAccount
services svc v1 true Service
mutatingwebhookconfigurations admissionregistration.k8s.io/v1 false MutatingWebhookConfiguration
validatingwebhookconfigurations admissionregistration.k8s.io/v1 false ValidatingWebhookConfiguration
customresourcedefinitions crd,crds apiextensions.k8s.io/v1 false CustomResourceDefinition
apiservices apiregistration.k8s.io/v1 false APIService
controllerrevisions apps/v1 true ControllerRevision
daemonsets ds apps/v1 true DaemonSet
deployments deploy apps/v1 true Deployment
replicasets rs apps/v1 true ReplicaSet
statefulsets sts apps/v1 true StatefulSet
tokenreviews authentication.k8s.io/v1 false TokenReview
localsubjectaccessreviews authorization.k8s.io/v1 true LocalSubjectAccessReview
selfsubjectaccessreviews authorization.k8s.io/v1 false SelfSubjectAccessReview
selfsubjectrulesreviews authorization.k8s.io/v1 false SelfSubjectRulesReview
subjectaccessreviews authorization.k8s.io/v1 false SubjectAccessReview
horizontalpodautoscalers hpa autoscaling/v1 true HorizontalPodAutoscaler
cronjobs cj batch/v1beta1 true CronJob
jobs batch/v1 true Job
certificatesigningrequests csr certificates.k8s.io/v1 false CertificateSigningRequest
leases coordination.k8s.io/v1 true Lease
bgpconfigurations crd.projectcalico.org/v1 false BGPConfiguration
bgppeers crd.projectcalico.org/v1 false BGPPeer
blockaffinities crd.projectcalico.org/v1 false BlockAffinity
clusterinformations crd.projectcalico.org/v1 false ClusterInformation
felixconfigurations crd.projectcalico.org/v1 false FelixConfiguration
globalnetworkpolicies crd.projectcalico.org/v1 false GlobalNetworkPolicy
globalnetworksets crd.projectcalico.org/v1 false GlobalNetworkSet
hostendpoints crd.projectcalico.org/v1 false HostEndpoint
ipamblocks crd.projectcalico.org/v1 false IPAMBlock
ipamconfigs crd.projectcalico.org/v1 false IPAMConfig
ipamhandles crd.projectcalico.org/v1 false IPAMHandle
ippools crd.projectcalico.org/v1 false IPPool
kubecontrollersconfigurations crd.projectcalico.org/v1 false KubeControllersConfiguration
networkpolicies crd.projectcalico.org/v1 true NetworkPolicy
networksets crd.projectcalico.org/v1 true NetworkSet
endpointslices discovery.k8s.io/v1beta1 true EndpointSlice
events ev events.k8s.io/v1 true Event
ingresses ing extensions/v1beta1 true Ingress
flowschemas flowcontrol.apiserver.k8s.io/v1beta1 false FlowSchema
prioritylevelconfigurations flowcontrol.apiserver.k8s.io/v1beta1 false PriorityLevelConfiguration
ingressclasses networking.k8s.io/v1 false IngressClass
ingresses ing networking.k8s.io/v1 true Ingress
networkpolicies netpol networking.k8s.io/v1 true NetworkPolicy
runtimeclasses node.k8s.io/v1 false RuntimeClass
poddisruptionbudgets pdb policy/v1beta1 true PodDisruptionBudget
podsecuritypolicies psp policy/v1beta1 false PodSecurityPolicy
clusterrolebindings rbac.authorization.k8s.io/v1 false ClusterRoleBinding
clusterroles rbac.authorization.k8s.io/v1 false ClusterRole
rolebindings rbac.authorization.k8s.io/v1 true RoleBinding
roles rbac.authorization.k8s.io/v1 true Role
priorityclasses pc scheduling.k8s.io/v1 false PriorityClass
csidrivers storage.k8s.io/v1 false CSIDriver
csinodes storage.k8s.io/v1 false CSINode
storageclasses sc storage.k8s.io/v1 false StorageClass
volumeattachments storage.k8s.io/v1 false VolumeAttachment
# Remove namespaces that get stuck in terminating state
kubectl get namespace logging -o json > logging.json
# open logging.json and remove kubernetes from finalizers
{
  "apiVersion": "v1",
  "kind": "Namespace",
  "metadata": {
    "creationTimestamp": "2019-05-14T13:55:20Z",
    "labels": {
      "name": "logging"
    },
    "name": "logging",
    "resourceVersion": "29571918",
    "selfLink": "/api/v1/namespaces/logging",
    "uid": "e9516a8b-764f-11e9-9621-0a9c41ba9af6"
  },
  "spec": {
    "finalizers": []
  },
  "status": {
    "phase": "Terminating"
  }
}
# replace logging descriptor
kubectl replace --raw "/api/v1/namespaces/logging/finalize" -f ./logging.json
https://github.com/jrwhetse/jrwhetse.github.io/tree/master/docs