kubernetes kubectl
kubectl - tool to administer resources inside a cluster; it is not used to create a cluster or change the cluster's shape
kubectl [command] [type] [name] [flags]
[command] - what do you want to do: get/describe/logs/exec/...
[type] - on what type of object: pods/deployments/nodes/...
[name] - the name of the object
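For example, one way to read a full command against this pattern (the pod name below is just a placeholder):
kubectl describe pod my-test-app -n default # command=describe, type=pod, name=my-test-app, flags=-n default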
kubectl get pods -o=yaml
kubectl get pod my-test-app -o=wide
kubectl explain deployment
kubectl explain deployment --recursive
kubectl explain deployment.spec.replicas
kubectl explain deployment.metadata.name # understand the structure of a Deployment object and understand what the individual fields do
$HOME/.kube/config
The kubeconfig file contains:
- Target cluster names and addresses
- Credentials (users) for those clusters
- Contexts that tie a cluster, a user, and a default namespace together
--kubeconfig="" : use a particular kubeconfig file
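A minimal kubeconfig sketch showing those three sections (the cluster, user, and context names below are placeholders, not values from a real cluster):
apiVersion: v1
kind: Config
clusters:
- name: my-cluster
  cluster:
    server: https://1.2.3.4:6443
users:
- name: my-user
  user:
    token: <bearer-token>
contexts:
- name: my-context
  context:
    cluster: my-cluster
    user: my-user
    namespace: default
current-context: my-context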
kubectl config view
kubectl config --kubeconfig=config-demo view
$ KUBECONFIG=~/.kube/my.config kubectl get namespaces
- https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
- kubectl cheat sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
kubectl --help
kubectl config view
kubectl config get-contexts # display list of contexts
kubectl config current-context # display the current-context
kubectl config use-context my-cluster-name # set the default context to my-cluster-name
kubectl config set-context $(kubectl config current-context) --namespace=istioinaction # switch to the istioinaction namespace
kubectl delete deployment,svc,gateway,\
virtualservice,destinationrule --all -n istioinaction # clean up any resource in namespace
source <(kubectl completion bash) # setup autocomplete in bash into the current shell, bash-completion package should be installed first.
echo "source <(kubectl completion bash)" >> ~/.bashrc # add autocomplete permanently to your bash shell.
alias k='kubectl --insecure-skip-tls-verify=true'
complete -F __start_kubectl k
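The zsh equivalents from the same cheat sheet, if zsh is your shell:
source <(kubectl completion zsh) # setup autocomplete in zsh into the current shell
echo '[[ $commands[kubectl] ]] && source <(kubectl completion zsh)' >> ~/.zshrc # add autocomplete permanently to your zsh shell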
kubectl config view # Show Merged kubeconfig settings.
# use multiple kubeconfig files at the same time and view merged config
KUBECONFIG=~/.kube/config:~/.kube/kubconfig2
gcloud container clusters get-credentials <cluster-name> [--region=<region>] [--zone=<zone>] # fetch GKE cluster credentials into kubeconfig
# permanently save the namespace for all subsequent kubectl commands in that context.
kubectl config set-context --current --namespace=ggckad-s2
kubectl config --kubeconfig=config-demo set-cluster development --server=https://1.2.3.4 --certificate-authority=fake-ca-file
kubectl config --kubeconfig=config-demo set-cluster scratch --server=https://5.6.7.8 --insecure-skip-tls-verify
### add user
kubectl config --kubeconfig=config-demo set-credentials developer --client-certificate=fake-cert-file --client-key=fake-key-seefile
kubectl config --kubeconfig=config-demo set-credentials experimenter --username=exp --password=some-password
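To tie a cluster and a user together, define and select a context in the same file (the context name and namespace here are illustrative):
kubectl config --kubeconfig=config-demo set-context dev-frontend --cluster=development --namespace=frontend --user=developer
kubectl config --kubeconfig=config-demo use-context dev-frontend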
# remove
kubectl --kubeconfig=config-demo config unset users.<name> # user
kubectl --kubeconfig=config-demo config unset clusters.<name> # cluster
kubectl --kubeconfig=config-demo config unset contexts.<name> # context
kubectl cluster-info # Display addresses of the master and services
kubectl cluster-info dump # Dump current cluster state to stdout
kubectl cluster-info dump --output-directory=/path/to/cluster-state # Dump current cluster state to /path/to/cluster-state
kubectl api-versions
kubectl api-resources
kubectl get nodes # list nodes
kubectl describe node my-node
kubectl top nodes # resource usage
kubectl top node my-node # Show metrics for a given node
kubectl cordon my-node # Mark my-node as unschedulable
# no new pods will be scheduled onto that node, but whatever pods are running there will stay running on that node
kubectl uncordon my-node
# remove/evict all pods from a node that is going to be deleted, upgraded, or rebooted
kubectl drain my-node --ignore-daemonsets --force # Drain my-node in preparation for maintenance
kubectl delete node my-node # Delete my-node from the cluster
List all supported resource types along with their shortnames, API group, whether they are namespaced, and Kind:
kubectl api-resources
Other operations for exploring API resources:
kubectl api-resources --namespaced=true # All namespaced resources
kubectl api-resources --namespaced=false # All non-namespaced resources
kubectl api-resources -o name # All resources with simple output (only the resource name)
kubectl api-resources -o wide # All resources with expanded (aka "wide") output
kubectl api-resources --verbs=list,get # All resources that support the "list" and "get" request verbs
kubectl api-resources --api-group=extensions # All resources in the "extensions" API group
- imperative - direct command
kubectl create deployment nginx-imperative --image=nginx -n test-imperative
kubectl label namespace test-imperative namespace=imperative-apps
kubectl delete deployment -n test-imperative nginx-imperative
kubectl delete namespace test-imperative
- imperative - with config files
kubectl create -f namespace.yaml
kubectl create -f deployment.yaml
kubectl replace -f namespace.yaml
- declarative - with config files
kubectl apply -f declarative-files/namespace.yaml
kubectl apply -f declarative-files/deployment.yaml
- declarative - with config folder
kubectl apply -f declarative-folder # if a resource depends on one not yet created, the first run errors; re-running fixes it
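A minimal namespace.yaml of the kind referenced above (the namespace name is only an example; a matching deployment.yaml layout is shown at the end of this page):
namespace.yaml:
apiVersion: v1
kind: Namespace
metadata:
  name: test-declarative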
Note: The apply command was initially implemented entirely on the client side. More recently, the apply logic moved to the server side; all objects have an apply method (from a REST API perspective, it is a PATCH request with an application/apply-patch+yaml content-type header), and it is enabled by default starting with version 1.16 (more on the subject here: https://kubernetes.io/docs/reference/using-api/server-side-apply/).
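To opt in to server-side apply explicitly on clusters where the feature is enabled:
kubectl apply --server-side -f deployment.yaml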
kubectl create deployment
kubectl get deployment
kubectl get deployment --all-namespaces # shortcut for --all-namespaces is -A: kubectl get deployment -A
kubectl get all --all-namespaces
kubectl get pods
kubectl delete deployment nginx
kubectl apply -f deployment.yaml
kubectl get deployment nginx -o yaml > deployment.yaml
kubectl expose deployment nginx --port=80 --target-port=80
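After editing a saved manifest such as the deployment.yaml above, the pending change can be previewed before applying it:
kubectl diff -f deployment.yaml # show what apply would change, without changing anything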
# Scaling up an application
kubectl scale deployment nginx --replicas=2
# Deploying a new application version
kubectl set image deployment nginx nginx=nginx:1.19.0 --record
kubectl rollout status deployment
kubectl rollout history deployment nginx
kubectl rollout undo deployment nginx --to-revision=1
kubectl rollout restart deployment/abc # restart deployment without change configuration
kubectl rollout restart deployment --selector=app=nginx # Restart deployments with the app=nginx label
kubectl rollout history deployment/abc # View the rollout history of a deployment
kubectl rollout history daemonset/abc --revision=3 # View the details of daemonset revision 3
kubectl scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)
kubectl scale --replicas=3 -f foo.yaml
kubectl scale --replicas=3 deployment my-deployment
kubectl scale <CONTROLLER> my-app --replicas 4
kubectl get <CONTROLLER> my-app
kubectl autoscale deployment my-app --max 6 --min 4 --cpu-percent 50 # kubectl autoscale creates a HorizontalPodAutoscaler
kubectl get hpa # get info about HorizontalPodAutoscaler
kubectl get hpa <HPA_NAME> -o yaml
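Roughly the same autoscaler expressed as a manifest, assuming the autoscaling/v2 API is available on your cluster (the name my-app matches the example above):
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: my-app
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app
  minReplicas: 4
  maxReplicas: 6
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50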
kubectl get service
kubectl describe service nginx
kubectl apply -f service.yaml
kubectl get service nginx -o yaml
kubectl get service nginx -o yaml > service.yaml
kubectl apply -f ./my-manifest.yaml # create resource(s)
kubectl apply -f ./my1.yaml -f ./my2.yaml # create from multiple files
kubectl apply -f ./dir # create resource(s) in all manifest files in dir
kubectl apply -f https://git.io/vPieo # create resource(s) from url
kubectl create deployment nginx --image=nginx # start a single instance of nginx
# Run Pod
kubectl run demo --image=<your docker id>/<image name> --port=9999 --labels=app=demo
kubectl port-forward pod/demo 9999:8888 #http://localhost:9999
kubectl get pods --selector app=demo
kubectl run nginx --image=nginx --restart=Never
kubectl get pods
kubectl logs <pod name>
kubectl logs <pod name> -c <container name> # if more than one container in the pod
kubectl exec -it <pod name> -- bash # open a bash shell in a running pod's container
kubectl exec admin-boot-6c58c789c6-nt5qw --namespace app -c istio-proxy -- ls
# Annotation https://jamesdefabia.github.io/docs/user-guide/kubectl/kubectl_annotate/
kubectl annotate pods foo description='my frontend'
kubectl annotate --overwrite pods foo description='my frontend running nginx'
kubectl annotate pods --all description='my frontend running nginx' # Update all pods in the namespace
kubectl annotate pods foo description- # removing an annotation named 'description' if it exists
kubectl annotate pod/<pod name> prometheus.io/path- -n acp # remove annotation
$ kubectl explain pods # get the documentation for pod manifests
$ kubectl explain pods --recursive # finding which properties any object supports. It also shows the format of properties, such as whether it’s a string, map, object, or something else.
# drill into specific attributes
$ kubectl explain pod.spec.restartPolicy # drills into the restart policy attribute of a Pod object
kubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir
# create a Job which prints "Hello World"
kubectl create job hello --image=busybox:1.28 -- echo "Hello World"
# create a CronJob that prints "Hello World" every minute
kubectl create cronjob hello --image=busybox:1.28 --schedule="*/1 * * * *" -- echo "Hello World"
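A quick way to confirm the CronJob above is spawning Jobs (names as created above; the Job name is a placeholder):
kubectl get cronjob hello # show the CronJob and its schedule
kubectl get jobs --watch # watch Jobs appear each minute
kubectl logs job/<job-name> # print the output of one of the spawned Jobs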
# remove successfully completed jobs
kubectl -n acp delete jobs --field-selector status.successful=1
# Delete jobs with status failed
kubectl -n acp delete job $(kubectl -n acp get job -o=jsonpath='{.items[?(@.status.failed==1)].metadata.name}')
# Create multiple YAML objects from stdin
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox-sleep
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    args:
    - sleep
    - "1000000"
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox-sleep-less
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    args:
    - sleep
    - "1000"
EOF
# Create a secret with several keys
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
type: Opaque
data:
  password: $(echo -n "s33msi4" | base64 -w0)
  username: $(echo -n "jane" | base64 -w0)
EOF
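To verify the secret was stored as expected, its keys can be read back and decoded (the jsonpath keys match the manifest above):
kubectl get secret mysecret -o jsonpath='{.data.username}' | base64 --decode
kubectl get secret mysecret -o jsonpath='{.data.password}' | base64 --decode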
deployment.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx:1.18.0
        imagePullPolicy: IfNotPresent
        name: nginx
service.yaml:
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  type: ClusterIP
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
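One way to apply and check the two manifests above together (assuming they are saved as deployment.yaml and service.yaml in the current directory):
kubectl apply -f deployment.yaml -f service.yaml
kubectl get deployment,service nginx # confirm both objects exist
kubectl get endpoints nginx # the Service should list the pod IP once the pod is Ready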