Kubernetes vs Networking - hqzhang/cloudtestbed GitHub Wiki
image=wavecloud/nginx-std
port=8081
app=nginx

echo "push and remove docker images"
# NOTE(review): a plaintext password on the command line leaks via `ps` and
# shell history — prefer `docker login -u zhanghongqi --password-stdin`.
docker login -uzhanghongqi -ppassword
docker build -f Dockerfile -t "$image" .
docker push "$image"

# Remove any previous objects so the create/expose below starts clean.
kubectl delete deploy "$app"
kubectl delete pod "$app"
kubectl delete svc "$app"

kubectl create deployment "$app" --image="$image" --port="$port"
kubectl expose deployment "$app" --target-port="$port" --type=NodePort

# Forward the service just exposed (was hard-coded to svc/nginx).
kubectl port-forward svc/"$app" 8888:"$port" &

# verify — the forward above listens on 8888 (original probed port 888).
curl localhost:8888
1) ConfigMap from YAML
# ConfigMap holding simple key/value settings plus a multi-line file body.
# (Indentation restored — the wiki rendering had flattened it, which is
# invalid YAML for nested mappings.)
kind: ConfigMap
apiVersion: v1
metadata:
  name: example-configmap
data:
  # Configuration values can be set as key-value properties
  database: mongodb
  database_uri: mongodb://localhost:27017
  # Or set as complete file contents (even JSON!)
  keys: |
    image.public.key=771
    rsa.public.key=42
2) ConfigMap from a file
kubectl create configmap game-config-2 --from-file=file.properties
# Container-spec fragment: inject a ConfigMap value as an environment
# variable. (Nesting restored; this belongs under a pod's containers[] entry.)
env:
  # Define the environment variable
  - name: SPECIAL_LEVEL_KEY
    valueFrom:
      configMapKeyRef:
        # The ConfigMap containing the value you want to assign to SPECIAL_LEVEL_KEY
        name: game-config-2
        # Specify the key associated with the value
        key: special.how
0) vi /etc/hostname for master-node and slave-node
hostnamectl set-hostname master-node
hostnamectl set-hostname slave-node
1) vi /etc/hosts
192.168.99.106 master-node
192.168.0.107 slave-node
2)swapoff -a
3) vi /etc/fstab and comment out the swap line.
4)install docker
apt-get install apt-transport-https ca-certificates curl software-properties-common -y
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -y
apt-get install docker-ce -y
5)install kubernetes
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt-get update -y
apt-get install kubelet kubeadm kubectl -y
6) install master node
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.99.106
7) Add network
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
8) taint nodes
kubectl taint nodes --all node-role.kubernetes.io/master-
9) install slave node
kubeadm join 192.168.99.106:6443 --token r0d2h5.4xyzwcjzwggbz1ng \
--discovery-token-ca-cert-hash sha256:3f125b681ec356674d45bbcd32352d62c83a11b36b167b449c63b2879219628d
10) verification
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-node Ready master 9d v1.14.3
slave-node Ready slave 9d v1.14.3
11) end
1.Install Helm
1) install. Brew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
2) install helm
brew install kubernetes-helm
3) install tiller
helm init
4) deploy directly without chart
helm install --name dashboard-demo stable/kubernetes-dashboard
5) create a chart
helm create my-demo
βββ Chart.yaml
βββ charts
βββ templates
β βββ NOTES.txt
β βββ _helpers.tpl
β βββ deployment.yaml
β βββ ingress.yaml
β βββ service.yaml
β βββ tests
β βββ test-connection.yaml
βββ values.yaml
5) deploy with chart (parameters set by values.yaml)
helm install --dry-run --debug ./my-demo
helm install --name example ./my-demo --set service.type=NodePort
GOLANG code:
package main

import (
	"fmt"
	"log"
	"net/http"
)

// main starts a trivial web server on :8080: "/" returns a greeting and
// "/static/" serves files from the local static/ directory.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Welcome to my website!\n")
	})

	fs := http.FileServer(http.Dir("static/"))
	http.Handle("/static/", http.StripPrefix("/static/", fs))

	// ListenAndServe only returns on error; surface it instead of
	// exiting silently (the original discarded the error).
	log.Fatal(http.ListenAndServe(":8080", nil))
}
///////////
DOCKERFILE code:
# build stage — compile the Go server; /app/main is copied into the final image.
FROM golang:1.12.5-alpine3.9 as build-stage
WORKDIR /app
COPY main.go ./
RUN go build main.go

# production stage — minimal runtime image, unprivileged user, EST/EDT timezone.
FROM alpine:3.9
LABEL authors="Hongqi Zhang <[email protected]>"
ARG user=myuser
ARG group=mygroup
# alpine ships no bash; give the account /bin/sh (original pointed at a
# nonexistent /bin/bash).
RUN addgroup ${group} \
 && adduser -G ${group} -s /bin/sh -D ${user}
# Install tzdata just long enough to copy the zone file, then remove it.
# (The original `apk add --virtual tzdata` created an empty virtual package
# and never actually installed tzdata, so the cp below would fail.)
RUN apk update && apk add --no-cache tzdata \
 && cp /usr/share/zoneinfo/America/New_York /etc/localtime \
 && echo "America/New_York" > /etc/timezone \
 && apk del tzdata && rm -rf /var/cache/apk/*
COPY --from=build-stage /app/main /
EXPOSE 8080
USER ${user}
CMD ["/main"]
#CMD while true;do echo "wait 2..";sleep 2; done
#!/bin/bash
# Write the Deployment manifest. The heredoc body must carry the YAML
# indentation that the wiki rendering had stripped (flat YAML is invalid).
cat > deploy.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: wavecloud/myhello-app
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
        resources: {}
EOF
# Write the NodePort Service selecting the pods labeled app=web.
# (YAML nesting restored — the wiki had flattened the indentation.)
cat > service.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  labels:
    app: web
  name: web
spec:
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: web
  type: NodePort
EOF
# Write the Ingress routing www.example.com to the web service, with TLS
# terminated via the mytlskey secret created later in this script.
# (YAML nesting restored — the wiki had flattened the indentation.)
cat > ingress.yaml <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: www.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: web
          servicePort: 8080
  tls:
  - secretName: mytlskey
    hosts:
    - www.example.com
EOF
set -x

# Self-signed cert/key for the TLS secret referenced by ingress.yaml.
readonly KEY_FILE=example.key
readonly CERT_FILE=example.crt
readonly CERT_NAME=mytlskey
readonly HOST=www.example.com

echo "generate cert and key"
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -out "${CERT_FILE}" -keyout "${KEY_FILE}" \
  -subj "/C=CA/ST=Ontario/L=Ottawa/O=CBS Inc./OU=IT/CN=${HOST}"

echo "create secret for key"
# Delete may fail on the first run (secret absent); harmless without set -e.
kubectl delete secret "${CERT_NAME}"
kubectl create secret tls "${CERT_NAME}" --key "${KEY_FILE}" --cert "${CERT_FILE}"

echo "create docker image"
image=wavecloud/myhello-app
app=web
docker build -f Dockerfile -t "$image" .

echo "push and remove docker images"
# NOTE(review): plaintext password leaks via ps/history — prefer
# `docker login -u zhanghongqi --password-stdin`.
docker login -uzhanghongqi -ppassword
docker push "$image"

kubectl delete deploy "$app"
#kubectl create deploy $app --image=$image --port=8080
kubectl apply -f deploy.yaml
kubectl delete svc web
#kubectl expose deployment $app --target-port=8080 --type=NodePort
kubectl apply -f service.yaml
kubectl apply -f ingress.yaml

# Give the pods and ingress a moment to come up before probing.
sleep 10
#port=$(kubectl get svc web | grep web | cut -d'/' -f1|cut -d':' -f2 )
result="Welcome to my website!"
res=$(curl -Lk "https://${HOST}")
if [[ "$res" == "$result" ]]; then
  echo "TEST PASS!"
else
  echo "TEST ERROR!"
fi
1) set hostname and firewall
hostnamectl set-hostname 'k8s-master'
exec bash
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
//6443 10250
firewall-cmd --permanent --add-port=6443/tcp
firewall-cmd --permanent --add-port=2379-2380/tcp
firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --permanent --add-port=10251/tcp
firewall-cmd --permanent --add-port=10252/tcp
firewall-cmd --permanent --add-port=10255/tcp
firewall-cmd --reload
modprobe br_netfilter
echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
///node///
firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --permanent --add-port=10255/tcp
firewall-cmd --permanent --add-port=30000-32767/tcp
firewall-cmd --permanent --add-port=6783/tcp
firewall-cmd --reload
/etc/hosts
192.168.99.102 k8s-master
192.168.99.103 worker-node1
192.168.99.105 worker-node2
2) Disable SWAP
swapoff -a
vim /etc/fstab and comment swap line
3) /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
////node/////
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
4) install
yum install kubelet kubeadm docker -y
systemctl restart docker && systemctl enable docker
systemctl restart kubelet && systemctl enable kubelet
5) kubeadm init. #for master
kubeadm join --token a3bd48.1bc42347c3b35851 192.168.99.102:6443   # for node1
kubeadm join --token a3bd48.1bc42347c3b35851 192.168.99.102:6443   # for node2
1).install kube ingress
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/deploy.yaml
2). Ingress check
kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch
1) ClusterIP:
Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default ServiceType.
2) NodePort:
Exposes the service on each Nodeβs IP at a static port (the NodePort). A ClusterIP service, to which the NodePort service will route, is automatically created. Youβll be able to contact the NodePort service, from outside the cluster, by requesting <NodeIP>:<NodePort>.
3) LoadBalancer:
Exposes the service externally using a cloud providerβs load balancer. NodePort and ClusterIP services, to which the external load balancer will route, are automatically created.
4) ExternalName:
Maps the service to the contents of the externalName field (e.g. foo.bar.example.com), by returning a CNAME record with its value. No proxying of any kind is set up.
1)kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
2)kubectl proxy
3)http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
4)create token
kubectl create serviceaccount dashboard -n default
kubectl create clusterrolebinding dashboard-admin -n default --clusterrole=cluster-admin --serviceaccount=default:dashboard
kubectl get secret $(kubectl get serviceaccount dashboard -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
5)login dashboard
http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/ with token
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRhc2hib2FyZC10b2tlbi16a3hzcyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJkYXNoYm9hcmQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjM2FiMGIyYi05NGExLTExZTktYmUxOC0wMjUwMDAwMDAwMDEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkYXNoYm9hcmQifQ.QPsLLwL3UvY8v8rEOdqIA_-Zmr_l3l7cqK5-MFu86YApNuWSLWkumbdF5v7L0dhsXNwi74Y5wKRt5KrQmVEvLTekBjqs3WD-xqeCrNAp2GZujAGaC1rgE4VBbN_7YufDCH33-nR_YyGLo65t0-wHNP4bndVj3-86Y3VBxJ86SSegrpcHGX1xJAA2-xVXRP3PxkWKG1mNyIgoSojrKZ-_BBV9jh7iiNnNdF1caDFbE8uaxdoD8eUf1LjRIal286q6Ar7pnkWdRJEDnN5CJg2XyiUl2-l_fZCS6o9CockMfXT13145_6S-r27XnxnYuAHA68kdnJXnLUslHiemM_ewMg
kubectl can use yaml to create objects such as deploy/service/ingress, also kubectl can use command line to create objects.
0. create ssl secret
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -out my.crt -keyout my.key \
-subj "/C=CA/ST=Ontario/L=Ottawa/O=CBS Inc./OU=IT/CN=www.example.com"
kubectl create secret tls custom-tls-cert --key my.key --cert my.crt
1. create deploy
kubectl create deployment nginx --image=nginx
2. create service(nodeport)
kubectl create service nodeport nginx --tcp=80:80
(or kubectl expose deployment nginx --target-port=8080 --type=NodePort)
kubectl get svc
nginx NodePort 10.110.83.26 <none> 80:30938/TCP 5m
2.2 test
curl podNodeIP:30938
3. expose service alternatives
1) Service.Type=LoadBalancer
2) Service.Type=NodePort
3) Use a Port Proxy
4) Use ingress
ingress uses a service of type Service.Type=NodePort or Service.Type=LoadBalancer.
3.1 create ingress
3.2 test
curl localhost:80
#bridge
"sudo brctl addbr br0;",
"sudo ip link set br0 up;",
"sudo brctl addif br0 eth0;",
"sudo ip addr add \"" + contIP + "\" dev br0;",
"sudo ip addr del \"" + TargIP + "\" dev eth0;",
"sudo ip route add \"" + routeIP + "\" dev br0;",
"sudo ip link add webi0 type veth peer name webe0;",
"sudo brctl addif br0 webe0;",
"sudo ip link set netns $(exec docker inspect --format '{{ .State.Pid }}' " + contName + ") dev webi0;",
"sudo nsenter -t $(exec docker inspect --format '{{ .State.Pid }}' " + contName + ") -n ip link set webi0 up;",
"sudo nsenter -t $(exec docker inspect --format '{{ .State.Pid }}' " + contName + ") -n ip addr add \"" + TargIP + "\" dev webi0",
"#sudo nsenter -t $(exec docker inspect --format '{{ .State.Pid }}' " + contName + ") -n ip route del default;",
"#sudo nsenter -t $(exec docker inspect --format '{{ .State.Pid }}' " + contName + ") -n ip route add default via \"172.17.42.1\" dev webi0;",
https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
at master:
etcd;
kube-apiserver
kube-scheduler
kube-controller-manager
cloud-controller-manager
at node:
docker
kube-proxy
kubelet
addon:
DNS
WebUI
monitoring
logging.
monitor:
cAdvisor ----> Heapster ----> InfluxDB (Grafana)
#Deploy a app using CMD
1. deploy kube cluster
minikube start
kubectl get nodes
2 Deploying an App by deployment
kubectl run kubernetes-bootcamp --image=gcr.io/google-samples/kubernetes-bootcamp:v1 --port=8080
(kubectl create -f deployment.yml)
kubectl get deployments
kubectl get rs
kubectl get pods
kubectl proxy ; curl http://localhost:8001/version
3. expose App by service
kubectl expose deployment/kubernetes-bootcamp --type="NodePort" --port 8080
(kubectl create -f service.yml)
kubectl get services
export NODE_PORT=$(kubectl get services/kubernetes-bootcamp -o go-template='{{(index .spec.ports 0).nodePort}}')
echo NODE_PORT=$NODE_PORT
curl $(minikube ip):$NODE_PORT
4. add more label or override.
kubectl label --overwrite pod $POD_NAME app=v1
#Deploy a app using YML
1. kubectl create -f deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
selector:
matchLabels:
app: nginx
replicas: 2 # tells deployment to run 2 pods matching the template
template: # create pods using pod definition in this template
metadata:
# unlike pod-nginx.yaml, the name is not included in the meta data as a unique name is
# generated from the deployment name
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
2. kubectl create -f service.yml
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
ports:
- port: 8000 # the port that this service should serve on
# the container on each pod to connect to, can be a name
# (e.g. 'www') or a number (e.g. 80)
targetPort: 80
protocol: TCP
# just like the selector in the deployment,
# but this time it identifies the set of pods to load balance
# traffic to.
selector:
app: nginx
service --(kube-proxy) label: myapp
endpointer --
ReplicationController: selector: myapp; label: myapp -- (controller-manager)
deployment label: myapp; selector myapp
pod--(docker) label: myapp
#PaaS Program: Pivotal Cloud Foundry
OSI:
SaaS: App/Data/Runtime/Middleware/OS/Virtualization/Server/storage/Network
PaaS: App/Data//Runtime/Middleware/OS/Virtualization/Server/storage/Network
IaaS: App/Data/Runtime/Middleware/OS//Virtualization/Server/storage/Network
Deploy PCF
signup with https://account.run.pivotal.io/z/uaa/sign-up
570 git clone https://github.com/cloudfoundry-samples/cf-sample-app-spring.git
573 cd cf-sample-app-spring/
576 cf login -a https://api.run.pivotal.io
582 cf push
583 cf logs cf-spring --recent
584 cf logs cf-spring
585 cf marketplace -s elephantsql
586 cf create-service elephantsql turtle cf-spring-db
587 cf bind-service cf-spring cf-spring-db
588 cf restart cf-spring
589 cf services
browser:
route http://cf-spring-tired-emu.cfapps.io/ (host.domain)
app: cf-spring (in manifest.yml)
member: [email protected]
org: zhq-org
space: development
domains: apps.internal SHARED
cf-tcpapps.io SHARED
cfapps.io SHARED
host: cf-spring-tired-emu