Bash Script: Deploy Kubernetes Cluster Using Existing VMs - CloudCommandos/JohnChan GitHub Wiki
This script is tested to work with Kubernetes v1.14.0. The script is written based on the procedures described in Kubernetes The Hard Way. Use at your own risk!
usage:
./deploy_k8s.sh [--init] [--apt-update] [--new-certs] [--new-etcd]
--init
: apply both --new-certs and --new-etcd flags.
--apt-update
: run apt-get update on all nodes that are involved.
--new-certs
: create new ca and certs for cluster.
--new-etcd
: create a fresh etcd cluster. Existing etcd data will be wiped out!
deploy_k8s.sh:
#!/bin/bash
# Deploy a Kubernetes cluster onto existing VMs, following the procedures of
# "Kubernetes The Hard Way". Flags: --init --apt-update --new-certs --new-etcd
# (see the wiki usage section). Assumes passwordless root ssh to all nodes.
#colours (ANSI escape sequences used by the progress messages below)
COLOUR1='\033[1;33m'
COLOUR2='\033[1;36m'
NC='\033[1;37m'
#directory settings — all generated artifacts live under this shared mount
BASEDIR="/mnt/shares/bash/deploy_k8s"
CA_CONFIGDIR="$BASEDIR/ca_config"
CERTDIR="$BASEDIR/certs"
K8S_TEMPDIR="$BASEDIR/k8s_temp"
ETCD_TEMPDIR="$BASEDIR/etcd_temp"
#k8s cluster settings
K8S_CLUSTER_NAME="kubernetes"
K8S_VERSION="1.14.3"
CRICTL_VERSION="1.14.0"
CLUSTER_CIDR="10.96.0.0/12"
CLUSTER_CIDR_FIRST_IP="10.96.0.1"
CLUSTER_DNS_IP="10.96.0.10"
POD_CIDR="10.244.0.0/16"
POD_CIDR_FIRST_IP="10.244.0.1"
K8S_PUBLIC_IP="111.223.106.169"
K8S_PUBLIC_PORT="60443"
K8S_CONTROL_PLANE_VIP="10.0.1.99"
K8S_CONTROL_PLANE_URL="https://${K8S_CONTROL_PLANE_VIP}:${K8S_PUBLIC_PORT}"
# Node name arrays are index-aligned with their *_INTERNAL_IP arrays.
# NOTE(review): master and etcd node lists are identical here (stacked etcd).
K8S_WORKER_NODES=("kube3" "kube4" "kube5" "kube6" "kube7")
K8S_WORKER_INTERNAL_IP=("10.0.1.103" "10.0.1.104" "10.0.1.105" "10.0.1.106" "10.0.1.107")
K8S_MASTER_NODES=("kube0" "kube1" "kube2")
K8S_MASTER_INTERNAL_IP=("10.0.1.100" "10.0.1.101" "10.0.1.102")
K8S_ETCD_NODES=("kube0" "kube1" "kube2")
K8S_ETCD_INTERNAL_IP=("10.0.1.100" "10.0.1.101" "10.0.1.102")
#override the defaults above with an external environment variable file
source env.sh
#########################################################################################
#SCRIPT BEGIN
# Parse command-line flags (see the usage text). Unknown flags print the list
# of valid options but do not abort, matching the script's tolerant behaviour.
UPDATE_NODES='0'   # --apt-update: run apt-get update on every involved node
NEW_CERTS='0'      # --new-certs:  regenerate the CA and all certificates
NEW_ETCD='0'       # --new-etcd:   wipe and re-create the etcd cluster
while test $# -gt 0
do
case "$1" in
--apt-update) UPDATE_NODES='1'
;;
--init)
# --init implies both --new-certs and --new-etcd
NEW_ETCD='1'
NEW_CERTS='1'
;;
--new-certs) NEW_CERTS='1'
;;
--new-etcd) NEW_ETCD='1'
;;
*) echo "available options: --apt-update --init --new-certs --new-etcd" >&2
;;
esac
shift
done
# Bootstrap the local toolchain: make sure curl exists, then fetch the
# cfssl/cfssljson PKI utilities into $BASEDIR/bin (added to PATH) unless
# they are already cached there.
# fix: the `[ -f ... ]` tests below were corrupted by wiki markdown rendering.
apt-get install -y curl
PATH=$PATH:$BASEDIR/bin
echo -e "${COLOUR1}making directory $BASEDIR/bin ...${NC}"
mkdir -p "$BASEDIR/bin"
echo -e "${COLOUR1}downloading cfssl binaries ...${NC}"
if [ -f "$BASEDIR/bin/cfssl" ]
then
echo -e "${COLOUR1}cfssl already exists${NC}"
else
curl -s -L -o "$BASEDIR/bin/cfssl" https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
fi
if [ -f "$BASEDIR/bin/cfssljson" ]
then
echo -e "${COLOUR1}cfssljson already exists${NC}"
else
curl -s -L -o "$BASEDIR/bin/cfssljson" https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
fi
chmod +x "$BASEDIR"/bin/{cfssl,cfssljson}
echo -e "${COLOUR1}making directory $CA_CONFIGDIR ...${NC}"
mkdir -p "$CA_CONFIGDIR"
#First Time Initialization--------------------------------
cd $CA_CONFIGDIR
# CA signing policy: one "kubernetes" profile whose certs carry server and
# client auth usages, with a 43800h (5-year) expiry.
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "43800h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "43800h"
}
}
}
}
EOF
# CSR for the cluster's root CA itself (CN=Kubernetes, OU=CA).
cat > ca-csr.json <<EOF
{
"CN": "Kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "Kubernetes",
"OU": "CA",
"ST": "SINGAPORE"
}
]
}
EOF
#---------------------------------------------------------
###################################################################################################
#APT UPDATE TARGETS
# Refresh apt indexes on every involved node when --apt-update was given.
# NOTE(review): the worker/master/etcd lists may overlap, so overlapping nodes
# are contacted more than once; apt-get update is idempotent so this is benign.
# fix: the `if [ ... ]` test below was corrupted by wiki markdown rendering.
if [ "$UPDATE_NODES" == '1' ]
then
for nodename in ${K8S_WORKER_NODES[*]}
do
echo -e "${COLOUR1}updating ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh ${nodename} "apt-get update -y"
done
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}updating ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh ${nodename} "apt-get update -y"
done
for nodename in ${K8S_ETCD_NODES[*]}
do
echo -e "${COLOUR1}updating ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh ${nodename} "apt-get update -y"
done
fi
#APT INSTALL TARGETS
# curl is required on every node for later download steps.
for nodename in ${K8S_WORKER_NODES[*]}
do
echo -e "${COLOUR1}installing dependencies for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh ${nodename} "apt-get install -y curl"
done
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}installing dependencies for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh ${nodename} "apt-get install -y curl"
done
for nodename in ${K8S_ETCD_NODES[*]}
do
echo -e "${COLOUR1}installing dependencies for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh ${nodename} "apt-get install -y curl"
done
###################################################################################################
#GENERATE CERTIFICATE AUTHORITY CONFIGURATIONS
# Write one CSR JSON per certificate we will mint. All CSRs share the same
# subject fields (C/L/ST, OU=$K8S_CLUSTER_NAME) and 2048-bit RSA keys; only
# CN and O differ, which is what Kubernetes uses for user/group identity.
cd $CA_CONFIGDIR
##Generate K8s Master Admin Certificate Signing Requests
# CN=admin, O=system:masters — the cluster administrator identity.
cat > admin-csr.json <<EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "system:masters",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s Worker Kubelet Certificate Signing Requests
# One CSR per worker: CN=system:node:<name>, O=system:nodes.
for nodename in ${K8S_WORKER_NODES[*]}
do
cat > ${nodename}-csr.json <<EOF
{
"CN": "system:node:$nodename",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "system:nodes",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
done
##Generate K8s Master Kubelet Certificate Signing Requests
# Masters also run kubelets, so they get the same node-style CSRs.
for nodename in ${K8S_MASTER_NODES[*]}
do
cat > ${nodename}-csr.json <<EOF
{
"CN": "system:node:$nodename",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "system:nodes",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
done
##Generate K8s Kube-Controller-Manager Certificate Signing Requests
cat > kube-controller-manager-csr.json <<EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "system:kube-controller-manager",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s Kube-Proxy Certificate Signing Requests
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "system:node-proxier",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s Kube-Scheduler Certificate Signing Requests
cat > kube-scheduler-csr.json <<EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "system:kube-scheduler",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s API Server Certificate Signing Requests
cat > kubernetes-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "Kubernetes",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s Service Account Certificate Signing Requests
# Key pair used by the API server / controller-manager to sign and verify
# service-account tokens.
cat > service-account-csr.json <<EOF
{
"CN": "service-accounts",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "Kubernetes",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s Etcd Node Certificate Signing Requests
# Shared CSR for all etcd members (peer/server certs).
cat > etcd-node-csr.json <<EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "Kubernetes",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
##Generate K8s Etcd client Certificate Signing Requests
# Client certificate the API server presents when talking to etcd.
cat > etcd-client-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "SG",
"L": "SG",
"O": "Kubernetes",
"OU": "$K8S_CLUSTER_NAME",
"ST": "SINGAPORE"
}
]
}
EOF
###################################################################################################
#GENERATE CERTIFICATES
echo -e "${COLOUR1}making directory $CERTDIR ...${NC}"
mkdir -p $CERTDIR
cd $CERTDIR
##Generate Certificate Authority
# fix: the `if [ ... ]` test was corrupted by wiki markdown rendering, and the
# existence check ignored --new-certs; per the usage text, --new-certs/--init
# must create a fresh CA, so regenerate whenever NEW_CERTS=1.
if [ "$NEW_CERTS" == '0' ] && [ -f "ca-key.pem" ]
then
echo -e "${COLOUR1}ca-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating certificate authority ...${NC}"
cfssl gencert -initca $CA_CONFIGDIR/ca-csr.json | cfssljson -bare ca -
fi
##Generate Worker Certificates
# One kubelet cert per worker, SANs = node name + internal IP. Skipped when
# the key already exists, unless --new-certs/--init was given.
# fix: the `if [ ... ]` tests were corrupted by wiki markdown rendering.
for ((i=0; i<${#K8S_WORKER_NODES[@]};++i))
do
if [ "$NEW_CERTS" == '0' ] && [ -f "${K8S_WORKER_NODES[$i]}-key.pem" ]
then
echo -e "${COLOUR1}${K8S_WORKER_NODES[$i]}-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating ${COLOUR2}${K8S_WORKER_NODES[$i]}${COLOUR1} worker node certificate ...${NC}"
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=$CA_CONFIGDIR/ca-config.json -profile=kubernetes \
-hostname=${K8S_WORKER_NODES[$i]},${K8S_WORKER_INTERNAL_IP[$i]} \
$CA_CONFIGDIR/${K8S_WORKER_NODES[$i]}-csr.json | cfssljson -bare ${K8S_WORKER_NODES[$i]}
fi
done
##Generate Master Kubelet Certificates
# Same as the worker certs, but for the kubelets running on master nodes.
# fix: the `if [ ... ]` tests were corrupted by wiki markdown rendering.
for ((i=0; i<${#K8S_MASTER_NODES[@]};++i))
do
if [ "$NEW_CERTS" == '0' ] && [ -f "${K8S_MASTER_NODES[$i]}-key.pem" ]
then
echo -e "${COLOUR1}${K8S_MASTER_NODES[$i]}-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} master kubelet certificate ...${NC}"
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=$CA_CONFIGDIR/ca-config.json -profile=kubernetes \
-hostname=${K8S_MASTER_NODES[$i]},${K8S_MASTER_INTERNAL_IP[$i]} \
$CA_CONFIGDIR/${K8S_MASTER_NODES[$i]}-csr.json | cfssljson -bare ${K8S_MASTER_NODES[$i]}
fi
done
##Generate Various Certificates
# Client certs for the admin user and the control-plane components; no SANs
# needed since these are client-only identities.
# fix: the `if [ ... ]` tests were corrupted by wiki markdown rendering.
for component in "admin" "kube-controller-manager" "kube-proxy" "kube-scheduler" "service-account"
do
if [ "$NEW_CERTS" == '0' ] && [ -f "${component}-key.pem" ]
then
echo -e "${COLOUR1}${component}-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating ${COLOUR2}${component}${COLOUR1} certificate ...${NC}"
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=$CA_CONFIGDIR/ca-config.json -profile=kubernetes \
$CA_CONFIGDIR/${component}-csr.json | cfssljson -bare ${component}
fi
done
##Generate Etcd Node Certificates
# Build one SAN list containing every etcd node name and internal IP; each
# member's certificate carries the full list (all are signed from the same CSR).
tmpHostnameStr=''
for item in ${K8S_ETCD_INTERNAL_IP[*]}
do
if [ "$tmpHostnameStr" == '' ]
then
tmpHostnameStr="${item}"
else
tmpHostnameStr="${item},${tmpHostnameStr}"
fi
done
for item in ${K8S_ETCD_NODES[*]}
do
if [ "$tmpHostnameStr" == '' ]
then
tmpHostnameStr="${item}"
else
tmpHostnameStr="${item},${tmpHostnameStr}"
fi
done
# fix: the `if [ ... ]` tests were corrupted by wiki markdown rendering.
for ((i=0; i<${#K8S_ETCD_NODES[@]};++i))
do
if [ "$NEW_CERTS" == '0' ] && [ -f "${K8S_ETCD_NODES[$i]}-etcd-node-key.pem" ]
then
echo -e "${COLOUR1}${K8S_ETCD_NODES[$i]}-etcd-node-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating ${COLOUR2}${K8S_ETCD_NODES[$i]}${COLOUR1} etcd node certificate ...${NC}"
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=$CA_CONFIGDIR/ca-config.json -profile=kubernetes \
-hostname=${tmpHostnameStr} \
$CA_CONFIGDIR/etcd-node-csr.json | cfssljson -bare ${K8S_ETCD_NODES[$i]}-etcd-node
fi
done
##Generate API Server && Etcd Client Certificate
# SAN list for the API server cert: every master name/IP, the service-range
# and pod-range first IPs, the public IP, the control-plane VIP, loopback,
# and the in-cluster kubernetes service DNS names.
tmpHostnameStr=''
for item in ${K8S_MASTER_INTERNAL_IP[*]}
do
if [ "$tmpHostnameStr" == '' ]
then
tmpHostnameStr="${item}"
else
tmpHostnameStr="${item},${tmpHostnameStr}"
fi
done
for item in ${K8S_MASTER_NODES[*]}
do
if [ "$tmpHostnameStr" == '' ]
then
tmpHostnameStr="${item}"
else
tmpHostnameStr="${item},${tmpHostnameStr}"
fi
done
# fix: the `if [ ... ]` tests were corrupted by wiki markdown rendering.
if [ "$NEW_CERTS" == '0' ] && [ -f "kubernetes-key.pem" ]
then
echo -e "${COLOUR1}kubernetes-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating ${COLOUR2}API Server${COLOUR1} certificate ...${NC}"
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=$CA_CONFIGDIR/ca-config.json -profile=kubernetes \
-hostname=${CLUSTER_CIDR_FIRST_IP},${POD_CIDR_FIRST_IP},${tmpHostnameStr},${K8S_PUBLIC_IP},${K8S_CONTROL_PLANE_VIP},127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local \
$CA_CONFIGDIR/kubernetes-csr.json | cfssljson -bare kubernetes
fi
if [ "$NEW_CERTS" == '0' ] && [ -f "etcd-client-key.pem" ]
then
echo -e "${COLOUR1}etcd-client-key.pem exists${NC}"
else
echo -e "${COLOUR1}generating ${COLOUR2}Etcd Client${COLOUR1} certificate ...${NC}"
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=$CA_CONFIGDIR/ca-config.json -profile=kubernetes \
-hostname=${CLUSTER_CIDR_FIRST_IP},${POD_CIDR_FIRST_IP},${tmpHostnameStr},${K8S_PUBLIC_IP},${K8S_CONTROL_PLANE_VIP},127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local \
$CA_CONFIGDIR/etcd-client-csr.json | cfssljson -bare etcd-client
fi
###################################################################################################
#GENERATE KUBERNETES CONFIGURATION FILES
##Fetch the kubectl binary used locally to write the kubeconfig files below.
# The download is cached under a version-suffixed name so reruns skip it.
mkdir -p $K8S_TEMPDIR
cd $K8S_TEMPDIR
{
component="kubectl"
if [ ! -f "${component}_v${K8S_VERSION}" ]
then
echo -e "${COLOUR1}downloading ${component}_v${K8S_VERSION} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
-O ${component}_v${K8S_VERSION} \
"https://storage.googleapis.com/kubernetes-release/release/v${K8S_VERSION}/bin/linux/amd64/${component}"
else
echo -e "${COLOUR1}${component}_v${K8S_VERSION} binary already exists${NC}"
fi
chmod +x ${component}_v${K8S_VERSION}
cp ${component}_v${K8S_VERSION} ${component}
}
##Generate a kubelet kubeconfig for every worker node, pointing at the
##control-plane URL and embedding the CA and the node's client cert/key.
for nodename in ${K8S_WORKER_NODES[*]}
do
# fix: use rm -f so a missing kubeconfig on the first run does not error
rm -f $nodename.kubeconfig
echo -e "${COLOUR1}generating ${COLOUR2}${nodename}${COLOUR1} Kubelet Kubernetes Configuration File ...${NC}"
./kubectl config set-cluster $K8S_CLUSTER_NAME \
--certificate-authority=$CERTDIR/ca.pem \
--embed-certs=true \
--server=$K8S_CONTROL_PLANE_URL \
--kubeconfig=$nodename.kubeconfig
./kubectl config set-credentials system:node:$nodename \
--client-certificate=$CERTDIR/$nodename.pem \
--client-key=$CERTDIR/${nodename}-key.pem \
--embed-certs=true \
--kubeconfig=$nodename.kubeconfig
./kubectl config set-context default \
--cluster=$K8S_CLUSTER_NAME \
--user=system:node:$nodename \
--kubeconfig=$nodename.kubeconfig
./kubectl config use-context default --kubeconfig=$nodename.kubeconfig
done
##Generate Master Kubelet Kubernetes Configuration File
# Same as the worker kubeconfigs, for the kubelets on master nodes.
for nodename in ${K8S_MASTER_NODES[*]}
do
# fix: use rm -f so a missing kubeconfig on the first run does not error
rm -f $nodename.kubeconfig
echo -e "${COLOUR1}generating ${COLOUR2}${nodename}${COLOUR1} Kubelet Kubernetes Configuration File ...${NC}"
./kubectl config set-cluster $K8S_CLUSTER_NAME \
--certificate-authority=$CERTDIR/ca.pem \
--embed-certs=true \
--server=$K8S_CONTROL_PLANE_URL \
--kubeconfig=$nodename.kubeconfig
./kubectl config set-credentials system:node:$nodename \
--client-certificate=$CERTDIR/$nodename.pem \
--client-key=$CERTDIR/${nodename}-key.pem \
--embed-certs=true \
--kubeconfig=$nodename.kubeconfig
./kubectl config set-context default \
--cluster=$K8S_CLUSTER_NAME \
--user=system:node:$nodename \
--kubeconfig=$nodename.kubeconfig
./kubectl config use-context default --kubeconfig=$nodename.kubeconfig
done
##Generate Kube-Proxy Kubernetes Configuration File
echo -e "${COLOUR1}generating ${COLOUR2}Kube-Proxy${COLOUR1} Kubernetes Configuration File ...${NC}"
# fix: use rm -f so a missing kubeconfig on the first run does not error
rm -f kube-proxy.kubeconfig
./kubectl config set-cluster $K8S_CLUSTER_NAME \
--certificate-authority=$CERTDIR/ca.pem \
--embed-certs=true \
--server=$K8S_CONTROL_PLANE_URL \
--kubeconfig=kube-proxy.kubeconfig
./kubectl config set-credentials system:kube-proxy \
--client-certificate=$CERTDIR/kube-proxy.pem \
--client-key=$CERTDIR/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
./kubectl config set-context default \
--cluster=$K8S_CLUSTER_NAME \
--user=system:kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
./kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
##Generate Kube-Controller-Manager Kubernetes Configuration File
# Talks to the local API server on each master (127.0.0.1:6443).
echo -e "${COLOUR1}generating ${COLOUR2}Kube-Controller-Manager${COLOUR1} Kubernetes Configuration File ...${NC}"
# fix: use rm -f so a missing kubeconfig on the first run does not error
rm -f kube-controller-manager.kubeconfig
./kubectl config set-cluster $K8S_CLUSTER_NAME \
--certificate-authority=$CERTDIR/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
./kubectl config set-credentials system:kube-controller-manager \
--client-certificate=$CERTDIR/kube-controller-manager.pem \
--client-key=$CERTDIR/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
./kubectl config set-context default \
--cluster=$K8S_CLUSTER_NAME \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
./kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
##Generate Kube-Scheduler Kubernetes Configuration File
# Talks to the local API server on each master (127.0.0.1:6443).
echo -e "${COLOUR1}generating ${COLOUR2}Kube-Scheduler${COLOUR1} Kubernetes Configuration File ...${NC}"
# fix: use rm -f so a missing kubeconfig on the first run does not error
rm -f kube-scheduler.kubeconfig
./kubectl config set-cluster $K8S_CLUSTER_NAME \
--certificate-authority=$CERTDIR/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-scheduler.kubeconfig
./kubectl config set-credentials system:kube-scheduler \
--client-certificate=$CERTDIR/kube-scheduler.pem \
--client-key=$CERTDIR/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
./kubectl config set-context default \
--cluster=$K8S_CLUSTER_NAME \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
./kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
##Generate Admin Kubernetes Configuration File
# Local admin credentials, pointed at 127.0.0.1:6443 on each master.
echo -e "${COLOUR1}generating ${COLOUR2}Admin${COLOUR1} Kubernetes Configuration File ...${NC}"
# fix: use rm -f so a missing kubeconfig on the first run does not error
rm -f admin.kubeconfig
./kubectl config set-cluster $K8S_CLUSTER_NAME \
--certificate-authority=$CERTDIR/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=admin.kubeconfig
./kubectl config set-credentials admin \
--client-certificate=$CERTDIR/admin.pem \
--client-key=$CERTDIR/admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
./kubectl config set-context default \
--cluster=$K8S_CLUSTER_NAME \
--user=admin \
--kubeconfig=admin.kubeconfig
./kubectl config use-context default --kubeconfig=admin.kubeconfig
########################################################################################################################
#GENERATE DATA ENCRYPTION CONFIG AND KEY
# Only when etcd is being re-created (--new-etcd/--init): mint a fresh 32-byte
# AES-CBC key for encrypting Secrets at rest. Reusing the old file otherwise
# keeps previously written Secrets decryptable.
# fix: the `if [ ... ]` test was corrupted by wiki markdown rendering.
if [ "$NEW_ETCD" == '1' ]
then
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
cd $K8S_TEMPDIR
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- aescbc:
keys:
- name: key1
secret: ${ENCRYPTION_KEY}
- identity: {}
EOF
chmod 600 encryption-config.yaml
fi
########################################################################################################################
#COPY FILES TO DESTINATIONS
# Push certificates and kubeconfigs to each node's home directory via scp;
# later provisioning steps move them into their final locations on the node.
echo -e "${COLOUR1}copying files to destinations ...${NC}"
# Workers: CA cert, the node's kubelet cert/key, kubelet + kube-proxy kubeconfigs.
for nodename in ${K8S_WORKER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
scp -p $CERTDIR/ca.pem $CERTDIR/${nodename}-key.pem $CERTDIR/$nodename.pem \
$K8S_TEMPDIR/$nodename.kubeconfig $K8S_TEMPDIR/kube-proxy.kubeconfig $nodename:~/
done
# Masters: the full control-plane certificate set, kubeconfigs, and the
# at-rest encryption config.
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
scp -p $CERTDIR/ca.pem $CERTDIR/ca-key.pem $CERTDIR/kubernetes-key.pem $CERTDIR/kubernetes.pem \
$CERTDIR/kube-scheduler-key.pem $CERTDIR/kube-scheduler.pem \
$CERTDIR/kube-controller-manager-key.pem $CERTDIR/kube-controller-manager.pem \
$CERTDIR/service-account-key.pem $CERTDIR/service-account.pem \
$CERTDIR/${nodename}-key.pem $CERTDIR/$nodename.pem \
$CERTDIR/etcd-client-key.pem $CERTDIR/etcd-client.pem \
$K8S_TEMPDIR/admin.kubeconfig $K8S_TEMPDIR/kube-controller-manager.kubeconfig $K8S_TEMPDIR/kube-scheduler.kubeconfig \
$K8S_TEMPDIR/$nodename.kubeconfig $K8S_TEMPDIR/kube-proxy.kubeconfig \
$K8S_TEMPDIR/encryption-config.yaml \
$nodename:~/
done
# Etcd nodes: CA cert plus etcd client and per-node peer/server cert pairs.
for nodename in ${K8S_ETCD_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
scp -p $CERTDIR/ca.pem \
$CERTDIR/etcd-client-key.pem $CERTDIR/etcd-client.pem \
$CERTDIR/${nodename}-etcd-node-key.pem $CERTDIR/${nodename}-etcd-node.pem $nodename:~/
done
########################################################################################################################
#CONFIGURE ETCD CLUSTER
##Download and unpack the etcd v3.3.9 release once; reruns reuse the unpacked
##binaries.
echo -e "${COLOUR1}setting up etcd cluster ...${NC}"
mkdir -p $ETCD_TEMPDIR
cd $ETCD_TEMPDIR
if [ ! -f "etcd-v3.3.9-linux-amd64/etcd" ]
then
echo -e "${COLOUR1}downloading etcd binaries ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/coreos/etcd/releases/download/v3.3.9/etcd-v3.3.9-linux-amd64.tar.gz"
tar -xvf etcd-v3.3.9-linux-amd64.tar.gz
else
echo -e "${COLOUR1}etcd binaries already exist${NC}"
fi
##Assemble the etcd --initial-cluster peer list: "name=https://ip:2380,..."
tmpConnectionStr="${K8S_ETCD_NODES[0]}=https://${K8S_ETCD_INTERNAL_IP[0]}:2380"
for ((idx=1; idx<${#K8S_ETCD_NODES[@]}; ++idx))
do
tmpConnectionStr="${K8S_ETCD_NODES[$idx]}=https://${K8S_ETCD_INTERNAL_IP[$idx]}:2380,${tmpConnectionStr}"
done
##Per-member etcd provisioning: stage certs, install binaries, render the
##systemd unit, then (re)start the service.
# fix: the `if [ ... ]` test below was corrupted by wiki markdown rendering.
for ((i=0; i<${#K8S_ETCD_NODES[@]};++i))
do
if [ "$NEW_ETCD" == '1' ]
then
# --new-etcd/--init: wipe existing etcd state before re-provisioning
echo -e "${COLOUR1}deleting old etcd on ${COLOUR2}${K8S_ETCD_NODES[$i]}${COLOUR1} ...${NC}"
ssh ${K8S_ETCD_NODES[$i]} "systemctl stop etcd; \
rm -rf /etc/etcd /var/lib/etcd; \
mkdir -p /etc/etcd /var/lib/etcd \
&& cp ~/ca.pem ~/etcd-client-key.pem ~/etcd-client.pem /etc/etcd/ \
&& mv ~/${K8S_ETCD_NODES[$i]}-etcd-node-key.pem /etc/etcd/etcd-node-key.pem \
&& mv ~/${K8S_ETCD_NODES[$i]}-etcd-node.pem /etc/etcd/etcd-node.pem"
else
# keep existing data; only refresh the staged certificates
ssh ${K8S_ETCD_NODES[$i]} "systemctl stop etcd; \
mkdir -p /etc/etcd /var/lib/etcd \
&& cp ~/ca.pem ~/etcd-client-key.pem ~/etcd-client.pem /etc/etcd/ \
&& mv ~/${K8S_ETCD_NODES[$i]}-etcd-node-key.pem /etc/etcd/etcd-node-key.pem \
&& mv ~/${K8S_ETCD_NODES[$i]}-etcd-node.pem /etc/etcd/etcd-node.pem"
fi
echo -e "${COLOUR1}copying etcd binary to ${COLOUR2}${K8S_ETCD_NODES[$i]}${COLOUR1} ...${NC}"
rsync -acIP etcd-v3.3.9-linux-amd64/etcd* ${K8S_ETCD_NODES[$i]}:/usr/local/bin/
# Render this member's systemd unit (peer list built above in tmpConnectionStr)
cat <<EOF | tee etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=/usr/local/bin/etcd \\
--name ${K8S_ETCD_NODES[$i]} \\
--cert-file=/etc/etcd/etcd-client.pem \\
--key-file=/etc/etcd/etcd-client-key.pem \\
--peer-cert-file=/etc/etcd/etcd-node.pem \\
--peer-key-file=/etc/etcd/etcd-node-key.pem \\
--trusted-ca-file=/etc/etcd/ca.pem \\
--peer-trusted-ca-file=/etc/etcd/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth \\
--initial-advertise-peer-urls https://${K8S_ETCD_INTERNAL_IP[$i]}:2380 \\
--listen-peer-urls https://${K8S_ETCD_INTERNAL_IP[$i]}:2380 \\
--listen-client-urls https://${K8S_ETCD_INTERNAL_IP[$i]}:2379,https://127.0.0.1:2379 \\
--advertise-client-urls https://${K8S_ETCD_INTERNAL_IP[$i]}:2379 \\
--initial-cluster-token etcd-cluster-0 \\
--initial-cluster ${tmpConnectionStr} \\
--initial-cluster-state new \\
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
scp -p etcd.service ${K8S_ETCD_NODES[$i]}:/etc/systemd/system/etcd.service
echo -e "${COLOUR1}starting etcd on ${COLOUR2}${K8S_ETCD_NODES[$i]}${COLOUR1} ...${NC}"
ssh ${K8S_ETCD_NODES[$i]} "systemctl daemon-reload; systemctl enable etcd; systemctl start etcd"
done
####################################################################################################################################
#PROVISION KUBERNETES CONTROL PLANE
##Stop any running control-plane services before replacing binaries/configs.
# fix: the stop list repeated "kubelet kube-proxy" twice; deduplicated.
for ((i=0; i<${#K8S_MASTER_NODES[@]};++i))
do
echo -e "${COLOUR1}stopping services for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
ssh ${K8S_MASTER_NODES[$i]} "systemctl stop kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy containerd"
done
cd $K8S_TEMPDIR
##download master binaries
# Download each control-plane binary once (cached under a version-suffixed
# name) and rsync it to every master's /usr/local/bin.
for component in "kube-apiserver" "kube-controller-manager" "kube-scheduler" "kubelet" "kubectl" "kube-proxy"
do
if [ -f "${component}_v${K8S_VERSION}" ]
then
echo -e "${COLOUR1}${component}_v${K8S_VERSION} binary already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component}_v${K8S_VERSION} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
-O ${component}_v${K8S_VERSION} \
"https://storage.googleapis.com/kubernetes-release/release/v${K8S_VERSION}/bin/linux/amd64/${component}"
fi
chmod +x ${component}_v${K8S_VERSION}
cp ${component}_v${K8S_VERSION} ${component}
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP ${component} $nodename:/usr/local/bin/
done
done
##Download crictl, unpack it, and push the tools to every master node.
{
component="crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz"
if [ ! -f "${component}" ]
then
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/${component}"
else
echo -e "${COLOUR1}${component} already exists${NC}"
fi
mkdir -p crictl
tar -xvf $component -C crictl
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP crictl/* $nodename:/usr/local/bin/
done
}
##Download the gVisor runsc runtime and install it on every master.
{
component="runsc-50c283b9f56bb7200938d9e207355f05f79f0d17"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://storage.googleapis.com/kubernetes-the-hard-way/${component}"
fi
chmod +x $component
cp $component runsc
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
# fix: the original synced "runc" here, shipping the wrong binary as runsc;
# sync the runsc binary prepared just above instead.
rsync -acIP runsc $nodename:/usr/local/bin/runsc
done
}
##Download the runc container runtime and install it on every master.
{
component="runc.amd64"
if [ ! -f "${component}" ]
then
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/${component}"
else
echo -e "${COLOUR1}${component} already exists${NC}"
fi
chmod +x $component
cp $component runc
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP runc $nodename:/usr/local/bin/runc
done
}
##Download the CNI plugin bundle, unpack it, and push it to every master.
# NOTE(review): plugins land in /usr/local/bin here, while the masters also
# get an /opt/cni/bin directory later — confirm which path the kubelet uses.
{
component="cni-plugins-linux-amd64-v0.8.1.tgz"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/containernetworking/plugins/releases/download/v0.8.1/${component}"
fi
mkdir -p cni
tar -xvf $component -C cni
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP cni/* $nodename:/usr/local/bin/
done
}
##Download the containerd release, unpack it, and push its binaries to every
##master. They go to /bin, matching the ExecStart=/bin/containerd used by the
##containerd.service unit generated later in this script.
{
component="containerd-1.2.6.linux-amd64.tar.gz"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/containerd/containerd/releases/download/v1.2.6/${component}"
fi
mkdir -p containerd
tar -xvf ${component} -C containerd
for nodename in ${K8S_MASTER_NODES[*]}
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP containerd/bin/* $nodename:/bin/
done
}
##Prepare each master: install kubelet dependencies, create the directory
##layout, then move the previously scp'd certs/configs from ~ into
##/var/lib/kubernetes (control plane) and /var/lib/kubelet (kubelet certs).
for ((i=0; i<${#K8S_MASTER_NODES[@]};++i))
do
echo -e "${COLOUR1}preparing master ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
ssh ${K8S_MASTER_NODES[$i]} "apt-get -y install socat conntrack ipset \
&& mkdir -p \
/var/lib/kubelet/ \
/etc/cni/net.d \
/opt/cni/bin \
/var/lib/kubelet \
/var/lib/kube-proxy \
/var/lib/kubernetes \
/var/run/kubernetes \
/etc/containerd \
/etc/kubernetes/config \
&& mv ca.pem \
ca-key.pem \
kubernetes-key.pem \
kubernetes.pem \
service-account-key.pem \
service-account.pem \
kube-scheduler-key.pem \
kube-scheduler.pem \
kube-controller-manager-key.pem \
kube-controller-manager.pem \
etcd-client-key.pem \
etcd-client.pem \
encryption-config.yaml \
/var/lib/kubernetes/ \
&& mv ${K8S_MASTER_NODES[$i]}-key.pem ${K8S_MASTER_NODES[$i]}.pem /var/lib/kubelet"
done
##Configure Kubernetes Services
etcdClusterStr="https://${K8S_ETCD_INTERNAL_IP[0]}:2379"
for ((i=1; i<${#K8S_ETCD_INTERNAL_IP[@]};++i))
do
etcdClusterStr="https://${K8S_ETCD_INTERNAL_IP[$i]}:2379,${etcdClusterStr}"
done
for ((i=0; i<${#K8S_MASTER_NODES[@]};++i))
do
###API Server
echo -e "${COLOUR1}configuring kube-apiserver for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
cat <<EOF | tee kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--advertise-address=${K8S_MASTER_INTERNAL_IP[$i]} \\
--allow-privileged=true \\
--apiserver-count=3 \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/audit.log \\
--authorization-mode=Node,RBAC \\
--bind-address=0.0.0.0 \\
--client-ca-file=/var/lib/kubernetes/ca.pem \\
--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
--enable-swagger-ui=true \\
--etcd-cafile=/var/lib/kubernetes/ca.pem \\
--etcd-certfile=/var/lib/kubernetes/etcd-client.pem \\
--etcd-keyfile=/var/lib/kubernetes/etcd-client-key.pem \\
--etcd-servers=${etcdClusterStr} \\
--event-ttl=1h \\
--requestheader-client-ca-file=/var/lib/kubernetes/ca.pem \\
--requestheader-allowed-names=kubernetes \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--proxy-client-cert-file=/var/lib/kubernetes/kubernetes.pem \\
--proxy-client-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
--experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
--kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
--kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
--kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
--kubelet-https=true \\
--runtime-config=api/all \\
--service-account-key-file=/var/lib/kubernetes/service-account.pem \\
--secure-port=6443 \\
--service-cluster-ip-range=${CLUSTER_CIDR} \\
--service-node-port-range=80-65535 \\
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
###Kube Controller Manager
echo -e "${COLOUR1}configuring kube-controller-manager for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
cat <<EOF | tee kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--allocate-node-cidrs=true \\
--authentication-kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
--authorization-kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--master=127.0.0.1:8080 \\
--client-ca-file=/var/lib/kubernetes/ca.pem \\
--cluster-cidr=${POD_CIDR} \\
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
--controllers=*,bootstrapsigner,tokencleaner \\
--kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--node-cidr-mask-size=24 \\
--requestheader-client-ca-file=/var/lib/kubernetes/ca.pem \\
--root-ca-file=/var/lib/kubernetes/ca.pem \\
--service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
--use-service-account-credentials=true \\
--feature-gates=TaintBasedEvictions=false \\
--pod-eviction-timeout=60s \\
--node-monitor-period=2s \\
--node-monitor-grace-period=16s \\
--tls-cert-file=/var/lib/kubernetes/kube-controller-manager.pem \\
--tls-private-key-file=/var/lib/kubernetes/kube-controller-manager-key.pem \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
###Kube Scheduler
echo -e "${COLOUR1}configuring kube-scheduler for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
cat <<EOF | tee kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
authentication-kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
authorization-kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
leaderElect: true
EOF
cat <<EOF | tee kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--config=/etc/kubernetes/config/kube-scheduler.yaml \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
###Kubelet
echo -e "${COLOUR1}configuring kubelet for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
cat <<EOF | tee kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
enabled: true
x509:
clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
- "${CLUSTER_DNS_IP}"
podCIDR: "${POD_CIDR}"
resolvConf: "/etc/resolv.conf"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/${K8S_MASTER_NODES[$i]}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/${K8S_MASTER_NODES[$i]}-key.pem"
EOF
cat <<EOF | tee kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--config=/var/lib/kubelet/kubelet-config.yaml \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--image-pull-progress-deadline=2m \\
--kubeconfig=/var/lib/kubelet/kubeconfig \\
--network-plugin=cni \\
--register-node=true \\
--fail-swap-on=false \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
###CNI Networking
#!!! USING CANAL FOR NETWOKING INSTEAD
# echo -e "${COLOUR1}configuring cni networking for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
# cat <<EOF | tee 10-bridge.conf
#{
# "cniVersion": "0.3.0",
# "name": "bridge",
# "type": "bridge",
# "bridge": "cnio0",
# "isGateway": true,
# "ipMasq": true,
# "ipam": {
# "type": "host-local",
# "ranges": [
# [{"subnet": "${POD_CIDR}"}]
# ],
# "routes": [{"dst": "0.0.0.0/0"}]
# }
#}
#EOF
#cat <<EOF | tee 99-loopback.conf
#{
# "cniVersion": "0.3.1",
# "type": "loopback"
#}
#EOF
###Containerd
echo -e "${COLOUR1}configuring containerd for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
# containerd CRI config: runc for normal pods, gVisor (runsc) for untrusted workloads.
tee config.toml <<EOF
[plugins]
[plugins.cri]
# stream_server_address is the ip address streaming server is listening on.
stream_server_address = "127.0.0.1"
[plugins.cri.containerd]
snapshotter = "overlayfs"
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runc"
runtime_root = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runsc"
runtime_root = "/run/containerd/runsc"
[plugins.cri.containerd.gvisor]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runsc"
runtime_root = "/run/containerd/runsc"
EOF
# containerd systemd unit; overlay module must be loaded before start.
tee containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
###Kube-Proxy
echo -e "${COLOUR1}configuring kube-proxy for ${COLOUR2}${K8S_MASTER_NODES[$i]}${COLOUR1} ...${NC}"
# NOTE(review): manifest indentation was lost in the wiki copy, producing
# invalid YAML; restored to the standard KubeProxyConfiguration layout.
cat <<EOF | tee kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
mode: "iptables"
clusterCIDR: "${POD_CIDR}"
EOF
# kube-proxy systemd unit; driven entirely by the YAML config rendered above.
tee kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/var/lib/kube-proxy/kube-proxy-config.yaml
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
#################
# Move the kubeconfigs generated earlier into their service directories on the
# master, ship unit files/configs, then enable and start all control-plane
# services. All expansions are quoted (SC2086) against word-splitting.
ssh "${K8S_MASTER_NODES[$i]}" "mv ~/kube-controller-manager.kubeconfig ~/kube-scheduler.kubeconfig /var/lib/kubernetes/ \
&& mv ~/${K8S_MASTER_NODES[$i]}.kubeconfig /var/lib/kubelet/kubeconfig \
&& mv ~/kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig"
scp -p kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service containerd.service kube-proxy.service "${K8S_MASTER_NODES[$i]}":/etc/systemd/system/
scp -p kube-scheduler.yaml "${K8S_MASTER_NODES[$i]}":/etc/kubernetes/config/
scp -p kubelet-config.yaml "${K8S_MASTER_NODES[$i]}":/var/lib/kubelet/
# scp -p 10-bridge.conf 99-loopback.conf ${K8S_MASTER_NODES[$i]}:/etc/cni/net.d/
scp -p config.toml "${K8S_MASTER_NODES[$i]}":/etc/containerd/
scp -p kube-proxy-config.yaml "${K8S_MASTER_NODES[$i]}":/var/lib/kube-proxy/
ssh "${K8S_MASTER_NODES[$i]}" "systemctl daemon-reload; \
systemctl enable kube-apiserver kube-controller-manager kube-scheduler containerd kube-proxy kubelet; \
systemctl start kube-apiserver kube-controller-manager kube-scheduler containerd kube-proxy kubelet; \
crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock"
sleep 2s
done
##############################################################################################################################
#RBAC FOR KUBELET AUTHORIZATION
# Grant the API server (which presents the "kubernetes" user in its client
# certificate) access to the kubelet API on every node (logs, exec, metrics).
# NOTE(review): manifest indentation was lost in the wiki copy, producing
# invalid YAML; restored here. rbac/v1beta1 is kept for k8s 1.14 but is
# removed in 1.22 — switch to rbac.authorization.k8s.io/v1 when upgrading.
cat > API_Server_to_Kubelet_ClusterRole.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF
# Apply the ClusterRole/Binding through the first master's admin kubeconfig.
scp -p API_Server_to_Kubelet_ClusterRole.yaml "${K8S_MASTER_NODES[0]}":~/
ssh "${K8S_MASTER_NODES[0]}" "kubectl apply --kubeconfig admin.kubeconfig -f ~/API_Server_to_Kubelet_ClusterRole.yaml"
#use this to check if secrets are encrypted inside etcd
# Creates a throwaway secret, dumps its raw etcd record (should show the
# aescbc:v1:key1 prefix instead of plaintext), then deletes it again.
echo -e "${COLOUR1}check that secret is encrypted at rest in etcd with aescbc:v1:key1${NC}"
ssh "${K8S_MASTER_NODES[0]}" "kubectl create secret generic test --from-literal=test=test -n default; ETCDCTL_API=3 etcdctl get /registry/secrets/default/test \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/etcd-client.pem \
--key=/etc/etcd/etcd-client-key.pem \
| hexdump -C \
&& kubectl delete secret test -n default \
"
#########################################################################################################################
#PROVISION KUBERNETES WORKER NODES
mkdir -p "$K8S_TEMPDIR"
# abort if the staging directory is unusable — every later download/copy assumes we are in it
cd "$K8S_TEMPDIR" || exit 1
##prepare workers
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}preparing worker ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
#dependencies for kubectl port-forward command
ssh "$nodename" "apt-get -y install socat conntrack ipset \
&& mkdir -p \
/etc/cni/net.d \
/opt/cni/bin \
/var/lib/kubelet \
/var/lib/kube-proxy \
/var/lib/kubernetes \
/var/run/kubernetes \
/etc/containerd \
"
echo -e "${COLOUR1}stopping services for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh "$nodename" "systemctl stop kubelet kube-proxy containerd"
done
##download worker binaries
# Downloads are cached locally (versioned filename) and pushed to every worker.
# A failed wget now aborts instead of chmod/cp-ing a partial or missing file.
echo -e "${COLOUR1}downloading worker binaries ...${NC}"
for component in "kubectl" "kube-proxy" "kubelet"
do
if [ -f "${component}_v${K8S_VERSION}" ]
then
echo -e "${COLOUR1}${component}_v${K8S_VERSION} binary already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component}_v${K8S_VERSION} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
-O "${component}_v${K8S_VERSION}" \
"https://storage.googleapis.com/kubernetes-release/release/v${K8S_VERSION}/bin/linux/amd64/${component}" \
|| exit 1
fi
chmod +x "${component}_v${K8S_VERSION}"
cp "${component}_v${K8S_VERSION}" "${component}"
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP "$component" "$nodename":/usr/local/bin/
done
done
{
# crictl: CLI for CRI-compatible runtimes, used here against containerd.
component="crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/${component}" \
|| exit 1
fi
mkdir -p crictl
tar -xvf "$component" -C crictl
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP crictl/* "$nodename":/usr/local/bin/
done
}
{
# runsc: gVisor user-space runtime (pinned build from kubernetes-the-hard-way).
component="runsc-50c283b9f56bb7200938d9e207355f05f79f0d17"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://storage.googleapis.com/kubernetes-the-hard-way/${component}" \
|| exit 1
fi
chmod +x "$component"
cp "$component" runsc
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP runsc "$nodename":/usr/local/bin/runsc
done
}
{
# runc: the default OCI runtime referenced by config.toml.
component="runc.amd64"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/${component}" \
|| exit 1
fi
chmod +x "$component"
cp "$component" runc
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP runc "$nodename":/usr/local/bin/runc
done
}
{
# Standard CNI plugin binaries (bridge, loopback, host-local, ...).
component="cni-plugins-linux-amd64-v0.8.1.tgz"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/containernetworking/plugins/releases/download/v0.8.1/${component}" \
|| exit 1
fi
mkdir -p cni
tar -xvf "$component" -C cni
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP cni/* "$nodename":/usr/local/bin/
done
}
{
# containerd release tarball; its bin/ contents go to /bin on each worker.
component="containerd-1.2.6.linux-amd64.tar.gz"
if [ -f "${component}" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} binary ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://github.com/containerd/containerd/releases/download/v1.2.6/${component}" \
|| exit 1
fi
mkdir -p containerd
tar -xvf "${component}" -C containerd
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}to ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
rsync -acIP containerd/bin/* "$nodename":/bin/
done
}
##CONFIGURE WORKER CNI Networking Configuration
###!!! USING CANAL FOR NETWORKING INSTEAD
#cat <<EOF | tee 10-bridge.conf
#{
# "cniVersion": "0.3.0",
# "name": "bridge",
# "type": "bridge",
# "bridge": "cnio0",
# "isGateway": true,
# "ipMasq": true,
# "ipam": {
# "type": "host-local",
# "ranges": [
# [{"subnet": "${POD_CIDR}"}]
# ],
# "routes": [{"dst": "0.0.0.0/0"}]
# }
#}
#EOF
#cat <<EOF | tee 99-loopback.conf
#{
# "cniVersion": "0.3.1",
# "type": "loopback"
#}
#EOF
#for nodename in ${K8S_WORKER_NODES[*]}
#do
# echo -e "${COLOUR1}configuring CNI Networking for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
# scp -p 10-bridge.conf 99-loopback.conf $nodename:/etc/cni/net.d/
#done
##Containerd Configuration
# Shared by all workers; rendered once here and shipped in the loop below.
tee config.toml <<EOF
[plugins]
[plugins.cri]
# stream_server_address is the ip address streaming server is listening on.
stream_server_address = "127.0.0.1"
[plugins.cri.containerd]
snapshotter = "overlayfs"
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runc"
runtime_root = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runsc"
runtime_root = "/run/containerd/runsc"
[plugins.cri.containerd.gvisor]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runsc"
runtime_root = "/run/containerd/runsc"
EOF
# containerd systemd unit for workers (identical to the master variant).
tee containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
# Ship the containerd config and unit to every worker (quoted per SC2086).
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}configuring Containerd for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
scp -p config.toml "$nodename":/etc/containerd/
scp -p containerd.service "$nodename":/etc/systemd/system/
done
##Configure Worker Kubelet
# Per-node: render KubeletConfiguration + unit, ship them, and move the
# node's certs/kubeconfig/CA into place in a single ssh session.
# NOTE(review): manifest indentation was lost in the wiki copy, producing
# invalid YAML; restored to the standard KubeletConfiguration layout.
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}configuring Kubelet for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
cat <<EOF | tee "${nodename}-kubelet-config.yaml"
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "${CLUSTER_DNS_IP}"
podCIDR: "${POD_CIDR}"
resolvConf: "/etc/resolv.conf"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/${nodename}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/${nodename}-key.pem"
EOF
cat <<EOF | tee kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--config=/var/lib/kubelet/kubelet-config.yaml \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--image-pull-progress-deadline=2m \\
--kubeconfig=/var/lib/kubelet/kubeconfig \\
--network-plugin=cni \\
--register-node=true \\
--fail-swap-on=false \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
scp -p "${nodename}-kubelet-config.yaml" "$nodename":/var/lib/kubelet/kubelet-config.yaml
scp -p kubelet.service "$nodename":/etc/systemd/system/
# one ssh session instead of three; ';' keeps the original attempt-all semantics
ssh "$nodename" "mv ${nodename}-key.pem ${nodename}.pem /var/lib/kubelet/; \
mv ${nodename}.kubeconfig /var/lib/kubelet/kubeconfig; \
mv ca.pem /var/lib/kubernetes/"
done
##Configure Worker Kubernetes Proxy
# NOTE(review): manifest indentation was lost in the wiki copy, producing
# invalid YAML; restored to the standard KubeProxyConfiguration layout.
cat <<EOF | tee kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
mode: "iptables"
clusterCIDR: "${POD_CIDR}"
EOF
# kube-proxy systemd unit for workers.
tee kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/var/lib/kube-proxy/kube-proxy-config.yaml
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Ship kube-proxy config/unit and move its kubeconfig into place on each worker.
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}configuring Kube-Proxy for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
scp -p kube-proxy-config.yaml "$nodename":/var/lib/kube-proxy/
scp -p kube-proxy.service "$nodename":/etc/systemd/system/
ssh "$nodename" "mv ~/kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig"
done
##Start Kubelet and Kube-Proxy
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}start Kubelet and Kube-Proxy for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh "$nodename" "systemctl daemon-reload; \
systemctl enable containerd kubelet kube-proxy; \
systemctl start containerd kubelet kube-proxy; \
crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock"
done
##########################################################################################################
#ENCRYPT K8S EXISTING SECRETS IN ETCD
# Rewriting every secret forces the apiserver to store them through the
# encryption provider, so pre-existing plaintext records get encrypted.
echo -e "${COLOUR1}replacing k8s secrets with encryption at rest in etcd ...${NC}"
ssh "${K8S_MASTER_NODES[0]}" "kubectl get secrets --all-namespaces -o json | kubectl replace -f -"
##########################################################################################################
#SET UP KUBERNETES NETWORKING MODEL (Canal)
mkdir -p "$K8S_TEMPDIR"
cd "$K8S_TEMPDIR" || exit 1
{
component="canal-etcd.yaml"
if [ -f "$component" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://docs.projectcalico.org/v3.7/manifests/${component}" \
|| exit 1
fi
# Build the comma-separated etcd client endpoint list (order is not
# significant to etcd clients).
etcd_endpoints_str=''
for etcd_ip in "${K8S_ETCD_INTERNAL_IP[@]}"
do
etcd_endpoints_str="${etcd_endpoints_str:+${etcd_endpoints_str},}https://${etcd_ip}:2379"
done
# Inject base64-encoded etcd TLS material and endpoints into the manifest,
# and point calico/flannel at the mounted /calico-secrets files.
sed -i "/ etcd-ca: /c\ etcd-ca: \"$(base64 -w 0 < "${CERTDIR}/ca.pem")\"" "${component}"
sed -i "/ etcd-cert: /c\ etcd-cert: \"$(base64 -w 0 < "${CERTDIR}/etcd-client.pem")\"" "${component}"
sed -i "/ etcd-key: /c\ etcd-key: \"$(base64 -w 0 < "${CERTDIR}/etcd-client-key.pem")\"" "${component}"
sed -i "/ etcd_endpoints: /c\ etcd_endpoints: \"${etcd_endpoints_str}\"" "${component}"
sed -i '/ etcd_ca: /c\ etcd_ca: "/calico-secrets/etcd-ca"' "${component}"
sed -i '/ etcd_cert: /c\ etcd_cert: "/calico-secrets/etcd-cert"' "${component}"
sed -i '/ etcd_key: /c\ etcd_key: "/calico-secrets/etcd-key"' "${component}"
sed -i '/--cert-file=/c\ - \"--cert-file=/calico-secrets/etcd-cert\"' "${component}"
sed -i '/--key-file=/c\ - \"--key-file=/calico-secrets/etcd-key\"' "${component}"
sed -i '/--ca-file=/c\ - \"--ca-file=/calico-secrets/etcd-ca\"' "${component}"
echo -e "${COLOUR1}applying Canal as Kubernetes Network Model ...${NC}"
scp -p "${component}" "${K8S_MASTER_NODES[0]}":~/
ssh "${K8S_MASTER_NODES[0]}" "kubectl apply -f ${component}"
}
##Enable Flannel Packet Forwarding
# sysctl spells the dot in the interface name flannel.1 as '/', hence the key
# net.ipv4.conf.flannel/1.forwarding. grep -qF reads the file directly
# (fixed-string match; no useless cat).
for nodename in "${K8S_MASTER_NODES[@]}"
do
echo -e "${COLOUR1}Enabling Flannel packet forwarding for master ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh "$nodename" "grep -qF 'net.ipv4.conf.flannel/1.forwarding' /etc/sysctl.conf || echo 'net.ipv4.conf.flannel/1.forwarding = 1' >> /etc/sysctl.conf; sysctl -p -q"
done
for nodename in "${K8S_WORKER_NODES[@]}"
do
echo -e "${COLOUR1}Enabling Flannel packet forwarding for worker ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh "$nodename" "grep -qF 'net.ipv4.conf.flannel/1.forwarding' /etc/sysctl.conf || echo 'net.ipv4.conf.flannel/1.forwarding = 1' >> /etc/sysctl.conf; sysctl -p -q"
done
#########################################################################################################################
#CHECK MASTERS JOINED CLUSTER
# NOTE(review): the wiki copy mangled the test brackets into markdown links;
# restored to plain '[ ... ]' tests. Retries (restarting kubelet) until every
# master shows up in 'kubectl get nodes'.
allChecked="0"
while [ "$allChecked" == "0" ]
do
allChecked="1"
for nodename in "${K8S_MASTER_NODES[@]}"
do
result=$(ssh "${K8S_MASTER_NODES[0]}" "kubectl get nodes | grep -q \"^${nodename} \" || echo not found")
if [ "$result" == "" ]
then
echo -e "${COLOUR1}Master ${COLOUR2}${nodename}${COLOUR1} is in cluster${NC}"
else
echo -e "${COLOUR1}Master ${COLOUR2}${nodename}${COLOUR1} is NOT in cluster! Restarting kubelet ...${NC}"
allChecked="0"
ssh "${nodename}" "systemctl restart kubelet"
sleep 3s
fi
done
done
#LABEL AND TAINT MASTERS
# Give masters the master role label and a NoSchedule taint so regular
# workloads are scheduled onto workers only.
for nodename in "${K8S_MASTER_NODES[@]}"
do
echo -e "${COLOUR1}Tainting and labeling master ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh "${K8S_MASTER_NODES[0]}" "kubectl label nodes ${nodename} node-role.kubernetes.io/master=; kubectl taint nodes ${nodename} node-role.kubernetes.io/master=:NoSchedule"
done
#CHECK WORKERS JOINED CLUSTER
# NOTE(review): the wiki copy mangled the test brackets into markdown links;
# restored to plain '[ ... ]' tests. Same retry loop as for masters.
allChecked="0"
while [ "$allChecked" == "0" ]
do
allChecked="1"
for nodename in "${K8S_WORKER_NODES[@]}"
do
result=$(ssh "${K8S_MASTER_NODES[0]}" "kubectl get nodes | grep -q \"^${nodename} \" || echo not found")
if [ "$result" == "" ]
then
echo -e "${COLOUR1}Worker ${COLOUR2}${nodename}${COLOUR1} is in cluster${NC}"
else
echo -e "${COLOUR1}Worker ${COLOUR2}${nodename}${COLOUR1} is NOT in cluster! Restarting kubelet ...${NC}"
allChecked="0"
ssh "${nodename}" "systemctl restart kubelet"
sleep 3s
fi
done
done
#########################################################################################################################
#DEPLOY DNS Cluster Add-on
mkdir -p "$K8S_TEMPDIR"
cd "$K8S_TEMPDIR" || exit 1
{
component="coredns.yaml"
if [ -f "$component" ]
then
echo -e "${COLOUR1}${component} already exists${NC}"
else
echo -e "${COLOUR1}downloading ${component} ...${NC}"
wget -q --show-progress --https-only --timestamping \
"https://storage.googleapis.com/kubernetes-the-hard-way/${component}" \
|| exit 1
fi
# point the coredns Service at the cluster DNS IP chosen in the header
sed -i "/ clusterIP: /c\ clusterIP: ${CLUSTER_DNS_IP}" "${component}"
echo -e "${COLOUR1}applying coredns as cluster DNS ...${NC}"
scp -p "${component}" "${K8S_MASTER_NODES[0]}":~/
ssh "${K8S_MASTER_NODES[0]}" "kubectl apply -f ${component}"
}
##############################################################################################################################
#HEALTH CHECKS
# Probe each master's apiserver /healthz locally, dump component statuses,
# then verify the control-plane VIP and the final node list.
for nodename in "${K8S_MASTER_NODES[@]}"
do
echo -e "${COLOUR1}health check for ${COLOUR2}${nodename}${COLOUR1} ...${NC}"
ssh "$nodename" "curl -i https://127.0.0.1:6443/healthz --cacert /var/lib/kubernetes/ca.pem"
echo ""
ssh "$nodename" "kubectl get componentstatuses"
echo ""
sleep 1s
done
echo -e "${COLOUR1}checking Kubernetes Version:${NC}"
curl --cacert "$CERTDIR/ca.pem" "https://${K8S_CONTROL_PLANE_VIP}:${K8S_PUBLIC_PORT}/version"
echo ""
echo -e "${COLOUR1}running kubectl get nodes ...${NC}"
ssh "${K8S_MASTER_NODES[0]}" "kubectl get nodes -o wide"
echo -e "${COLOUR1}ALL DONE!${NC}"