Setting up a basic Kubernetes environment on Tencent Cloud

Deploying a Kubernetes cluster with kubeadm

Initialize the three machines

[root@k8s-master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.116.128 k8s-master
192.168.116.129 k8s-node1
192.168.116.130 k8s-node2
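The same hosts entries must exist on every machine. A small loop like the following can push the file from the master to the other two nodes (a sketch, assuming root SSH access to both nodes):
# for host in k8s-node1 k8s-node2; do scp /etc/hosts root@$host:/etc/hosts; done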
Disable the firewall:
# systemctl stop firewalld
# systemctl disable firewalld
Disable SELinux:
# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config   # match any current value (the default is enforcing, not permissive); edit /etc/selinux/config directly, since sed -i on the /etc/sysconfig/selinux symlink would replace the symlink with a plain file
Disable system swap
# swapoff -a
[root@localhost /]# sed -i 's/.*swap.*/#&/' /etc/fstab     # comment out the swap entry in /etc/fstab so it is not remounted at boot
# free -m
              total        used        free      shared  buff/cache   available
Mem:           3935         144        3415           8         375        3518
Swap:             0           0           0
Clean the yum cache:
[root@k8s-master ~]# yum clean all && yum makecache fast
Configure resource limits (file handles and process counts):
[root@k8s-master ~]# cat << EOF >>/etc/security/limits.conf 
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
* soft memlock  unlimited
* hard memlock  unlimited
EOF
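limits.conf is only read at login, so the new limits apply to fresh sessions. After re-logging in, a quick sanity check (values should match what was written above):
# ulimit -n    # max open files, expect 65536
# ulimit -u    # max user processes, expect 65536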
Synchronize time:
systemctl start chronyd.service && systemctl enable chronyd.service
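To confirm the clock is actually synchronizing, query chrony (standard chronyc subcommands):
# chronyc sources     # lists the NTP sources in use
# chronyc tracking    # shows the current offset and sync status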
Set kernel parameters
[root@k8s-master ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
EOF
[root@k8s-master ~]# modprobe br_netfilter   # load the bridge netfilter module
[root@k8s-master ~]# sysctl -p /etc/sysctl.d/k8s.conf  # apply the settings
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
[root@k8s-master ~]# ls /proc/sys/net/bridge/   # confirm the bridge sysctl entries now exist
bridge-nf-call-arptables  bridge-nf-call-iptables        bridge-nf-filter-vlan-tagged
bridge-nf-call-ip6tables  bridge-nf-filter-pppoe-tagged  bridge-nf-pass-vlan-input-dev
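modprobe does not persist across reboots. To load br_netfilter automatically at boot, the standard systemd modules-load mechanism can be used:
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf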
Configure the IPVS kernel modules
[root@k8s-master ~]# cat <<EOF > /etc/sysconfig/modules/ipvs.modules   # overwrite rather than append, so re-running does not duplicate lines
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@k8s-master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules 
[root@k8s-master ~]# sh /etc/sysconfig/modules/ipvs.modules 
[root@k8s-master ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4    # verify the required modules are loaded
nf_conntrack_ipv4      15053  0 
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          139264  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack
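Note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so the modprobe above fails on those kernels; load nf_conntrack there instead:
# modprobe -- nf_conntrack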
If sysctl reports an error for net.bridge.bridge-nf-call-iptables, load the br_netfilter module (or reboot the server) and re-apply:
# modprobe br_netfilter
# sysctl -p /etc/sysctl.d/k8s.conf
Install ipset, ipvsadm, and related dependencies and tools
[root@k8s-master ~]# yum install -y ipset ipvsadm
[root@k8s-master ~]# yum install -y epel-release
[root@k8s-master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools libseccomp libtool-ltdl

Install Docker

# yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
# rpm -qa | grep docker
# yum install -y git
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo   # add the Docker yum repository
# yum list docker-ce --showduplicates | sort -r  # list all available docker-ce versions
# yum install -y docker-ce-18.06.1.ce-3.el7    # pin this version; to install the latest instead: yum install -y docker-ce
# systemctl start docker && systemctl enable docker     # start docker and enable it at boot
Configure the Docker registry mirrors and switch the cgroup driver to systemd (JSON does not allow comments, so note here that the exec-opts entry is what changes the driver):
[root@k8s-master ~]# cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
      "https://br003st4.mirror.aliyuncs.com",
      "https://dockerhub.azk8s.cn",
      "http://hub-mirror.c.163.com",
      "https://registry.docker-cn.com"
  ],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "5"
  }
}
EOF
[root@node03 ~]# systemctl restart docker
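Verify that Docker picked up the new configuration, in particular the cgroup driver, which must be systemd to match the kubelet setup below:
# docker info | grep -i 'cgroup driver'    # expect: Cgroup Driver: systemd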

Pull the images; every machine must have them

##### Pull from mirror repositories #####
# docker pull mirrorgooglecontainers/kube-apiserver-amd64:v1.19.0
# docker pull mirrorgooglecontainers/kube-controller-manager-amd64:v1.19.0
# docker pull mirrorgooglecontainers/kube-scheduler-amd64:v1.19.0
# docker pull mirrorgooglecontainers/kube-proxy-amd64:v1.19.0
# docker pull  mirrorgooglecontainers/pause-amd64:3.2
# docker pull mirrorgooglecontainers/etcd-amd64:3.2.24
# docker pull kuberneter/coredns:1.7.0

After downloading, retag all of the mirrored images as k8s.gcr.io/... (e.g. k8s.gcr.io/kube-controller-manager:v1.19.0); otherwise Kubernetes will not recognize them.

# docker tag mirrorgooglecontainers/kube-apiserver-amd64:v1.19.0  k8s.gcr.io/kube-apiserver:v1.19.0
# docker tag mirrorgooglecontainers/kube-controller-manager-amd64:v1.19.0 k8s.gcr.io/kube-controller-manager:v1.19.0
# docker tag mirrorgooglecontainers/kube-scheduler-amd64:v1.19.0 k8s.gcr.io/kube-scheduler:v1.19.0
# docker tag mirrorgooglecontainers/kube-proxy-amd64:v1.19.0 k8s.gcr.io/kube-proxy:v1.19.0
# docker tag mirrorgooglecontainers/pause-amd64:3.2 k8s.gcr.io/pause:3.2
# docker tag mirrorgooglecontainers/etcd-amd64:3.2.24 k8s.gcr.io/etcd:3.2.24
# docker tag kuberneter/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
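Note that the tags above (v1.19.0, etcd 3.2.24, coredns 1.7.0) should match what the kubeadm version installed below (1.18.12) actually expects; once kubeadm is installed, the authoritative list can be printed. Also, because the init configuration later sets imageRepository to the Aliyun mirror, kubeadm can pull the correct images by itself, which makes this manual pull-and-retag step optional:
# kubeadm config images list --kubernetes-version v1.18.12   # prints the exact image tags required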

Install kubeadm, kubelet, and kubectl on all nodes:

Configure the repository
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

On all nodes:
1. Install
# yum makecache fast
Mind the installation order: never install kubeadm on its own first, because it would pull in the latest kubelet and kubectl as dependencies and cause a version mismatch.
[root@k8s-master ~]# yum install -y kubelet-1.18.12 kubectl-1.18.12 kubeadm-1.18.12
Configure and start kubelet (all nodes)
1. Configure the cgroup driver and pause image for kubelet
Set a variable with Docker's cgroup driver:
[root@k8s-master ~]# DOCKER_CGROUPS=`docker info |grep 'Cgroup' | awk '{print $3}'`
[root@k8s-node2 ~]# echo $DOCKER_CGROUPS
systemd
2. Write kubelet's cgroup driver and pause-image settings
# cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=$DOCKER_CGROUPS --pod-infra-container-image=k8s.gcr.io/pause:3.2"
EOF

Start kubelet
# systemctl daemon-reload
# systemctl enable kubelet && systemctl restart kubelet
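At this point kubelet will keep restarting in a loop; that is expected, because it has no cluster configuration until kubeadm init (or kubeadm join) runs. Its state can still be checked:
# systemctl status kubelet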
Install kubectl command completion
[root@k8s-master ~]# yum install -y bash-completion
[root@k8s-master ~]# source /usr/share/bash-completion/bash_completion
[root@k8s-master ~]# source <(kubectl completion bash)
[root@k8s-master ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc

Configure the master node and generate the join token

[root@k8s-master ~]# cat > kubeadm-config.yaml << EOF
 
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.116.128         # the master's IP address
  bindPort: 6443
nodeRegistration:
  taints:
  - effect: PreferNoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
imageRepository: registry.aliyuncs.com/google_containers
kubernetesVersion: v1.18.12               # must match the installed kubeadm/kubelet version
networking:
  podSubnet: 10.244.0.0/16                # the pod network CIDR
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

EOF
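Before running init, the required images can optionally be pre-pulled with the same config file; this speeds up the init itself and surfaces registry problems early:
[root@k8s-master ~]# kubeadm config images pull --config kubeadm-config.yaml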
[root@k8s-master ~]# kubeadm init --config kubeadm-config.yaml
......
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!  # this line confirms the init succeeded

To start using your cluster, you need to run the following as a regular user:  # run these three commands next

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.116.128:6443 --token p9uu0v.otioniqssoberwo6 \
    --discovery-token-ca-cert-hash sha256:5c89cab1612f58b840be23a7594e951d9b32fad2c09d7a6d93fe7f041ffab9bb   # record this token and hash; they are needed when joining nodes
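The bootstrap token is valid for 24 hours by default. If it expires before all nodes have joined, a fresh join command can be generated on the master at any time:
[root@k8s-master ~]# kubeadm token create --print-join-command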

What the main init steps produced:
    [kubelet] generates the kubelet config file /var/lib/kubelet/config.yaml
    [certificates] generates the various certificates
    [kubeconfig] generates the kubeconfig files
    [bootstraptoken] generates the bootstrap token; record it, it is used later when adding nodes with kubeadm join
[root@k8s-master ~]# rm -rf $HOME/.kube
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master ~]# kubectl get nodes   # NotReady is expected until a network plugin is deployed
NAME         STATUS     ROLES    AGE     VERSION
k8s-master   NotReady   master   2m41s   v1.18.12

Deploy the network plugin (master node)

Many network plugins are available for Kubernetes; the two most popular and commonly recommended are Flannel and Calico. Flannel is an overlay-network plugin that is easy to deploy and, once working, rarely needs attention. Calico, by contrast, is a layer-3 data center networking solution that routes packets between hosts using the BGP protocol and allows flexible network policies. Calico is deployed here:

# download the calico deployment manifest
[root@k8s-master ~]# wget https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
[root@k8s-master ~]# sed -i 's/192.168.0.0/10.244.0.0/g' calico.yaml   # change the default pool to the podSubnet configured earlier
[root@k8s-master ~]# kubectl apply -f calico.yaml
[root@k8s-master ~]# kubectl get pod -n kube-system   # wait until everything is Running; this can take a while
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-57546b46d6-67djm   1/1     Running   0          4m41s
calico-node-cnbn6                          1/1     Running   0          4m42s
coredns-7ff77c879f-7lbm2                   1/1     Running   0          33m
coredns-7ff77c879f-sncfc                   1/1     Running   0          33m
etcd-k8s-master                            1/1     Running   0          33m
kube-apiserver-k8s-master                  1/1     Running   0          33m
kube-controller-manager-k8s-master         1/1     Running   0          33m
kube-proxy-h5qzs                           1/1     Running   0          33m
kube-scheduler-k8s-master                  1/1     Running   0          33m
[root@k8s-master ~]# kubectl get pod -n kube-system | grep kube-proxy
kube-proxy-h5qzs                           1/1     Running   0          53m
[root@k8s-master ~]# kubectl logs kube-proxy-h5qzs -n kube-system   # pick any kube-proxy pod; if the logs mention ipvs, the IPVS proxy mode is in use
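The IPVS rule table itself can also be inspected directly with the ipvsadm tool installed earlier; cluster service IPs should appear as virtual servers with pod IPs as their real servers:
[root@k8s-master ~]# ipvsadm -Ln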

Join the worker nodes to the cluster

Join the nodes to the cluster:
Run on every node the join command that kubeadm init printed on the master, e.g.:
# kubeadm join 192.168.116.128:6443 --token p9uu0v.otioniqssoberwo6 \
    --discovery-token-ca-cert-hash sha256:5c89cab1612f58b840be23a7594e951d9b32fad2c09d7a6d93fe7f041ffab9bb

If the join fails, enable IP forwarding and retry:
# sysctl -w net.ipv4.ip_forward=1
Check that the nodes joined successfully:
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   43m     v1.18.12
k8s-node1    Ready    <none>   6m46s   v1.18.12
k8s-node2    Ready    <none>   6m37s   v1.18.12
If every node shows Ready, congratulations, the cluster is up!