
kubespray

setup

3๋Œ€ ์ด์ƒ์˜ vm(multipass ์ฐธ๊ณ ) ์ค€๋น„ multipass ์‚ฌ์šฉ์„ ๊ฐ€์ •ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ubuntu ๋ฅผ ๊ธฐ์ค€์œผ๋กœ ์ž‘์„ฑ๋จ

dns

sudo vi /etc/hosts
# 3๋Œ€์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋„๋ก ์ •๋ณด ์„ค์ •

configure access permission

ssh-keygen
ssh-copy-id [name as written in /etc/hosts]

์ž๊ธฐ ์ž์‹ ๋„ ansible ๋Œ€์ƒ์ด๋ผ ํ•จ๊ป˜ ๋ณต์‚ฌํ•ด์•ผํ•œ๋‹ค

์‹คํŒจ์‹œ

ssh-keygen
cat ~/.ssh/id_rsa.pub # copy this
vi ~/.ssh/authorized_keys # append it as the last line; do this on every node in /etc/hosts that will become a control plane
ssh [name as written in /etc/hosts] # verify access

configure kubespray

git clone -b v2.22.0 https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
sudo apt install python3-pip -y
sudo pip3 install -r requirements.txt
cp -r inventory/sample inventory/pi
vi inventory/pi/hosts.yml # see the kubespray docs for the settings; an example is shown below
vi inventory/pi/group_vars/k8s_cluster/k8s-cluster.yml # see the kubespray docs for the settings
ansible-playbook -i inventory/pi/hosts.yml --become --become-user=root -v cluster.yml
all:
  hosts:
    pi0:
      ansible_host: pi0
    pi1:
      ansible_host: pi1
    pi2:
      ansible_host: pi2
  children:
    kube_control_plane:
      hosts:
        pi0:
        pi1:
        pi2:
    kube_node:
      hosts:
        pi0:
        pi1:
        pi2:
    etcd:
      hosts:
        pi0:
        pi1:
        pi2:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
        calico_rr:
    calico_rr:
      hosts: {}
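
Instead of writing hosts.yml by hand, kubespray also ships an inventory builder; a sketch with placeholder IPs (this is what the "declare IPS" note further down refers to, and it may need contrib/inventory_builder/requirements.txt installed):

declare -a IPS=(192.168.0.101 192.168.0.102 192.168.0.103)
CONFIG_FILE=inventory/pi/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
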
# inventory/pi/group_vars/k8s_cluster/k8s-cluster.yml
kube_proxy_strict_arp: true # required for MetalLB
kubernetes_audit: true
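
Once the cluster is up, whether strict ARP actually reached kube-proxy can be checked roughly like this (a sketch; requires the kubectl setup described below):

kubectl -n kube-system get configmap kube-proxy -o yaml | grep -i strictarp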

install kubectl

sudo snap install kubectl --classic
kubectl get nodes -o wide

configure local kube/config

์—ฌ๊ธฐ๋Š” localhost ๊ธฐ ๋•Œ๋ฌธ์— ๋ฌผ๋ฆฌ์ ์ธ ์‚ฌ์šฉ ์ปดํ“จํ„ฐ, ๋‚˜์˜ ๊ฒฝ์šฐ์—๋Š” mac ์ด๋‹ค. kubectl ์ด ๊น”๋ ค์žˆ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ๋‹ค.

vi ~/.kube/config # copy over /root/.kube/config from inside the VM; if unsure, see mount in [multipass](/deptno/deptno.github.io/wiki/multipass)
# replace the IP in clusters[].cluster.server with the control-plane's IP
# 2022-11-27: the book additionally edits the fields below, but I think that is only needed to tell multiple clusters apart locally
# - clusters[].name
# - contexts[].context.cluster
# - contexts[].context.user
# - contexts[].name
# - current-context
# - users[].name
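
A sketch of pulling the kubeconfig over ssh instead of mounting, assuming the remote user can sudo without a password (pi0 stands in for a control-plane node):

mkdir -p ~/.kube
ssh pi0 sudo cat /root/.kube/config > ~/.kube/config
# then fix the server address as described above
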
$ kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
kube00   Ready    control-plane   31m   v1.25.4
kube01   Ready    control-plane   30m   v1.25.4
kube02   Ready    control-plane   30m   v1.25.4

error

The connection to the server localhost:8080 was refused - did you specify the right host or port?

ansible-playbook ๋ช…๋ น์–ด๋ฅผ ์‹คํ–‰ํ• ๋•Œ --become-user=root ๊ฐ€ ๋˜์–ด ์žˆ์–ด ~/.kube/config ๊ฐ€ root ๋ฅผ ๊ธฐ์ค€์œผ๋กœ ์ƒ์„ฑ๋œ ๊ฒƒ์œผ๋กœ ์ƒ๊ฐ๋œ๋‹ค.


fatal: [kube02]: FAILED! => {"changed": false, "msg": "modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.15.0-53-generic\n", "name": "nf_conntrack_ipv4", "params": "", "rc": 1, "state": "present", "stderr": "modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.15.0-53-generic\n", "stderr_lines": ["modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.15.0-53-generic"], "stdout": "", "stdout_lines": []}
...ignoring
Sunday 27 November 2022  17:37:38 +0900 (0:00:00.273)       0:11:17.710 *******

TASK [kubernetes/node : Persist ip_vs modules] **************************************************************************************************************
changed: [kube00] => {"changed": true, "checksum": "963dae5d0f149158bd6b9a750827856f6f2382fd", "dest": "/etc/modules-load.d/kube_proxy-ipvs.conf", "gid": 0, "group": "root", "md5sum": "0f7f7753a47d8c043fb1f8043b65beb4", "mode": "0644"

ansible-playbook ์ปค๋งจ๋“œ ์ค‘์— ๋ณด์ธ ์—๋Ÿฌ ๋ฉ”์‹œ์ง€์ธ๋ฐ ๋ฌด์‹œํ•ด๋„ ์ž˜๋˜์—ˆ๋‹ค.

fatal: [kube03]: FAILED! => {"msg": "Missing sudo password"}
ubuntu@kube00:~/kubespray$ ansible-playbook -i inventory/mycluster/hosts-new02.yml -b facts.yml
[WARNING]: Skipping callback plugin 'ara_default', unable to load

PLAY [Gather facts] *************************************************************
Thursday 22 December 2022  13:40:19 +0900 (0:00:00.012)       0:00:00.012 *****

TASK [Gather minimal facts] *****************************************************
ok: [kube00]
ok: [kube02]
ok: [kube01]
fatal: [kube03]: FAILED! => {"msg": "Missing sudo password"}

sudo -l ์ปค๋งจํŠธ๋ฅผ ํ†ตํ•ด ๋‚˜์˜ค๋Š” ์ˆœ์„œ๊ฐ€ ์˜๋ฏธ๊ฐ€ ์žˆ์œผ๋ฉฐ ์˜ค๋ฒ„๋ผ์ด๋”ฉ ๋˜์–ด ๋‹ค๋ฅด๊ฒŒ ๋™์ž‘ ํ•  ์ˆ˜ ์žˆ๋‹ค.

# User privilege specification
root    ALL=(ALL:ALL) ALL
# ! ๋ฌธ์ œ ์˜์—ญ
# ubuntu  ALL=(ALL) NOPASSWD: ALL

# Members of the admin group may gain root privileges
%admin ALL=(ALL) ALL

# Allow members of group sudo to execute any command
%sudo   ALL=(ALL:ALL) ALL

# See sudoers(5) for more information on "@include" directives:
# +update: 2022-12-29 ! resolved after moving the rule here
ubuntu  ALL=(ALL) NOPASSWD: ALL
# -update: 2022-12-29

@includedir /etc/sudoers.d
# ! ์ด๋™ ํ›„ ํ•ด๊ฒฐ๋œ ์˜์—ญ
ubuntu  ALL=(ALL) NOPASSWD: ALL

๋ฌธ์ œ์ƒํ™ฉ

ubuntu@kube03:~$ sudo -l
Matching Defaults entries for ubuntu on kube03:
    env_reset, mail_badpass, secure_path=/usr/local/sbin\:/usr/local/bin\:/usr/sbin\:/usr/bin\:/sbin\:/bin\:/snap/bin, use_pty

User ubuntu may run the following commands on kube03:
    (ALL) NOPASSWD: ALL
    (ALL : ALL) ALL
ubuntu@kube03:~$ !v
vi /etc/sudoers
ubuntu@kube03:~$ sudo vi /etc/sudoers

ํ•ด๊ฒฐ๋จ

ubuntu@kube03:~$ sudo -l
Matching Defaults entries for ubuntu on kube03:
    env_reset, mail_badpass, secure_path=/usr/local/sbin\:/usr/local/bin\:/usr/sbin\:/usr/bin\:/sbin\:/bin\:/snap/bin, use_pty

User ubuntu may run the following commands on kube03:
    (ALL : ALL) ALL
    (ALL) NOPASSWD: ALL
ubuntu@kube03:~$
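
A less error-prone alternative (a sketch, not what was done here) is to keep the NOPASSWD rule in a drop-in under /etc/sudoers.d, which the @includedir at the end of the file reads after the other rules, so it wins the override order:

echo 'ubuntu ALL=(ALL) NOPASSWD: ALL' | sudo tee /etc/sudoers.d/99-ubuntu-nopasswd
sudo visudo -c # syntax check before logging out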

fatal: [kube03]: FAILED! => {"changed": false, "elapsed": 300, "msg": "Timeout when waiting for file /etc/cni/net.d/calico-kubeconfig"}

fatal: [pi0]: FAILED! => {"changed": false, "msg": "modprobe: FATAL: Module dummy not found in directory /lib/modules/5.15.0-1012-raspi\n", "name": "dummy", "params": "numdummies=0", "rc": 1, "state": "present", "stderr": "modprobe: FATAL: Module dummy not found in directory /lib/modules/5.15.0-1012-raspi\n", "stderr_lines": ["modprobe: FATAL: Module dummy not found in directory /lib/modules/5.15.0-1012-raspi"], "stdout": "", "stdout_lines": []}
  • ๋‹ต์„ ์ฐพ์ง€ ๋ชปํ•œ ์ƒํƒœ, ubuntu 22.04 -> ubuntu 20.04 ๋กœ ๋‹ค์šด๊ทธ๋ ˆ์ด๋“œํ•จ

Failed to connect to the host via ssh: ssh: Could not resolve hostname pi2: Temporary failure in name resolution"

/etc/hosts ๋‚˜ dhcp ์„œ๋ฒ„์„œ resolv๊ฐ€ ์•ˆ๋˜๋Š” ๊ฒƒ์œผ๋กœ ๋ณด์ธ๋‹ค


the output has been hidden due to the fact that 'no_log: true' was specified for this result"

์„ค์น˜์‹œ์— ๋…ธ๋“œ ์ค‘ ๋‹ค๋ฅธ ์•„ํ‚คํ…์ณ๊ฐ€ ์žˆ์œผ๋ฉด ๋ฐœ์ƒํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์ƒ๊ฐ๋จ, eg) control-plane (arm) + worker node (x86) ์กฐํ•ฉ


TASK [etcd : Configure | Wait for etcd cluster to be healthy] ***********************************************************************************************************************************************************************
task path: /Users/deptno/workspace/src/github.com/kubespray/roles/etcd/tasks/configure.yml:82
fatal: [pi0]: FAILED! => {
    "attempts": 4,
    "changed": false,
    "cmd": "set -o pipefail && /usr/local/bin/etcdctl endpoint --cluster status && /usr/local/bin/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null",
    "delta": "0:00:05.194729",
    "end": "2022-12-30 11:54:44.046275",
    "invocation": {
        "module_args": {
            "_raw_params": "set -o pipefail && /usr/local/bin/etcdctl endpoint --cluster status && /usr/local/bin/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null",
            "_uses_shell": true,
            "argv": null,
            "chdir": null,
            "creates": null,
            "executable": "/bin/bash",
            "removes": null,
            "stdin": null,
            "stdin_add_newline": true,
            "strip_empty_ends": true,
            "warn": false
        }
    },
    "msg": "non-zero return code",
    "rc": 1,
    "start": "2022-12-30 11:54:38.851546",
    "stderr": "{\"level\":\"warn\",\"ts\":\"2022-12-30T11:54:43.954+0900\",\"logger\":\"etcd-client\",\"caller\":\"[email protected]/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0x40000d4c40/192.168.0.74:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = context deadline exceeded\"}\nFailed to get the status of endpoint https://192.168.0.78:2379 (context deadline exceeded)",
    "stderr_lines": [
        "{\"level\":\"warn\",\"ts\":\"2022-12-30T11:54:43.954+0900\",\"logger\":\"etcd-client\",\"caller\":\"[email protected]/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0x40000d4c40/192.168.0.74:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = context deadline exceeded\"}",
        "Failed to get the status of endpoint https://192.168.0.78:2379 (context deadline exceeded)"
    ],
    "stdout": "https://192.168.0.77:2379, afd0f05f33356a3a, 3.5.6, 20 kB, false, false, 9, 30, 30, \nhttps://192.168.0.74:2379, f0a124daf3245703, 3.5.6, 20 kB, true, false, 9, 30, 30, ",
    "stdout_lines": [
        "https://192.168.0.77:2379, afd0f05f33356a3a, 3.5.6, 20 kB, false, false, 9, 30, 30, ",
        "https://192.168.0.74:2379, f0a124daf3245703, 3.5.6, 20 kB, true, false, 9, 30, 30, "
    ]
}

sudo service ufw stop
sudo vi /etc/hosts
  • ๋ฐฉํ™”๋ฒฝ ํ•ด์ œ
  • sudo vi /etc/hosts ํ˜ธ์ŠคํŠธ๊ฐ„ ๋ช…์‹œ ์ดํ›„ ์—๋Ÿฌ๋ฉ”์‹œ์ง€ ๋ณ€ํ•จ -> ์žฌ์‹œ์ž‘ ํ•˜๋‹ˆ ๊ฐ™์Œ
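
To narrow it down, checking whether the etcd client port is reachable from each member can help (sketch; the IP comes from the error above):

sudo ufw status          # confirm the firewall really is inactive
nc -vz 192.168.0.78 2379 # the etcd client port must be reachable from every etcd node
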
fatal: [pi0]: FAILED! => {
    "attempts": 5,
    "changed": false,
    "cmd": [
        "/usr/local/bin/kubeadm",
        "--kubeconfig",
        "/etc/kubernetes/admin.conf",
        "token",
        "create"
    ],
    "delta": "0:01:15.145403",
    "end": "2022-12-30 12:25:47.954213",
    "invocation": {
        "module_args": {
            "_raw_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_client_request_alive: done pid = 82745\r\ndebug3: mux_client_request_session: session request sent\r\ndebug1: mux_client_request_session: master session id: 4\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n")
  • set the control-plane IPs with declare IPS -> failed
  • ansible-playbook [....] --private-key=~/.ssh/[private key]
  • not resolved

link