Install WEKO3 on Kubernetes

Log in to the master node.

vagrant ssh k8smaster

Create a namespace for WEKO3.

kubectl create ns weko3 

Here is the result of running the command above:

$ kubectl create ns weko3    
namespace/weko3 created

Create a context that defaults to the weko3 namespace.

kubectl config set-context weko3 --namespace=weko3 --cluster=kubernetes --user=kubernetes-admin
$ kubectl config set-context weko3 --namespace=weko3 --cluster=kubernetes --user=kubernetes-admin
Context "weko3" created.

Switch the default context to the newly created one.

kubectl config use-context weko3
$ kubectl config use-context weko3
Switched to context "weko3".

Check the contexts on this cluster:

$ kubectl config get-contexts
CURRENT   NAME                          CLUSTER      AUTHINFO           NAMESPACE
          kubernetes-admin@kubernetes   kubernetes   kubernetes-admin   
*         weko3                         kubernetes   kubernetes-admin   weko3

Install ingress-nginx

https://kubernetes.github.io/ingress-nginx/

Download the ingress-nginx manifest and apply it to the cluster.

wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml
kubectl apply -f mandatory.yaml

Here is mandatory.yaml:

apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      # wait up to five minutes for the drain of connections
      terminationGracePeriodSeconds: 300
      serviceAccountName: nginx-ingress-serviceaccount
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 101
            runAsUser: 101
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown

---

apiVersion: v1
kind: LimitRange
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  limits:
  - min:
      memory: 90Mi
      cpu: 100m
    type: Container

Here is the result:

$ kubectl apply -f mandatory.yaml
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
limitrange/ingress-nginx created

Check the ingress-nginx resources:

$ kubectl get all -n ingress-nginx
NAME                                            READY   STATUS              RESTARTS   AGE
pod/nginx-ingress-controller-7f74f657bd-flhcn   0/1     ContainerCreating   0          59s

NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx-ingress-controller   0/1     1            0           59s

NAME                                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginx-ingress-controller-7f74f657bd   1         1         0       59s

Configure ingress-nginx by exposing it through a LoadBalancer Service:

cat >ingress-nginx.yaml <<EOF
kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  externalTrafficPolicy: Local
  type: LoadBalancer
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
EOF

Apply it:

kubectl apply -f ingress-nginx.yaml
$ kubectl apply -f ingress-nginx.yaml
service/ingress-nginx created

Check the ingress-nginx service. The EXTERNAL-IP remains <pending> until a load-balancer implementation assigns one; MetalLB, installed next, will provide it:

$ kubectl get svc -n ingress-nginx
NAME            TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx   LoadBalancer   10.98.218.210   <pending>     80:32736/TCP,443:31415/TCP   37s

Install MetalLB

https://metallb.universe.tf/

Download the MetalLB manifest and apply it:

wget https://raw.githubusercontent.com/google/metallb/v0.8.3/manifests/metallb.yaml
kubectl apply -f metallb.yaml

Here is metallb.yaml:

apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: metallb
  name: metallb-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  labels:
    app: metallb
  name: speaker
  namespace: metallb-system
spec:
  allowPrivilegeEscalation: false
  allowedCapabilities:
  - NET_ADMIN
  - NET_RAW
  - SYS_ADMIN
  fsGroup:
    rule: RunAsAny
  hostNetwork: true
  hostPorts:
  - max: 7472
    min: 7472
  privileged: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: speaker
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
rules:
- apiGroups:
  - ''
  resources:
  - services
  verbs:
  - get
  - list
  - watch
  - update
- apiGroups:
  - ''
  resources:
  - services/status
  verbs:
  - update
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:speaker
rules:
- apiGroups:
  - ''
  resources:
  - services
  - endpoints
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - extensions
  resourceNames:
  - speaker
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: metallb
    component: speaker
  name: speaker
  namespace: metallb-system
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: speaker
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: METALLB_HOST
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        image: metallb/speaker:v0.8.2
        imagePullPolicy: IfNotPresent
        name: speaker
        ports:
        - containerPort: 7472
          name: monitoring
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
            - SYS_ADMIN
            drop:
            - ALL
          readOnlyRootFilesystem: true
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/os: linux
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 0
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: metallb
    component: controller
  name: controller
  namespace: metallb-system
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: controller
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        image: metallb/controller:v0.8.2
        imagePullPolicy: IfNotPresent
        name: controller
        ports:
        - containerPort: 7472
          name: monitoring
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true
      nodeSelector:
        beta.kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0

Here is the result:

$ kubectl apply -f metallb.yaml
namespace/metallb-system created
podsecuritypolicy.policy/speaker created
serviceaccount/controller created
serviceaccount/speaker created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
role.rbac.authorization.k8s.io/config-watcher created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/config-watcher created
daemonset.apps/speaker created
deployment.apps/controller created

Check the MetalLB resources:

$ kubectl get all -n metallb-system
NAME                              READY   STATUS    RESTARTS   AGE
pod/controller-65895b47d4-ck5dt   1/1     Running   0          68s
pod/speaker-2rsgz                 1/1     Running   0          68s
pod/speaker-nvcp5                 1/1     Running   0          68s
pod/speaker-nzxzf                 1/1     Running   0          68s

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE
daemonset.apps/speaker   3         3         3       3            3           beta.kubernetes.io/os=linux   68s

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/controller   1/1     1            1           68s

NAME                                    DESIRED   CURRENT   READY   AGE
replicaset.apps/controller-65895b47d4   1         1         1       68s

Configure MetalLB with a layer-2 address pool. The range should be unused addresses on the Vagrant host-only network (192.168.33.0/24):

cat >metallb-system.yaml<<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |

    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.33.80-192.168.33.90
EOF

Apply it:

kubectl apply -f metallb-system.yaml
$ kubectl apply -f metallb-system.yaml
configmap/config created

Check all the resources. The ingress-nginx service should now have an external IP assigned from the MetalLB pool:

$ kubectl get all --all-namespaces
NAMESPACE        NAME                                            READY   STATUS    RESTARTS   AGE
ingress-nginx    pod/nginx-ingress-controller-7f74f657bd-flhcn   1/1     Running   0          10m
kube-system      pod/coredns-6955765f44-lmbbz                    1/1     Running   0          36m
kube-system      pod/coredns-6955765f44-zlqvc                    1/1     Running   0          36m
kube-system      pod/etcd-k8smaster.local                        1/1     Running   0          36m
kube-system      pod/kube-apiserver-k8smaster.local              1/1     Running   0          36m
kube-system      pod/kube-controller-manager-k8smaster.local     1/1     Running   0          36m
kube-system      pod/kube-flannel-ds-amd64-25hbh                 1/1     Running   0          19m
kube-system      pod/kube-flannel-ds-amd64-5xx6n                 1/1     Running   0          32m
kube-system      pod/kube-flannel-ds-amd64-7h4s7                 1/1     Running   0          23m
kube-system      pod/kube-proxy-nrvgl                            1/1     Running   0          23m
kube-system      pod/kube-proxy-vzlkg                            1/1     Running   0          36m
kube-system      pod/kube-proxy-wnpll                            1/1     Running   0          19m
kube-system      pod/kube-scheduler-k8smaster.local              1/1     Running   0          36m
metallb-system   pod/controller-65895b47d4-ck5dt                 1/1     Running   0          2m57s
metallb-system   pod/speaker-2rsgz                               1/1     Running   0          2m57s
metallb-system   pod/speaker-nvcp5                               1/1     Running   0          2m57s
metallb-system   pod/speaker-nzxzf                               1/1     Running   0          2m57s

NAMESPACE       NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE
default         service/kubernetes      ClusterIP      10.96.0.1       <none>          443/TCP                      36m
ingress-nginx   service/ingress-nginx   LoadBalancer   10.98.218.210   192.168.33.80   80:32736/TCP,443:31415/TCP   7m52s
kube-system     service/kube-dns        ClusterIP      10.96.0.10      <none>          53/UDP,53/TCP,9153/TCP       36m

NAMESPACE        NAME                                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE
kube-system      daemonset.apps/kube-flannel-ds-amd64     3         3         3       3            3           <none>                        32m
kube-system      daemonset.apps/kube-flannel-ds-arm       0         0         0       0            0           <none>                        32m
kube-system      daemonset.apps/kube-flannel-ds-arm64     0         0         0       0            0           <none>                        32m
kube-system      daemonset.apps/kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                        32m
kube-system      daemonset.apps/kube-flannel-ds-s390x     0         0         0       0            0           <none>                        32m
kube-system      daemonset.apps/kube-proxy                3         3         3       3            3           beta.kubernetes.io/os=linux   36m
metallb-system   daemonset.apps/speaker                   3         3         3       3            3           beta.kubernetes.io/os=linux   2m58s

NAMESPACE        NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
ingress-nginx    deployment.apps/nginx-ingress-controller   1/1     1            1           10m
kube-system      deployment.apps/coredns                    2/2     2            2           36m
metallb-system   deployment.apps/controller                 1/1     1            1           2m58s

NAMESPACE        NAME                                                  DESIRED   CURRENT   READY   AGE
ingress-nginx    replicaset.apps/nginx-ingress-controller-7f74f657bd   1         1         1       10m
kube-system      replicaset.apps/coredns-6955765f44                    2         2         2       36m
metallb-system   replicaset.apps/controller-65895b47d4                 1         1         1       2m58s

Build a Docker registry

On the master node, make a directory for the Docker registry.

mkdir registory

Create a docker-compose configuration that publishes the registry on host port 5050:

cat > registory/docker-compose.yml << EOF
version: '2'
services:
  registry:
    image: registry:2
    volumes:
      - ./registry:/var/lib/registry
    ports:
      - "5050:5000"
    restart: always
EOF

Run the Docker registry:

cd registory
docker-compose up -d
$ docker-compose up -d
Creating network "registory_default" with the default driver
Pulling registry (registry:2)...
2: Pulling from library/registry
486039affc0a: Pull complete
ba51a3b098e6: Pull complete
8bb4c43d6c8e: Pull complete
6f5f453e5f2d: Pull complete
42bc10b72f42: Pull complete
Digest: sha256:7d081088e4bfd632a88e3f3bcd9e007ef44a796fddfe3261407a3f9f04abe1e7
Status: Downloaded newer image for registry:2
Creating registory_registry_1 ... done
$ docker-compose ps
        Name                      Command               State           Ports         
--------------------------------------------------------------------------------------
registory_registry_1   /entrypoint.sh /etc/docker ...   Up      0.0.0.0:5050->5000/tcp
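
To confirm the registry is serving requests, you can query its v2 catalog endpoint (this assumes curl is available on the master; the list is empty until images are pushed):

curl http://localhost:5050/v2/_catalog
# an empty registry returns: {"repositories":[]}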

Configure Docker on each worker node to trust the insecure (plain-HTTP) registry.

vagrant ssh k8snode1
sudo su
sudo cat >/etc/docker/daemon.json << EOF
{
  "insecure-registries" : ["k8smaster:5050"]
}
EOF
systemctl restart docker
vagrant ssh k8snode2
sudo su
sudo cat >/etc/docker/daemon.json << EOF
{
  "insecure-registries" : ["k8smaster:5050"]
}
EOF
systemctl restart docker
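
On each node you can confirm that Docker picked up the setting, since docker info lists the configured insecure registries:

docker info | grep -A 1 'Insecure Registries'
# should list k8smaster:5050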

NFS

Set up an NFS server on the master node to back the persistent volumes, and install the NFS client on the worker nodes.

vagrant ssh k8smaster
sudo yum install -y rpcbind nfs-utils
sudo systemctl enable rpcbind nfs-server
sudo systemctl start rpcbind nfs-server
sudo mkdir /pv
sudo mkdir /pv/es
sudo mkdir /pv/pgsql
sudo chown -R 1000:1000 /pv/es
sudo su
sudo cat >/etc/exports <<EOF
/pv  192.168.33.0/24(rw,sync,no_subtree_check,no_root_squash)
EOF
exportfs -a
vagrant ssh k8snode1
sudo yum install -y nfs-utils
vagrant ssh k8snode2
sudo yum install -y nfs-utils
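
From a worker node, the export can be verified before Kubernetes relies on it (showmount is part of nfs-utils):

showmount -e k8smaster
# expected export list: /pv 192.168.33.0/24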

WEKO3

Clone the WEKO3 source and check out the develop branch.

git clone https://github.com/RCOSDP/weko.git
cd weko
git checkout origin/develop

The following local modifications (shown as git diffs) were made before building the images:

$ git diff nginx/Dockerfile
diff --git a/nginx/Dockerfile b/nginx/Dockerfile
index f766d40..912ffa0 100644
--- a/nginx/Dockerfile
+++ b/nginx/Dockerfile
@@ -17,7 +17,7 @@
 # along with WEKO3; if not, write to the
 # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 # MA 02111-1307, USA.
-
+FROM weko_web as builder
 FROM nginx
 RUN rm /etc/nginx/conf.d/default.conf
 RUN rm /etc/nginx/nginx.conf
@@ -27,3 +27,6 @@ ADD ./keys/server.crt /etc/nginx/server.crt
 ADD ./keys/server.key /etc/nginx/server.key
 
 RUN chmod 400 /etc/nginx/server.key
+COPY --from=weko_web /home/invenio/.virtualenvs/invenio/var/instance/static  \
+    /home/invenio/.virtualenvs/invenio/var/instance/static
+
$ git diff nginx/weko.conf 
diff --git a/nginx/weko.conf b/nginx/weko.conf
index 485fd2e..7caae6b 100644
--- a/nginx/weko.conf
+++ b/nginx/weko.conf
@@ -1,5 +1,5 @@
 upstream app_server {
-  server web:5000 fail_timeout=0;
+  server 127.0.0.1:5000 fail_timeout=0;
 }
 
 server {
$ git diff Dockerfile     
diff --git a/Dockerfile b/Dockerfile
index 742ef59..40420c3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -61,6 +61,7 @@ RUN echo "source /usr/local/bin/virtualenvwrapper.sh" >> ~/.ba
 RUN echo "workon invenio" >> ~/.bashrc
 
 # Start the Weko application:
-CMD ["/bin/bash", "-c", "invenio run -h 0.0.0.0"]
+CMD ["/bin/bash", "-c", "rm -f /code/celeryd.pid; celery worker -A invenio_app.celery --loglevel=INFO -B -D && uwsgi --ini /code/scripts/uwsgi.ini"]
+#CMD ["/bin/bash", "-c", "invenio run -h 0.0.0.0"]
 # CMD ["/bin/bash", "-c", "gunicorn invenio_app.wsgi --workers=4 --worker-class
 #CMD ["/bin/bash","-c","uwsgi --ini /code/scripts/uwsgi.ini"]

Build the WEKO3 images, pull the supporting images, then tag everything for the local registry and push:

docker-compose build
docker image pull redis
docker image pull rabbitmq
docker image pull mher/flower
docker pull postgres:12
docker tag $(docker images postgres:12 -q) k8smaster:5050/postgres:k8s
docker tag $(docker images weko_nginx:latest -q) k8smaster:5050/weko_nginx:k8s
docker tag $(docker images weko_web:latest -q) k8smaster:5050/weko_web:k8s
docker tag $(docker images weko_elasticsearch:latest -q) k8smaster:5050/weko_elasticsearch:k8s
docker tag $(docker images rabbitmq:latest -q) k8smaster:5050/rabbitmq:k8s
docker tag $(docker images redis:latest -q) k8smaster:5050/redis:k8s
docker push k8smaster:5050/weko_nginx:k8s
docker push k8smaster:5050/postgres:k8s
docker push k8smaster:5050/redis:k8s
docker push k8smaster:5050/weko_elasticsearch:k8s
docker push k8smaster:5050/rabbitmq:k8s
docker push k8smaster:5050/weko_web:k8s
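
The registry catalog can be queried again to confirm the pushes succeeded:

curl http://k8smaster:5050/v2/_catalog
# should list postgres, rabbitmq, redis, weko_elasticsearch, weko_nginx and weko_web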

Kubernetes manifests

services.yaml

apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
spec:
  type: NodePort
  selector:
    app: elasticsearch
  ports:
  - name: elasticsearch-port
    protocol: TCP
    port: 9200
    targetPort: 9200
    nodePort: 31092
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-west-2:671441944253:certificate/a3144169-97cf-493e-a388-2ea4665ec8ea"
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - name: nginx-http-port
    protocol: TCP
    port: 80
    targetPort: 80
    # set the port exposed on each node (NodePort)
    nodePort: 31080
  - name: nginx-https-port
    protocol: TCP
    port: 443
    targetPort: 443
    nodePort: 31443
---
apiVersion: v1
kind: Service
metadata:
  name: postgresql
spec:
  type: NodePort
  selector:
    app: postgresql
  ports:
  - name: postgresql-port
    protocol: TCP
    port: 5432
    targetPort: 5432
    nodePort: 31054
---
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq
spec:
  type: NodePort
  selector:
    app: rabbitmq
  ports:
  - name: rabbitmq-port
    protocol: TCP
    port: 5672
    targetPort: 5672
    nodePort: 31056
---
apiVersion: v1
kind: Service
metadata:
  name: redis
spec:
  type: NodePort
  selector:
    app: redis
  ports:
  - name: redis-port
    protocol: TCP
    port: 6379
    targetPort: 6379
    nodePort: 31063

deploy.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: weko-elasticsearch
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
        - name: elasticsearch
          image: k8smaster:5050/weko_elasticsearch:k8s
          imagePullPolicy: Always
          ports:
            - containerPort: 9200
          resources: {}
          volumeMounts:
            - name: nfs
              mountPath: /usr/share/elasticsearch/data
      restartPolicy: Always
      securityContext:
        fsGroup: 1000
      volumes:
        - name: nfs
          persistentVolumeClaim:
            claimName: es-nfs-pvc
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: weko-nginx-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: k8smaster:5050/weko_nginx:k8s
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        - containerPort: 443
        resources: {}
      - name: web
        image: k8smaster:5050/weko_web:k8s
        imagePullPolicy: Always
        command: [ "/bin/bash" ]
        args: ["-c", "jinja2 /code/scripts/instance.cfg > /home/invenio/.virtualenvs/invenio/var/instance/invenio.cfg && rm -f /code/celeryd.pid; celery worker -A invenio_app.celery --loglevel=INFO -B -D && uwsgi --ini /code/scripts/uwsgi.ini"]
        envFrom:
        - configMapRef:
            name: nginx-web-configmap
        ports:
        - containerPort: 5000
        volumeMounts:
        - mountPath: /var/tmp
          name: nfs
        resources: {}
      restartPolicy: Always
      securityContext:
        fsGroup: 1000
      volumes:
        - name: nfs
          persistentVolumeClaim:
            claimName: nfs-pvc
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: weko-postgresql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgresql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
        - name: postgresql
          image: k8smaster:5050/postgres:k8s
          imagePullPolicy: Always
          ports:
            - containerPort: 5432
          resources: {}
          env:
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: postgres-sc
                  key: POSTGRES_USER
            - name: POSTGRES_DB
              valueFrom:
                secretKeyRef:
                  name: postgres-sc
                  key: POSTGRES_DB
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: postgres-sc
                  key: POSTGRES_PASSWORD
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
            - name: TZ
              value: Asia/Tokyo
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: nfs
      restartPolicy: Always
      volumes:
        - name: nfs
          persistentVolumeClaim:
            claimName: pgsql-nfs-pvc
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: weko-rabbitmq
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rabbitmq
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rabbitmq
    spec:
      containers:
      - name: rabbitmq
        image: k8smaster:5050/rabbitmq:k8s
        imagePullPolicy: Always
        ports:
        - containerPort: 5672
        resources: {}
      restartPolicy: Always
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: weko-redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: k8smaster:5050/redis:k8s
        imagePullPolicy: Always
        ports:
        - containerPort: 6379
        resources: {}
      restartPolicy: Always
status: {}

nginx-web-configmap.yaml (the INVENIO_POSTGRESQL_* values must match the credentials in postgres-secret.yaml below)

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-web-configmap
data:
  FLASK_DEBUG: "1"
  INVENIO_ELASTICSEARCH_HOST: elasticsearch
  INVENIO_POSTGRESQL_HOST: postgresql
  INVENIO_RABBITMQ_HOST: rabbitmq
  INVENIO_REDIS_HOST: redis
  INVENIO_FILES_LOCATION_NAME: local
  INVENIO_FILES_LOCATION_URI: /var/tmp
  INVENIO_POSTGRESQL_DBNAME: invenio
  INVENIO_POSTGRESQL_DBPASS: dbpass123
  INVENIO_POSTGRESQL_DBUSER: invenio
  INVENIO_ROLE_COMMUNITY: Community Administrator
  INVENIO_ROLE_CONTRIBUTOR: Contributor
  INVENIO_ROLE_REPOSITORY: Repository Administrator
  INVENIO_ROLE_SYSTEM: System Administrator
  INVENIO_USER_EMAIL: [email protected]
  INVENIO_USER_PASS: uspass123
  INVENIO_WEB_HOST: 127.0.0.1
  INVENIO_WEB_INSTANCE: invenio
  INVENIO_WEB_VENV: invenio
  INVENIO_WORKER_HOST: 127.0.0.1
  PATH: /home/invenio/.virtualenvs/invenio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
  SEARCH_INDEX_PREFIX: tenant1
  VIRTUALENVWRAPPER_PYTHON: /usr/local/bin/python

postgres-secret.yaml

apiVersion: v1
kind: Secret
metadata:
  name: postgres-sc
type: Opaque
data:
  POSTGRES_USER: aW52ZW5pbw==
  POSTGRES_DB: aW52ZW5pbw==
  POSTGRES_PASSWORD: ZGJwYXNzMTIz
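
The data values are base64-encoded; they can be generated (or decoded to verify) with coreutils:

echo -n 'invenio' | base64      # aW52ZW5pbw==
echo -n 'dbpass123' | base64    # ZGJwYXNzMTIz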

nfs-pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
  annotations:
    volume.beta.kubernetes.io/storage-class: "nfs"
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: k8smaster
    path: /pv

nfs-pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
  storageClassName: nfs

es-nfs-pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-nfs-pv
  annotations:
    volume.beta.kubernetes.io/storage-class: "nfs"
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: k8smaster
    path: /pv/es

es-nfs-pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: es-nfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
  storageClassName: nfs

pgsql-nfs-pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pgsql-nfs-pv
  annotations:
    volume.beta.kubernetes.io/storage-class: "nfs"
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: k8smaster
    path: /pv/pgsql

pgsql-nfs-pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pgsql-nfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
  storageClassName: nfs
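
Once these manifests are applied in the deploy step below, each claim should bind to its matching volume; a quick check:

kubectl get pv,pvc
# every PVC should show STATUS Bound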

Deploy

Apply all the manifests, then initialize the WEKO3 instance:

kubectl apply -f services.yaml 
kubectl apply -f nginx-web-configmap.yaml 
kubectl apply -f nfs-pvc.yaml 
kubectl apply -f nfs-pv.yaml 
kubectl apply -f es-nfs-pvc.yaml 
kubectl apply -f es-nfs-pv.yaml 
kubectl apply -f pgsql-nfs-pvc.yaml 
kubectl apply -f pgsql-nfs-pv.yaml 
kubectl apply -f postgres-secret.yaml 
kubectl apply -f deploy.yaml          
kubectl exec -it $(kubectl get pod | grep weko-nginx-web | awk '{print $1}')  -c web -- ./scripts/populate-instance.sh 
kubectl cp ./scripts/demo/item_type.sql $(kubectl get pod | grep weko-postgresql|awk '{print $1}'):/tmp/
kubectl exec $(kubectl get pod | grep weko-postgresql|awk '{print $1}') -- psql -U invenio invenio  -f /tmp/item_type.sql
kubectl exec $(kubectl get pod | grep weko-nginx-web | awk '{print $1}') -c web -- invenio workflow init action_status,Action
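
The pods can take a while to become Ready; progress can be watched with:

kubectl get pods -w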

Check the services and endpoints:

$ kubectl get svc -n weko3
NAME            TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
elasticsearch   NodePort   10.100.55.181    <none>        9200:31092/TCP               6h51m
nginx-service   NodePort   10.103.166.217   <none>        80:31080/TCP,443:31443/TCP   6h51m
postgresql      NodePort   10.107.244.11    <none>        5432:31054/TCP               6h51m
rabbitmq        NodePort   10.111.196.61    <none>        5672:31056/TCP               6h51m
redis           NodePort   10.97.29.155     <none>        6379:31063/TCP               6h51m
$ kubectl get endpoints
NAME            ENDPOINTS                      AGE
elasticsearch   10.244.1.7:9200                6h52m
nginx-service   10.244.2.8:80,10.244.2.8:443   6h52m
postgresql      10.244.2.9:5432                6h52m
rabbitmq        10.244.1.9:5672                6h52m
redis           10.244.1.8:6379                6h52m

Create an Ingress that routes the host name "weko" to nginx-service:

cat >ingress.yaml << EOF
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: weko-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: weko
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-service
          servicePort: 80
EOF
kubectl apply -f ingress.yaml
$ kubectl get ingress --all-namespaces
NAMESPACE   NAME           HOSTS   ADDRESS   PORTS   AGE
weko3       weko-ingress   weko              80      36s
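
To reach WEKO3 from a client, map the ingress host name to the external IP that MetalLB assigned (192.168.33.80 in the earlier output); the /etc/hosts entry below is an assumption about the client machine:

echo '192.168.33.80 weko' | sudo tee -a /etc/hosts
curl -I http://weko/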

To use HTTPS from the ingress through to the backend (HTTPS-to-HTTPS), change ingress.yaml as follows:

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: weko-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    #nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
  rules:
  - host: weko
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-service
          #servicePort: 80
          servicePort: 443
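
Note that the ingress-nginx controller only honors ssl-passthrough when it is started with the --enable-ssl-passthrough flag, which is not among the args in mandatory.yaml above and may need to be added. A test request must then skip certificate verification if the server.crt added in nginx/Dockerfile is self-signed (an assumption):

curl -kI https://weko/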

kubeadm reset

On the master:

sudo kubeadm reset -f
sudo systemctl stop kubelet
sudo systemctl stop docker
sudo rm -rvf /var/lib/cni/;
sudo rm -rvf /var/lib/kubelet/
sudo rm -rvf /etc/cni/  
sudo ip link delete cni0
sudo ip link delete flannel.1
sudo systemctl start kubelet
sudo systemctl start docker

On each worker:

sudo kubeadm reset -f
sudo systemctl stop kubelet
sudo systemctl stop docker
sudo rm -rvf /var/lib/cni/;
sudo rm -rvf /var/lib/kubelet/
sudo rm -rvf /etc/cni/  
sudo ip link delete cni0
sudo ip link delete flannel.1
sudo systemctl start kubelet
sudo systemctl start docker

On the Kubernetes master

git clone https://github.com/RCOSDP/weko-k8s.git

cd weko-k8s

$ kubectl get pod -o wide
NAME                                 READY   STATUS    RESTARTS   AGE   IP            NODE             NOMINATED NODE   READINESS GATES
dnsutils                             1/1     Running   0          13m   10.244.2.21   k8snode2.local   <none>           <none>
weko-elasticsearch-f696db989-57zjk   1/1     Running   0          67m   10.244.1.39   k8snode1.local   <none>           <none>
weko-nginx-web-c5549bd64-mlcrr       2/2     Running   0          12m   10.244.2.22   k8snode2.local   <none>           <none>
weko-postgresql-54cf48586f-q4k8r     1/1     Running   0          67m   10.244.1.40   k8snode1.local   <none>           <none>
weko-rabbitmq-6b5f998747-vqdbd       1/1     Running   0          67m   10.244.1.35   k8snode1.local   <none>           <none>
weko-redis-84f565f995-d2srp          1/1     Running   0          67m   10.244.1.38   k8snode1.local   <none>           <none>

Verify that cluster DNS is healthy:

$ kubectl get pods --namespace=kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
coredns-6955765f44-7rmp4                  1/1     Running   5          5d2h
coredns-6955765f44-hlqg5                  1/1     Running   5          5d2h
etcd-k8smaster.local                      1/1     Running   10         5d2h
kube-apiserver-k8smaster.local            1/1     Running   10         5d2h
kube-controller-manager-k8smaster.local   1/1     Running   18         5d2h
kube-flannel-ds-amd64-jcjwp               1/1     Running   5          5d2h
kube-flannel-ds-amd64-jgjqk               1/1     Running   11         5d2h
kube-flannel-ds-amd64-z2zxs               1/1     Running   22         5d2h
kube-proxy-4kfcb                          1/1     Running   7          5d2h
kube-proxy-ml77x                          1/1     Running   5          5d2h
kube-proxy-r79z5                          1/1     Running   6          5d2h
kube-scheduler-k8smaster.local            1/1     Running   19         5d2h
$ kubectl get svc --namespace=kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   5d2h
$ kubectl get ep kube-dns --namespace=kube-system
NAME       ENDPOINTS                                                  AGE
kube-dns   10.244.0.12:53,10.244.0.13:53,10.244.0.12:53 + 3 more...   5d2h

If pods cannot resolve external host names, upstream nameservers can be configured for kube-dns:

cat > k8s/kube-dns-conf.yaml << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
  upstreamNameservers: |
    ["8.8.8.8", "8.8.4.4"]
EOF
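
Apply it. One caveat, added here as a caution rather than from the original notes: the upstreamNameservers ConfigMap is only honored by kube-dns, while this cluster runs CoreDNS (see the coredns pods above); with CoreDNS the equivalent setting lives in the coredns ConfigMap's Corefile (the forward plugin).

kubectl apply -f k8s/kube-dns-conf.yaml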
