# DirectPV suspend

Notes on testing `kubectl directpv suspend drives`: simulate a drive failure on an OpenShift node, suspend the affected drive, and work through the SELinux relabel error that the rescheduled MinIO tenant pod then hits on RHCOS.
First, note the PCI address behind `/dev/sdb` (needed later to remove the device through sysfs) and its current mounts:

```
sh-5.1# ls -l /dev/disk/by-path/ | grep sdb
lrwxrwxrwx. 1 root root 9 Feb 26 20:35 pci-0000:af:00.0-sas-phy1-lun-0 -> ../../sdb
sh-5.1# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
loop0 7:0 0 16M 0 loop
sda 8:0 0 1.5T 0 disk
|-sda1 8:1 0 1M 0 part
|-sda2 8:2 0 127M 0 part
|-sda3 8:3 0 384M 0 part /boot
`-sda4 8:4 0 1.5T 0 part /var/lib/kubelet/pods/3ce81463-48be-4552-bc9d-4ee71bbc5f09/volume-subpaths/minio-directpv-operator-catalog-configmap-partition-1-volume/registry-grpc/0
/var/lib/kubelet/pods/7a7f2ecd-dd9e-407e-b3cd-79ad047fba1c/volume-subpaths/nginx-conf/kubevirt-console-plugin/1
/var
/sysroot/ostree/deploy/rhcos/var
/sysroot
/usr
/etc
/
sdb 8:16 0 1.5T 0 disk /var/lib/kubelet/pods/b68cfdaf-0e6c-4368-b059-fd948506ed33/volumes/kubernetes.io~csi/pvc-1c2c51cc-829a-4301-ab81-d0b44c88e957/mount
/var/lib/kubelet/plugins/kubernetes.io/csi/directpv-min-io/28e520d457f0651c34577be97984711e0031ba6593296a7f4cc57b7539e4e1cb/globalmount
/var/lib/directpv/mnt/ab6c68e2-56e8-4961-a5ef-75055c9a13ae
sdc 8:32 0 1.5T 0 disk /var/lib/directpv/mnt/d0414d28-8df8-495e-b537-01d74454b894
sdd 8:48 0 1.5T 0 disk /var/lib/kubelet/pods/b68cfdaf-0e6c-4368-b059-fd948506ed33/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount
/var/lib/kubelet/plugins/kubernetes.io/csi/directpv-min-io/6ea5bf8970588d7bc5c37b5a56669164fa6a4d1b768561d840b1fb92cf00bee5/globalmount
/var/lib/directpv/mnt/1b5e7dbb-b204-41c6-9f44-82854da539c8
sde 8:64 0 1.5T 0 disk /var/lib/directpv/mnt/546fc458-ef80-41f9-823c-0d5f5e6dcf2b
sdf 8:80 0 1.5T 0 disk /var/lib/directpv/mnt/f34ee3a1-59ff-411b-9f80-bd5baa05f41e
nvme2n1 259:0 0 931.5G 0 disk /var/lib/directpv/mnt/8027020d-45c6-4c5d-a5f9-568357a67e43
nvme1n1 259:1 0 931.5G 0 disk /var/lib/directpv/mnt/73b23e52-bebf-4990-8d65-09810a78c6b2
nvme0n1 259:2 0 476.9G 0 disk /var/lib/directpv/mnt/922ff8c9-a119-4ca6-896c-eda2110ed1db
```
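`/dev/sdb` is mounted in three places: the pod volume, the CSI `globalmount`, and the DirectPV staging mount under `/var/lib/directpv/mnt`. A small loop can release them all instead of repeating `umount` by hand; a sketch, assuming a `lsblk` new enough to have the `MOUNTPOINTS` column (as in the output above):

```sh
# Lazy-unmount every mountpoint lsblk reports for the device.
lsblk -no MOUNTPOINTS /dev/sdb | grep -v '^$' | while read -r mp; do
  umount -l "$mp"
done
```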
Here the device was lazily unmounted by name until `umount` reported it no longer mounted:

```
sh-5.1# umount -l /dev/sdb
sh-5.1# umount -l /dev/sdb
sh-5.1# umount -l /dev/sdb
sh-5.1# umount -l /dev/sdb
umount: /dev/sdb: not mounted.
sh-5.1# umount -l /dev/sdb
umount: /dev/sdb: not mounted.
```
A second `lsblk` confirms `/dev/sdb` has no mountpoints left:

```
sh-5.1# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
loop0 7:0 0 16M 0 loop
sda 8:0 0 1.5T 0 disk
|-sda1 8:1 0 1M 0 part
|-sda2 8:2 0 127M 0 part
|-sda3 8:3 0 384M 0 part /boot
`-sda4 8:4 0 1.5T 0 part /var/lib/kubelet/pods/3ce81463-48be-4552-bc9d-4ee71bbc5f09/volume-subpaths/minio-directpv-operator-catalog-configmap-partition-1-volume/registry-grpc/0
/var/lib/kubelet/pods/7a7f2ecd-dd9e-407e-b3cd-79ad047fba1c/volume-subpaths/nginx-conf/kubevirt-console-plugin/1
/var
/sysroot/ostree/deploy/rhcos/var
/sysroot
/usr
/etc
/
sdb 8:16 0 1.5T 0 disk
sdc 8:32 0 1.5T 0 disk /var/lib/directpv/mnt/d0414d28-8df8-495e-b537-01d74454b894
sdd 8:48 0 1.5T 0 disk /var/lib/kubelet/pods/b68cfdaf-0e6c-4368-b059-fd948506ed33/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount
/var/lib/kubelet/plugins/kubernetes.io/csi/directpv-min-io/6ea5bf8970588d7bc5c37b5a56669164fa6a4d1b768561d840b1fb92cf00bee5/globalmount
/var/lib/directpv/mnt/1b5e7dbb-b204-41c6-9f44-82854da539c8
sde 8:64 0 1.5T 0 disk /var/lib/directpv/mnt/546fc458-ef80-41f9-823c-0d5f5e6dcf2b
sdf 8:80 0 1.5T 0 disk /var/lib/directpv/mnt/f34ee3a1-59ff-411b-9f80-bd5baa05f41e
nvme2n1 259:0 0 931.5G 0 disk /var/lib/directpv/mnt/8027020d-45c6-4c5d-a5f9-568357a67e43
nvme1n1 259:1 0 931.5G 0 disk /var/lib/directpv/mnt/73b23e52-bebf-4990-8d65-09810a78c6b2
nvme0n1 259:2 0 476.9G 0 disk /var/lib/directpv/mnt/922ff8c9-a119-4ca6-896c-eda2110ed1db
```
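`findmnt` gives the same confirmation programmatically; an empty result (non-zero exit) means nothing on the device is still mounted:

```sh
# Lists all mounts whose source is /dev/sdb; exits non-zero if there are none.
findmnt -S /dev/sdb
```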
With the mounts released, simulate a drive failure by detaching the device from the kernel via the PCI address found earlier:

```
echo 1 > /sys/bus/pci/devices/0000:af:00.0/remove
```
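To bring the device back afterwards, a PCI bus rescan re-enumerates it (standard sysfs interface; not run in these notes):

```sh
# Ask the kernel to rescan the PCI bus and rediscover removed devices.
echo 1 > /sys/bus/pci/rescan
```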
After the device removal, node `27` shows `NotReady`:

```
$ oc get nodes
NAME STATUS ROLES AGE VERSION
25 Ready control-plane,master,worker 30d v1.27.9+e36e183
26 Ready control-plane,master,worker 30d v1.27.9+e36e183
27 NotReady control-plane,master,worker 30d v1.27.9+e36e183
```
The tenant pods are spread across the three nodes; `myminio-pool-0-1` is the one on node `27`:

```
$ oc get pods -n tenant-certmanager -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
myminio-pool-0-0 2/2 Running 0 26h 10.128.0.203 25 <none> <none>
myminio-pool-0-1 2/2 Running 0 39h 10.130.0.149 27 <none> <none>
myminio-pool-0-2 2/2 Running 0 39h 10.129.1.167 26 <none> <none>
```
Delete that pod so it gets recreated:

```
$ k delete pod myminio-pool-0-1 -n tenant-certmanager
pod "myminio-pool-0-1" deleted
The DirectPV volumes on node `27`; the two 1 TiB `Bounded` volumes belong to `myminio-pool-0-1`:

```
$ kubectl directpv list volumes --nodes=27
┌──────────────────────────────────────────┬──────────┬──────┬───────┬──────────────────┬────────────────────┬─────────┐
│ VOLUME │ CAPACITY │ NODE │ DRIVE │ PODNAME │ PODNAMESPACE │ STATUS │
├──────────────────────────────────────────┼──────────┼──────┼───────┼──────────────────┼────────────────────┼─────────┤
│ pvc-2dc31d7b-6df7-4b5f-8e4d-f93b11006208 │ 10 GiB │ 27 │ sde │ console-1 │ console │ Ready │
│ pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f │ 1.0 TiB │ 27 │ sdb │ myminio-pool-0-1 │ tenant-certmanager │ Bounded │
│ pvc-1c2c51cc-829a-4301-ab81-d0b44c88e957 │ 1.0 TiB │ 27 │ sdc │ myminio-pool-0-1 │ tenant-certmanager │ Bounded │
│ pvc-5323a38e-301f-4935-b16d-11d6d2fe2b5d │ 2.0 GiB │ 27 │ sda │ myminio-pool-0-0 │ tenant-lite │ Ready │
│ pvc-3d133806-8fcf-45a5-9901-66994b13ce2d │ 2.0 GiB │ 27 │ sdf │ myminio-pool-0-0 │ tenant-lite │ Ready │
└──────────────────────────────────────────┴──────────┴──────┴───────┴──────────────────┴────────────────────┴─────────┘
```
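The listing can also be narrowed to the affected pod instead of a whole node; the flag names here are from the DirectPV CLI help and should be treated as an assumption:

```sh
# List only the volumes attached to the failing pod.
kubectl directpv list volumes --pod-names=myminio-pool-0-1 --pod-namespaces=tenant-certmanager
```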
On the drive side, `sdb` and `sdc` on node `27` are now in `Error` status:

```
$ kubectl directpv list drives --nodes=27
┌──────┬─────────┬───────────────────────────┬─────────┬─────────┬─────────┬────────┐
│ NODE │ NAME │ MAKE │ SIZE │ FREE │ VOLUMES │ STATUS │
├──────┼─────────┼───────────────────────────┼─────────┼─────────┼─────────┼────────┤
│ 27 │ sdb │ SEAGATE XS1600LE70084 │ 1.5 TiB │ 466 GiB │ 1 │ Error │
│ 27 │ sdc │ SEAGATE XS1600LE70084 │ 1.5 TiB │ 466 GiB │ 1 │ Error │
│ 27 │ sde │ SEAGATE XS1600LE70084 │ 1.5 TiB │ 1.4 TiB │ 1 │ Ready │
│ 27 │ sda │ SEAGATE XS1600LE70084 │ 1.5 TiB │ 1.5 TiB │ 1 │ Ready │
│ 27 │ sdf │ SEAGATE XS1600LE70084 │ 1.5 TiB │ 1.5 TiB │ 1 │ Ready │
│ 27 │ nvme0n1 │ Micron_2200_MTFDHBA512TCK │ 477 GiB │ 477 GiB │ - │ Ready │
│ 27 │ nvme1n1 │ INTEL SSDPE2KX010T8 │ 932 GiB │ 931 GiB │ - │ Ready │
│ 27 │ nvme2n1 │ INTEL SSDPE2KX010T8 │ 932 GiB │ 931 GiB │ - │ Ready │
└──────┴─────────┴───────────────────────────┴─────────┴─────────┴─────────┴────────┘
```
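If only the failed drives are of interest, the list can likely be filtered by status as well (treat this flag as an assumption from the CLI help):

```sh
# Show only drives currently in error state on node 27.
kubectl directpv list drives --nodes=27 --status=error
```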
The wide output adds the DRIVE ID, which `suspend drives` takes as its argument:

```
$ kubectl directpv list volumes --nodes=27 --pvc --output wide
┌──────────────────────────────────────────┬──────────┬──────┬───────┬──────────────────┬────────────────────┬─────────┬──────────────────────────────────────┬────────────────────────┐
│ VOLUME │ CAPACITY │ NODE │ DRIVE │ PODNAME │ PODNAMESPACE │ STATUS │ DRIVE ID │ PVC │
├──────────────────────────────────────────┼──────────┼──────┼───────┼──────────────────┼────────────────────┼─────────┼──────────────────────────────────────┼────────────────────────┤
│ pvc-2dc31d7b-6df7-4b5f-8e4d-f93b11006208 │ 10 GiB │ 27 │ sde │ console-1 │ console │ Ready │ 546fc458-ef80-41f9-823c-0d5f5e6dcf2b │ console-data-console-1 │
│ pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f │ 1.0 TiB │ 27 │ sdb │ myminio-pool-0-1 │ tenant-certmanager │ Bounded │ 1b5e7dbb-b204-41c6-9f44-82854da539c8 │ 0-myminio-pool-0-1 │
│ pvc-1c2c51cc-829a-4301-ab81-d0b44c88e957 │ 1.0 TiB │ 27 │ sdc │ myminio-pool-0-1 │ tenant-certmanager │ Bounded │ ab6c68e2-56e8-4961-a5ef-75055c9a13ae │ 1-myminio-pool-0-1 │
│ pvc-5323a38e-301f-4935-b16d-11d6d2fe2b5d │ 2.0 GiB │ 27 │ sda │ myminio-pool-0-0 │ tenant-lite │ Ready │ d0414d28-8df8-495e-b537-01d74454b894 │ data0-myminio-pool-0-0 │
│ pvc-3d133806-8fcf-45a5-9901-66994b13ce2d │ 2.0 GiB │ 27 │ sdf │ myminio-pool-0-0 │ tenant-lite │ Ready │ f34ee3a1-59ff-411b-9f80-bd5baa05f41e │ data1-myminio-pool-0-0 │
└──────────────────────────────────────────┴──────────┴──────┴───────┴──────────────────┴────────────────────┴─────────┴──────────────────────────────────────┴────────────────────────┘
```
Suspend the drive by its DRIVE ID; the `--dangerous` flag acknowledges that volumes on a suspended drive become read-only:

```
$ kubectl directpv suspend drives 1b5e7dbb-b204-41c6-9f44-82854da539c8 --dangerous
Drive 27/sdb suspended
```
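The inverse operation, once the drive is healthy again, is `resume` (not run in these notes; assuming it mirrors the argument form of `suspend`):

```sh
# Put the drive back into service so its volumes mount read-write again.
kubectl directpv resume drives 1b5e7dbb-b204-41c6-9f44-82854da539c8
```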
The recreated pod is scheduled back onto node `27` but cannot start:

```
$ k get pods -n tenant-certmanager
NAME READY STATUS RESTARTS AGE
myminio-pool-0-0 2/2 Running 0 12s
myminio-pool-0-1 1/2 CreateContainerError 0 11s
myminio-pool-0-2 2/2 Running 0 11s
```
The pod events show why: the kubelet fails to relabel the volume for SELinux because the suspended volume is a read-only filesystem:

```
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 40s default-scheduler Successfully assigned tenant-certmanager/myminio-pool-0-1 to 27
Normal AddedInterface 39s multus Add eth0 [10.130.0.22/23] from openshift-sdn
Normal Pulled 39s kubelet Container image "quay.io/minio/operator@sha256:0a5688b6ac83800d61c32b3f8a19913278d9322ed8974f4e6b444074ecf3d3ee" already present on machine
Normal Created 39s kubelet Created container validate-arguments
Normal Started 38s kubelet Started container validate-arguments
Normal Pulled 38s kubelet Container image "quay.io/minio/operator@sha256:0a5688b6ac83800d61c32b3f8a19913278d9322ed8974f4e6b444074ecf3d3ee" already present on machine
Normal Created 38s kubelet Created container sidecar
Normal Started 38s kubelet Started container sidecar
Normal Pulled 11s (x5 over 38s) kubelet Container image "quay.io/minio/minio:RELEASE.2024-03-05T04-48-44Z" already present on machine
Warning Failed 11s (x5 over 38s) kubelet Error: relabel failed /var/lib/kubelet/pods/462a36c8-c608-41db-8d48-b02da82c8ffd/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount: lsetxattr /var/lib/kubelet/pods/462a36c8-c608-41db-8d48-b02da82c8ffd/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount: read-only file system
```
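The `lsetxattr` call is the kubelet applying the container's SELinux context to the CSI mount. The read-only mount and its current label can be inspected directly (paths taken from the event above):

```sh
# Mount options of the CSI volume; expect 'ro' while the drive is suspended.
findmnt /var/lib/kubelet/pods/462a36c8-c608-41db-8d48-b02da82c8ffd/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount

# SELinux label currently on the mountpoint.
ls -Zd /var/lib/kubelet/pods/462a36c8-c608-41db-8d48-b02da82c8ffd/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount
```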
The node runs SELinux in enforcing mode:

```
sh-5.1# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
# See also:
# https://docs.fedoraproject.org/en-US/quick-docs/getting-started-with-selinux/#getting-started-with-selinux-selinux-states-and-modes
#
# NOTE: In earlier Fedora kernel builds, SELINUX=disabled would also
# fully disable SELinux during boot. If you need a system with SELinux
# fully disabled instead of SELinux running with no policy loaded, you
# need to pass selinux=0 to the kernel command line. You can use grubby
# to persistently set the bootloader to boot with selinux=0:
#
# grubby --update-kernel ALL --args selinux=0
#
# To revert back to SELinux enabled:
#
# grubby --update-kernel ALL --remove-args selinux
#
SELINUX=enforcing
# SELINUXTYPE= can take one of these three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
```
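The effective mode can be checked and changed at runtime with the standard tools; `setenforce 0` only switches to permissive until the next reboot:

```sh
getenforce      # prints Enforcing, Permissive, or Disabled
setenforce 0    # permissive for the current boot only
```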
For reference, `semanage.conf` on the node (stock content):

```
sh-5.1# cat /etc/selinux/semanage.conf
# Authors: Jason Tang <tang@tresys.com>
#
# Copyright (C) 2004-2005 Tresys Technology, LLC
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Specify how libsemanage will interact with a SELinux policy manager.
# The four options are:
#
# "source" - libsemanage manipulates a source SELinux policy
# "direct" - libsemanage will write directly to a module store.
# /foo/bar - Write by way of a policy management server, whose
# named socket is at /foo/bar. The path must begin
# with a '/'.
# foo.com:4242 - Establish a TCP connection to a remote policy
# management server at foo.com. If there is a colon
# then the remainder is interpreted as a port number;
# otherwise default to port 4242.
module-store = direct
# When generating the final linked and expanded policy, by default
# semanage will set the policy version to POLICYDB_VERSION_MAX, as
# given in <sepol/policydb.h>. Change this setting if a different
# version is necessary.
#policy-version = 19
# expand-check check neverallow rules when executing all semanage
# commands. There might be a penalty in execution time if this
# option is enabled.
expand-check=0
# usepasswd check tells semanage to scan all pass word records for home directories
# and setup the labeling correctly. If this is turned off, SELinux will label only /home
# and home directories of users with SELinux login mappings defined, see
# semanage login -l for the list of such users.
# If you want to use a different home directory, you will need to use semanage fcontext command.
# For example, if you had home dirs in /althome directory you would have to execute
# semanage fcontext -a -e /home /althome
usepasswd=False
bzip-small=true
bzip-blocksize=5
ignoredirs=/root;/bin;/boot;/dev;/etc;/lib;/lib64;/proc;/run;/sbin;/sys;/tmp;/usr;/var
optimize-policy=true
[sefcontext_compile]
path = /usr/sbin/sefcontext_compile
args = -r $@
[end]
store-root=/etc/selinux
```
Attempts to fix the labels with `restorecon` (`-R` recurses, `-e` excludes a directory, `-n` reports without changing anything):

```
restorecon -R /var/lib/kubelet/pods/462a36c8-c608-41db-8d48-b02da82c8ffd/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount
restorecon -e /var/lib/kubelet/pods
restorecon -n /var/lib/kubelet/pods
```
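For a persistent rule rather than a one-off relabel, the usual pattern is `semanage fcontext` followed by `restorecon`; a sketch, assuming `container_file_t` is the desired type for the DirectPV mounts:

```sh
# Label everything under /var/lib/directpv/mnt as container-writable content.
semanage fcontext -a -t container_file_t '/var/lib/directpv/mnt(/.*)?'
restorecon -Rv /var/lib/directpv/mnt
```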
Another attempt: bind-mount the DirectPV temporary directory over the volume with an explicit `context=` option, so the whole mount carries a fixed label and no per-file relabeling is needed:

```
sh-5.1# sudo mount --bind -o context="system_u:object_r:local_t:s0" /var/lib/directpv/tmp /var/lib/kubelet/pods/f4eb93b6-f553-48de-983a-187ee33c6170/volumes/kubernetes.io~csi/pvc-ff938628-7478-495c-bfa0-5c4b2ffd3a7f/mount
```
A related report of SELinux relabeling failing on container volumes: https://github.com/bottlerocket-os/bottlerocket/issues/2656

In the end, the workaround applied here was to change SELinux from `enforcing` to `disabled` in the `/etc/selinux/config` shown above.
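A minimal way to make that change and apply it (assuming a direct edit of the file; on OpenShift/RHCOS a MachineConfig would be the supported route):

```sh
# Switch SELinux to disabled; takes effect after a reboot.
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
reboot
```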