Cluster Node Update Notes - tum-t38/firefly GitHub Wiki

# dsmc rest -subdir=yes /home/flo/Projects/hsp90/NMC/closed/yfr1_ff14SB/free /home/flo/Projects/hsp90/NMC/closed/yfr1_ff14SB/rest/

sudo -E --preserve-env=PYTHONPATH python3 -m pip install --no-deps peakutils -t /apps/python-3.7.3/peakutils/1.3.2

sudo apt-get update
sudo apt-get -y upgrade
sudo unattended-upgrades
echo 'DPkg::options { "--force-confnew"; };' | sudo tee -a /etc/apt/apt.conf.d/local
sudo do-release-upgrade -f DistUpgradeViewNonInteractive
sudo apt-get install csh htop bison libssl-dev libacl1-dev sqlite sqlite3 libsqlite3-dev pkg-config python-dev liburcu-dev autotools-dev automake libtool libxml2-dev attr nfs-common portmap nis xorg build-essential lmod hwloc libgmp-dev libmpc-dev zip m4 zip libgmp-dev libmpc-dev flex zlib1g-dev libbz2-dev ntp csh bc nethogs sysstat p7zip-full sysstat libc6-dev libtachyon-mt-0 libtachyon-mt-0-dev libnetcdf-dev freeglut3-dev libglew-dev libfreetype6-dev glew-utils libssl-dev texinfo tk-dev mesa-utils libssh2-1-dev libncurses-dev
sudo vim /etc/modprobe.d/blacklist.conf
blacklist amd76x_edac #this might not be required for x86 32 bit users.
blacklist vga16fb
blacklist nouveau
blacklist rivafb
blacklist nvidiafb
blacklist rivatv

sudo update-initramfs -u
## FOR UPGRADE PROBLEMS WITH NVIDIA
#!!!!
# Needed after a kernel upgrade!!!
#!!!!
sudo /usr/bin/nvidia-uninstall
https://www.x.org/wiki/NVIDIAProprietaryDriver/

<install new driver>
sudo update-initramfs -u
<restart>

##

cd /etc/init.d && mv screen-cleanup.dpkg-new screen-cleanup

sudo vi /etc/hosts
sudo vi /etc/hosts.allow
sudo vi /etc/hosts.deny

sudo mkdir /etc/munge
sudo scp root@cow:/etc/munge/munge.key /etc/munge
# Fixup for munge (needed by slurm)
sudo chmod g-w /var/{lib,run,log}
sudo mkdir -p /var/{lib,run,log}/munge
sudo apt-get install munge

sudo vi /etc/passwd
sudo vi /etc/shadow
sudo vi /etc/group

munge:x:64029:64029::/nonexistent:/bin/false

slurm:x:64030:64030::/nonexistent:/bin/false

slurm:*:17227:0:99999:7:::

munge:*:17227:0:99999:7:::

sudo chown -R munge:munge /var/{lib,run,log}/munge
sudo chown -R munge:munge /etc/munge
sudo apt-get install munge

echo 'd /var/run/munge 0755 munge munge -' | sudo tee /usr/lib/tmpfiles.d/munge.conf
sudo systemctl edit --system --full munge
# add --syslog to command line <- NOT SURE NOW AFTER UBUNTU UPGRADE # NOOOOO!!
sudo systemctl enable munge.service
sudo service munge restart
sudo service nis restart
sudo service rpcbind restart
sudo systemctl enable nis.service
sudo systemctl enable rpcbind.service
sudo systemctl add-wants multi-user.target rpcbind.service

sudo vi /etc/yp.conf
cd /var/yp && sudo make

# domain t38.physik.tu-muenchen.de server 10.152.219.101

Ubuntu 20.04:
	/etc/default/nis
	YPBINDARGS="-no-dbus -broadcast"
(cat <<EOF
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.

nvidia-smi -pm 1

exit 0
EOF
      ) | sudo tee /etc/rc.local > /dev/null

sudo chmod +x /etc/rc.local
sudo mkdir -p /apps /work /cudas /scratch
sudo vi /etc/fstab
sudo mount -a

cd /cudas/apps/build/glusterfs/3.8.8/build-cuda-node[001-004]

#
# if upgrading glusterfs, stop all processes with
#  sudo killall glusterfs glusterfsd glusterd
# then
#  sudo systemctl stop glusterd
#  sudo systemctl disable glusterd
#

VER=3.13.2
VER=3.8.8
VER=4.1.4
cd /tmp
mkdir -p glusterfs-build-$VER
cd glusterfs-build-$VER
module purge

/apps/build/glusterfs/${VER}/glusterfs-${VER}/configure --prefix=/opt/glusterfs/${VER} AM_XML_PATH=/usr/share/aclocal
make
sudo make install
sudo systemctl enable glusterd
sudo systemctl start glusterd
sudo mkdir -p /gluster-brick/scratch
sudo mkdir -p /opt/glusterfs/latest/var/log/
sudo mount -a

# on other cluster peer:
sudo gluster peer probe cuda-nodeXXX
sudo gluster volume add-brick gluster-scratch cuda-nodeXXX:/gluster-brick/scratch force

sudo mount -a
sudo mkdir -p /var/log/slurm/ && sudo chown -R slurm /var/log/slurm/
sudo mkdir -p /var/run/slurm/ && sudo chown -R slurm /var/run/slurm/
sudo mkdir -p /var/lib/slurm/ && sudo chown -R slurm /var/lib/slurm/

sudo scp jcoles@cuda-node016:/etc/init.d/slurmd /etc/init.d
sudo systemctl enable slurmd.service
sudo systemctl unmask slurmd.service
sudo systemctl start slurmd.service
sudo systemctl status slurmd.service
##
## fix stalling ssh login
## https://github.com/systemd/systemd/issues/7074
##
sudo vi /lib/systemd/system/systemd-logind.service
Comment out -> #IPAddressDeny=any

sudo sed -i 's/^IPAddressDeny/#IPAddressDeny/' /lib/systemd/system/systemd-logind.service
sudo systemctl daemon-reload
sudo systemctl restart systemd-logind


##
## Fix fstab, then...
##

sudo mount -a
cd /etc/profile.d && sudo ln -s /apps/lmod/lmod/init/cshrc z00_lmod.csh
cd /etc/profile.d && sudo ln -s /apps/lmod/lmod/init/profile z00_lmod.sh
echo "module load GnuEnv" | sudo tee /etc/profile.d/z01_lmod.sh > /dev/null
##
## Fix Lmod lua library problem
##

sudo rm -f /etc/profile.d/lmod.sh
sudo ln -s /usr/lib/x86_64-linux-gnu/lua/5.2/posix_c.so /usr/lib/x86_64-linux-gnu/lua/5.2/posix.so
(export VER=4.1.4 ; cat <<EOF
#!/bin/bash
export PATH=/opt/glusterfs/$VER/sbin:/opt/glusterfs/$VER/bin:\$PATH
export LD_LIBRARY_PATH=/opt/glusterfs/$VER/lib:\$LD_LIBRARY_PATH
EOF
      ) | sudo tee /etc/profile.d/gluster.sh > /dev/null
sudo vi /etc/ntp.conf
pool ntp1.lrz.de
pool ntp2.lrz.de
sudo systemctl restart ntp
--- /etc/default/ipmievd        2013-04-24 16:43:05.669776319 +0200
+++ /etc/default/ipmievd.dpkg-new       2014-06-01 08:41:32.000000000 +0200
@@ -1 +1,11 @@
-ENABLED=false
###########
# Adding new disks to LVM
###########

sudo gparted  # -> Format disk for LVM on /dev/sdX
sudo pvcreate /dev/sdX
sudo pvdisplay
sudo vgextend cow_home /dev/sdX
sudo pvdisplay
sudo lvextend -l +100%FREE /dev/cow_home/home
sudo xfs_growfs /home
sudo find /{var,run,etc} -user 105 -exec chown -h 64029 {} \;
sudo find /{var,run,etc} -group 114 -exec chgrp -h 64029 {} \;

sudo find /{var,run,etc} -user 112 -exec chown -h 64030 {} \;
sudo find /{var,run,etc} -group 114 -exec chgrp -h 64030 {} \;

-alias l='ls -lah --color'

-alias quota='xfs_quota -x -c "quota -h"'

MEGARAID
—————

# Clear all foreign flags
sudo storcli64 /c0 /fall delete

# Setup only unconfigured good drives as raid 0 to be ready for ZFS
storcli64 -CfgEachDskRaid0 WB RA Direct CachedBadBBU -a0
REPLACING FAILED DISK in ZFS
——————————————————————-
Mark as offline:
zpool offline <pool> <dev>
REPLACE DISK
zpool replace <pool> <dev>
WILL AUTO START RESILVER
storcli /c0 restart
sudo zpool import -d /dev/disk/by-id -aN
Switch speed change:

config
interface TFGigabitEthernet 0/29
port speed-mode 10G
Updates to DNS zones:
Changes can't be made to original zone files because of dynamic updates. Need to dump journal first then change that file:
sudo rndc stop
Edit /var/cache/bind/... zone files
sudo systemctl restart bind9

10.152.215.xxx

wikijs
------
Enable access to port 80:
sudo apt-get install libcap2-bin
sudo setcap cap_net_bind_service=+ep "$(readlink -f "$(which node)")"
⚠ **GitHub.com Fallback** ⚠