Three-node Motr, S3, and Hare deployment on VMs using the mini provisioner (WIP for full stack)

Exchange SSH keys on node 1 (primary)

VM2_FQDN=<node 2 hostname>
VM3_FQDN=<node 3 hostname>
ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa <<<y >/dev/null 2>&1
ssh-copy-id -o "StrictHostKeyChecking=no" $VM2_FQDN
ssh-copy-id -o "StrictHostKeyChecking=no" $VM3_FQDN
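
A quick check that passwordless SSH actually works before moving on (assuming the FQDN variables above are still set):

# BatchMode makes ssh fail instead of prompting if key auth is broken
ssh -o BatchMode=yes $VM2_FQDN hostname
ssh -o BatchMode=yes $VM3_FQDN hostname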

Exchange SSH keys on node 2 (secondary 1)

VM1_FQDN=<node 1 hostname>
VM3_FQDN=<node 3 hostname>
ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa <<<y >/dev/null 2>&1
ssh-copy-id -o "StrictHostKeyChecking=no" $VM1_FQDN
ssh-copy-id -o "StrictHostKeyChecking=no" $VM3_FQDN

Exchange SSH keys on node 3 (secondary 2)

VM1_FQDN=<node 1 hostname>
VM2_FQDN=<node 2 hostname>
ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa <<<y >/dev/null 2>&1
ssh-copy-id -o "StrictHostKeyChecking=no" $VM1_FQDN
ssh-copy-id -o "StrictHostKeyChecking=no" $VM2_FQDN

Add the repos on all nodes

REPO_URL=http://cortx-storage.colo.seagate.com/releases/cortx/github/integration-custom-ci/release/centos-7.8.2003/custom-build-636
yum-config-manager --add-repo=$REPO_URL/cortx_iso/
yum-config-manager --add-repo=$REPO_URL/3rd_party/
yum-config-manager --add-repo=$REPO_URL/3rd_party/lustre/custom/tcp/
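
To sanity-check that all three repos registered (yum-config-manager writes one .repo file per URL under /etc/yum.repos.d/), something like:

yum clean all
yum repolist enabled | grep -i cortx-storage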

Install RPMs on all nodes

yum install -y consul --nogpgcheck
yum install -y cortx-motr --nogpgcheck
yum install -y cortx-hare --nogpgcheck
yum install -y cortx-py-utils --nogpgcheck
yum localinstall -y https://bintray.com/rabbitmq-erlang/rpm/download_file?file_path=erlang%2F23%2Fel%2F7%2Fx86_64%2Ferlang-23.1.5-1.el7.x86_64.rpm
curl -s https://packagecloud.io/install/repositories/rabbitmq/rabbitmq-server/script.rpm.sh | sudo bash
yum install -y rabbitmq-server
yum install -y haproxy --nogpgcheck
yum install -y openldap-servers openldap-clients
yum install -y cortx-s3server --nogpgcheck
yum install -y cortx-s3iamcli --nogpgcheck
yum install -y gcc
yum install -y python3-devel
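
Optionally confirm the installed package set and build versions before continuing:

rpm -qa | grep -E 'cortx|consul|haproxy|rabbitmq' | sort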

Install Python modules on all nodes

pip3 install aiohttp==3.6.1
pip3 install elasticsearch-dsl==6.4.0
pip3 install python-consul==1.1.0
pip3 install schematics==2.1.0
pip3 install toml==0.10.0
pip3 install cryptography==2.8
pip3 install PyYAML==5.1.2
pip3 install configparser==4.0.2
pip3 install networkx==2.4
pip3 install numpy==1.19.5
pip3 install matplotlib==3.1.3
pip3 install argparse==1.4.0
pip3 install confluent-kafka==1.5.0
pip3 install python-crontab==2.5.1
pip3 install elasticsearch==6.8.1
pip3 install paramiko==2.7.1
pip3 install pyldap
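
The pinned versions can be verified in one pass:

pip3 freeze | grep -i -E 'aiohttp|elasticsearch|consul|schematics|toml|cryptography|pyyaml|networkx|numpy|matplotlib|kafka|crontab|paramiko|pyldap'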

Create the confstore JSON (run on the primary node only)

rm -f /root/provisioner_cluster.json

######node-1
rm -f /etc/machine-id /var/lib/dbus/machine-id
dbus-uuidgen --ensure=/etc/machine-id
dbus-uuidgen --ensure
systemctl status network
cat /etc/machine-id
MACHINEID=`cat /etc/machine-id`
conf json:///root/provisioner_cluster.json set "cluster>server_nodes>$MACHINEID=srvnode-1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>machine_id=$MACHINEID"

HOSTNAME=`hostname`
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>hostname=$HOSTNAME"

conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>node_type=VM"

#EDIT HERE
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>storage>metadata_devices[0]=/dev/sdb"

#EDIT HERE
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>storage>data_devices[0]=/dev/sdc"
#conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>storage>data_devices[1]=/dev/sdd"
#EDIT HERE

conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>public_interfaces[0]=eth1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>public_interfaces[1]=eth2"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>private_interfaces[0]=eth3"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>private_interfaces[1]=eth4"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>interface_type=tcp"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>transport_type=lnet"

GENERATEDKEY=`s3cipher generate_key --const_key openldap`
echo $GENERATEDKEY

ENCPW=`s3cipher encrypt --data 'seagate' --key $GENERATEDKEY`
echo $ENCPW

CLUSTERID=`conf yaml:///opt/seagate/cortx/s3/s3backgrounddelete/s3_cluster.yaml get 'cluster_config>cluster_id'|cut -d '"' -f 2`
echo $CLUSTERID

PBIP=`ip addr show eth1|grep "inet "|awk '{print $2}'|cut -d '/' -f 1`
echo $PBIP

PRIP=`ip addr show eth3|grep "inet "|awk '{print $2}'|cut -d '/' -f 1`
echo $PRIP

conf json:///root/provisioner_cluster.json set "cluster>cluster_id=$CLUSTERID"
conf json:///root/provisioner_cluster.json set "cluster>mgmt_vip=127.0.0.1"
conf json:///root/provisioner_cluster.json set "cluster>cluster_ip=127.0.0.1"
conf json:///root/provisioner_cluster.json set "cluster>dns_servers[0]=8.8.8.8"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>public_ip=$PBIP"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>private_ip=$PRIP"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>netmask=255.255.255.0"
#EDIT HERE if needed: the gateway value below mirrors the netmask and looks like a placeholder
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>gateway=255.255.255.0"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>network>data>roaming_ip=127.0.0.1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-1>s3_instances=1"

conf json:///root/provisioner_cluster.json set "openldap>root>user=admin"
conf json:///root/provisioner_cluster.json set "openldap>root>secret=$ENCPW"
conf json:///root/provisioner_cluster.json set "openldap>sgiam>user=sgiamadmin"
conf json:///root/provisioner_cluster.json set "openldap>sgiam>secret=$ENCPW"

######node-2
ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'rm -f /etc/machine-id /var/lib/dbus/machine-id'
ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'dbus-uuidgen --ensure=/etc/machine-id'
ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'dbus-uuidgen --ensure'
ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'systemctl status network'
ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'cat /etc/machine-id'
MACHINEID=`ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'cat /etc/machine-id'`
conf json:///root/provisioner_cluster.json set "cluster>server_nodes>$MACHINEID=srvnode-2"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>machine_id=$MACHINEID"

HOSTNAME=`ssh -o "StrictHostKeyChecking=no" $VM2_FQDN 'hostname'`
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>hostname=$HOSTNAME"

conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>node_type=VM"

#EDIT HERE
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>storage>metadata_devices[0]=/dev/sdb"

#EDIT HERE
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>storage>data_devices[0]=/dev/sdc"
#conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>storage>data_devices[1]=/dev/sdd"
#EDIT HERE

conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>public_interfaces[0]=eth1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>public_interfaces[1]=eth2"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>private_interfaces[0]=eth3"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>private_interfaces[1]=eth4"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>interface_type=tcp"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>transport_type=lnet"


PBIP=`ssh -o "StrictHostKeyChecking=no" $VM2_FQDN ip addr show eth1|grep "inet "|awk '{print $2}'|cut -d '/' -f 1`
echo $PBIP

PRIP=`ssh -o "StrictHostKeyChecking=no" $VM2_FQDN ip addr show eth3|grep "inet "|awk '{print $2}'|cut -d '/' -f 1`
echo $PRIP

conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>public_ip=$PBIP"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>private_ip=$PRIP"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>netmask=255.255.255.0"
#EDIT HERE if needed: the gateway value below mirrors the netmask and looks like a placeholder
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>gateway=255.255.255.0"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>network>data>roaming_ip=127.0.0.1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-2>s3_instances=1"

######node-3
ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'rm -f /etc/machine-id /var/lib/dbus/machine-id'
ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'dbus-uuidgen --ensure=/etc/machine-id'
ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'dbus-uuidgen --ensure'
ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'systemctl status network'
ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'cat /etc/machine-id'
MACHINEID=`ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'cat /etc/machine-id'`
conf json:///root/provisioner_cluster.json set "cluster>server_nodes>$MACHINEID=srvnode-3"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>machine_id=$MACHINEID"

HOSTNAME=`ssh -o "StrictHostKeyChecking=no" $VM3_FQDN 'hostname'`
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>hostname=$HOSTNAME"

conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>node_type=VM"

#EDIT HERE
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>storage>metadata_devices[0]=/dev/sdb"

#EDIT HERE
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>storage>data_devices[0]=/dev/sdc"
#conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>storage>data_devices[1]=/dev/sdd"
#EDIT HERE

conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>public_interfaces[0]=eth1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>public_interfaces[1]=eth2"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>private_interfaces[0]=eth3"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>private_interfaces[1]=eth4"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>interface_type=tcp"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>transport_type=lnet"


PBIP=`ssh -o "StrictHostKeyChecking=no" $VM3_FQDN ip addr show eth1|grep "inet "|awk '{print $2}'|cut -d '/' -f 1`
echo $PBIP

PRIP=`ssh -o "StrictHostKeyChecking=no" $VM3_FQDN ip addr show eth3|grep "inet "|awk '{print $2}'|cut -d '/' -f 1`
echo $PRIP

conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>public_ip=$PBIP"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>private_ip=$PRIP"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>netmask=255.255.255.0"
#EDIT HERE if needed: the gateway value below mirrors the netmask and looks like a placeholder
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>gateway=255.255.255.0"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>network>data>roaming_ip=127.0.0.1"
conf json:///root/provisioner_cluster.json set "cluster>srvnode-3>s3_instances=1"

scp /root/provisioner_cluster.json $VM2_FQDN:/root
scp /root/provisioner_cluster.json $VM3_FQDN:/root
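
Confirm all three copies are identical before running any setup stage:

md5sum /root/provisioner_cluster.json
ssh $VM2_FQDN md5sum /root/provisioner_cluster.json
ssh $VM3_FQDN md5sum /root/provisioner_cluster.json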

Run the Motr mini provisioner on all three nodes

/opt/seagate/cortx/motr/bin/motr_setup post_install --config json:///root/provisioner_cluster.json
/opt/seagate/cortx/motr/bin/motr_setup config --config json:///root/provisioner_cluster.json
/opt/seagate/cortx/motr/bin/motr_setup init --config json:///root/provisioner_cluster.json
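
The confstore above requests LNet over TCP, so one rough check that the config stage brought the transport up (assuming the lustre packages from the 3rd_party repo are installed) is to list the NIDs:

lctl list_nids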

Run the Motr mini provisioner test stage on all nodes, and only after the init stage has completed successfully on every node

/opt/seagate/cortx/motr/bin/motr_setup test --config json:///root/provisioner_cluster.json

Run the S3 mini provisioner on all three nodes

systemctl start rabbitmq-server
systemctl enable rabbitmq-server
systemctl status rabbitmq-server

curl https://raw.githubusercontent.com/Seagate/cortx-s3server/main/scripts/kafka/install-kafka.sh -o /root/install-kafka.sh
curl https://raw.githubusercontent.com/Seagate/cortx-s3server/main/scripts/kafka/create-topic.sh -o /root/create-topic.sh
chmod a+x /root/install-kafka.sh 
chmod a+x /root/create-topic.sh

HOSTNAME=`hostname`
/root/install-kafka.sh -c 1 -i $HOSTNAME
/root/create-topic.sh -c 1 -i $HOSTNAME
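
To double-check that the topic was created (this assumes install-kafka.sh places Kafka under /opt/kafka; adjust the path if the script installs it elsewhere):

# path is an assumption based on the install script; verify on your setup
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server $HOSTNAME:9092
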
sed -i '/PROFILE=SYSTEM/d' /etc/haproxy/haproxy.cfg
mkdir /etc/ssl/stx/ -p
curl https://raw.githubusercontent.com/Seagate/cortx-prvsnr/pre-cortx-1.0/srv/components/misc_pkgs/ssl_certs/files/stx.pem -o /etc/ssl/stx/stx.pem
ls /etc/ssl/stx/stx.pem
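
The certificate can be inspected with openssl before the S3 setup stages consume it:

openssl x509 -in /etc/ssl/stx/stx.pem -noout -subject -dates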

/opt/seagate/cortx/s3/bin/s3_setup post_install --config json:///root/provisioner_cluster.json
/opt/seagate/cortx/s3/bin/s3_setup config --config json:///root/provisioner_cluster.json
/opt/seagate/cortx/s3/bin/s3_setup init --config json:///root/provisioner_cluster.json

systemctl restart s3authserver.service
systemctl start s3backgroundproducer
systemctl start s3backgroundconsumer

echo 127.0.0.1 iam.seagate.com s3.seagate.com >> /etc/hosts
cat /etc/hosts
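
Verify that both S3 endpoints now resolve locally:

getent hosts s3.seagate.com iam.seagate.com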

Run the Hare mini provisioner on all three nodes

/opt/seagate/cortx/hare/bin/hare_setup post_install
/opt/seagate/cortx/hare/bin/hare_setup config --config json:///root/provisioner_cluster.json --file '/var/lib/hare/cluster.yaml'

Fix the hard-coded metadata device names in /var/lib/hare/cluster.yaml (primary node only; remove this step once the hard-coding is fixed)

conf yaml:///var/lib/hare/cluster.yaml get "nodes[0]>m0_servers[0]>io_disks>meta_data"
conf yaml:///var/lib/hare/cluster.yaml set "nodes[0]>m0_servers[0]>io_disks>meta_data=/dev/vg_srvnode-1_md1/lv_raw_md1"
conf yaml:///var/lib/hare/cluster.yaml get "nodes[0]>m0_servers[0]>io_disks>meta_data"

conf yaml:///var/lib/hare/cluster.yaml get "nodes[0]>m0_servers[1]>io_disks>meta_data"
conf yaml:///var/lib/hare/cluster.yaml set "nodes[0]>m0_servers[1]>io_disks>meta_data=/dev/vg_srvnode-1_md1/lv_raw_md1"
conf yaml:///var/lib/hare/cluster.yaml get "nodes[0]>m0_servers[1]>io_disks>meta_data"

conf yaml:///var/lib/hare/cluster.yaml get "nodes[1]>m0_servers[0]>io_disks>meta_data"
conf yaml:///var/lib/hare/cluster.yaml set "nodes[1]>m0_servers[0]>io_disks>meta_data=/dev/vg_srvnode-2_md1/lv_raw_md1"
conf yaml:///var/lib/hare/cluster.yaml get "nodes[1]>m0_servers[0]>io_disks>meta_data"

conf yaml:///var/lib/hare/cluster.yaml get "nodes[1]>m0_servers[1]>io_disks>meta_data"
conf yaml:///var/lib/hare/cluster.yaml set "nodes[1]>m0_servers[1]>io_disks>meta_data=/dev/vg_srvnode-2_md1/lv_raw_md1"
conf yaml:///var/lib/hare/cluster.yaml get "nodes[1]>m0_servers[1]>io_disks>meta_data"

conf yaml:///var/lib/hare/cluster.yaml get "nodes[2]>m0_servers[0]>io_disks>meta_data"
conf yaml:///var/lib/hare/cluster.yaml set "nodes[2]>m0_servers[0]>io_disks>meta_data=/dev/vg_srvnode-3_md1/lv_raw_md1"
conf yaml:///var/lib/hare/cluster.yaml get "nodes[2]>m0_servers[0]>io_disks>meta_data"

conf yaml:///var/lib/hare/cluster.yaml get "nodes[2]>m0_servers[1]>io_disks>meta_data"
conf yaml:///var/lib/hare/cluster.yaml set "nodes[2]>m0_servers[1]>io_disks>meta_data=/dev/vg_srvnode-3_md1/lv_raw_md1"
conf yaml:///var/lib/hare/cluster.yaml get "nodes[2]>m0_servers[1]>io_disks>meta_data"

Bootstrap the cluster

# edit /var/lib/hare/cluster.yaml first if changes are required
hctl bootstrap --mkfs /var/lib/hare/cluster.yaml
hctl status

AWS CLI setup

s3iamcli CreateAccount -n test -e <account email> --ldapuser sgiamadmin --ldappasswd seagate --no-ssl > s3user.txt
cat s3user.txt

curl https://raw.githubusercontent.com/Seagate/cortx-s3server/main/ansible/files/certs/stx-s3-clients/s3/ca.crt -o /etc/ssl/ca.crt
AWSKEYID=`cat s3user.txt |cut -d ',' -f 4 |cut -d ' ' -f 4`
AWSKEY=`cat s3user.txt |cut -d ',' -f 5 |cut -d ' ' -f 4`
pip3 install awscli
pip3 install awscli-plugin-endpoint
aws configure set aws_access_key_id $AWSKEYID
aws configure set aws_secret_access_key $AWSKEY
aws configure set plugins.endpoint awscli_plugin_endpoint 
aws configure set s3.endpoint_url http://s3.seagate.com 
aws configure set s3api.endpoint_url http://s3.seagate.com
aws configure set ca_bundle '/etc/ssl/ca.crt'
cat ~/.aws/config
cat ~/.aws/credentials
aws s3 mb s3://test
aws s3 ls
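
A quick end-to-end I/O check through the same endpoint (any small file works; /tmp/s3check is just an example name):

dd if=/dev/urandom of=/tmp/s3check bs=1M count=1
aws s3 cp /tmp/s3check s3://test/s3check
aws s3 ls s3://test
aws s3 rm s3://test/s3check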

S3bench setup

yum install -y go
go get github.com/igneous-systems/s3bench
acc_id=$(cat ~/.aws/credentials | grep aws_access_key_id | cut -d= -f2)
acc_key=$(cat ~/.aws/credentials | grep aws_secret_access_key | cut -d= -f2)
/root/go/bin/s3bench -accessKey $acc_id -accessSecret $acc_key -bucket test -endpoint http://s3.seagate.com -numClients 10 -numSamples 10 -objectNamePrefix=s3workload -objectSize 1024 -verbose
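
If any benchmark objects are left behind in the bucket afterwards, they can be removed with the AWS CLI configured earlier:

aws s3 rm s3://test --recursive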

References

https://github.com/Seagate/cortx-motr/wiki/Motr-deployment-using-motr_setup-on-three-node-VM
https://github.com/Seagate/cortx-hare/wiki/Hare-provisioning-for-3-node-cluster
https://github.com/Seagate/cortx-s3server/wiki/S3server-provisioning-on-3-node-VM-cluster-setup:-Manual
