building rac

network setup

  1. dns server

    LPD-SCAN   192.168.1.201
    LPD-SCAN   192.168.1.202
    LPD-SCAN   192.168.1.203
    NODE1-VIP  192.168.1.51
    NODE2-VIP  192.168.1.52
    NODE1      192.168.1.11
    NODE2      192.168.1.12
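    
    a quick sanity check, assuming the records above are already served by the dns server (192.168.1.25 per the bond0 dns setting below):
    [root@both ~] nslookup LPD-SCAN.mynet.local                                 ### should return all three scan addresses
    [root@both ~] nslookup NODE1-VIP.mynet.local
    [root@both ~] nslookup NODE2-VIP.mynet.local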
    
  2. node bonding setup

    node1
    [root@node1 ~] nmcli connection add type bond con-name bond0 ifname bond0 bond.options "miimon=50,mode=active-backup,use_carrier=1,updelay=5000,downdelay=100,primary=eno1,primary_reselect=better"
    [root@node1 ~] nmcli connection add type bond con-name bond1 ifname bond1 bond.options "miimon=50,mode=active-backup,use_carrier=1,updelay=5000,downdelay=100,primary=eno2,primary_reselect=better"
    [root@node1 ~] nmcli connection modify bond0 ipv4.addresses '192.168.1.11/24'                  ### public ip
    [root@node1 ~] nmcli connection modify bond0 ipv4.gateway '192.168.1.254'
    [root@node1 ~] nmcli connection modify bond0 ipv4.dns '192.168.1.25'
    [root@node1 ~] nmcli connection modify bond0 ipv4.dns-search 'mynet.local'
    [root@node1 ~] nmcli connection modify bond0 ipv4.method manual
    [root@node1 ~] nmcli connection modify bond1 ipv4.addresses '10.0.0.11/24'                     ### private ip
    [root@node1 ~] nmcli connection modify bond1 ipv4.method manual
    [root@node1 ~] nmcli connection add type ethernet slave-type bond con-name bond0-port1 ifname eno1 master bond0
    [root@node1 ~] nmcli connection add type ethernet slave-type bond con-name bond0-port2 ifname eno3 master bond0
    [root@node1 ~] nmcli connection add type ethernet slave-type bond con-name bond1-port1 ifname eno2 master bond1
    [root@node1 ~] nmcli connection add type ethernet slave-type bond con-name bond1-port2 ifname eno4 master bond1
    
    node2
    [root@node2 ~] nmcli connection add type bond con-name bond0 ifname bond0 bond.options "miimon=50,mode=active-backup,use_carrier=1,updelay=5000,downdelay=100,primary=eno1,primary_reselect=better"
    [root@node2 ~] nmcli connection add type bond con-name bond1 ifname bond1 bond.options "miimon=50,mode=active-backup,use_carrier=1,updelay=5000,downdelay=100,primary=eno2,primary_reselect=better"
    [root@node2 ~] nmcli connection modify bond0 ipv4.addresses '192.168.1.12/24'                  ### public ip
    [root@node2 ~] nmcli connection modify bond0 ipv4.gateway '192.168.1.254'
    [root@node2 ~] nmcli connection modify bond0 ipv4.dns '192.168.1.25'
    [root@node2 ~] nmcli connection modify bond0 ipv4.dns-search 'mynet.local'
    [root@node2 ~] nmcli connection modify bond0 ipv4.method manual
    [root@node2 ~] nmcli connection modify bond1 ipv4.addresses '10.0.0.12/24'                     ### private ip
    [root@node2 ~] nmcli connection modify bond1 ipv4.method manual  
    [root@node2 ~] nmcli connection add type ethernet slave-type bond con-name bond0-port1 ifname eno1 master bond0
    [root@node2 ~] nmcli connection add type ethernet slave-type bond con-name bond0-port2 ifname eno3 master bond0
    [root@node2 ~] nmcli connection add type ethernet slave-type bond con-name bond1-port1 ifname eno2 master bond1
    [root@node2 ~] nmcli connection add type ethernet slave-type bond con-name bond1-port2 ifname eno4 master bond1
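    
    to verify the bonds on both nodes (a sketch; interface names follow the eno1-eno4 layout above):
    [root@both ~] nmcli connection up bond0
    [root@both ~] nmcli connection up bond1
    [root@both ~] cat /proc/net/bonding/bond0                                   ### check mode and currently active slave
    [root@node1 ~] ping -I bond1 10.0.0.12                                      ### private interconnect reachability from node1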
    
    [root@both ~] vi /etc/hostname                                              ### set each node's own hostname (node1 / node2)
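    or set it in one step with hostnamectl, e.g. on node1 (fqdn assumed to match the dns records above):
    [root@node1 ~] hostnamectl set-hostname node1.mynet.local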
    

os setup

  1. both nodes -- create lvm: storage, u01
  2. both nodes -- extend swap size
  3. both nodes -- turn off swappiness
    [root@both ~] sysctl -w vm.swappiness=0
    [root@both ~] echo 'vm.swappiness=0' >> /etc/sysctl.d/99-swappiness.conf
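    to confirm the setting took effect:
    [root@both ~] sysctl vm.swappiness                                          ### should report 0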
    
  4. both nodes -- turn off firewall
    [root@both ~] systemctl stop firewalld
    [root@both ~] systemctl disable firewalld
    
  5. both nodes -- turn off selinux (requires reboot)
    [root@both ~] vi /etc/selinux/config
      SELINUX=disabled
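    to avoid waiting for the reboot, selinux can also be switched to permissive immediately:
    [root@both ~] setenforce 0                                                  ### immediate but non-persistent; the config edit covers reboots
    [root@both ~] getenforce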
    
  6. both nodes -- clock sync
    [root@both ~] vi /etc/chrony.conf                                         ### point both nodes at the same time source
    [root@both ~] systemctl enable chronyd
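    a minimal chrony.conf sketch (the ntp server address here is an assumption; substitute your own time source, identical on both nodes):
      server 192.168.1.25 iburst
    [root@both ~] systemctl restart chronyd
    [root@both ~] chronyc sources -v                                          ### verify both nodes track the same source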
    

rac setup

  1. preinstallation

    [root@both ~] yum install oracle-database-preinstall-19c                   ### preinstallation rpm: creates the oracle user, base groups and kernel settings
    [root@both ~] groupadd --gid 54327 asmdba 
    [root@both ~] groupadd --gid 54328 asmoper
    [root@both ~] groupadd --gid 54329 asmadmin
    [root@both ~] useradd --uid 54322 --gid oinstall --groups dba,asmadmin,asmdba,asmoper,racdba grid
    [root@both ~] usermod -a -G asmdba oracle
    [root@both ~] passwd oracle
    [root@both ~] passwd grid
    [root@both ~] vi /etc/security/limits.d/oracle-database-preinstall-19c.conf
      
    modify body:
      oracle soft nofile 1024
      oracle hard nofile 65536
      oracle soft nproc 16384
      oracle hard nproc 16384
      oracle soft stack 10240
      oracle hard stack 32768
      oracle hard memlock 237394652
      oracle soft memlock 237394652
      grid soft nofile 1024
      grid hard nofile 65536
      grid soft nproc 16384
      grid hard nproc 16384
      grid soft stack 10240
      grid hard stack 32768
      grid hard memlock 237394652
      grid soft memlock 237394652
      # oracle-database-preinstall-19c setting for data soft limit is 'unlimited'
      oracle soft data unlimited
      # oracle-database-preinstall-19c setting for data hard limit is 'unlimited'
      oracle hard data unlimited
    [root@both ~] mkdir -p /u01/app/19.0.0/grid
    [root@both ~] chown -R grid:oinstall /u01
    [root@both ~] chown grid:oinstall grid19c.zip
    [root@node1 ~] mkdir -p /u01/app/oracle/product/19.0.0/dbhome_1/
    [root@node1 ~] chown -R oracle:oinstall /u01/app/oracle
    [root@node2 ~] mkdir -p /u01/app/oracle/
    [root@node2 ~] chown -R oracle:oinstall /u01/app/oracle
    [root@node1 ~] su -l grid
    [grid@node1 ~] unzip grid19c.zip -d /u01/app/19.0.0/grid
    [root/grid/oracle@both ~] ssh-keygen
    [root/grid/oracle@both ~] ssh-copy-id node1/node2
    [root@node1 ~] scp /u01/app/19.0.0/grid/cv/rpm/cvuqdisk-1.0.10-1.rpm node2:/storage/
    [root@node2 ~] yum localinstall /storage/cvuqdisk-1.0.10-1.rpm
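    
    to verify the user and ssh setup before the installer does (hostnames as defined above):
    [root@both ~] id grid                                                       ### confirm group memberships
    [grid@node1 ~] ssh node2 date                                               ### passwordless ssh in both directions
    [grid@node2 ~] ssh node1 date
    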
  2. multipath

    [root@both ~] yum install device-mapper-multipath
    [root@both ~] mpathconf --enable
    [root@both ~] vi /etc/multipath.conf
      
    modify body:
      defaults {
          user_friendly_names yes
          find_multipaths yes
      }
      devices {
          device {
              vendor "COMPELNT"
              product "Compellent Vol"
              path_grouping_policy multibus
              path_selector "service-time 0"
              path_checker tur
              features "0"
              hardware_handler "0"
              prio const
              failback immediate
              rr_weight uniform
              no_path_retry queue
          }
      }
      ### find the wwid by
      ### 1) /usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sdb
      ### 2) looking it up in the shared storage (san) management software
      multipaths {
          multipath {
              wwid 36000d41004a71c00000000000000005e
              alias TESTRAC_ARCH1
          }
          multipath {
              wwid 36000d41004a71c00000000000000005f
              alias TESTRAC_ARCH2
          }
      }
    [root@both ~] systemctl start multipathd.service
    [root@both ~] systemctl enable multipathd.service
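    
    to verify that the paths and aliases are visible:
    [root@both ~] multipath -ll                                                 ### TESTRAC_ARCH1/2 should appear with their paths
    [root@both ~] ls -l /dev/mapper/ | grep TESTRAC
    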
  3. oracle asm

    [root@both ~] yum install oracleasm-support                                                       ### on oracle linux the asmlib kernel driver ships with uek, so only oracleasm-support is needed
    [root@both ~] oracleasm configure -i
      Default user to own the driver interface [grid]:
      Default group to own the driver interface [oinstall]:
      Start Oracle ASM library driver on boot (y/n) [y]:
      Scan for Oracle ASM disks on boot (y/n) [y]:
    [root@both ~] oracleasm init                                                                      ### without this, later commands fail with: oracleasm module not loaded or /dev/oracleasm not mounted
    [root@both ~] oracleasm scandisks
      or manual creation
      [root@both ~] oracleasm createdisk TESTRAC_ARCH1 /dev/mapper/TESTRAC_ARCH1
         use below if error: Unable to open device "/dev/mapper/TESTRAC_ARCH1": Device or resource busy
         /usr/sbin/asmtool -C -l /dev/oracleasm -n TESTRAC_ARCH1 -s /dev/mapper/TESTRAC_ARCH1 -a force=yes
         /usr/sbin/asmtool -C -l /dev/oracleasm -n TESTRAC_ARCH2 -s /dev/mapper/TESTRAC_ARCH2 -a force=yes
         ......
    [root@both ~] oracleasm listdisks
    [root@both ~] ls -l /dev/oracleasm/disks
    
  4. install grid

    [grid@node1 ~] export DISPLAY=192.168.2.10:0.0                                                    ### display the installer on a remote x server
    [grid@node1 ~] export CV_ASSUME_DISTID=OEL7.8                                                     ### may not be needed for patched grid
    [grid@node1 ~] rm -rf /u01/app/19.0.0/grid/OPatch
    [grid@node1 ~] unzip /storage/p6880880_190000_Linux-x86-64.zip -d /u01/app/19.0.0/grid
    [grid@node1 ~] /u01/app/19.0.0/grid/gridSetup.sh -applyRU /storage/31305339                       ### applying the ru before installation is mandatory (Doc ID 1410202.1), otherwise
                                                                                                      ### i)  [INS-06006] Passwordless SSH connectivity not set up (Doc ID 2555697.1)
                                                                                                      ### ii) the clock sync check demands ntpd instead of chronyd
    
    • cluster name must be ≤ 14 characters; otherwise the first node's root script fails with the errors below
         execute root script on hub nodes failed
         CRS-2674: Start of 'ora.grid' on '' failed
         CLSRSC-119: Start of the exclusive mode cluster failed
      
      the ≤ 15-character limit suggested in Doc ID 2568395.1 does not work
    • Network Interface Usage: bond1 --> ASM & Private
    • Create Grid Infrastructure Management Repository: Yes
    • Grid Infrastructure Management Repository Option: No
    • Create ASM Disk Group
      • Redundancy: external
      • Allocation Unit: 8MB
      • Disk Discovery Path: '/dev/oracleasm/disks'
    • Failure Isolation: Do not use IPMI
    • Installation Location: Oracle base --> /u01/app/grid
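    
    the installer pauses to have the root scripts run on node1 first, then node2 (inventory path assumes the default location):
    [root@both ~] /u01/app/oraInventory/orainstRoot.sh
    [root@both ~] /u01/app/19.0.0/grid/root.sh
    
    afterwards the cluster state can be checked with:
    [grid@node1 ~] /u01/app/19.0.0/grid/bin/crsctl stat res -t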
  5. install oracle

    [oracle@node1 ~] export DISPLAY=192.168.2.10:0.0                                                  ### display the installer on a remote x server
    [oracle@node1 ~] export CV_ASSUME_DISTID=OEL7.8                                                   ### may not be needed for patched oracle
    [oracle@node1 ~] rm -rf /u01/app/oracle/product/19.0.0/dbhome_1/OPatch
    [oracle@node1 ~] unzip /storage/p6880880_190000_Linux-x86-64.zip -d /u01/app/oracle/product/19.0.0/dbhome_1
    [oracle@node1 ~] /u01/app/oracle/product/19.0.0/dbhome_1/runInstaller -applyRU /storage/31305339  ### the ru is not strictly required here, but still recommended
                                                                                                      ### the chronyd-vs-ntpd clock sync warning will still appear; it is safe to ignore
    [root@both ~] /u01/app/oracle/product/19.0.0/dbhome_1/root.sh
    
    • Configuration Option: Set Up Software Only
    • Database Installation Option: Oracle Real Application Clusters database installation
    • Database Edition: Enterprise Edition
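    
    to confirm the ru actually landed in the new home (optional check):
    [oracle@node1 ~] /u01/app/oracle/product/19.0.0/dbhome_1/OPatch/opatch lspatches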
  6. create asm diskgroup

    [grid@node1 ~] export DISPLAY=192.168.2.10:0.0
    [grid@node1 ~] export CV_ASSUME_DISTID=OEL7.8
    [grid@node1 ~] /u01/app/19.0.0/grid/bin/asmca
      create disk group
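      
      the result can be confirmed from the command line (assumes the grid environment is set, e.g. ORACLE_SID=+ASM1 on node1):
      [grid@node1 ~] /u01/app/19.0.0/grid/bin/asmcmd lsdg                       ### the new disk group should show as MOUNTED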
    
  7. create database

    [oracle@node1 ~] export DISPLAY=192.168.2.10:0.0
    [oracle@node1 ~] export CV_ASSUME_DISTID=OEL7.8
    [oracle@node1 ~] /u01/app/oracle/product/19.0.0/dbhome_1/bin/dbca
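    
    once dbca completes, the cluster database can be checked with srvctl (TESTPD is assumed from the ORACLE_SID TESTPD1 used below):
    [oracle@node1 ~] /u01/app/oracle/product/19.0.0/dbhome_1/bin/srvctl status database -d TESTPD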
    
  8. post installation

    • environment variables: vi ~/.bash_profile
      • root
        export ORACLE_BASE=/u01/app/grid
        export GRID_HOME=/u01/app/19.0.0/grid
        export ORACLE_HOME=/u01/app/19.0.0/grid
        export PATH=$ORACLE_HOME/bin:$GRID_HOME/OPatch:$BASE_PATH:$PATH
        
      • grid
        export ORACLE_BASE=/u01/app/grid
        export GRID_HOME=/u01/app/19.0.0/grid
        export ORACLE_HOME=/u01/app/19.0.0/grid
        export ORACLE_SID=-MGMTDB
        export PATH=$ORACLE_HOME/bin:$GRID_HOME/OPatch:$BASE_PATH:$PATH
        
      • oracle
        PS1='${ORACLE_SID}@${HOSTNAME%} ${PWD} $ '
        
        export ORACLE_SID=TESTPD1
        export ORAENV_ASK=NO
        . oraenv
        export PATH=$PATH:$ORACLE_HOME/bin:$ORACLE_HOME/OPatch
        export ORAENV_ASK=YES
        
    • oratab -- the oracle home path must not end with a trailing slash (/), otherwise error ORA-12547: TNS:lost contact
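      e.g. a correct entry (database name and home assumed from this guide):
        TESTPD1:/u01/app/oracle/product/19.0.0/dbhome_1:N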
    • tnsnames.ora
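      a minimal entry sketch resolving through the scan name (the service name is an assumption):
        TESTPD =
          (DESCRIPTION =
            (ADDRESS = (PROTOCOL = TCP)(HOST = LPD-SCAN.mynet.local)(PORT = 1521))
            (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = TESTPD))
          )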