Oracle 12.2 RAC Cluster Management: Adding and Removing Nodes (DNS Resolution)


Oracle 12.2 RAC Cluster Management: Adding and Removing Nodes

This chapter builds on the environment set up in the earlier article:
Oracle Linux R7 安装 Oracle 12.2 RAC 集群实施(DNS解析) - CSDN blog

Operating system parameter configuration

IP plan (the lines marked "# new" are for the new node hefei3):

172.30.21.101   hefei1   hefei1.hefeidb.com
172.30.21.102   hefei2   hefei2.hefeidb.com
172.30.21.108   hefei3   hefei3.hefeidb.com # new
10.10.1.101  hefei1prv     hefei1prv.hefeidb.com
10.10.1.102  hefei2prv     hefei2prv.hefeidb.com
10.10.1.108  hefei3prv     hefei3prv.hefeidb.com # new
172.30.21.103   hefei1vip    hefei1vip.hefeidb.com
172.30.21.104   hefei2vip     hefei2vip.hefeidb.com
172.30.21.109   hefei3vip     hefei3vip.hefeidb.com # new
172.30.21.105   hefeiscan     hefeiscan.hefeidb.com
172.30.21.106   hefeiscan     hefeiscan.hefeidb.com
172.30.21.107   hefeiscan     hefeiscan.hefeidb.com
172.30.21.112 hefeidns  hefeidb.com  hefeidns.hefeidb.com

Configure the DNS server (records for node 3)

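The original post shows the zone edits only as a screenshot. As a minimal sketch (the zone file names below are assumptions, not taken from the base article), the forward and reverse zones on hefeidns gain records for the new node, the zone serial numbers are bumped, and named is restarted:

# /var/named/hefeidb.com.zone  (forward zone; file name assumed)
hefei3       IN A    172.30.21.108
hefei3prv    IN A    10.10.1.108
hefei3vip    IN A    172.30.21.109

# /var/named/21.30.172.in-addr.arpa.zone  (reverse zone; file name assumed)
108          IN PTR  hefei3.hefeidb.com.
109          IN PTR  hefei3vip.hefeidb.com.

systemctl restart named
nslookup hefei3.hefeidb.com 172.30.21.112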

Configure the resolver on the third host (hefei3)

vi /etc/resolv.conf

search hefeidb.com hefeidns.hefeidb.com localdomain

nameserver 192.168.1.222

options timeout:2

options attempts:5

Configure the resolution order

vi /etc/host.conf

order bind,hosts

multi on

Configure DNS on the network interface

PEERDNS=NO

DNS1=192.168.1.222

Stop the NetworkManager service

systemctl stop NetworkManager.service

systemctl status NetworkManager.service

systemctl disable NetworkManager.service

Test name resolution with nslookup

service network restart

nslookup hefei3.hefeidb.com

nslookup hefei3vip.hefeidb.com

nslookup hefei1.hefeidb.com

Check the configuration of both network interfaces

ONBOOT=yes
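For reference, a minimal sketch of the two interface files on hefei3 (the device names ens33/ens34 are placeholders; check the real names with ip addr):

# /etc/sysconfig/network-scripts/ifcfg-ens33  (public network; device name assumed)
BOOTPROTO=static
IPADDR=172.30.21.108
NETMASK=255.255.255.0
ONBOOT=yes
PEERDNS=no
DNS1=192.168.1.222

# /etc/sysconfig/network-scripts/ifcfg-ens34  (private interconnect; device name assumed)
BOOTPROTO=static
IPADDR=10.10.1.108
NETMASK=255.255.255.0
ONBOOT=yes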

Linux operating system configuration and parameter changes

service network restart

ping 10.10.10.201

ping 10.10.10.203

ping 10.10.10.208

Prepare the file systems

Target layout (LVM on whole disks, matching the commands below):

/dev/sdb -> oravg -> /oracle
/dev/sdc -> softvg -> /soft

LVM:

pvcreate /dev/sdb /dev/sdc
vgcreate softvg /dev/sdc
vgcreate oravg /dev/sdb
lvcreate -n softlv -L 30000M softvg 
lvcreate -n oralv -L 50000M oravg 
mkfs.xfs /dev/softvg/softlv 
mkfs.xfs /dev/oravg/oralv 

#vi /etc/fstab 
/dev/oravg/oralv /oracle xfs defaults 0 0 
/dev/softvg/softlv /soft xfs defaults 0 0 
mkdir /oracle 
mkdir /soft 
mount /oracle 
mount /soft 

df -h 

Create groups and users

/usr/sbin/groupadd -g 60001 oinstall 
/usr/sbin/groupadd -g 60002 dba 
/usr/sbin/groupadd -g 60003 oper 
/usr/sbin/groupadd -g 60004 backupdba 
/usr/sbin/groupadd -g 60005 dgdba 
/usr/sbin/groupadd -g 60006 kmdba 
/usr/sbin/groupadd -g 60007 asmdba 
/usr/sbin/groupadd -g 60008 asmoper 
/usr/sbin/groupadd -g 60009 asmadmin 
/usr/sbin/groupadd -g 60010 racdba 

useradd -u 61001 -g oinstall -G dba,asmdba,backupdba,dgdba,kmdba,racdba,oper oracle 
useradd -u 61002 -g oinstall -G asmadmin,asmdba,asmoper,dba grid 
	
id oracle 
id grid

passwd oracle 
passwd grid 
	
mkdir -p /oracle/app/grid 
mkdir -p /oracle/app/12.2/grid 
chown -R grid:oinstall /oracle 
mkdir -p /oracle/app/oracle 
chown -R oracle:oinstall /oracle/app/oracle 
mkdir -p /oracle/app/oraInventory 
chown -R grid:oinstall /oracle/app/oraInventory 
chmod -R 775 /oracle 

Install the required packages

mount /dev/cdrom /mnt

cd /etc/yum.repos.d

mkdir bk

mv public-yum-ol7.repo bk/

#vi hefei.repo 

[EL]
name =Linux 7.3 DVD 
baseurl=file:///mnt
gpgcheck=0
enabled=1 

yum list |more

# From Public Yum or ULN 
yum -y install autoconf 
yum -y install automake 
yum -y install binutils 
yum -y install binutils-devel 
yum -y install bison 
yum -y install cpp 
yum -y install dos2unix 
yum -y install ftp 
yum -y install gcc 
yum -y install gcc-c++ 
yum -y install lrzsz 
yum -y install python-devel 
yum -y install compat-db* 
yum -y install compat-gcc-34 
yum -y install compat-gcc-34-c++ 
yum -y install compat-libcap1 
yum -y install compat-libstdc++-33
yum -y install compat-libstdc++-33.i686 
yum -y install glibc-* 
yum -y install glibc-*.i686 
yum -y install libXpm-*.i686 
yum -y install libXp.so.6 
yum -y install libXt.so.6 
yum -y install libXtst.so.6 
yum -y install libXext 
yum -y install libXext.i686 
yum -y install libXtst 
yum -y install libXtst.i686 
yum -y install libX11 
yum -y install libX11.i686 
yum -y install libXau 
yum -y install libXau.i686 
yum -y install libxcb 
yum -y install libxcb.i686 
yum -y install libXi 
yum -y install libXi.i686 
yum -y install libgcc_s.so.1 
yum -y install libstdc++.i686 
yum -y install libstdc++-devel 
yum -y install libstdc++-devel.i686 
yum -y install libaio 
yum -y install libaio.i686 
yum -y install libaio-devel 
yum -y install libaio-devel.i686 
yum -y install ksh 
yum -y install libXp 
yum -y install libaio-devel 
yum -y install numactl 
yum -y install numactl-devel 
yum -y install make 
yum -y install sysstat 
yum -y install unixODBC 
yum -y install unixODBC-devel
yum -y install elfutils-libelf-devel-0.97 
yum -y install elfutils-libelf-devel 
yum -y install redhat-lsb-core 
yum -y install *vnc* 

Modify the resource limit parameters

vi /etc/security/limits.conf 

#ORACLE SETTING 
grid soft nproc 16384 
grid hard nproc 16384 
grid soft nofile 16384 
grid hard nofile 65536 
grid soft stack 16384 
grid hard stack 32768 
grid hard memlock 6192000 
grid soft memlock 6192000 
oracle soft nproc 16384 
oracle hard nproc 16384 
oracle soft nofile 16384 
oracle hard nofile 65536 
oracle soft stack 10240 
oracle hard stack 32768 
oracle hard memlock 6192000 
oracle soft memlock 6192000 

ulimit -a
echo "* - nproc 16384" > /etc/security/limits.d/20-nproc.conf 
cat /etc/security/limits.d/20-nproc.conf 
echo "session required pam_limits.so" >> /etc/pam.d/login 
cat /etc/pam.d/login

Modify the kernel parameters

vi /etc/sysctl.conf 

#ORACLE SETTING 
fs.aio-max-nr = 1048576 
fs.file-max = 6815744 
kernel.shmmax = 5719476736 
kernel.shmall = 2128778 
kernel.shmmni = 4096 
kernel.sem = 250 32000 100 128 
net.ipv4.ip_local_port_range = 9000 65500 
net.core.rmem_default = 262144 
net.core.rmem_max = 4194304 
net.core.wmem_default = 262144 
net.core.wmem_max = 1048576 
kernel.panic_on_oops = 1 
vm.nr_hugepages = 2500 

sysctl -p 
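As a quick sanity check on the values above (assuming the default 4 KB page size and 2 MB huge pages): kernel.shmall is counted in pages, so 2128778 × 4 KB ≈ 8.1 GB, which comfortably covers kernel.shmmax ≈ 5.3 GB, and vm.nr_hugepages = 2500 × 2 MB ≈ 4.9 GB should be at least the planned SGA size. The running values can be inspected with:

getconf PAGE_SIZE                    # normally 4096
cat /proc/sys/kernel/shmmax /proc/sys/kernel/shmall
grep -i hugepages /proc/meminfo      # HugePages_Total should reach 2500 (a reboot may be needed)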

Disable transparent huge pages

vi /etc/rc.d/rc.local 

if test -f /sys/kernel/mm/transparent_hugepage/enabled; then 
echo never > /sys/kernel/mm/transparent_hugepage/enabled 
fi 
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then 
echo never > /sys/kernel/mm/transparent_hugepage/defrag 
fi 

chmod +x /etc/rc.d/rc.local 
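After the next reboot (or after running the two echo commands by hand), the setting can be verified; [never] should appear in brackets:

cat /sys/kernel/mm/transparent_hugepage/enabled   # expected: always madvise [never]
cat /sys/kernel/mm/transparent_hugepage/defrag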

Disable the firewall

systemctl status firewalld.service
systemctl stop firewalld.service
systemctl disable firewalld.service

Disable SELinux

vi /etc/selinux/config

SELINUX=disabled 

setenforce 0

Disable the NTP service

systemctl stop ntpd.service
systemctl disable ntpd.service
systemctl status ntpd.service

mv /etc/ntp.conf /etc/ntp.conf.bak
rm /var/run/ntpd.pid

Configure NOZEROCONF

echo "NOZEROCONF=yes" >> /etc/sysconfig/network

Modify nsswitch.conf

vi /etc/nsswitch.conf

change:
hosts: files dns myhostname
to:
hosts: files dns myhostname nis

Disable avahi-daemon

systemctl stop avahi-daemon.service
systemctl disable avahi-daemon.service
systemctl status avahi-daemon.service

Synchronize the system time

date -s 'XXXX'

Reboot the server

Add environment variables

su - grid 
[grid@hefei3 ~]$ vi .bash_profile 
PS1="[`whoami`@`hostname`:"'$PWD]$' 
export PS1 
umask 022 
#alias sqlplus="rlwrap sqlplus" 
export TMP=/tmp 
export LANG=en_US
export TMPDIR=$TMP 
ORACLE_SID=+ASM3; export ORACLE_SID 
ORACLE_TERM=xterm; export ORACLE_TERM 
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE 
ORACLE_HOME=/oracle/app/12.2/grid; export ORACLE_HOME 
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT 
PATH=$PATH:$HOME/.local/bin:$HOME/bin:$ORACLE_HOME/bin; export PATH 
THREADS_FLAG=native; export THREADS_FLAG 
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then 
if [ $SHELL = "/bin/ksh" ]; then 
ulimit -p 16384 
ulimit -n 65536 
else 
ulimit -u 16384 -n 65536 
fi 
umask 022 
fi 
su - oracle 
[oracle@hefei3 ~]$ vi .bash_profile 

PS1="[`whoami`@`hostname`:"'$PWD]$' 
#alias sqlplus="rlwrap sqlplus" 
#alias rman="rlwrap rman" 
export PS1 
export TMP=/tmp 
export LANG=en_US 
export TMPDIR=$TMP 
export ORACLE_HOSTNAME=hefei3 
export ORACLE_UNQNAME=hefeidb 
ORACLE_SID=hefeidb3; export ORACLE_SID 
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE 
ORACLE_HOME=$ORACLE_BASE/product/12.2/db_1; export ORACLE_HOME 
ORACLE_TERM=xterm; export ORACLE_TERM 
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT 
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK;export NLS_LANG
PATH=.:$PATH:$HOME/.local/bin:$HOME/bin:$ORACLE_BASE/product/12.2/db_1/bin:$ORACLE_HOME/bin; export PATH 
THREADS_FLAG=native; export THREADS_FLAG 
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then 
if [ $SHELL = "/bin/ksh" ]; then 
ulimit -p 16384 
ulimit -n 65536 
else 
ulimit -u 16384 -n 65536 
fi 
umask 022 
fi 
su - root 
vi .bash_profile 
# append the grid binaries to root's PATH: 
export PATH=$PATH:/oracle/app/12.2/grid/bin 

Create the ASM disk devices (udev rules)

for i in d e f g h i; 
do 
echo "sd$i" "`/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i` "; 
done 
	
	
vi /etc/udev/rules.d/99-oracle-asmdevices.rules 
	
	KERNEL=="sd?",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VBd2430a0a-255a817b",SYMLINK+="asm-dggrid1",OWNER="grid", GROUP="asmadmin",MODE="0660"
	KERNEL=="sd?",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VB9b6a71f1-fdae8d45",SYMLINK+="asm-dggrid2",OWNER="grid", GROUP="asmadmin",MODE="0660"
	KERNEL=="sd?",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VB6446e0c2-d7933b7b",SYMLINK+="asm-dgsystem",OWNER="grid", GROUP="asmadmin",MODE="0660"
	KERNEL=="sd?",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VB90dec77a-102eab38",SYMLINK+="asm-dgrecovery",OWNER="grid", GROUP="asmadmin",MODE="0660"
	KERNEL=="sd?",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VB5e9d82c4-33692a7f",SYMLINK+="asm-dgdata01",OWNER="grid", GROUP="asmadmin",MODE="0660"
	KERNEL=="sd?",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="1ATA_VBOX_HARDDISK_VBf4c1bcfe-dcc15b3d",SYMLINK+="asm-dggriddb",OWNER="grid", GROUP="asmadmin",MODE="0660"

Trigger udev to apply the rules

/sbin/udevadm trigger --type=devices --action=change

Check the udev devices

ls -lsa /dev/asm*
ls -lsa /dev/sd*
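Because the udev rules match on the SCSI WWID, it is worth confirming that hefei3 reports the same IDs as the existing nodes before trusting the symlinks; a small sketch (the device letters d-i mirror the loop above and may differ between nodes):

# run on hefei3 and compare with the same loop on hefei1/hefei2
for i in d e f g h i; do
  /usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i
done

# the symlinks should be owned by grid:asmadmin with mode 0660
ls -l /dev/asm-*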

Configure SSH user equivalence among the three nodes (running on the first node is sufficient)

root:

cd /oracle/app/12.2/grid/oui/prov/resources/scripts

./sshUserSetup.sh -user grid -hosts "hefei1 hefei2 hefei3" -advanced -exverify -confirm

./sshUserSetup.sh -user oracle -hosts "hefei1 hefei2 hefei3" -advanced -exverify -confirm


Verify:

su - grid

ssh hefei1 date

ssh hefei2 date

ssh hefei3 date

su - oracle

ssh hefei1 date

ssh hefei2 date

ssh hefei3 date
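A compact way to confirm that passwordless SSH works in every direction (run as both grid and oracle on each of the three nodes; nothing here is environment-specific beyond the host names):

for h in hefei1 hefei2 hefei3; do
  ssh -o BatchMode=yes $h hostname\; date || echo "ssh to $h failed"
done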

Environment validation and information checks

Install the cvuqdisk package on the third node:

/oracle/app/12.2/grid/cv/rpm

[root@hefei1 rpm]# scp cvuqdisk-1.0.10-1.rpm hefei3:/soft

[root@hefei3 ~]# rpm -ivh /soft/cvuqdisk-1.0.10-1.rpm

Verification:

On node 1, as the grid user.

Compare the node to be added (hefei3) against hefei1 and check for anything that does not match:

su - grid

cd /oracle/app/12.2/grid/bin

./cluvfy comp peer -refnode hefei1 -n hefei3 -verbose


./cluvfy stage -post hwos -n hefei1,hefei2,hefei3 -verbose

./cluvfy stage -pre nodeadd -n hefei3 -verbose


[grid@hefei1:/oracle/app/12.2/grid/bin]$olsnodes -s -t -n

[grid@hefei1:/oracle/app/12.2/grid/bin]$srvctl status database -d hefeidb -f -v

[grid@hefei1:/oracle/app/12.2/grid/bin]$srvctl config database -d hefeidb -a |grep managed

[root@hefei1 rpm]# ocrconfig -manualbackup

hefei2 2023/08/24 14:16:22 +dggriddb:/hefeicluster/OCRBACKUP/backup_20230824_141622.ocr.281.1145715383 0
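The manual backup just taken can be listed to confirm it was recorded (run as root on any node):

ocrconfig -showbackup manual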

Adding the GRID node

Grid node addition procedure

Option 1: command line (addnode.sh)

(run on hefei1)

su - grid

cd $ORACLE_HOME/addnode

./addnode.sh -silent "CLUSTER_NEW_NODES={hefei3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={hefei3vip}" "CLUSTER_NEW_NODE_ROLES={hub}"

Option 2: graphical interface (recommended)

su - grid    (run on hefei1)

vnc

cd $ORACLE_HOME/addnode

./addnode.sh

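Whichever method is used, the installer finishes by asking for the root scripts to be run on the new node; based on the inventory and grid home locations created earlier, that means (as root on hefei3):

/oracle/app/oraInventory/orainstRoot.sh
/oracle/app/12.2/grid/root.sh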

Check the cluster status

crs_stat -t

crsctl status res -t


Adding the database software to the new node

DB software addition procedure

Option 1: command line (addnode.sh)

(run on hefei1)

su - oracle

cd $ORACLE_HOME/addnode

./addnode.sh -silent "CLUSTER_NEW_NODES={hefei3}"

Option 2: graphical addnode.sh (recommended)

(run on hefei1)

vnc

cd $ORACLE_HOME/addnode

./addnode.sh


Run root.sh on the new node when prompted.

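The root.sh requested here lives in the database home that was just copied to the new node; assuming the same path as on the existing nodes, run it as root on hefei3:

/oracle/app/oracle/product/12.2/db_1/root.sh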

Adding the database instance and functional testing

Instance addition procedure

Option 1: command line (dbca)

(run on hefei1)

su - oracle

dbca -silent -addInstance -nodeName hefei3 -gdbName hefeidb -instanceName hefeidb3 -sysDBAUserName sys -sysDBAPassword oracle 

Option 2: graphical dbca (recommended)

(run on hefei1)

vnc + dbca

crsctl status res -t |more

select instance_name,host_name,status from v$instance;

show parameter sga

show parameter pga
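dbca creates the redo thread and undo tablespace for the new instance automatically; a few optional sanity checks from SQL*Plus on any instance (standard dictionary views; the UNDOTBS% pattern assumes the dbca default naming):

select inst_id, instance_name, host_name, status from gv$instance;
select thread#, status, enabled from v$thread;
select tablespace_name from dba_tablespaces where tablespace_name like 'UNDOTBS%';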


Environment check and instance removal

Understand and prepare the environment information

olsnodes -s -t -n                                   # how many nodes are in the cluster
srvctl status database -d hefeidb -f -v             # status of each instance
srvctl config database -d hefeidb -a |grep managed  # database management policy
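It is also worth checking whether any database services list hefeidb3 as a preferred or available instance before the instance is removed; a hedged sketch (the service name is a placeholder):

srvctl config service -d hefeidb
srvctl status service -d hefeidb -v
# if a service currently runs on hefeidb3, move it first, for example:
# srvctl relocate service -d hefeidb -s <service_name> -i hefeidb3 -t hefeidb1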

Back up the OCR:

[root@hefei1 rpm]# ocrconfig -manualbackup

hefei2 2023/08/24 14:16:22 +dggriddb:/hefeicluster/OCRBACKUP/backup_20230824_141622.ocr.281.1145715383 0

Stop and delete database instance 3

su - grid

srvctl stop instance -d hefeidb -i hefeidb3

Use the dbca graphical interface

or:

su - oracle

dbca -silent -deleteInstance -nodeName hefei3 -gdbName hefeidb -instanceName hefeidb3 -sysDBAUserName sys -sysDBAPassword oracle 

image-20241125154334302

Check:

su - grid

srvctl status database -d hefeidb -f -v

srvctl config database -d hefeidb -a | grep -i instance

Removing the database software from the node

Update the inventory on the node being removed

On node 3:

su - oracle

cd $ORACLE_HOME/oui/bin

./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={hefei3}" -local 


Detach the $ORACLE_HOME

(This step is only required if the ORACLE_HOME is on shared storage; if it is on local storage it can be skipped, and running it anyway does no harm.)

On node 3:

su - oracle

cd $ORACLE_HOME/oui/bin

./runInstaller -detachHome ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={hefei3}" 

Update the inventory on the remaining nodes

On node 1:

su - oracle

cd $ORACLE_HOME/oui/bin

./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={hefei1,hefei2}"



On node 2:

su - oracle

cd $ORACLE_HOME/oui/bin

./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={hefei1,hefei2}" 

Remove the DB software and related information from node 3

On node 3:

su - oracle

$ORACLE_HOME/deinstall/deinstall -local 


Removing the grid cluster node and related environment tests

Check the environment

su - grid

[grid@hefei1:/home/grid]$olsnodes -t -s

hefei1 Active Unpinned

hefei2 Active Unpinned

hefei3 Active Unpinned
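All three nodes show Unpinned here, which is what the deletion procedure expects; if a node were reported as Pinned it would have to be unpinned first (as root, from a node that is not being removed):

crsctl unpin css -n hefei3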

Deconfigure CRS on node 3

su - root

cd /oracle/app/12.2/grid/crs/install

./rootcrs.pl -deconfig -force 

If rootcrs.pl fails because the Env.pm Perl module is missing, copy it over from node 1:

[root@hefei3 install]# scp hefei1:/oracle/app/12.2/grid/perl/lib/5.22.0/Env.pm /usr/lib64/perl5/vendor_perl/

Log in to hefei1 (as root) and delete the node from the cluster:

crsctl delete node -n hefei3


Update the inventory on the node being removed

On node 3:

su - grid

cd $ORACLE_HOME/oui/bin

./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={hefei3}" -local

Deinstall the grid home on the node being removed

On node 3:

su - grid

$ORACLE_HOME/deinstall/deinstall -local

(Accept the prompts: press Enter where a default is offered and answer y where confirmation is requested.)

Update the inventory on the remaining nodes

On node 1:

su - grid

cd $ORACLE_HOME/oui/bin

./runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={hefei1,hefei2}" CRS=TRUE

Check

crsctl status res -t

olsnodes -s -t -n


Clean up the environment

Remove the node 3 records from DNS

Remove the node 3 entries from the hosts files
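As a rough sketch of the cleanup (assumptions: the remaining hosts still list hefei3 in /etc/hosts, and BIND serves the zones edited earlier): delete the hefei3 lines from /etc/hosts on hefei1 and hefei2, remove the hefei3/hefei3prv/hefei3vip A and PTR records from the zone files on hefeidns, then reload named.

# on hefei1 and hefei2 (sed keeps a .bak copy of the original file)
sed -i.bak '/hefei3/d' /etc/hosts

# on the DNS server, after editing the forward and reverse zone files
systemctl reload named
nslookup hefei3.hefeidb.com     # should now fail to resolve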


