A look back at last year's project:
1. Topic
Build a highly available load-balancing architecture based on haproxy and Keepalived that fails over automatically when haproxy or a node goes down. Use 2 DS (director) servers and 3 RS (real) servers. Keepalived works alongside haproxy: its health-check mechanism performs a HealthCheck on the DS and RS nodes, and its VRRP support provides the master/backup roles for the two DS nodes.
2. Host Plan
Hostname | IP address      | Installed software
---------|-----------------|---------------------------------
nginx1   | 192.168.111.10  | nginx, nfs-utils
nginx2   | 192.168.111.20  | nginx, nfs-utils, chrony
nginx3   | 192.168.111.30  | nginx, nfs-utils, chrony
nfs      | 192.168.111.40  | nfs-utils, chrony
dns      | 192.168.111.50  | bind
haproxy  | 192.168.111.100 | haproxy, keepalived
haproxy2 | 192.168.111.101 | haproxy, keepalived
VIP      | 192.168.111.200 | (virtual IP, carried by keepalived)
Note: all required packages are assumed to be installed already.
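If any of them are missing, they can be pulled in with dnf on each host according to the table above (a sketch, assuming the distribution's standard repositories):
[root@nginx1 ~]# dnf install -y nginx nfs-utils
[root@nginx2 ~]# dnf install -y nginx nfs-utils chrony
[root@nfs ~]# dnf install -y nfs-utils chrony
[root@dns ~]# dnf install -y bind
[root@haproxy ~]# dnf install -y haproxy keepalived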
3. Configure NFS and chrony
[root@nfs ~]# mkdir -p /nfs/data    # create the export directory
[root@nfs ~]# cat /etc/exports
/nfs/data 192.168.111.0/24(rw)
[root@nfs ~]# exportfs -r    # re-export
[root@nfs ~]# cat /etc/chrony.conf
...
# Allow NTP client access from local network.
allow 192.168.111.0/24
...
[root@nfs ~]# systemctl restart nfs-server.service
[root@nfs ~]# systemctl restart chronyd
[root@nfs ~]# showmount -e 192.168.111.40
Export list for 192.168.111.40:
/nfs/data 192.168.111.0/24
[root@nginx1 ~]# mount -t nfs 192.168.111.40:/nfs/data /usr/share/nginx/html/
[root@nginx1 ~]# systemctl restart nginx
[root@nginx2 ~]# mount -t nfs 192.168.111.40:/nfs/data /usr/share/nginx/html/
[root@nginx2 ~]# systemctl restart nginx
[root@nginx3 ~]# mount -t nfs 192.168.111.40:/nfs/data /usr/share/nginx/html/
[root@nginx3 ~]# systemctl restart nginx
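These mounts do not survive a reboot. A sketch for making one persistent, plus a chrony sanity check (the fstab line and the client-side server directive are my additions, not part of the original steps):
[root@nginx1 ~]# echo '192.168.111.40:/nfs/data /usr/share/nginx/html nfs defaults,_netdev 0 0' >> /etc/fstab
[root@nginx2 ~]# grep '^server' /etc/chrony.conf    # clients should point at the NFS host, e.g. 'server 192.168.111.40 iburst'
[root@nginx2 ~]# chronyc sources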
4. Configure DNS
[root@dns ~]# cat /etc/named.conf
options {
        listen-on port 53 { 192.168.111.50; };
        directory "/var/named";
};
zone "haha.com" IN {
        type master;
        file "haha.com";
};
[root@dns ~]# cat /var/named/haha.com
$TTL 1D
@       IN SOA  ns.haha.com. admin.haha.com. (
                0       ; serial
                1D      ; refresh
                1H      ; retry
                1W      ; expire
                3H )    ; minimum
        IN NS   ns
ns      IN A    192.168.111.50
www     IN A    192.168.111.200
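After restarting named, resolution can be spot-checked with dig (assuming bind-utils is available); www.haha.com should resolve to the VIP, 192.168.111.200:
[root@dns ~]# systemctl restart named
[root@dns ~]# dig @192.168.111.50 www.haha.com +short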
5. Configure keepalived
1. Master
[root@haproxy ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_MASTER
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 2
    weight -20
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.111.200
    }
    track_script {
        check_haproxy
    }
}
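Once keepalived is running, a quick way to confirm the master is actually emitting VRRP advertisements on ens160 (VRRP is IP protocol 112; this capture is a diagnostic sketch, not part of the original steps):
[root@haproxy ~]# tcpdump -i ens160 -nn 'ip proto 112'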
2. Backup
[root@haproxy2 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_BACKUP
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 2
    weight -20
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.111.200
    }
    track_script {
        check_haproxy
    }
}
Note: virtual_router_id must be the same on both nodes, and both nodes use the same health-check shell script:
#!/bin/bash
# Health check used by keepalived's track_script: if haproxy is down,
# try one restart; if that also fails, stop keepalived so the VIP
# fails over to the backup node.
SERVICE="haproxy"
if systemctl is-active --quiet "$SERVICE"; then
    echo "haproxy is active"
else
    systemctl restart "$SERVICE"
    sleep 2
    if systemctl is-active --quiet "$SERVICE"; then
        echo "haproxy restarted successfully"
    else
        systemctl stop keepalived
        echo "keepalived stopped"
    fi
fi
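The script has to be executable on both DS nodes, otherwise keepalived cannot run it:
[root@haproxy ~]# chmod +x /etc/keepalived/check_haproxy.sh
[root@haproxy2 ~]# chmod +x /etc/keepalived/check_haproxy.sh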
6. Configure haproxy (the configuration is identical on both DS nodes)
[root@haproxy ~]# cat /etc/haproxy/haproxy.cfg
... (global and defaults sections unchanged) ...
frontend main
    bind *:80
    #acl url_static path_beg -i /static /images /javascript /stylesheets
    #acl url_static path_end -i .jpg .gif .png .css .js
    #use_backend static if url_static
    default_backend nginx
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance roundrobin
    server static 127.0.0.1:4331 check
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend nginx
    balance roundrobin
    server nginx1 192.168.111.10:80 check inter 2000 fall 3 rise 3 weight 1
    server nginx2 192.168.111.20:80 check inter 2000 fall 3 rise 3 weight 1
    server nginx3 192.168.111.30:80 check inter 2000 fall 3 rise 3 weight 1
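Optionally, a stats page makes the backend health checks visible in a browser; a minimal sketch to append to haproxy.cfg (the 8080 port and /stats URI are arbitrary choices of mine, not from the original config):
listen stats
    bind *:8080
    stats enable
    stats uri /stats
    stats refresh 5s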
Start the services:
[root@haproxy ~]# systemctl restart keepalived.service
[root@haproxy ~]# systemctl restart haproxy.service
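To keep the stack reboot-safe, it is also worth enabling both services at boot on both DS nodes:
[root@haproxy ~]# systemctl enable haproxy.service keepalived.service
[root@haproxy2 ~]# systemctl enable haproxy.service keepalived.service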
7. Test high availability
[root@haproxy ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:9c:b2:af brd ff:ff:ff:ff:ff:ff
altname enp3s0
inet 192.168.111.100/24 brd 192.168.111.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet 192.168.111.200/32 scope global ens160
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe9c:b2af/64 scope link noprefixroute
valid_lft forever preferred_lft forever
Stop haproxy and check whether the keepalived track script brings it back up:
[root@haproxy ~]# systemctl stop haproxy.service
[root@haproxy ~]# systemctl is-active haproxy.service
inactive
[root@haproxy ~]# systemctl is-active haproxy.service
active
The second is-active check reports active again because check_haproxy.sh restarted the service. Refreshing the browser, the site is still reachable with the same result.
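A stronger failover test is to stop keepalived on the master outright and watch the VIP from a client (the curl loop below is an illustrative sketch):
[root@haproxy ~]# systemctl stop keepalived.service
# on any client, e.g. the nfs host:
[root@nfs ~]# while true; do curl -s --max-time 1 http://192.168.111.200/ >/dev/null && echo up || echo down; sleep 1; done
After a few advert intervals the VIP should move to haproxy2 (check with 'ip addr show ens160' there), and the curl loop should report up again.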