Offline deployment of OpenStack 2024.1 controller node base services


mariadb 10.6

Offline download

apt-key adv --fetch-keys 'https://mariadb.org/mariadb_release_signing_key.asc'
add-apt-repository 'deb [arch=amd64] http://mirrors.aliyun.com/mariadb/repo/10.6/ubuntu jammy main'

apt-get --download-only install mariadb-server mariadb-client galera-4 rsync socat python3-pymysql

mkdir -p /controller/mariadb-galera
mv /var/cache/apt/archives/*.deb /controller/mariadb-galera/
dpkg -i /controller/mariadb-galera/*.deb
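
The packages are downloaded on a machine with Internet access, so in an offline deployment they still have to reach all three controllers. A minimal sketch, assuming controller1-3 are reachable over SSH (the same pattern applies to the rabbitmq, memcached, and haproxy/keepalived packages below):

# Copy the offline package directory to every controller
for host in controller1 controller2 controller3; do
    ssh ${host} 'mkdir -p /controller'
    scp -r /controller/mariadb-galera ${host}:/controller/
done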

On all three controller nodes

  • Configuration
vim /etc/mysql/my.cnf

[client]
default-character-set = utf8mb4

[mysqld]
# This node's IP; use the corresponding IP on the other nodes
bind-address = ip1
# Galera requires row-based replication, so the binlog format must be ROW
binlog_format = ROW
default_storage_engine = InnoDB
innodb_autoinc_lock_mode = 2
innodb_flush_log_at_trx_commit = 1
innodb_file_per_table = 1
max_connections = 4096
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci

# Galera settings
wsrep_on = ON
wsrep_provider = /usr/lib/galera/libgalera_smm.so
wsrep_cluster_address = "gcomm://ip1,ip2,ip3"
# Cluster name
wsrep_cluster_name = openstack_galera
# This node's IP; use the corresponding IP on the other nodes
wsrep_node_address = ip1
# This node's hostname; use the corresponding hostname on the other nodes
wsrep_node_name = controller1

wsrep_sst_method = rsync
# SST credentials; mind the password (the rsync SST method does not actually
# use them, but methods such as mariabackup do)
wsrep_sst_auth = "root:MYSQL_ROOT_PASS"

# Disable symbolic links to avoid assorted security risks
symbolic-links=0

[galera]
  • Permissions
chown -R mysql:mysql /var/lib/mysql
chmod 755 /var/lib/mysql

On the first controller node

# Bootstrap the cluster
galera_new_cluster

On the second and third controller nodes

systemctl start mariadb

On all three controller nodes

systemctl enable mariadb
  • Verification (run the hardening and SQL steps on one node only; Galera replicates them)
# Security hardening, including setting the root password; see: https://cloud.tencent.com/developer/article/2027903
mysql_secure_installation
# Log in
mysql -uroot -p
-- The three queries below should return 3, Primary, and ON respectively
SHOW STATUS LIKE 'wsrep_cluster_size';
SHOW STATUS LIKE 'wsrep_cluster_status';
SHOW STATUS LIKE 'wsrep_ready';
-- Create the health-check user with USAGE and PROCESS privileges.
-- HAProxy's mysql-check logs in without a password, so the user must be passwordless.
CREATE USER 'haproxy'@'ip1';
CREATE USER 'haproxy'@'ip2';
CREATE USER 'haproxy'@'ip3';

GRANT USAGE ON *.* TO 'haproxy'@'ip1';
GRANT USAGE ON *.* TO 'haproxy'@'ip2';
GRANT USAGE ON *.* TO 'haproxy'@'ip3';

GRANT PROCESS ON *.* TO 'haproxy'@'ip1';
GRANT PROCESS ON *.* TO 'haproxy'@'ip2';
GRANT PROCESS ON *.* TO 'haproxy'@'ip3';

FLUSH PRIVILEGES;
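
Since the check user is passwordless, confirm from each controller that the login actually works (ip1-ip3 are the same placeholders as above):

# Verify the passwordless health-check login against every cluster member
for ip in ip1 ip2 ip3; do
    mysql -h ${ip} -u haproxy -e 'SELECT 1;' && echo "${ip} OK"
done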

rabbitmq 3.13

Offline download

From the official RabbitMQ repository

# The 3.13 series requires Erlang 26; also download supported_erlang_version="1:26.2.5.10-1"
apt install --download-only rabbitmq-server=3.13.7-1

mkdir /controller/rmq
mv /var/cache/apt/archives/*.deb /controller/rmq/
dpkg -i /controller/rmq/*.deb

On all three controller nodes

vim /etc/rabbitmq/rabbitmq-env.conf
# This node's IP; use the corresponding IP on the other nodes
NODE_IP_ADDRESS=ip1
  • The Erlang cookie file must be identical on all three nodes
echo 'Os#123' | tee /var/lib/rabbitmq/.erlang.cookie
chmod 400 /var/lib/rabbitmq/.erlang.cookie
chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
systemctl start rabbitmq-server
systemctl enable rabbitmq-server

On one controller node

  • Enable the web management plugin
rabbitmq-plugins enable rabbitmq_management
  • Create an administrator and grant permissions
rabbitmqctl add_user OSadmin OSADMIN_PASS
# Permissions in rabbitmqctl order: configure, write, read
rabbitmqctl set_permissions OSadmin ".*" ".*" ".*"
# The administrator tag grants full management rights
rabbitmqctl set_user_tags OSadmin administrator
  • Create service users and grant permissions
# Keystone only uses the Oslo Messaging framework for optional notifications, so it needs no RabbitMQ user by default

rabbitmqctl add_user nova NOVA_PASS
rabbitmqctl set_permissions nova ".*" ".*" ".*"

rabbitmqctl add_user neutron NEUTRON_PASS
rabbitmqctl set_permissions neutron ".*" ".*" ".*"

rabbitmqctl add_user cinder CINDER_PASS
rabbitmqctl set_permissions cinder ".*" ".*" ".*"

rabbitmqctl add_user glance GLANCE_PASS
rabbitmqctl set_permissions glance ".*" ".*" ".*"

rabbitmqctl add_user placement PLACEMENT_PASS
rabbitmqctl set_permissions placement ".*" ".*" ".*"

rabbitmqctl add_user horizon HORIZON_PASS
rabbitmqctl set_permissions horizon ".*" ".*" ".*"

rabbitmqctl add_user masakari MASAKARI_PASS
rabbitmqctl set_permissions masakari ".*" ".*" ".*"

On the other two controller nodes

# Stop the RabbitMQ application (the Erlang node itself keeps running)
rabbitmqctl stop_app
# Join the cluster, with the first controller node as the seed
rabbitmqctl join_cluster rabbit@controller1

rabbitmqctl start_app
# Verify from any node
rabbitmqctl cluster_status

On the first controller node

# Enable classic queue mirroring (deprecated in the 3.13 series in favour of quorum queues)
rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all"}'
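
To confirm the policy took effect:

# The ha-all policy should be listed with pattern "^" and ha-mode "all"
rabbitmqctl list_policies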

memcached (client-side distribution)

Offline download

apt-get install --download-only memcached python3-memcache

mkdir /controller/memcache
mv /var/cache/apt/archives/*.deb /controller/memcache/
dpkg -i /controller/memcache/*.deb

On all three controller nodes

  • Deploy one instance per node
vim /etc/memcached.conf
# This node's IP; use the corresponding IP on the other nodes
-l ip1
# Maximum memory in MB
-m 1024
-u memcache
systemctl start memcached && systemctl enable memcached
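
memcached has no server-side clustering; each OpenStack service simply lists all three instances in its memcached_servers option and distributes keys client-side. A quick liveness check (assuming netcat is installed):

# Ask each instance for its stats; any response means it is listening
for ip in ip1 ip2 ip3; do
    printf 'stats\r\nquit\r\n' | nc ${ip} 11211 | head -n 3
done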

haproxy

Offline download

apt-get install --download-only haproxy keepalived

mkdir /controller/hk
mv /var/cache/apt/archives/*.deb /controller/hk/
dpkg -i /controller/hk/*.deb

On all three controller nodes

Base services

vim /etc/haproxy/haproxy.cfg

global
    log /dev/log    local0
    log /dev/log    local1 warning
    maxconn 8192
    user haproxy
    group haproxy
    daemon

defaults
    log     global
    mode    http
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client  60000
    timeout server  60000
    maxconn 8192

# MariaDB
frontend mysql_front
    bind <vip>:3306
    mode tcp
    default_backend mysql_back

backend mysql_back
    mode tcp
    # Controller-plane database access is mostly short-lived connections, so round-robin
    balance roundrobin
    # Log in to MariaDB as the passwordless haproxy user; mysql-check only verifies
    # that the login handshake succeeds -- HAProxy cannot run SQL against the server
    option mysql-check user haproxy
    server controller1 <ip1>:3306 check
    server controller2 <ip2>:3306 check
    server controller3 <ip3>:3306 check
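
Because mysql-check cannot inspect wsrep_local_state (healthy value: 4, Synced), a node that still accepts logins after dropping out of the Primary component would pass the check. A common workaround, not part of this setup, is the Percona clustercheck script exposed over HTTP on port 9200 through xinetd or a systemd socket; the backend would then become:

backend mysql_back
    mode tcp
    balance roundrobin
    # clustercheck returns HTTP 200 only when wsrep_local_state is 4 (Synced)
    option httpchk GET /
    http-check expect status 200
    server controller1 <ip1>:3306 check port 9200
    server controller2 <ip2>:3306 check port 9200
    server controller3 <ip3>:3306 check port 9200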

Component services

vim /etc/haproxy/haproxy.cfg

frontend keystone_front
    bind <vip>:5000
    default_backend keystone_back

backend keystone_back
    balance roundrobin
# Probe the service endpoint with an HTTP GET
    mode http
    option httpchk GET /v3/
    server controller1 <ip1>:5000 check
    server controller2 <ip2>:5000 check
    server controller3 <ip3>:5000 check

frontend glance_api_front
    bind <vip>:9292
    default_backend glance_api_back

backend glance_api_back
    balance roundrobin
    mode http
    option httpchk GET /
    server controller1 <ip1>:9292 check
    server controller2 <ip2>:9292 check
    server controller3 <ip3>:9292 check

frontend nova_api_front
    bind <vip>:8774
    default_backend nova_api_back

backend nova_api_back
    balance roundrobin
    mode http
    option httpchk GET /v2.1/
    server controller1 <ip1>:8774 check
    server controller2 <ip2>:8774 check
    server controller3 <ip3>:8774 check

frontend neutron_api_front
    bind <vip>:9696
    default_backend neutron_api_back

backend neutron_api_back
    balance roundrobin
    mode http
    option httpchk GET /
    server controller1 <ip1>:9696 check
    server controller2 <ip2>:9696 check
    server controller3 <ip3>:9696 check

frontend cinder_api_front
    bind <vip>:8776
    default_backend cinder_api_back

backend cinder_api_back
    balance roundrobin
    mode http
    option httpchk GET /v3/
    server controller1 <ip1>:8776 check
    server controller2 <ip2>:8776 check
    server controller3 <ip3>:8776 check

frontend placement_api_front
    bind <vip>:8778
    default_backend placement_api_back

backend placement_api_back
    balance roundrobin
    mode http
    option httpchk GET /
    server controller1 <ip1>:8778 check
    server controller2 <ip2>:8778 check
    server controller3 <ip3>:8778 check

frontend horizon_front
    bind <vip>:80
    mode http
    default_backend horizon_back

backend horizon_back
    balance roundrobin
    mode http
    option httpchk GET /
    server controller1 <ip1>:80 check
    server controller2 <ip2>:80 check
    server controller3 <ip3>:80 check

frontend masakari_api_front
    bind <vip>:15868
    default_backend masakari_api_back

backend masakari_api_back
    balance roundrobin
    mode http
    option httpchk GET /v1/
    server controller1 <ip1>:15868 check
    server controller2 <ip2>:15868 check
    server controller3 <ip3>:15868 check

Other

vim /etc/haproxy/haproxy.cfg

listen stats
    # This node's IP; use the corresponding IP on the other nodes
    bind ip1:8888
    mode http
    stats enable
    stats hide-version
    # Stats page path
    stats uri /haproxy_stats
    # Authentication realm shown in the login prompt
    stats realm Haproxy\ Statistics
    # Username and password; repeat the line to add more users
    stats auth haproxyOS:Os#123
    # Page refresh interval
    stats refresh 120s
    # Whether enable/disable actions are allowed from the page; FALSE keeps it read-only
    stats admin if FALSE
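
Before starting, it is worth validating the merged configuration; haproxy -c parses the file and reports problems without binding any ports:

# Syntax-check the configuration; prints "Configuration file is valid" on success
haproxy -c -f /etc/haproxy/haproxy.cfg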

Start

systemctl start haproxy && systemctl enable haproxy
# Browse to: http://ip:8888/haproxy_stats

keepalived

On the first controller node

vim /etc/keepalived/keepalived.conf

# Define the check script before the instance that references it
vrrp_script chk_haproxy {
    # Run the check every 60 seconds; on failure, lower the priority by 15
    # (110 - 15 = 95, below the backups' 100 and 90) so the VIP fails over
    # to a healthy node
    script "systemctl is-active haproxy"
    interval 60
    weight -15
}

vrrp_instance VI_1 {
    # controller1: master
    state MASTER

    # Management network interface
    interface bond0
    virtual_router_id 51

    # controller1
    priority 110

    advert_int 1
    authentication {
        auth_type PASS
        # Customizable
        auth_pass Os#123
    }

    virtual_ipaddress {
        <vip>
    }

    # Track HAProxy liveness via the chk_haproxy script defined above
    track_script {
        chk_haproxy
    }
}

On the second controller node

vim /etc/keepalived/keepalived.conf

vrrp_script chk_haproxy {
    script "systemctl is-active haproxy"
    interval 60
    weight -15
}

vrrp_instance VI_1 {
    # controller2: backup
    state BACKUP

    # Management network interface
    interface bond0
    virtual_router_id 51

    # controller2
    priority 100

    advert_int 1
    authentication {
        auth_type PASS
        auth_pass Os#123
    }

    virtual_ipaddress {
        <vip>
    }

    track_script {
        chk_haproxy
    }
}

On the third controller node

vim /etc/keepalived/keepalived.conf

vrrp_script chk_haproxy {
    script "systemctl is-active haproxy"
    interval 60
    weight -15
}

vrrp_instance VI_1 {
    # controller3: backup
    state BACKUP

    # Management network interface
    interface bond0
    virtual_router_id 51

    # controller3
    priority 90

    advert_int 1
    authentication {
        auth_type PASS
        auth_pass Os#123
    }

    virtual_ipaddress {
        <vip>
    }

    track_script {
        chk_haproxy
    }
}

On all three controller nodes

systemctl start keepalived && systemctl enable keepalived
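
Once keepalived is running, the VIP should be held by exactly one node (controller1 while it is healthy). A quick check on each node:

# The VIP should appear on bond0 only on the current master
ip addr show bond0 | grep '<vip>'
# Stopping haproxy on the master should move the VIP to the next-highest
# priority node within about one check interval (60 s here)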