1. Deploy a 3-node MongoDB replica set
1.1 Install MongoDB
Download the MongoDB installation package.
Download page: https://www.mongodb.com/try/download/community
Choose the version you need. Note that MongoDB 6.0 and later no longer ship the legacy mongo shell; use mongosh instead.
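The steps below use the 5.0.3 tarball for Ubuntu 20.04. The exact URL is best copied from the download page above; the one shown here follows MongoDB's usual tarball URL pattern and is an assumption, so verify it before use:

# Download the 5.0.3 tarball for Ubuntu 20.04 (verify the URL on the download page)
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-5.0.3.tgz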
# Prepare the environment
apt update && apt install -y libcurl4 openssl

# Create the user and group mongod runs as
useradd mongod

# Create the directory layout MongoDB needs
mkdir -p /mongodb/{conf,data,log}

# Create the YAML-format configuration file (versions before 3.0 used a plain-text format)
cat > /mongodb/conf/mongo.conf <<EOF
# Logging
systemLog:
  destination: file
  path: "/mongodb/log/mongodb.log"   # log file location
  logAppend: true                    # append to the log
# Storage
storage:
  dbPath: "/mongodb/data/"           # data directory
# Process control
processManagement:
  fork: true                         # run as a background daemon
# Networking
net:
  port: 27017                        # default port is 27017
  bindIp: 0.0.0.0                    # since MongoDB 3.6 the default is to listen on localhost only
# Replication
replication:
  replSetName: huangrepl             # replica set name; must be identical on all members
# Security
#security:
#  authorization: enabled            # enable username/password authentication (off by default)
EOF

# Verify the configuration file
cat /mongodb/conf/mongo.conf

chown -R mongod:mongod /mongodb/

# Unpack the binaries and create a convenience symlink
tar xf mongodb-linux-x86_64-ubuntu2004-5.0.3.tgz -C /usr/local
ln -s /usr/local/mongodb-linux-x86_64-ubuntu2004-5.0.3/ /usr/local/mongodb

# Create the systemd service unit file
cat > /lib/systemd/system/mongod.service <<EOF
[Unit]
Description=mongodb
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
User=mongod
Group=mongod
ExecStart=/usr/local/mongodb/bin/mongod --config /mongodb/conf/mongo.conf
ExecReload=/bin/kill -s HUP \$MAINPID
ExecStop=/usr/local/mongodb/bin/mongod --config /mongodb/conf/mongo.conf --shutdown
PrivateTmp=true
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
[Install]
WantedBy=multi-user.target
EOF

# Start the service
systemctl daemon-reload
systemctl enable --now mongod
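A quick sanity check on each node that mongod started and is listening (a minimal sketch using standard systemd, iproute2, and mongo shell commands):

# Service state and listening port
systemctl status mongod --no-pager
ss -tnlp | grep 27017
# Ping the server from the shell (use mongosh instead of mongo on 6.0+)
mongo --quiet --eval 'db.runCommand({ ping: 1 })'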
1.2 Deploy the replica set
# Install the mongo client tools
apt install mongodb-clients
Or install mongosh:
wget https://downloads.mongodb.com/compass/mongosh-1.6.0-linux-x64.tgz
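mongosh only needs to be unpacked and put on the PATH; a minimal sketch, assuming the tarball from the wget above sits in the current directory and extracts to mongosh-1.6.0-linux-x64/:

# Unpack mongosh and link the binary onto the PATH
tar xf mongosh-1.6.0-linux-x64.tgz -C /usr/local
ln -s /usr/local/mongosh-1.6.0-linux-x64/bin/mongosh /usr/local/bin/mongosh
mongosh --version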
# Define the replica set member list (run on one node)
mongo
> config ={ _id: 'huangrepl', members: [{_id: 0, host: '10.0.0.100:27017'},{_id: 1, host: '10.0.0.200:27017'},{_id: 3, host: '10.0.0.201:27017'}]}
# Print the variable as JSON
> printjson(config)
{
	"_id" : "huangrepl",
	"members" : [
		{
			"_id" : 0,
			"host" : "10.0.0.100:27017"
		},
		{
			"_id" : 1,
			"host" : "10.0.0.200:27017"
		},
		{
			"_id" : 3,
			"host" : "10.0.0.201:27017"
		}
	]
}
#初始化并启动复制集
> rs.initiate(config)
{ "ok" : 1 }
# After rs.initiate() the prompt becomes huangrepl:PRIMARY> on the elected primary.
# Check the replica set status:
huangrepl:PRIMARY> rs.status()
{
"set" : "huangrepl",
"date" : ISODate("2024-08-12T06:35:54.276Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3...................
"members" : [
{
"_id" : 0,
"name" : "10.0.0.100:27017", #
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 896,
"optime" : {
"ts" : Timestamp(1723444549, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2024-08-12T06:35:49Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1723444499, 1),
"electionDate" : ISODate("2024-08-12T06:34:59Z"),
"configVersion" : 1,
"configTerm" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "10.0.0.200:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 64,
"optime" : {
"ts" : Timestamp(1723444549, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1723444549, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2024-08-12T06:35:49Z"),
"optimeDurableDate" : ISODate("2024-08-12T06:35:49Z"),
"lastHeartbeat" : ISODate("2024-08-12T06:35:53.853Z"),
"lastHeartbeatRecv" : ISODate("2024-08-12T06:35:53.357Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "10.0.0.100:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 1
},
{
"_id" : 3,
"name" : "10.0.0.201:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 64,
"optime" : {
"ts" : Timestamp(1723444549, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1723444549, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2024-08-12T06:35:49Z"),
"optimeDurableDate" : ISODate("2024-08-12T06:35:49Z"),
"lastHeartbeat" : ISODate("2024-08-12T06:35:53.853Z"),
"lastHeartbeatRecv" : ISODate("2024-08-12T06:35:53.357Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "10.0.0.100:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 1
2. Deploy a 3-node Kafka cluster
Install Kafka
# ZooKeeper and Kafka are deployed in advance on three Ubuntu 20.04 nodes; the two services share the same hosts:
# node1: 10.0.0.100
# node2: 10.0.0.200
# node3: 10.0.0.201
# Note: in production, ZooKeeper and Kafka are usually deployed on separate hosts.
# Kafka requires a Java runtime, so install Java first.
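ZooKeeper itself is assumed to be already running on all three nodes. For reference only, a minimal zoo.cfg for such an ensemble might look like the sketch below (the dataDir path is illustrative; the ports are the ZooKeeper defaults):

# zoo.cfg (illustrative; not part of this Kafka setup)
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/data
clientPort=2181
server.1=10.0.0.100:2888:3888
server.2=10.0.0.200:2888:3888
server.3=10.0.0.201:2888:3888
# Each node also needs a matching myid file, e.g. on node1: echo 1 > /usr/local/zookeeper/data/myid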
# Install the Java environment
apt install openjdk-8-jdk -y
# Download the binary package
wget -P /usr/local/src https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.5.0/kafka_2.13-3.5.0.tgz
tar xf /usr/local/src/kafka_2.13-3.5.0.tgz -C /usr/local
ln -s /usr/local/kafka_2.13-3.5.0 /usr/local/kafka
# Add Kafka to the PATH
echo PATH=/usr/local/kafka/bin:'$PATH' >/etc/profile.d/kafka.sh
. /etc/profile.d/kafka.sh
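A quick check that Java is available and the Kafka scripts are now on the PATH (neither command needs a running broker):

java -version
kafka-topics.sh --version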
# Create the configuration file
vim /usr/local/kafka/config/server.properties
# broker id: an integer that must be unique within the cluster (use 2 and 3 on node2/node3)
broker.id=1

############################# Socket Server Settings #############################
# Kafka listener; default port is 9092. Use this node's own IP (node1 shown here).
listeners=PLAINTEXT://10.0.0.100:9092
# Number of threads handling network requests, default 3
num.network.threads=3
# Number of threads performing disk I/O, default 8
num.io.threads=8
# Socket send buffer size, default 100 KB
socket.send.buffer.bytes=102400
# Socket receive buffer size, default 100 KB
socket.receive.buffer.bytes=102400
# Maximum size of a single request the socket server accepts, default 100 MB
socket.request.max.bytes=104857600

############################# Log Basics #############################
# Directory where Kafka stores message data
log.dirs=/usr/local/kafka/data
# Default number of partitions per topic
num.partitions=1
# Replica count of 3, so leadership fails over automatically if the leader's replica is lost
default.replication.factor=3
# Threads per data directory for recovery at startup and flushing at shutdown
num.recovery.threads.per.data.dir=1

############################# Log Flush Policy #############################
# Number of messages to accumulate before flushing to disk
log.flush.interval.messages=10000
# Maximum time a message can sit before being flushed to disk, 1 s
log.flush.interval.ms=1000

############################# Log Retention Policy #############################
# Hours to retain log data before automatic deletion, default 7 days
log.retention.hours=168
# Size-based retention: data beyond this size is deleted, default 1 GB
#log.retention.bytes=1073741824
# Segment policy: a single segment file can be at most 1 GB; beyond that a new one is created
log.segment.bytes=1073741824
# How often to check whether data meets the deletion criteria, 300 s
log.retention.check.interval.ms=300000

############################# Zookeeper #############################
# ZooKeeper connection info; for a ZooKeeper cluster, separate entries with commas
zookeeper.connect=10.0.0.100:2181,10.0.0.200:2181,10.0.0.201:2181
# Timeout for connecting to ZooKeeper, 6 s
zookeeper.connection.timeout.ms=6000
# Allow topic deletion; defaults to false, in which case a topic is only marked for deletion
delete.topic.enable=true

# Create the data directory
mkdir /usr/local/kafka/data

# Create the systemd service file
cat > /lib/systemd/system/kafka.service <<EOF
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=simple
#Environment=JAVA_HOME=/data/server/java
#PIDFile=/usr/local/kafka/kafka.pid
ExecStart=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
ExecStop=/bin/kill -TERM \${MAINPID}
Restart=always
RestartSec=20
[Install]
WantedBy=multi-user.target
EOF

# Start the service
systemctl daemon-reload
systemctl enable --now kafka.service
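Before starting Kafka on node2 and node3, remember that server.properties differs per node: broker.id must be unique and listeners must carry that node's own IP. A minimal sed sketch for node2, assuming the file was copied unchanged from node1:

# On node2 (10.0.0.200): set its broker.id and listener address
sed -i 's/^broker.id=1/broker.id=2/' /usr/local/kafka/config/server.properties
sed -i 's#^listeners=PLAINTEXT://10.0.0.100:9092#listeners=PLAINTEXT://10.0.0.200:9092#' /usr/local/kafka/config/server.properties
# On node3 (10.0.0.201): use broker.id=3 and its own IP in the same way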
# Or start Kafka manually in daemon mode instead of using systemd
kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

# Verify
kafka-topics.sh --create --topic huang --bootstrap-server 10.0.0.100:9092 --partitions 3 --replication-factor 2
Checking the data directory on the other nodes shows the topic's partition data:
ls /usr/local/kafka/data/
cleaner-offset-checkpoint  huang-2  meta.properties  replication-offset-checkpoint
huang-0 log-start-offset-checkpoint recovery-point-offset-checkpoint
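Beyond checking the on-disk partition directories, a describe plus a quick produce/consume round trip verifies the cluster end to end (a minimal sketch using the standard console tools; huang is the topic created above):

# Partition leaders and replicas should be spread across the three brokers
kafka-topics.sh --describe --topic huang --bootstrap-server 10.0.0.100:9092
# Produce a test message (type a line, then Ctrl-C to quit)
kafka-console-producer.sh --topic huang --bootstrap-server 10.0.0.100:9092
# Consume it back from another broker
kafka-console-consumer.sh --topic huang --from-beginning --bootstrap-server 10.0.0.200:9092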