ubuntu22.04基于docker部署k8s1.29.x 高可用集群

发布于:2024-03-29 ⋅ 阅读:(16) ⋅ 点赞:(0)

参考:https://mp.weixin.qq.com/s/7i68jmvi2eo_6wlqYEOupQ

操作系统:Ubuntu 22.04

nginx代理配置

代理IP :192.168.0.10

vim /etc/nginx/nginx.conf

stream {
    # L4 (TCP) load balancing for the Kubernetes API servers; this proxy
    # (192.168.0.10:6443) is the cluster's controlPlaneEndpoint.
    upstream kube-apiserver {
        server 192.168.0.11:6443     max_fails=3 fail_timeout=30s;
        # Uncomment after the additional control-plane nodes join the cluster.
        #server 192.168.0.12:6443     max_fails=3 fail_timeout=30s;
        #server 192.168.0.13:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

设置主机名

# Run each command on its corresponding node only (not all three on one host).
hostnamectl set-hostname master1
hostnamectl set-hostname node1
hostnamectl set-hostname node2

配置hosts

vim /etc/hosts

192.168.0.11 master1
192.168.0.21 node1
192.168.0.22 node2

安装ipvsadm和ipset

ipset 主要用于支持 Service 的负载均衡和网络策略。它可以帮助实现高性能的数据包过滤和转发,以及对 IP 地址和端口进行快速匹配
ipvsadm 主要用于配置和管理 IPVS 负载均衡器,以实现 Service 的负载均衡

apt install -y ipset ipvsadm

加载内核模块

# Load the kernel modules Kubernetes needs: bridge netfilter (so iptables
# sees bridged pod traffic), overlayfs (container storage), connection
# tracking, and the IPVS schedulers used by kube-proxy in IPVS mode.
# NOTE: the legacy "ip_conntrack" alias no longer exists on Ubuntu 22.04's
# 5.15 kernel (modprobe would fail); nf_conntrack below replaces it.
modprobe br_netfilter
modprobe overlay
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack
# Verify the modules are actually loaded.
lsmod | grep conntrack
lsmod | grep br_netfilter
lsmod | grep overlay
lsmod | grep -E "ip_vs|nf_conntrack"

创建一个名为 kubernetes.conf 的内核配置文件,并写入以下配置内容

# Kernel parameters required by Kubernetes: let iptables see bridged
# traffic and enable IPv4 forwarding.
# BUGFIX: the heredoc operator is '<<' — the original single '<' made the
# shell try to read a file named "EOF", leaving the config file empty.
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
# net.ipv4.conf.all.send_redirects = 0
# net.ipv4.conf.default.send_redirects = 0
# net.netfilter.nf_conntrack_max = 1000000
# net.netfilter.nf_conntrack_tcp_timeout_established = 86400
# net.core.somaxconn = 1024
# net.ipv4.tcp_syncookies = 1
# net.ipv4.tcp_max_syn_backlog = 2048
# net.ipv4.tcp_synack_retries = 2
fs.file-max = 65536
vm.swappiness = 0
EOF
# Apply the settings immediately (they also persist across reboots).
sysctl -p /etc/sysctl.d/kubernetes.conf

系统引导时自动加载的内核模块

# Modules to auto-load at boot (read by systemd-modules-load.service).
# BUGFIX: nf_conntrack_ipv4 was renamed to nf_conntrack in kernel 4.19;
# on Ubuntu 22.04 (kernel 5.15) loading nf_conntrack_ipv4 fails at boot.
cat > /etc/modules-load.d/kubernetes.conf << EOF
# /etc/modules-load.d/kubernetes.conf
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
EOF
# No chmod needed: systemd only requires this file to be readable.

关闭swap分区

swapoff -a
# Also comment out the swap entry in /etc/fstab so swap stays disabled after reboot

容器运行时:docker-ce和cri-dockerd

安装docker-ce

# Install Docker CE from the Aliyun mirror.
# NOTE(review): apt-key is deprecated on Ubuntu 22.04; it still works here,
# but the signed-by keyring approach (as used for the Kubernetes repo) is
# the recommended replacement.
apt -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt update
# -y added so the install does not stop at an interactive prompt.
apt install -y docker-ce

vim /etc/docker/daemon.json

{
"registry-mirrors": [
  "https://ahed1oup.mirror.aliyuncs.com"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
  "max-size": "200m"
},
"storage-driver": "overlay2"  
}

启动docker

systemctl daemon-reload
systemctl start docker.service
systemctl enable docker.service

安装cri-dockerd

# cri-dockerd is the CRI shim that lets kubelet (1.24+) talk to Docker Engine.
curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.10/cri-dockerd_0.3.10.3-0.ubuntu-jammy_amd64.deb
# -y added so the install does not stop at an interactive prompt; the ./
# prefix makes apt treat it as a local .deb file.
apt install -y ./cri-dockerd_0.3.10.3-0.ubuntu-jammy_amd64.deb
systemctl status cri-docker.service

安装kubelet、kubeadm和kubectl

apt-get update && apt-get install -y apt-transport-https
# Ensure the keyring directory exists (absent on some minimal installs),
# otherwise the gpg --dearmor output path below fails.
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/deb/Release.key |    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/deb/ /" |    tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
systemctl enable kubelet.service
kubeadm                                1.29.3-1.1
kubectl                                1.29.3-1.1
kubelet                                1.29.3-1.1

整合kubelet和cri-dockerd(仅cri-dockerd需要)

配置cri-dockerd

vim /usr/lib/systemd/system/cri-docker.service
修改ExecStart配置

ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

重启cri-docker

systemctl daemon-reload && systemctl restart cri-docker.service

配置kubelet

# On Ubuntu/Debian the kubelet unit reads /etc/default/kubelet
# (/etc/sysconfig/kubelet is the RHEL-family path and is ignored here).
vim /etc/default/kubelet

# BUGFIX: --container-runtime=remote was removed in kubelet v1.27, so
# kubelet 1.29 fails to start if it is present. Only the endpoint flag is
# needed, and it must carry the unix:// scheme.
KUBELET_KUBEADM_ARGS="--container-runtime-endpoint=unix:///run/cri-dockerd.sock"
KUBELET_EXTRA_ARGS=

初始化第一个主节点

下载镜像

kubeadm config images list
kubeadm config images list --image-repository=registry.aliyuncs.com/google_containers

kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers --cri-socket unix:///run/cri-dockerd.sock

下载flannel部署文件
https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

    image: docker.io/flannel/flannel:v0.24.4
    image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
    image: docker.io/flannel/flannel:v0.24.4

生成kubeadmconfig文件

kubeadm config print init-defaults > kubeadm-config.yaml

修改kubeadmconfig文件
vim kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef   # example token; regenerate for production use
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.11   # this master's own IP (master1)
  bindPort: 6443
nodeRegistration:
  # Point kubeadm at the cri-dockerd socket (docker + cri-dockerd runtime).
  criSocket: unix:///run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  # Keep workloads off the control-plane node.
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# The nginx stream proxy in front of the apiservers — required for HA so
# joining nodes do not hard-code a single master's address.
controlPlaneEndpoint: 192.168.0.10:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.29.3
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16   # must match the flannel default network
scheduler: {}

初始化master节点

kubeadm init --config kubeadm-config.yaml --upload-certs

...
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:ff71ad6d9569107cf146a9a05d72ee0af13db3aa721bd8a9fe4e2b7e9b3bddb7 \
	--control-plane --certificate-key 4cbae11642fb00bd1bf97ab47384d222d501c3e3df5217e4a7472909f79dc933

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:ff71ad6d9569107cf146a9a05d72ee0af13db3aa721bd8a9fe4e2b7e9b3bddb7

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

设置kubectl命令补全

echo "source <(kubectl completion bash)" >> /etc/profile

部署flannel网络插件

kubectl apply -f kube-flannel.yml

node节点加入集群

kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:ff71ad6d9569107cf146a9a05d72ee0af13db3aa721bd8a9fe4e2b7e9b3bddb7 --cri-socket unix:///run/cri-dockerd.sock

查看结果

root@master1:~# kubectl get nodes 
NAME      STATUS   ROLES           AGE   VERSION
master1   Ready    control-plane   52m   v1.29.3
node1     Ready    <none>          49m   v1.29.3
node2     Ready    <none>          49m   v1.29.3
root@master1:~# kubectl -n kube-system get pod
NAME                              READY   STATUS    RESTARTS      AGE
coredns-857d9ff4c9-28j7b          1/1     Running   1 (18m ago)   52m
coredns-857d9ff4c9-mm892          1/1     Running   1 (18m ago)   52m
etcd-master1                      1/1     Running   1 (19m ago)   52m
kube-apiserver-master1            1/1     Running   1 (18m ago)   52m
kube-controller-manager-master1   1/1     Running   1 (19m ago)   52m
kube-proxy-5lcl5                  1/1     Running   1 (19m ago)   49m
kube-proxy-ft4lj                  1/1     Running   1 (19m ago)   52m
kube-proxy-js88l                  1/1     Running   1 (19m ago)   49m
kube-scheduler-master1            1/1     Running   1 (18m ago)   52m
本文含有隐藏内容,请 开通VIP 后查看