1. Environment:
Three servers (virtual machines):
192.168.0.60 k8s-master
192.168.0.61 k8s-node1
192.168.0.62 k8s-node2
Minimum configuration: 2 CPU cores, 2 GB RAM, 20 GB disk (with internet access)
Operating system: CentOS 7
Docker: 20+
Kubernetes: 1.23.6
2. Initial setup (required on both the master node and the worker nodes)
Disable the firewall and SELinux
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@localhost ~]# setenforce 0
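A quick check that SELinux is no longer enforcing (it should report Permissive now, and Disabled after the next reboot):
getenforce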
Disable swap
[root@localhost ~]# swapoff -a    # turn swap off for the current session
[root@localhost ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab    # disable it permanently
After disabling swap, be sure to reboot the virtual machine: reboot
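After the reboot, confirm swap is fully off:
free -m    # the Swap line should show 0 total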
Set the hostname and the hosts file on each machine according to the plan
[root@localhost ~]# hostnamectl set-hostname k8s-master
[root@localhost ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.60 k8s-master
192.168.0.61 k8s-node1
192.168.0.62 k8s-node2
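One way to append these entries on every node in a single step (a sketch; the IPs and hostnames follow the plan above):
cat >> /etc/hosts <<'EOF'
192.168.0.60 k8s-master
192.168.0.61 k8s-node1
192.168.0.62 k8s-node2
EOF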
Pass bridged IPv4 traffic to the iptables chains
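One way to create the configuration file shown below (a sketch; the br_netfilter module must be loaded for the net.bridge.* keys to exist):
modprobe br_netfilter    # load the bridge netfilter module
cat > /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF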
[root@k8s-master ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@k8s-master ~]# sysctl --system    # apply the settings
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
Time synchronization
[root@k8s-master ~]# yum install ntpdate -y
[root@k8s-master ~]# ntpdate time.windows.com
15 Jul 20:21:34 ntpdate[9046]: adjust time server 52.231.114.183 offset 0.007813 sec
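Optionally, a cron entry keeps the clocks in sync afterwards (a sketch, assuming ntpdate remains installed):
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate time.windows.com >/dev/null 2>&1") | crontab -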
3. Install Docker (required on both the master node and the worker nodes)
Detailed steps: follow the docker-ce installation guide on the Alibaba Cloud open-source mirror site.
If the installation reports an error:
Switch to the Aliyun docker-ce repo:
[root@k8s-master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Reinstall:
[root@k8s-master ~]# sudo yum -y install docker-ce
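After Docker is installed, enable and start it, and confirm the version satisfies the 20+ requirement from the environment list:
systemctl enable --now docker
docker --version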
Add the Aliyun Kubernetes yum repo (it is used to install kubeadm/kubelet/kubectl in the next step):
[root@k8s-master ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@k8s-master ~]#
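One way to write this repo file in a single step (contents identical to the cat output above):
cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF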
The installed Docker version must be compatible with the Kubernetes version.
4. Install kubeadm, kubelet, and kubectl (required on both the master node and the worker nodes)
[root@k8s-master ~]# yum install -y kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6
[root@k8s-master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
Configure the Docker registry mirror accelerator (the daemon.json shown below) first, then reload and restart Docker:
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl restart docker
Registry mirror accelerator configuration:
[root@k8s-master ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": [ "https://XXXXXXXmirror.swr.myhuaweicloud.com" ],
"exec-opts": [ "native.cgroupdriver=systemd" ]
}
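After restarting Docker with this configuration, it is worth confirming that the cgroup driver matches what kubelet expects (kubeadm 1.23 defaults to systemd):
docker info | grep -i "cgroup driver"
# should print: Cgroup Driver: systemd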
*** Everything above must be configured on both the master node and the worker nodes ***
Master node only: initialize the control plane with kubeadm
kubeadm init \
--apiserver-advertise-address=192.168.0.60 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.23.6 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
After the initialization completes, run the following to configure kubectl:
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady control-plane,master 17h v1.23.6
# Joining worker nodes to the cluster
On the master node, look up the join parameters (if you no longer have the command printed by kubeadm init):
List the existing tokens:
kubeadm token list
If the token has expired, create a new one:
kubeadm token create
Get the discovery-token-ca-cert-hash:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
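Alternatively, a single command on the master prints a complete, ready-to-use join command with a fresh token and the correct hash:
kubeadm token create --print-join-command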
kubeadm join 192.168.0.60:6443 --token 28upng.6xy4zzqynuzbcdnl --discovery-token-ca-cert-hash sha256:7b3ccc0d117e50c66c6e1061cb198a6d9b8368f8dfefdfc976f1fa575a2afd05
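Run the kubeadm join command above on each worker node, then verify from the master that all nodes have registered (they stay NotReady until the network plugin in step 5 is deployed):
kubectl get nodes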
5. Deploy the network plugin
Until a network plugin is deployed, the two coredns pods remain in Pending state (and the nodes show NotReady).
Download the https://calico-v3-25.netlify.app/archive/v3.25/manifests/calico.yaml file to the local machine.
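A sketch of downloading the manifest with wget (curl -O works as well); depending on the environment, the CALICO_IPV4POOL_CIDR value in calico.yaml may also need to be uncommented and set to match --pod-network-cidr (10.244.0.0/16 here):
wget https://calico-v3-25.netlify.app/archive/v3.25/manifests/calico.yaml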
[root@k8s-master k8s]# grep image calico.yaml
image: docker.io/calico/cni:v3.25.0
imagePullPolicy: IfNotPresent
image: docker.io/calico/cni:v3.25.0
imagePullPolicy: IfNotPresent
image: docker.io/calico/node:v3.25.0
imagePullPolicy: IfNotPresent
image: docker.io/calico/node:v3.25.0
imagePullPolicy: IfNotPresent
image: docker.io/calico/kube-controllers:v3.25.0
imagePullPolicy: IfNotPresent
[root@k8s-master k8s]#
[root@k8s-master k8s]# sed -i 's#docker.io/##g' calico.yaml
[root@k8s-master k8s]#
[root@k8s-master k8s]# grep image calico.yaml
image: calico/cni:v3.25.0
imagePullPolicy: IfNotPresent
image: calico/cni:v3.25.0
imagePullPolicy: IfNotPresent
image: calico/node:v3.25.0
imagePullPolicy: IfNotPresent
image: calico/node:v3.25.0
imagePullPolicy: IfNotPresent
image: calico/kube-controllers:v3.25.0
imagePullPolicy: IfNotPresent
[root@k8s-master k8s]# kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created
[root@k8s-master k8s]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-cd8566cf-5s2dv 1/1 Running 0 36s
calico-node-5xdh4 1/1 Running 0 36s
calico-node-hj9jp 0/1 Running 0 36s
calico-node-wrffj 0/1 Running 0 36s
coredns-6d8c4cb4d-ncvcw 1/1 Running 0 19h
coredns-6d8c4cb4d-nh9fw 1/1 Running 0 19h
etcd-k8s-master 1/1 Running 1 19h
kube-apiserver-k8s-master 1/1 Running 1 19h
kube-controller-manager-k8s-master 1/1 Running 1 19h
kube-proxy-hlvqx 1/1 Running 0 54m
kube-proxy-n2q9m 1/1 Running 0 19h
kube-proxy-xpldw 1/1 Running 1 54m
kube-scheduler-k8s-master 1/1 Running 1 19h
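The two calico-node pods showing 0/1 above have only just started; they normally become 1/1 within a few minutes. Progress can be watched with:
kubectl get pods -n kube-system -w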
Test
# Create a deployment
[root@k8s-master k8s]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
# Expose the port
[root@k8s-master k8s]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
# View pod and service information
[root@k8s-master k8s]# kubectl get pod,svc
NAME READY STATUS RESTARTS AGE
pod/nginx-85b98978db-98dx9 1/1 Running 0 9s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 19h
service/nginx NodePort 10.111.173.174 <none> 80:30746/TCP 4s
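As a final check, the nginx welcome page should be reachable on any node IP at the NodePort shown above (30746 in this run; the port is assigned randomly):
curl http://192.168.0.60:30746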