前序
在接触Virtual PLCnext Control的时候,我想过好几次如何将它运行在k8s上,由于对k8s的熟悉程度不够,跌跌撞撞尝试了很久,终于把vPLC部署在单机版的k8s上了。(此教程仅为demo阶段,此教程仅为demo阶段,此教程仅为demo阶段,仅供参考与交流)
- 环境:Ubuntu24.04 Preempt-RT
- 机器:i5-12450H
- 测试1:OPC-UA ✅
- 测试2:Profinet Master/device✅
- 测试3:PLCnext App✅
k8s VPLCnext demo需求
- 内核5.19以上
- 系统支持systemd
- Preempt-RT补丁
- 内核选项和ubuntu24.04基本一致(比如开启macvlan、veth、cgroup等选项)
系统
这次用的操作系统是Ubuntu24.04,为了确保实时性,还需要打实时补丁。再安装好Ubuntu24.04之后,运行下列命令
sudo apt update
sudo apt-get install linux-realtime
# 注:若仓库中找不到 linux-realtime 包,Ubuntu 24.04 的实时内核可能需要通过 Ubuntu Pro 启用:sudo pro enable realtime-kernel(请按实际环境确认)
sudo nano /etc/default/grub
# 注释hidden
# 设置timeout为10
GRUB_DEFAULT=0
# GRUB_TIMEOUT_STYLE=hidden
GRUB_TIMEOUT=10
GRUB_DISTRIBUTOR=`( . /etc/os-release; echo ${NAME:-Ubuntu} ) 2>/dev/null || echo Ubuntu`
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"
GRUB_CMDLINE_LINUX=""
sudo update-grub
sudo nano /boot/grub/grub.cfg
# 将linux-realtime改成默认启动,找到对应的启动项,将该启动项替换到export linux_gfx_mode后面
if [ "${recordfail}" != 1 ]; then
if [ -e ${prefix}/gfxblacklist.txt ]; then
if [ ${grub_platform} != pc ]; then
set linux_gfx_mode=keep
elif hwmatch ${prefix}/gfxblacklist.txt 3; then
if [ ${match} = 0 ]; then
set linux_gfx_mode=keep
else
set linux_gfx_mode=text
fi
else
set linux_gfx_mode=text
fi
else
set linux_gfx_mode=keep
fi
else
set linux_gfx_mode=text
fi
export linux_gfx_mode
menuentry 'Ubuntu, with Linux 6.8.1-1015-realtime' --class ubuntu --class gnu-linux --class gnu --class os $menuentry_id_option 'gnulinux-6.8.1-1015-realtime-advanced-d712360d-c47b-4226-a398-5ee5569a179c' {
recordfail
load_video
gfxmode $linux_gfx_mode
insmod gzio
if [ x$grub_platform = xxen ]; then insmod xzio; insmod lzopio; fi
insmod part_gpt
insmod ext2
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d712360d-c47b-4226-a398-5ee5569a179c
else
search --no-floppy --fs-uuid --set=root d712360d-c47b-4226-a398-5ee5569a179c
fi
echo 'Loading Linux 6.8.1-1015-realtime ...'
linux /boot/vmlinuz-6.8.1-1015-realtime root=UUID=d712360d-c47b-4226-a398-5ee5569a179c ro quiet splash $vt_handoff
echo 'Loading initial ramdisk ...'
initrd /boot/initrd.img-6.8.1-1015-realtime
}
安装好后重启系统,确保是实时内核
root@plcnext-VMware-Virtual-Platform:/boot/grub# uname -a
Linux plcnext-VMware-Virtual-Platform 6.8.1-1015-realtime #16-Ubuntu SMP PREEMPT_RT Wed Jan 15 21:03:54 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
安装kubectl & kubeadm & kubelet
sudo apt-get update
sudo apt-get install containerd
sudo mkdir /etc/containerd
sudo nano /etc/containerd/config.toml
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
disabled_plugins = []
required_plugins = ["io.containerd.grpc.v1.cri"]
oom_score = -999
# Alibaba Cloud Vendor enhancement configuration
# imports = ["/etc/containerd/alibabacloud.toml"]
[grpc]
address = "/run/containerd/containerd.sock"
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
address = "/run/containerd/debug.sock"
level = "info"
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry-cn-shanghai.ack.aliyuncs.com/acs/pause:3.9"
ignore_image_defined_volumes = true
[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "overlayfs"
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
privileged_without_host_devices = false
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
NoPivotRoot = false
NoNewKeyring = false
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/cert.d"
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
sudo apt install curl gpg
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
sudo swapoff -a # off swap
sudo sed -i '/swap/d' /etc/fstab # off swap need reboot system
reboot
重启后,再执行
sudo su
bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo modprobe br_netfilter
sudo sysctl --system
# 修改过 /etc/containerd/config.toml 之后,需要先重启 containerd 再初始化集群
sudo systemctl enable --now containerd
sudo systemctl restart containerd
kubeadm init --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
安装Flannel
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# if download failed like ImagePullError, delete it and it will auto build again
# 如果下载失败了,删除pod后,会自动重新下载
kubectl get pods -n kube-flannel
kubectl delete pod kube-flannel-ds-<xxxx> -n kube-flannel
安装Multus
kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml
确保都在运行
root@plcnext-virtual-machine:/home/plcnext/vplc_k8s# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-mz5xf 1/1 Running 5 (42d ago) 42d
kube-system kube-multus-ds-rk2w6 1/1 Running 0 19h
kubectl get pods -n kube-system | grep multus
kube-multus-ds-rk2w6 1/1 Running 0 19h
为vPLC创建Macvlan NIC
nano macvlan-net.yaml
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-eth1
annotations:
k8s.v1.cni.cncf.io/resourceName: macvlan.network.k8s.io/eth1
spec:
config: '{
"cniVersion": "0.3.1",
"name": "macvlan-eth1",
"type": "macvlan",
"master": "ens33",
"mode": "bridge",
"ipam": {
"type": "static",
"addresses": [
{
"address": "192.168.10.152/24",
"gateway": "192.168.10.1"
}
],
"routes": [
{
"dst": "0.0.0.0/0",
"gw": "192.168.10.1"
}
],
"dns": {
"nameservers": ["8.8.8.8", "8.8.4.4"]
}
}
}'
应用macvlan NIC
kubectl apply -f macvlan-net.yaml
检查是否正常工作
root@: kubectl get network-attachment-definitions.k8s.cni.cncf.io -A
NAMESPACE NAME AGE
default macvlan-eth1 19h
root@: kubectl describe network-attachment-definitions.k8s.cni.cncf.io macvlan-eth1 -n default
Name: macvlan-eth1
Namespace: default
Labels: <none>
Annotations: k8s.v1.cni.cncf.io/resourceName: macvlan.network.k8s.io/eth1
API Version: k8s.cni.cncf.io/v1
Kind: NetworkAttachmentDefinition
Metadata:
Creation Timestamp: 2025-05-13T05:19:50Z
Generation: 1
Resource Version: 43216
UID: e1babe8b-d9f0-4646-8013-9d57896444f3
Spec:
Config: { "cniVersion": "0.3.1", "name": "macvlan-eth1", "type": "macvlan", "master": "ens33", "mode": "bridge", "ipam": { "type": "static", "addresses": [ { "address": "192.168.10.152/24", "gateway": "192.168.10.1" } ], "routes": [ { "dst": "0.0.0.0/0", "gw": "192.168.10.1" } ], "dns": { "nameservers": ["8.8.8.8", "8.8.4.4"] } } }
Events: <none>
创建VPLC pod yaml
nano pod.yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
io.podman.annotations.ulimit: rtprio=-1:-1,nofile=1024:10024
k8s.v1.cni.cncf.io/networks: |-
[{
"name": "macvlan-eth1",
"interface": "eth1"
}]
name: my-pod
spec:
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
containers:
- env:
- name: HOSTNAME
value: VL3-PLCNEXT-x86-64
image: registry.k8s.io/vplcnextcontrol1000-x86-64:78
name: vplcnextcontrol1000
command: ["/usr/lib/systemd/systemd"]
imagePullPolicy: Never
securityContext:
privileged: true
volumeMounts:
- mountPath: /dev/fuse
name: fuse-device
- mountPath: /opt/plcnext/config/
name: env1-config-pvc
- mountPath: /opt/plcnext/apps
name: env1-apps-pvc
- mountPath: /opt/plcnext/data/
name: env1-data-pvc
- mountPath: /opt/plcnext/projects/
name: env1-projects-pvc
- mountPath: /opt/plcnext/logs
name: env1-logs-pvc
hostname: vl3-plcnext-x86
volumes:
- name: fuse-device
hostPath:
path: /dev/fuse
type: CharDevice
- name: env1-config-pvc
persistentVolumeClaim:
claimName: env1-config-pvc
- name: env1-apps-pvc
persistentVolumeClaim:
claimName: env1-apps-pvc
- name: env1-data-pvc
persistentVolumeClaim:
claimName: env1-data-pvc
- name: env1-projects-pvc
persistentVolumeClaim:
claimName: env1-projects-pvc
- name: env1-logs-pvc
persistentVolumeClaim:
claimName: env1-logs-pvc
更新vplc镜像到本地k8s.io
#load image
ctr -n k8s.io images import vplcnextcontrol1000-x86-64-2025.0.0-25.0.0.78.tar
# change tag
ctr -n k8s.io images tag \
import-2025-05-14:vplcnextcontrol1000-x86-64 \
registry.k8s.io/vplcnextcontrol1000-x86-64:78
# delete old tag
ctr -n k8s.io images rm import-2025-05-14:vplcnextcontrol1000-x86-64
检查镜像
root@plcnext-VMware-Virtual-Platform:/etc/cni/net.d# ctr -n=k8s.io images list
REF TYPE DIGEST SIZE PLATFORMS LABELS
ghcr.io/flannel-io/flannel-cni-plugin:v1.6.2-flannel1 application/vnd.oci.image.index.v1+json sha256:f1812994f0edbcb5bb5ccb63be2147ba6ad10e1faaa7ca9fcdad4f441739d84f 4.6 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/riscv64,linux/s390x io.cri-containerd.image=managed
ghcr.io/flannel-io/flannel-cni-plugin@sha256:f1812994f0edbcb5bb5ccb63be2147ba6ad10e1faaa7ca9fcdad4f441739d84f application/vnd.oci.image.index.v1+json sha256:f1812994f0edbcb5bb5ccb63be2147ba6ad10e1faaa7ca9fcdad4f441739d84f 4.6 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/riscv64,linux/s390x io.cri-containerd.image=managed
ghcr.io/flannel-io/flannel:v0.26.7 application/vnd.oci.image.index.v1+json sha256:7f471907fa940f944867270de4ed78121b8b4c5d564e17f940dc787cb16dea82 31.5 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/riscv64,linux/s390x io.cri-containerd.image=managed
ghcr.io/flannel-io/flannel@sha256:7f471907fa940f944867270de4ed78121b8b4c5d564e17f940dc787cb16dea82 application/vnd.oci.image.index.v1+json sha256:7f471907fa940f944867270de4ed78121b8b4c5d564e17f940dc787cb16dea82 31.5 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/riscv64,linux/s390x io.cri-containerd.image=managed
ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot-thick application/vnd.docker.distribution.manifest.list.v2+json sha256:97792a778f41c6fcc31fc08615f2c0f5fd89ae4d9f402e78528c76ec5c2c5fd1 170.9 MiB linux/amd64,linux/arm/v7,linux/arm/v8,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
ghcr.io/k8snetworkplumbingwg/multus-cni@sha256:97792a778f41c6fcc31fc08615f2c0f5fd89ae4d9f402e78528c76ec5c2c5fd1 application/vnd.docker.distribution.manifest.list.v2+json sha256:97792a778f41c6fcc31fc08615f2c0f5fd89ae4d9f402e78528c76ec5c2c5fd1 170.9 MiB linux/amd64,linux/arm/v7,linux/arm/v8,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry-cn-shanghai.ack.aliyuncs.com/acs/pause:3.9 application/vnd.docker.distribution.manifest.list.v2+json sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 314.0 KiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed,io.cri-containerd.pinned=pinned
registry-cn-shanghai.ack.aliyuncs.com/acs/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 application/vnd.docker.distribution.manifest.list.v2+json sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 314.0 KiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed,io.cri-containerd.pinned=pinned
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.10.1 application/vnd.docker.distribution.manifest.list.v2+json sha256:90d3eeb2e2108a14fe2ecbef1bc1b5607834335d99c842a377f338aade9da028 15.4 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/mips64le,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns@sha256:90d3eeb2e2108a14fe2ecbef1bc1b5607834335d99c842a377f338aade9da028 application/vnd.docker.distribution.manifest.list.v2+json sha256:90d3eeb2e2108a14fe2ecbef1bc1b5607834335d99c842a377f338aade9da028 15.4 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/mips64le,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.15-0 application/vnd.docker.distribution.manifest.list.v2+json sha256:d0e1bc44b9bc37d0b63612e1a11b43e07bc650ffc0545d58f7991607460974d4 54.3 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd@sha256:d0e1bc44b9bc37d0b63612e1a11b43e07bc650ffc0545d58f7991607460974d4 application/vnd.docker.distribution.manifest.list.v2+json sha256:d0e1bc44b9bc37d0b63612e1a11b43e07bc650ffc0545d58f7991607460974d4 54.3 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.28.15 application/vnd.docker.distribution.manifest.list.v2+json sha256:ad05900683464980ef45b957b8da61dc33eaefe4df2318abc7bfe9b13a46cbb8 32.8 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver@sha256:ad05900683464980ef45b957b8da61dc33eaefe4df2318abc7bfe9b13a46cbb8 application/vnd.docker.distribution.manifest.list.v2+json sha256:ad05900683464980ef45b957b8da61dc33eaefe4df2318abc7bfe9b13a46cbb8 32.8 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.28.15 application/vnd.docker.distribution.manifest.list.v2+json sha256:2d54389718888db44390e85df3201f356d213be2df06365e782ba2ab0154ee42 31.8 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager@sha256:2d54389718888db44390e85df3201f356d213be2df06365e782ba2ab0154ee42 application/vnd.docker.distribution.manifest.list.v2+json sha256:2d54389718888db44390e85df3201f356d213be2df06365e782ba2ab0154ee42 31.8 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.28.15 application/vnd.docker.distribution.manifest.list.v2+json sha256:6dd470206000214b25123febf230af297375469037dc18619fd75a5528cff215 27.0 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy@sha256:6dd470206000214b25123febf230af297375469037dc18619fd75a5528cff215 application/vnd.docker.distribution.manifest.list.v2+json sha256:6dd470206000214b25123febf230af297375469037dc18619fd75a5528cff215 27.0 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.28.15 application/vnd.docker.distribution.manifest.list.v2+json sha256:50bf0089b068e77c7b57d0c225840aa90b020d16bc9d76d8463fb34597e9509b 17.7 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler@sha256:50bf0089b068e77c7b57d0c225840aa90b020d16bc9d76d8463fb34597e9509b application/vnd.docker.distribution.manifest.list.v2+json sha256:50bf0089b068e77c7b57d0c225840aa90b020d16bc9d76d8463fb34597e9509b 17.7 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9 application/vnd.docker.distribution.manifest.list.v2+json sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 314.0 KiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed
registry.cn-hangzhou.aliyuncs.com/google_containers/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 application/vnd.docker.distribution.manifest.list.v2+json sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 314.0 KiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed
registry.k8s.io/vplcnextcontrol1000-x86-64:78 application/vnd.oci.image.manifest.v1+json sha256:f3f4c9ee6a734e4498b2216739c542cfe8b5981215421aab8007cbf840da7e59 224.7 MiB linux/amd64 io.cri-containerd.image=managed
sha256:10541d8af03f40fae257735edd69b6c5dd0084bb9796649409ac7b5660705148 application/vnd.docker.distribution.manifest.list.v2+json sha256:2d54389718888db44390e85df3201f356d213be2df06365e782ba2ab0154ee42 31.8 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
sha256:29bd7bbb1c041945b8111bf2ecb5d4e656d56bdc3db7d5119b52fa5797780552 application/vnd.oci.image.manifest.v1+json sha256:f3f4c9ee6a734e4498b2216739c542cfe8b5981215421aab8007cbf840da7e59 224.7 MiB linux/amd64 io.cri-containerd.image=managed
sha256:2e96e5913fc06e3d26915af3d0f2ca5048cc4b6327e661e80da792cbf8d8d9d4 application/vnd.docker.distribution.manifest.list.v2+json sha256:d0e1bc44b9bc37d0b63612e1a11b43e07bc650ffc0545d58f7991607460974d4 54.3 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed
sha256:55ce2385d9d8c6f720091c177fbe885a21c9dc07c9e480bfb4d94b3001f58182 application/vnd.oci.image.index.v1+json sha256:f1812994f0edbcb5bb5ccb63be2147ba6ad10e1faaa7ca9fcdad4f441739d84f 4.6 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/riscv64,linux/s390x io.cri-containerd.image=managed
sha256:965b9dd4aa4c1b6b68a4c54a166692b4645b6e6f8a5937d8dc17736cb63f515e application/vnd.oci.image.index.v1+json sha256:7f471907fa940f944867270de4ed78121b8b4c5d564e17f940dc787cb16dea82 31.5 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/riscv64,linux/s390x io.cri-containerd.image=managed
sha256:9d3465f8477c6b383762d90ec387c9d77da8a402a849265805f86feaa57aeeea application/vnd.docker.distribution.manifest.list.v2+json sha256:50bf0089b068e77c7b57d0c225840aa90b020d16bc9d76d8463fb34597e9509b 17.7 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
sha256:9dc6939e7c573673801790fcfad6f994282c216e005578f5836b5fafc6685fc2 application/vnd.docker.distribution.manifest.list.v2+json sha256:ad05900683464980ef45b957b8da61dc33eaefe4df2318abc7bfe9b13a46cbb8 32.8 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
sha256:ba6d7f8bc25be40b51dfeb5ddfda697527ba55073620c1c5fa04a5f0ae9e3816 application/vnd.docker.distribution.manifest.list.v2+json sha256:6dd470206000214b25123febf230af297375469037dc18619fd75a5528cff215 27.0 MiB linux/amd64,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
sha256:cd453b9857733f7230c435f941ae5b4ec835627732f18f9278f05375122c9bf5 application/vnd.docker.distribution.manifest.list.v2+json sha256:97792a778f41c6fcc31fc08615f2c0f5fd89ae4d9f402e78528c76ec5c2c5fd1 170.9 MiB linux/amd64,linux/arm/v7,linux/arm/v8,linux/arm64,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
sha256:e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c application/vnd.docker.distribution.manifest.list.v2+json sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 314.0 KiB linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,windows/amd64 io.cri-containerd.image=managed,io.cri-containerd.pinned=pinned
sha256:ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc application/vnd.docker.distribution.manifest.list.v2+json sha256:90d3eeb2e2108a14fe2ecbef1bc1b5607834335d99c842a377f338aade9da028 15.4 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/mips64le,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
sha256:fcacad112e0df0b551f562f2ba6c6c94cd2a47c761c5a36eb525091fae2b721d application/vnd.docker.distribution.manifest.v2+json sha256:e17cd0e26a6fad98b670af17b41cbac8b17e997a2759e3cadb6816feef680b69 165.3 MiB linux/amd64 io.cri-containerd.image=managed
swr.cn-north-4.myhuaweicloud.com/ddn-k8s/ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot-thick application/vnd.docker.distribution.manifest.v2+json sha256:e17cd0e26a6fad98b670af17b41cbac8b17e997a2759e3cadb6816feef680b69 165.3 MiB linux/amd64 io.cri-containerd.image=managed
swr.cn-north-4.myhuaweicloud.com/ddn-k8s/ghcr.io/k8snetworkplumbingwg/multus-cni@sha256:e17cd0e26a6fad98b670af17b41cbac8b17e997a2759e3cadb6816feef680b69 application/vnd.docker.distribution.manifest.v2+json sha256:e17cd0e26a6fad98b670af17b41cbac8b17e997a2759e3cadb6816feef680b69 165.3 MiB linux/amd64 io.cri-containerd.image=managed
创建持久卷
nano env1-vplc-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: env1-apps-pv
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt/data/env1-apps
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: env1-config-pv
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt/data/env1-config
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: env1-data-pv
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt/data/env1-data
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: env1-projects-pv
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt/data/env1-projects
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: env1-logs-pv
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt/data/env1-logs
nano env1-vplc-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: env1-apps-pvc
spec:
volumeName: env1-apps-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: env1-config-pvc
spec:
volumeName: env1-config-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: env1-data-pvc
spec:
volumeName: env1-data-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: env1-projects-pvc
spec:
volumeName: env1-projects-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: env1-logs-pvc
spec:
volumeName: env1-logs-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
# 先在宿主机上创建各 hostPath 目录,再 apply pv,最后 apply pvc
# (create hostPath dirs first, then apply pv, then pvc)
sudo mkdir -p /mnt/data/env1-apps /mnt/data/env1-config /mnt/data/env1-data /mnt/data/env1-projects /mnt/data/env1-logs
kubectl apply -f env1-vplc-pv.yaml
kubectl apply -f env1-vplc-pvc.yaml
检查PV和PVC
root@plcnext-VMware-Virtual-Platform:/home/plcnext/vplc# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
env1-apps-pv 1Gi RWO Retain Bound default/env1-apps-pvc 67m
env1-config-pv 1Gi RWO Retain Bound default/env1-config-pvc 66m
env1-data-pv 1Gi RWO Retain Bound default/env1-data-pvc 66m
env1-logs-pv 1Gi RWO Retain Bound default/env1-logs-pvc 66m
env1-projects-pv 1Gi RWO Retain Bound default/env1-projects-pvc 66m
root@plcnext-VMware-Virtual-Platform:/home/plcnext/vplc# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
env1-apps-pvc Bound env1-apps-pv 1Gi RWO 67m
env1-config-pvc Bound env1-config-pv 1Gi RWO 66m
env1-data-pvc Bound env1-data-pv 1Gi RWO 66m
env1-logs-pvc Bound env1-logs-pv 1Gi RWO 66m
env1-projects-pvc Bound env1-projects-pv 1Gi RWO 66m
创建K8S容器
kubectl apply -f pod.yaml
kubectl exec -it my-pod -c vplcnextcontrol1000 -- bash
修改Network
默认有一个错误的 IP 地址和路由:192.168.1.10 与 192.168.1.1。
这是由于VPLC的固件中 /usr/lib/systemd/network/80-wired.network
导致的
删除所有的192.168.1.* ,并修改79-if-1.network和79-if-2.network填写gateway和ipaddress
ip route del default via 192.168.1.1 dev eth0
ip route del default via 192.168.1.1 dev eth1
ip addr del 192.168.1.10/24 dev eth0
ip addr del 192.168.1.10/24 dev eth1
ip route del default via 192.168.10.1 dev eth1
ip route add default via 192.168.10.1 dev eth1 proto static metric 100
systemctl restart arp-preinit.service
nano /etc/systemd/network/79-if-1.network
nano /etc/systemd/network/79-if-2.network
systemctl restart plcnext
拷贝文件
kubectl cp ./pack.zip default/my-pod:/opt/plcnext
kubectl exec -it my-pod -c vplcnextcontrol1000 -- bash
unzip pack.zip
cd pack
cp -r apps config data projects ../
chmod 777 -R /opt/plcnext/apps /opt/plcnext/config /opt/plcnext/data /opt/plcnext/projects
重启系统
systemctl restart plcnext
修正防火墙
systemctl restart firewall-preinit
systemctl restart firewall
systemctl restart plcnext
# now the firewall should work
修正Profinet Device设备
固件里默认NIC0是作为pn的NIC,但是现在NIC1才是macvlan设备,所以要设置为NIC1
nano /etc/plcnext/Device.redefine.acf.settings
<EnvironmentVariable name="ARP_SETTING_PN_CONTROLLER_ADAPTER_INDEX" value="1" redefine="true" />
<EnvironmentVariable name="ARP_SETTING_PN_DEVICE_ADAPTER_INDEX" value="1" redefine="true" />
change to =>
<EnvironmentVariable name="ARP_SETTING_PN_CONTROLLER_ADAPTER_INDEX" value="2" redefine="true" />
<EnvironmentVariable name="ARP_SETTING_PN_DEVICE_ADAPTER_INDEX" value="2" redefine="true" />
nano /etc/plcnext/device/Io/PnS/PnS.target.config
<Var name="InterfaceName">eth0</Var>
change to =>
<Var name="InterfaceName">eth1</Var>
systemctl restart plcnext
# check wbm profinet