Running Open-Source Models on a Y9000P (unfinished)

Published: 2025-08-08

Environment

1. Y9000P laptop
2. 1 TB blank drive
3. Ubuntu 24.04 Desktop

I. Environment Initialization

Part 1: System Initialization

1. Install base packages

apt-get update
apt-get -y install openssh-server openssh-client apt-utils freeipmi ipmitool sshpass  ethtool zip unzip nano less git netplan.io iputils-ping mtr ipvsadm smartmontools python3-pip socat conntrack libvirt-clients libnuma-dev ctorrent nvme-cli gcc-12 g++-12
apt-get -y install vim wget apt git unzip zip ntp ntpdate lrzsz lftp tree bash-completion  elinks dos2unix tmux jq
apt-get -y install  nmap net-tools  mtr traceroute tcptraceroute aptitude htop iftop hping3 fping nethogs sshuttle tcpdump figlet  stress iperf iperf3 dnsutils curl
apt install -y linux-tools-generic linux-cloud-tools-generic

curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
sudo apt-get install git-lfs
git lfs install

sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 12

sudo update-alternatives --config gcc
sudo update-alternatives --config g++

2. Raise the file descriptor limits

echo "ulimit -SHn 655350" >>/etc/profile
echo "fs.file-max = 655350" >>/etc/sysctl.conf
echo "root soft nofile 655350" >>/etc/security/limits.conf
echo "root hard nofile 655350" >>/etc/security/limits.conf
echo "* soft nofile 655350" >>/etc/security/limits.conf
echo "* hard nofile 655350" >>/etc/security/limits.conf
echo "session required pam_limits.so" >>/etc/pam.d/common-session
source /etc/profile
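To confirm the limits after logging in again:
ulimit -n            # per-shell open-file limit; should report 655350
sysctl fs.file-max   # system-wide cap; takes effect after sysctl -p (run in step 5 below) or a reboot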

3. Improve shell history

cat /etc/profile
export HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S  `whoami` "
export HISTFILESIZE=50000
export HISTSIZE=50000
source /etc/profile

4. Check the firewall and cap the journalctl log size so it cannot fill the system disk

apt list --installed | grep ufw
If ufw is installed by default, it needs to be disabled:
ufw disable

Use journalctl to maintain the journal size automatically.
Keep only the last week of logs:
journalctl --vacuum-time=1w

Keep only 500 MB of logs:
journalctl --vacuum-size=500M
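To make the cap persistent across reboots, the same limit can be set in journald's own config (SystemMaxUse in journald.conf is the standard knob; a sketch):
sed -i 's/^#\?SystemMaxUse=.*/SystemMaxUse=500M/' /etc/systemd/journald.conf
systemctl restart systemd-journald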

5. Tune kernel parameters

cp /etc/sysctl.conf /etc/sysctl.conf.bak

vi /etc/sysctl.conf
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_abort_on_overflow = 1
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096        87380  4194304
net.ipv4.tcp_wmem = 4096        66384  4194304
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.core.optmem_max = 81920
net.core.wmem_default = 8388608
net.core.wmem_max = 16777216
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.ipv4.tcp_max_syn_backlog = 1020000
net.core.netdev_max_backlog = 862144
net.core.somaxconn = 262144
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_keepalive_time = 30
net.ipv4.ip_local_port_range = 1024    65535
net.netfilter.nf_conntrack_tcp_timeout_established = 180
net.netfilter.nf_conntrack_max = 1048576
net.nf_conntrack_max = 1048576
fs.file-max = 655350
modprobe nf_conntrack
sysctl -p /etc/sysctl.conf
sysctl -w net.ipv4.route.flush=1

6. Disable Ubuntu automatic updates

rm -rf /etc/apt/apt.conf.d/20auto-upgrades
cat << EOF > /etc/apt/apt.conf.d/20auto-upgrades
APT::Periodic::Update-Package-Lists "0";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "0";
APT::Periodic::Unattended-Upgrade "0";
EOF

sed -i "/^Unattended-Upgrade::Allowed-Origins {/,/^};/s/^/\/\//" /etc/apt/apt.conf.d/50unattended-upgrades
systemctl restart unattended-upgrades
systemctl status unattended-upgrades

mv /etc/update-motd.d /etc/update-motd.d.backup
mkdir -p /etc/update-motd.d

7. Disable swap

swapoff -a
Remove (or comment out) the swap entries in /etc/fstab, for example with the sketch below.
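A minimal sketch of that edit (assumes the swap entries have "swap" in the type field, as the installer writes them):
cp /etc/fstab /etc/fstab.bak
sed -i '/\sswap\s/s/^/#/' /etc/fstab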

systemctl list-unit-files|grep swap
systemctl disable dev-nvme0n1p2.swap
systemctl status dev-nvme0n1p2.swap

systemctl status swap.target
systemctl stop swap.target

# Prevent systemd-gpt-auto-generator from activating swap at boot: add a swap line with the noauto option to /etc/fstab
cat /etc/fstab
/dev/nvme0n1p2 none swap noauto

8. Enable root SSH login and tune SSH

sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config
sed -i 's/#ClientAliveInterval 0/ClientAliveInterval 15/g' /etc/ssh/sshd_config
sed -i 's/#ClientAliveCountMax 3/ClientAliveCountMax 30/g' /etc/ssh/sshd_config
systemctl enable ssh.service
systemctl restart sshd

9. Set the timezone and sync the time

timedatectl set-timezone Asia/Shanghai

ntpdate ntp1.aliyun.com
systemctl restart ntp
systemctl enable ntp

10. Configure the pip mirror

mkdir -p /root/.pip
cat /root/.pip/pip.conf
[global]
timeout = 60
index-url = http://pypi.douban.com/simple
trusted-host = pypi.douban.com

pip3 install virtualenv

11. Configure a static IP address

cp /etc/netplan/01-network-manager-all.yaml /etc/netplan/01-network-manager-all.yaml.bak

root@ubuntu:~# nmcli device show wlp0s20f3
GENERAL.DEVICE:                         wlp0s20f3
GENERAL.TYPE:                           wifi
GENERAL.HWADDR:                         BC:6E:E2:36:D2:1A
GENERAL.MTU:                            1500
GENERAL.STATE:                          100 (connected)
GENERAL.CONNECTION:                     CCTV-2
GENERAL.CON-PATH:                       /org/freedesktop/NetworkManager/ActiveConnection/4
IP4.ADDRESS[1]:                         192.168.31.187/24
IP4.GATEWAY:                            192.168.31.1
IP4.ROUTE[1]:                           dst = 192.168.31.0/24, nh = 0.0.0.0, mt = 600
IP4.ROUTE[2]:                           dst = 0.0.0.0/0, nh = 192.168.31.1, mt = 600
IP4.DNS[1]:                             192.168.31.1
IP6.ADDRESS[1]:                         fe80::d1a8:4024:59cf:d295/64
IP6.GATEWAY:                            --
IP6.ROUTE[1]:                           dst = fe80::/64, nh = ::, mt = 1024

root@ubuntu:/etc/netplan# cat /etc/netplan/01-network-manager-all.yaml
network:
  ethernets:
    wlp0s20f3:   # name of the interface to configure
      addresses: [192.168.31.187/24]   # static IP address and prefix length
      dhcp4: false   # disable DHCPv4
      optional: true
      routes:
        - to: default
          via: 192.168.31.1   # gateway address; note netplan expects a plain IP here, not a CIDR
      nameservers:
        addresses: [192.168.31.1,114.114.114.114]  # DNS servers; separate multiple entries with commas (optional)
  version: 2
  renderer: NetworkManager


root@ubuntu:~# netplan apply

Part 2: GPU Driver and CUDA Deployment

1. Disable the bundled nouveau driver

cat << EOF >  /etc/modprobe.d/blacklist-nouveau.conf
blacklist nouveau
blacklist lbm-nouveau
options nouveau modeset=0
alias nouveau off
alias lbm-nouveau off
EOF

Run the following to disable nouveau:

echo options nouveau modeset=0 | tee -a /etc/modprobe.d/nouveau-kms.conf
update-initramfs -u
A reboot is required.
lsmod | grep nouveau   # after the reboot, run this in a terminal; no output means nouveau was disabled successfully

2. Install the NVIDIA driver (https://download.nvidia.com/XFree86/Linux-x86_64)

Check the recommended driver version:
add-apt-repository ppa:graphics-drivers/ppa
apt-get update
ubuntu-drivers devices
The suggested version is the entry marked "recommended".

wget https://download.nvidia.com/XFree86/Linux-x86_64/575.57.08/NVIDIA-Linux-x86_64-575.57.08.run
chmod +x NVIDIA-Linux-x86_64-575.57.08.run
bash NVIDIA-Linux-x86_64-575.57.08.run

root@ubuntu:~# nvidia-smi
Sat Aug  2 22:58:58 2025       
+-----------------------------------------------------------------------------------------+
| NVIDIA-SMI 575.57.08              Driver Version: 575.57.08      CUDA Version: 12.9     |
|-----------------------------------------+------------------------+----------------------+
| GPU  Name                 Persistence-M | Bus-Id          Disp.A | Volatile Uncorr. ECC |
| Fan  Temp   Perf          Pwr:Usage/Cap |           Memory-Usage | GPU-Util  Compute M. |
|                                         |                        |               MIG M. |
|=========================================+========================+======================|
|   0  NVIDIA GeForce RTX 3060 ...    Off |   00000000:01:00.0  On |                  N/A |
| N/A   64C    P5             18W /   80W |     453MiB /   6144MiB |      0%      Default |
|                                         |                        |                  N/A |
+-----------------------------------------+------------------------+----------------------+
                                                                                         
+-----------------------------------------------------------------------------------------+
| Processes:                                                                              |
|  GPU   GI   CI              PID   Type   Process name                        GPU Memory |
|        ID   ID                                                               Usage      |
|=========================================================================================|
|    0   N/A  N/A            2053      G   /usr/lib/xorg/Xorg                      154MiB |
|    0   N/A  N/A            2196      G   /usr/bin/gnome-shell                     35MiB |
|    0   N/A  N/A            4048      G   .../6565/usr/lib/firefox/firefox        226MiB |
+-----------------------------------------------------------------------------------------+


The following installer prompts appear after running bash NVIDIA-Linux-x86_64-575.57.08.run (installer screenshots omitted).


3. Install CUDA

From the nvidia-smi output above, the highest CUDA version this driver supports is 12.9, so go to https://developer.nvidia.com/cuda-toolkit-archive and download the 12.9 toolkit.

wget https://developer.download.nvidia.com/compute/cuda/12.9.0/local_installers/cuda_12.9.0_575.51.03_linux.run
chmod a+x cuda_12.9.0_575.51.03_linux.run
bash cuda_12.9.0_575.51.03_linux.run

Select Continue -> Accept -> deselect the Driver component -> Install

Add the CUDA environment variables to /etc/profile:
cat /etc/profile
export PATH=$PATH:/usr/local/cuda/bin  
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64  
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/cuda/lib64

source /etc/profile
nvcc -V   # verify

4. Install Docker

wget https://download.docker.com/linux/static/stable/x86_64/docker-28.3.0.tgz


tar xf docker-28.3.0.tgz
cp docker/* /usr/bin/
vi /etc/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd --selinux-enabled=false 
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target

chmod 644 /etc/systemd/system/docker.service
systemctl daemon-reload && systemctl enable docker.service
systemctl restart docker
docker info
mkdir -p /etc/docker

vi /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://t3xbrfwz.mirror.aliyuncs.com",
    "https://docker.unsee.tech",
    "https://dockerpull.org",
    "https://docker.1panel.live",
    "https://dockerhub.icu",
    "https://docker.m.daocloud.io",
    "https://docker.nju.edu.cn"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "max-concurrent-downloads": 30,
  "max-concurrent-uploads": 15,
  "live-restore": true
}
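Docker only re-reads daemon.json on restart, so restart it and confirm the mirrors took effect:
systemctl restart docker
docker info | grep -A 7 'Registry Mirrors'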

5. Install NVIDIA Docker support

Install the NVIDIA Container Toolkit from the USTC mirror.
1. Download and store the GPG key:
curl -fsSL https://mirrors.ustc.edu.cn/libnvidia-container/gpgkey | \
  sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg

2. Configure the APT source list:
curl -s -L https://mirrors.ustc.edu.cn/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
  sed 's#deb https://nvidia.github.io#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://mirrors.ustc.edu.cn#g' | \
  sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

3. Update the package lists and install:
sudo apt-get update
sudo apt-get install -y nvidia-container-toolkit

4. Verify the installation:
nvidia-container-cli --version

NVIDIA runtime configuration in daemon.json:
cat /etc/docker/daemon.json
{
    "runtimes": {
        "nvidia": { 
            "path": "/usr/bin/nvidia-container-runtime",
            "runtimeArgs": []
         }
    }
}
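Note that daemon.json is a single file: writing only the runtimes block shown above would discard the registry-mirrors settings from the Docker step, so merge the two snippets into one file. Alternatively, the toolkit's own helper can insert the runtime entry into the existing file:
nvidia-ctk runtime configure --runtime=docker
systemctl restart docker

A quick sanity check (the image tag here is only illustrative):
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi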


Part 3: GPU-Related Tuning

1. Pin package versions

# Hold all nvidia-* packages
dpkg -l | awk '{print $2}' | grep -E '^nvidia-'|xargs -n 1 apt-mark hold
#固化系统内核版本
dpkg -l | awk '{print $2}' | grep -E '^linux-'|xargs -n 1 apt-mark hold


dpkg-query -W --showformat='${Package} ${Status}\n' | \
grep -v deinstall | \
awk '{ print $1 }' | \
grep -E 'nvidia.*-[0-9]+$' | \
xargs -r -L 1 sudo apt-mark hold

2. Enable persistence mode

Purpose: set the GPU's persistence mode to enabled. Persistence mode keeps the NVIDIA driver loaded while the GPU is idle. Enabling it improves startup time for CUDA kernels, for example, because the GPU does not need to be woken up each time a CUDA application runs.

nvidia-smi -pm 1
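Whether it took effect can be verified with:
nvidia-smi --query-gpu=persistence_mode --format=csv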

3. Enable ECC memory

Purpose: enable ECC (Error Correction Code) memory mode on the GPU. Note that this option only works on certain Tesla and Quadro class devices.

Command:
nvidia-smi -e ENABLED

4. Lock CPU frequency

# Locking the CPU frequency can improve performance; on some models these commands fail, so ignore any errors
sudo apt install -y linux-tools-6.14.0-27-generic linux-cloud-tools-6.14.0-27-generic
cpupower idle-set -D 0 || true
cpupower -c all frequency-set -g performance || true

5. Lock GPU clocks

# Lock the GPU clock to 1410 MHz
nvidia-smi -lgc 1410 || true

6. Find ACS-capable PCI devices and disable ACS

This walks all PCI bridges in the system and tries to disable the ACS feature on those that support it. Disabling ACS avoids the bandwidth limitations that can appear in multi-GPU systems and ensures all GPUs can share the bus more effectively.

lspci -vvv | grep "PCI bridge" | cut -d" " -f 1 | xargs -I{} setpci -s {} ECAP_ACS+06.w=0000
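The result can be checked on the ACSCtl line of each bridge; every feature should now show as disabled (a trailing '-'):
lspci -vvv | grep -i acsctl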

7. CPU performance governor

cpupower frequency-set -g performance
cpupower idle-set -D 0

8. Install nvidia-fabricmanager (must match the driver version exactly); optional, requires NVSwitch hardware

1. Check the NVIDIA driver version:
root@ubuntu:~# nvidia-smi|grep 'Driver Version'
| NVIDIA-SMI 575.57.08              Driver Version: 575.57.08      CUDA Version: 12.9     |
2. Download and install:
wget https://developer.download.nvidia.cn/compute/cuda/repos/ubuntu2004/x86_64/nvidia-fabricmanager-575_575.57.08-1_amd64.deb

dpkg -i nvidia-fabricmanager-575_575.57.08-1_amd64.deb 

systemctl enable nvidia-fabricmanager.service
systemctl restart nvidia-fabricmanager.service
systemctl status nvidia-fabricmanager.service

9. Enable GPU P2P (untested)

echo "nvidia_peermem" >>/etc/modules-load.d/nvidia.conf
modprobe nvidia_peermem && lsmod | grep nvidia_peermem
nvidia-smi nvlink --status
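The P2P topology can then be inspected via the connectivity matrix:
nvidia-smi topo -m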

10. Deploy HPC-X (choose a release at the bottom of https://developer.nvidia.com/networking/hpc-x)

apt-get -y install lbzip2

wget https://content.mellanox.com/hpc/hpc-x/v2.20/hpcx-v2.20-gcc-inbox-ubuntu24.04-cuda12-x86_64.tbz

tar -xf hpcx-v2.20-gcc-inbox-ubuntu24.04-cuda12-x86_64.tbz -C /opt/

ln -s /opt/hpcx-v2.20-gcc-inbox-ubuntu24.04-cuda12-x86_64 /opt/hpcx

 
# Activate HPC-X
export HPCX_HOME=/opt/hpcx
. $HPCX_HOME/hpcx-init.sh
hpcx_load

11. Build NCCL (static build)

mkdir -p /root/nccl/ && cd /root/nccl
git clone https://github.com/NVIDIA/nccl.git
cd nccl
make src.build CUDA_HOME=/usr/local/cuda PATH=$PATH:/usr/local/cuda/bin LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH

12. Build nccl-tests (static build)

mkdir -p /root/nccl/ && cd /root/nccl
git clone https://github.com/NVIDIA/nccl-tests.git
cd nccl-tests
which mpirun
# which mpirun returns /opt/hpcx/ompi/bin/mpirun, so use MPI_HOME=/opt/hpcx/ompi


PATH=$PATH:/usr/local/cuda/bin LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64 LIBRARY_PATH=$LIBRARY_PATH:/usr/local/cuda/lib64 make -j 8 CUDA_HOME=/usr/local/cuda NCCL_HOME=/root/nccl/nccl/build NCCL_LIBDIR=/root/nccl/nccl/build/lib NCCL_STATIC=1

Build with MPI support:
PATH=$PATH:/usr/local/cuda/bin LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64 LIBRARY_PATH=$LIBRARY_PATH:/usr/local/cuda/lib64 make -j 32  MPI=1  MPI_HOME=/opt/hpcx/ompi CUDA_HOME=/usr/local/cuda NCCL_HOME=/root/nccl/nccl/build NCCL_LIBDIR=/root/nccl/nccl/build/lib NCCL_STATIC=1


To run across multiple machines with MPI, add the hosts to the hostfile, and keep the nccl, nccl-tests, and MPI files in the same directory layout on every machine:
mpirun --allow-run-as-root --hostfile /root/hostfile -oversubscribe -map-by ppr:8:node -mca pml ob1 -mca btl ^openib -x OMPI_MCA_btl_tcp_if_include="eth0" -x NCCL_NET_GDR_LEVEL=4 -x NCCL_PXN_DISABLE=1 -x NCCL_IB_GID_INDEX=3 -x NCCL_NVLS_ENABLE=1 -x LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/root/nccl/nccl/build/lib -x NCCL_DEBUG=VERSION /root/nccl/nccl-tests/build/all_reduce_perf -b 128M -e 8G -g 1 -f 2 -i 0


cat /root/hostfile # node01 and node02 must have passwordless SSH to each other, including to themselves
node01 slots=8
node02 slots=8
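A minimal sketch for setting up that passwordless SSH (run on every node; the hostnames assume the hostfile above):
ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519
for h in node01 node02; do ssh-copy-id root@$h; done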

13. Run the tests

 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/root/nccl/nccl/build/lib
 ./build/all_reduce_perf -b 8 -e 128M -f 2 -g 1

# Run via MPI
mpirun --allow-run-as-root --host localhost -oversubscribe -map-by ppr:8:node -mca pml ob1 -mca btl ^openib -x OMPI_MCA_btl_tcp_if_include="eth0" -x NCCL_NET_GDR_LEVEL=4 -x NCCL_PXN_DISABLE=1 -x NCCL_IB_GID_INDEX=3 -x NCCL_NVLS_ENABLE=1 -x NCCL_DEBUG=INFO /root/nccl/nccl-tests/build/all_reduce_perf -b 4G -e 4G -g 1 -f 2 -i 0

mpirun --allow-run-as-root --host localhost -oversubscribe -map-by ppr:8:node -mca pml ob1 -mca btl ^openib -x OMPI_MCA_btl_tcp_if_include="eth0" -x NCCL_NET_GDR_LEVEL=4 -x NCCL_PXN_DISABLE=1 -x NCCL_IB_GID_INDEX=3 -x NCCL_NVLS_ENABLE=1 /root/nccl/nccl-tests/build/reduce_scatter_perf -b 4G -e 4G -g 1 -f 2 -i 0

Run with MPI (untested)

# Locate libnccl.so.2
find / -name "libnccl.so.2" 2>/dev/null

export LD_LIBRARY_PATH=/root/backup/nccl/nccl/build/lib:$LD_LIBRARY_PATH

# Run the test case
mpirun --allow-run-as-root --host 192.168.31.45 -np 1 --mca btl_tcp_if_include wlp0s20f3 --mca oob_tcp_if_include wlp0s20f3 -x NCCL_IB_GID_INDEX=3 -x NCCL_DEBUG=INFO -x NCCL_DEBUG_SUBSYS=INIT,NET,GRAPH  -x LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH -x NCCL_PXN_DISABLE=0   -x NCCL_NET_PLUGIN=none /root/backup/nccl/nccl-tests/build/alltoall_perf -b 256 -e 4G -g 1 -i 104857600


This output shows that the NCCL alltoall_perf test case ran successfully. The first part is debug output from NCCL initialization; the final part is the result summary.

The test performs float all-to-all operations at various sizes, first an out-of-place pass and then an in-place pass.

The output columns mean:

size: packet size in bytes;
count: number of elements transferred;
type: data type, here always float;
redop: reduction operation, here 'none' (no reduction);
root: root node of the reduction, here '-1' (no root);
time: time to complete the operation, in microseconds;
algbw: "algorithm bandwidth", the effective data-transfer bandwidth;
busbw: bandwidth over the GPU-to-GPU or node-to-node interconnect;
#wrong: error count; 0 means no errors.

All all-to-all tests completed without errors and bandwidth values could be computed. The last few lines show that all test parameters stayed within the expected range; the overall average bandwidth is 0 (this should take a non-zero value in multi-node tests, but is 0 for a single node), and all resources were cleaned up after the run.

all_reduce_perf test (untested)

mpirun --allow-run-as-root --host 192.168.31.45 -np 1 -oversubscribe -map-by ppr:8:node -mca pml ob1 -mca btl ^openib -x OMPI_MCA_btl_tcp_if_include="eth0" -x NCCL_IB_GID_INDEX=3 -x NCCL_ALGO=RING -x NCCL_COLLNET_ENABLE=0 -x LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH /root/backup/nccl/nccl-tests//build/all_reduce_perf -b 1G -e 2G -g 1 -f 2 -i 0



Breakdown of the test command above:

1. `mpirun`: launches MPI programs on a distributed system.
2. `--allow-run-as-root`: allows running MPI programs as root.
3. `--host 192.168.31.45 -np 1`: starts 1 process on the node at 192.168.31.45.
4. `-oversubscribe`: allows more MPI processes than cores.
5. `-map-by ppr:8:node`: sets the per-node process placement, here 8 processes per node.
6. `-mca pml ob1 -mca btl ^openib`: selects the MPI messaging layer and transports; use the `ob1` point-to-point layer with every available transport except 'openib'.
7. `-x OMPI_MCA_btl_tcp_if_include="eth0" -x NCCL_IB_GID_INDEX=3`: sets environment variables; MPI traffic uses the 'eth0' interface and the InfiniBand GID index is set to 3.
8. `-x NCCL_ALGO=RING -x NCCL_COLLNET_ENABLE=0`: sets NCCL environment variables; use the 'RING' algorithm and disable CollNet.
9. `-x LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH`: adds a runtime library search path.
10. `/root/backup/nccl/nccl-tests//build/all_reduce_perf -b 1G -e 2G -g 1 -f 2 -i 0`: the NCCL benchmark binary and its parameters.

The output first records the basic parameter information, then the devices NCCL used. The program next reports that the maximum byte count was automatically reduced to 1708916736 due to memory limits, followed by the out-of-place and in-place results for the 1 GB payload. The run ended without any errors; note that in nccl-tests `-i` is the byte increment between test sizes (set to 0 here, so the size sweep is driven by the `-f 2` factor), not an iteration count.

Overall, this NCCL performance test ran normally on this hardware and configuration, with no errors.

II. Deploying Models

III. Deploying Kubernetes

Part 1: System Initialization

Refer to the system initialization in Section I.

Part 2: Node Component Installation

1. Install nvidia-container-toolkit (untested)

curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | sudo apt-key add -
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list
apt update

2. Install nvidia-container-runtime

apt-get -y install  nvidia-container-runtime runc

3. Install containerd

# Step 1: download and install the containerd binaries
wget https://github.com/containerd/containerd/releases/download/v1.7.8/containerd-1.7.8-linux-amd64.tar.gz
tar -xf containerd-1.7.8-linux-amd64.tar.gz -C /usr/local/   # the tarball contains bin/, so the binaries land in /usr/local/bin/

cat  /etc/containerd/config.toml |grep sandbox_image
    sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10"
cat  /etc/containerd/config.toml |grep SystemdCgroup
		SystemdCgroup = true
# Change sandbox_image to the Aliyun mirror
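A sketch of both edits with sed, run after generating the config in step 2 below (the default pause image tag varies by containerd version, so the first pattern matches whatever is there):
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10"#' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml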

Step 2: create the systemd service file
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

# If the config needs custom changes, edit /etc/containerd/config.toml now
# Note: make sure plugins, log paths, and similar options are set correctly in the config file

# Create the systemd unit file
sudo tee /etc/systemd/system/containerd.service <<-'EOF'
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
ExecStart=/usr/local/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

systemctl enable containerd && systemctl start containerd 
# Install crictl
wget https://ghfast.top/https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.25.0/crictl-v1.25.0-linux-amd64.tar.gz
tar -zxvf  crictl-*-linux-amd64.tar.gz  -C /usr/local/bin/

cat <<EOF > /etc/crictl.yaml 
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
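A quick check that crictl can reach containerd over the socket configured above:
crictl info | head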
Wire containerd to the nvidia-container-toolkit (full /etc/containerd/config.toml for reference):
cat /etc/containerd/config.toml
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_cdi = true
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_image_defined_volumes = false
    image_pull_progress_timeout = "6m0s"
    max_concurrent_downloads = 30
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "sealos.hub:5000/pause:3.9"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1
      setup_serially = false

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "nvidia"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_blockio_not_enabled_errors = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        privileged_without_host_devices_all_devices_allowed = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""
        sandbox_mode = ""
        snapshotter = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia]
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia.options]
            BinaryName = "/usr/bin/nvidia-container-runtime"
            SystemdCgroup = true

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          privileged_without_host_devices_all_devices_allowed = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"
          sandbox_mode = "podsandbox"
          snapshotter = ""

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        privileged_without_host_devices_all_devices_allowed = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""
        sandbox_mode = ""
        snapshotter = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = "/etc/containerd/certs.d"

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]
    sampling_ratio = 1.0
    service_name = "containerd"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.nri.v1.nri"]
    disable = true
    disable_connections = false
    plugin_config_path = "/etc/nri/conf.d"
    plugin_path = "/opt/nri/plugins"
    plugin_registration_timeout = "5s"
    plugin_request_timeout = "2s"
    socket_path = "/var/run/nri/nri.sock"

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    blockio_config_file = ""
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]
    endpoint = ""
    insecure = false
    protocol = ""

  [plugins."io.containerd.transfer.v1.local"]

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.metrics.shimstats" = "2s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0



Restart containerd and kubelet:
systemctl restart containerd.service
systemctl restart kubelet.service

4. Install the kubeadm, kubectl, and kubelet binaries

Download the binaries:
wget https://storage.googleapis.com/kubernetes-release/release/v1.28.4/bin/linux/amd64/kubeadm
wget https://storage.googleapis.com/kubernetes-release/release/v1.28.4/bin/linux/amd64/kubelet
wget https://storage.googleapis.com/kubernetes-release/release/v1.28.4/bin/linux/amd64/kubectl
chmod a+x kube*
cp kube* /usr/bin/
Create the service files:
cat /lib/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
mkdir -p /usr/lib/systemd/system/kubelet.service.d/
cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS



cat /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1

sysctl --system
modprobe br_netfilter
systemctl enable kubelet

5. Install nerdctl

wget https://github.com/containerd/nerdctl/releases/download/v1.7.5/nerdctl-full-1.7.5-linux-amd64.tar.gz
tar -xf nerdctl-full-1.7.5-linux-amd64.tar.gz -C /usr/local/

Part 3: Cluster Initialization

1. Configure name resolution

echo 192.168.31.187 k8s.disallow.cn>>/etc/hosts

2. Generate the kubeadm configuration

cat kubeadm.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "192.168.31.187"
  bindPort: 6443
---
# kubeadm.yaml
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
    max-mutating-requests-inflight: "1000"
    max-requests-inflight: "2000"
    watch-cache-sizes: node#2000,pod#10000
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.31.187:6443 
# controlPlaneEndpoint: k8s.disallow.cn:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
    extraArgs:
      "pre-vote": "true"
      "auto-compaction-retention": "1"
      "auto-compaction-mode": "periodic"
      "quota-backend-bytes": "8589934592"
      "heartbeat-interval": "500"
      "election-timeout": "3000"
      "snapshot-count": "50000"
      "max-request-bytes": "31457280"
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.28.4
networking:
  dnsDomain: cluster.local
  podSubnet: 10.42.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}

3. Run the initialization

kubeadm config images pull --config kubeadm.yaml
kubeadm init --config kubeadm.yaml
  
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get node

If initialization fails, clean up and retry:

# Remove Kubernetes configuration directories
sudo rm -rf /etc/kubernetes/*
sudo rm -rf ~/.kube/*

# Remove kubelet state
sudo rm -rf /var/lib/kubelet/*

# Remove etcd data
sudo rm -rf /var/lib/etcd/*

# Remove CNI configuration
sudo rm -rf /etc/cni/net.d/*
sudo rm -rf /opt/cni/bin/*

# Remove container-runtime state
sudo rm -rf /var/run/kubernetes/*
sudo rm -rf /var/lib/containerd/*

# Stop kubelet
sudo systemctl stop kubelet

# Restart containerd
sudo systemctl restart containerd

# Start kubelet
sudo systemctl start kubelet

# Check whether port 10250 is in use
sudo ss -tulpn | grep 10250

# If it is, kill the process holding the port
sudo kill -9 <PID>

# Re-initialize the Kubernetes cluster
sudo kubeadm init --config kubeadm.yaml

4. Install Calico

Download the Tigera Operator:
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/tigera-operator.yaml
sed -i 's/quay.io\/tigera\/operator/quay.m.daocloud.io\/tigera\/operator/g' tigera-operator.yaml
Create the custom-resources.yaml configuration:
cat custom-resources.yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    bgp: Enabled
    containerIPForwarding: Enabled
    hostPorts: Enabled
    ipPools:
      - blockSize: 26
        cidr: 10.42.0.0/16
        disableBGPExport: false
        encapsulation: IPIP
        natOutgoing: Enabled
        nodeSelector: all()
    linuxDataplane: Iptables
    multiInterfaceMode: None
    nodeAddressAutodetectionV4:
      kubernetes: NodeInternalIP
  cni:
    ipam:
      type: Calico
    type: Calico
  controlPlaneReplicas: 2
  flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
  kubeletVolumePluginPath: /var/lib/kubelet
  logging:
    cni:
      logFileMaxAgeDays: 30
      logFileMaxCount: 10
      logFileMaxSize: 1024Mi
      logSeverity: Info
  nodeUpdateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  nonPrivileged: Disabled
  registry: docker.m.daocloud.io
  variant: Calico
kubectl create -f tigera-operator.yaml
kubectl create -f  custom-resources.yaml

If the network plugin cannot be installed this way, fall back to the plain manifest:

wget https://docs.projectcalico.org/manifests/calico.yaml -O calico.yaml
sed -i 's#docker.io/calico/#registry.aliyuncs.com/calico/#' calico.yaml   # replace with a domestic mirror
kubectl apply -f calico.yaml

5. Remove the control-plane taint

kubectl taint node ubuntu node-role.kubernetes.io/control-plane:NoSchedule-   # "ubuntu" is the node name
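Verify the taint is gone:
kubectl describe node ubuntu | grep Taints   # should print <none>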

6. Deploy the metrics-server add-on

kubectl -n kube-system apply -f metrics-server-components.yaml
cat metrics-server-components.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      tolerations:
      - operator: "Exists"
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        image: k8s.m.daocloud.io/metrics-server/metrics-server:v0.7.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 10250
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

Part 4: Install the GPU Operator

1. Install Helm

wget https://get.helm.sh/helm-v3.14.2-linux-amd64.tar.gz
tar -xf helm-v3.14.2-linux-amd64.tar.gz
mv linux-amd64/helm /usr/bin/

2. Install the NVIDIA GPU Operator with Helm

helm repo add nvidia https://helm.ngc.nvidia.com/nvidia 
helm repo update

helm install --wait gpu-operator -n gpu-operator --create-namespace nvidia/gpu-operator --set driver.enabled=false --set toolkit.enabled=false --set dcgmExporter.config.name=dcgm-exporter --set dcgmExporter.enabled=true
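Once the chart settles, the operator pods should all be Running or Completed and the node should advertise the GPU resource:
kubectl get pods -n gpu-operator
kubectl describe node | grep nvidia.com/gpu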

3. GPU time-slicing

# https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html
apiVersion: v1
kind: ConfigMap
metadata:
  name: time-slicing-config-all
  namespace: gpu-operator
data:
  any: |-
    version: v1
    flags:
      migStrategy: none
    sharing:
      timeSlicing:
        resources:
        - name: nvidia.com/gpu
          replicas: 32
          
# kubectl patch clusterpolicy/cluster-policy -n gpu-operator --type merge -p '{"spec": {"devicePlugin": {"config": {"name": "time-slicing-config-all", "default": "any"}}}}'
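After the device plugin restarts with this config, the node capacity should report the replicated resource (32 with one physical GPU and replicas: 32); a quick check:
kubectl get nodes -o jsonpath='{.items[*].status.capacity.nvidia\.com/gpu}'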

