Kubernetes Three-Stage Project

Published: 2025-09-02

Deploying a Discuz forum and a Tomcat shop on Kubernetes

Part 1: Persistent Storage (StorageClass + NFS)

1. Create the ServiceAccount and RBAC

[root@k8s-master scnfs]# cat nfs-provisioner-rbac.yaml
# 1. ServiceAccount: the service account used by the NFS provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: default  # all resources live in the default namespace; adjust as needed

---
# 2. ClusterRole: cluster-scoped permissions for the core dynamic-provisioning operations (PV/PVC/StorageClass, etc.)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-provisioner-clusterrole
rules:
  # PVs: create/delete/list
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  # PVCs: read and update status
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  # StorageClasses: look up available classes
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  # Events: report PV provisioning success/failure
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  # Services/Endpoints: needed by leader election in some versions
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

---
# 3. ClusterRoleBinding: bind the cluster-scoped permissions to the ServiceAccount
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nfs-provisioner-clusterrolebinding
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default  # must match the ServiceAccount's namespace
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-clusterrole
  apiGroup: rbac.authorization.k8s.io

---
# 4. Role: namespace-scoped permissions for the leader-election endpoints/leases
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nfs-provisioner-role
  namespace: default  # same namespace as the provisioner so the election permissions apply
rules:
  - apiGroups: [""]
    resources: ["endpoints"]  # leader election (older provisioner versions)
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]  # leader election (newer versions use Lease objects, which live in coordination.k8s.io, not the core group)
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

---
# 5. RoleBinding: bind the namespace-scoped election permissions to the ServiceAccount
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nfs-provisioner-rolebinding
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: Role
  name: nfs-provisioner-role
  apiGroup: rbac.authorization.k8s.io

2. Create the provisioner

[root@k8s-master scnfs]# cat nfs-provisioner-deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-provisioner
  namespace: default
  labels:
    app: nfs-provisioner
spec:
  replicas: 1  # single replica for testing; run several replicas with leader election in production
  selector:
    matchLabels:
      app: nfs-provisioner
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccountName: nfs-provisioner  # the ServiceAccount created above
      containers:
        - name: nfs-provisioner
          # Aliyun mirror image, reliably pullable in China (replaces k8s.gcr.io)
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # env: core settings (must match the StorageClass's provisioner field)
          env:
            - name: PROVISIONER_NAME
              value: cluster.local/nfs-provisioner  # provisioner name; the StorageClass references this value
            - name: NFS_SERVER
              value: 192.168.157.110  # IP of the NFS server deployed earlier
            - name: NFS_PATH
              value: /nfs  # exported NFS share
          # mount the NFS share into the container (fixed path; the provisioner creates per-PV subdirectories here)
          volumeMounts:
            - name: nfs-volume
              mountPath: /persistentvolumes
      # define the NFS volume (points at the NFS server)
      volumes:
        - name: nfs-volume
          nfs:
            server: 192.168.157.110
            path: /nfs

3. Create the StorageClass

[root@k8s-master scnfs]# cat nfs-storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-sc  # StorageClass name; PVCs reference this
provisioner: cluster.local/nfs-provisioner  # must exactly match the provisioner's PROVISIONER_NAME
parameters:
  archiveOnDelete: "true"  # when a PVC is deleted, archive (rename) its NFS directory instead of removing the data
reclaimPolicy: Delete  # Delete: remove the PV with the PVC (data is archived per the setting above); Retain: keep the PV
allowVolumeExpansion: true  # allow PVC expansion (supported since v4.0)
volumeBindingMode: Immediate  # bind PVCs immediately (no need to wait for pod scheduling)

4. Verify

[root@k8s-master scnfs]# kubectl get sa
NAME              SECRETS   AGE
default           0         32h
nfs-provisioner   0         38m
[root@k8s-master scnfs]# kubectl get sc
NAME     PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-sc   cluster.local/nfs-provisioner   Delete          Immediate           true                   39m
[root@k8s-master scnfs]# kubectl get pod
NAME                               READY   STATUS    RESTARTS   AGE
nfs-provisioner-745557fd5c-dslzv   1/1     Running   0          39m
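
To confirm dynamic provisioning end to end, a throwaway PVC can be created against the new class (a minimal sketch; the test-claim name and the 1Mi size are arbitrary):

cat > test-pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-sc
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Mi
EOF
kubectl apply -f test-pvc.yaml
kubectl get pvc test-claim        # should turn Bound within seconds, backed by a pvc-... volume
kubectl delete -f test-pvc.yaml   # with archiveOnDelete=true the NFS directory is archived, not removed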

Part 2: Deploying a MySQL Master-Slave Pair

1. Create a Secret for the passwords

[root@k8s-master mysql]# cat secret.yaml 
apiVersion: v1
kind: Secret                  # resource type: Secret
metadata:
  name: mysql-secrets         # Secret name
  namespace: mysql            # lives in the mysql namespace
type: Opaque                  # generic (opaque) secret type
data:                         # secret data (Base64-encoded)
  root-password: MTIzLmNvbQ==  # MySQL root password; decodes to 123.com
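
(This assumes the mysql namespace already exists, e.g. via kubectl create namespace mysql, which is not shown above.) The Base64 value can be produced and verified with coreutils:

echo -n '123.com' | base64      # -> MTIzLmNvbQ== (-n avoids encoding a trailing newline)
echo MTIzLmNvbQ== | base64 -d   # -> 123.com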

2. Configuration files

[root@k8s-master mysql]# cat configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-config
  namespace: mysql
data:
  master.cnf: |
    [mysqld]
    server-id=10
    log_bin=/var/lib/mysql/mysql-bin.log
    read_only=0
    bind-address=0.0.0.0
    gtid_mode=ON
    enforce_gtid_consistency=ON
    default_authentication_plugin=mysql_native_password
 
  slave1.cnf: |
    [mysqld]
    server-id=20
    relay_log=/var/lib/mysql/mysql-relay-bin.log
    log_bin=/var/lib/mysql/mysql-bin.log
    read_only=1
    bind-address=0.0.0.0
    gtid_mode=ON
    enforce_gtid_consistency=ON
    default_authentication_plugin=mysql_native_password

3. Initialization scripts

[root@k8s-master mysql]# cat init-scripts.yaml 
apiVersion: v1
kind: ConfigMap                  # resource type: ConfigMap
metadata:
  name: mysql-init-scripts        # MySQL initialization scripts
  namespace: mysql                # lives in the mysql namespace
data:                             # initialization SQL scripts
  master-init.sql: |              # master initialization script
    CREATE USER IF NOT EXISTS 'rsyncuser'@'%' IDENTIFIED BY '123.com';  # replication user
    GRANT REPLICATION SLAVE ON *.* TO 'rsyncuser'@'%';                  # grant replication privileges

    CREATE DATABASE IF NOT EXISTS discuz;                               # Discuz database
    CREATE USER IF NOT EXISTS 'discuz'@'%' IDENTIFIED BY '123.com';     # Discuz user
    GRANT ALL PRIVILEGES ON discuz.* TO 'discuz'@'%';                   # full privileges on the discuz DB

    CREATE DATABASE IF NOT EXISTS biyesheji;                            # shop database
    CREATE USER IF NOT EXISTS 'tomcat'@'%' IDENTIFIED BY '123.com';     # Tomcat user
    GRANT ALL PRIVILEGES ON biyesheji.* TO 'tomcat'@'%';                # full privileges on the biyesheji DB

    FLUSH PRIVILEGES;                                                   # apply the grants
  slave-init.sql: |                   # slave initialization script
    CHANGE MASTER TO
      MASTER_HOST = 'mysql-master-0.mysql-master.mysql.svc.cluster.local',  # master address (headless Service DNS)
      MASTER_PORT = 3306,                   # MySQL port
      MASTER_USER = 'rsyncuser',            # replication user
      MASTER_PASSWORD = '123.com',          # replication password
      MASTER_AUTO_POSITION = 1;             # GTID auto-positioning
    START SLAVE;                            # start replication

4. Master: StatefulSet + headless Service

[root@k8s-master mysql]# cat master.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-master
  namespace: mysql
spec:
  serviceName: mysql-master
  replicas: 1
  selector:
    matchLabels:
      app: mysql-master
  template:
    metadata:
      labels:
        app: mysql-master
    spec:
      containers:
      - name: mysql
        image: mysql:8.0
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mysql-secrets
              key: root-password
        volumeMounts:
        - name: mysql-data          # persist the data directory (PVC from volumeClaimTemplates below)
          mountPath: /var/lib/mysql
        - name: mysql-config
          mountPath: /etc/mysql/conf.d/master.cnf
          subPath: master.cnf
        - name: master-init-script  # mount only the master script
          mountPath: /docker-entrypoint-initdb.d/master-init.sql
          subPath: master-init.sql  # pin the script name
      volumes:
      - name: mysql-config
        configMap:
          name: mysql-config
          items:
          - key: master.cnf
            path: master.cnf
      - name: master-init-script  # the master script from the init ConfigMap
        configMap:
          name: mysql-init-scripts
          items:
          - key: master-init.sql
            path: master-init.sql
  # volumeClaimTemplates: one dynamically provisioned PVC per replica
  volumeClaimTemplates:
  - metadata:
      name: mysql-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs-sc"  # the StorageClass created earlier
      resources:
        requests:
          storage: 10Gi  # size as needed
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  namespace: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  clusterIP: None
  selector:
    app: mysql-master
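
Because clusterIP: None makes this a headless Service, each StatefulSet pod gets a stable DNS name of the form <pod>.<service>.<namespace>.svc.cluster.local, which is why the slave and the applications can reach the master as mysql-master-0.mysql-master.mysql.svc.cluster.local. A quick lookup from a throwaway pod (a sketch; assumes a busybox image is pullable, and the dns-test pod name is arbitrary):

kubectl run dns-test --rm -it --image=busybox --restart=Never -- \
  nslookup mysql-master-0.mysql-master.mysql.svc.cluster.local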

5. Slave

[root@k8s-master mysql]# cat slave.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-slave
  namespace: mysql
spec:
  serviceName: mysql-slave
  replicas: 1  
  selector:
    matchLabels:
      app: mysql-slave
  template:
    metadata:
      labels:
        app: mysql-slave
    spec:
      containers:
      - name: mysql
        image: mysql:8.0
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mysql-secrets
              key: root-password
        volumeMounts:
        - name: mysql-data          # persist the data directory (PVC from volumeClaimTemplates below)
          mountPath: /var/lib/mysql
        - name: mysql-config
          mountPath: /etc/mysql/conf.d/slave.cnf
          subPath: slave1.cnf
        - name: init-script         # mount only the slave script
          mountPath: /docker-entrypoint-initdb.d/slave-init.sql
          subPath: slave-init.sql
      volumes:
      - name: mysql-config
        configMap:
          name: mysql-config
          items:
          - key: slave1.cnf
            path: slave1.cnf
      - name: init-script
        configMap:
          name: mysql-init-scripts
          items:
          - key: slave-init.sql     # only the slave script; master-init.sql must not run here
            path: slave-init.sql
  volumeClaimTemplates:
  - metadata:
      name: mysql-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs-sc"
      resources:
        requests:
          storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-slave
  namespace: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  clusterIP: None
  selector:
    app: mysql-slave

6. Verify

### pods running
[root@k8s-master nginx]# kubectl -n mysql get pod
NAME             READY   STATUS    RESTARTS   AGE
mysql-master-0   1/1     Running   0          11h
mysql-slave-0    1/1     Running   0          11h
### Services created
[root@k8s-master nginx]# kubectl -n mysql get svc
NAME           TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
mysql-master   ClusterIP   None         <none>        3306/TCP   12h
mysql-slave    ClusterIP   None         <none>        3306/TCP   12h
### PVCs provisioned dynamically
[root@k8s-master nginx]# kubectl -n mysql get pvc
NAME                        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mysql-data-mysql-master-0   Bound    pvc-26c64423-bdc7-4687-9cfc-9b3ac3375e9f   10Gi       RWO            nfs-sc         11h
mysql-data-mysql-slave-0    Bound    pvc-153c4aaa-d48f-4526-a290-381f62d421d4   10Gi       RWO            nfs-sc         11h
### PVs provisioned dynamically
[root@k8s-master nginx]# kubectl -n mysql get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                             STORAGECLASS   REASON   AGE
pvc-153c4aaa-d48f-4526-a290-381f62d421d4   10Gi       RWO            Delete           Bound    mysql/mysql-data-mysql-slave-0    nfs-sc                  11h
pvc-26c64423-bdc7-4687-9cfc-9b3ac3375e9f   10Gi       RWO            Delete           Bound    mysql/mysql-data-mysql-master-0   nfs-sc                  11h

Check replication

[root@k8s-master nginx]# kubectl -n mysql exec -it mysql-slave-0 -- /bin/bash
bash-5.1# mysql -uroot -p123.com
mysql> show slave status\G
......
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
.....
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| biyesheji          |
| discuz             |
| information_schema |
| mysql              |
| performance_schema |
| sys                |
+--------------------+
6 rows in set (0.01 sec)
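
Beyond the thread status, a write on the master should appear on the slave (a sketch; the repl_test database name is arbitrary):

kubectl -n mysql exec mysql-master-0 -- mysql -uroot -p123.com \
  -e "CREATE DATABASE repl_test; CREATE TABLE repl_test.t (id INT); INSERT INTO repl_test.t VALUES (1);"
kubectl -n mysql exec mysql-slave-0 -- mysql -uroot -p123.com \
  -e "SELECT * FROM repl_test.t;"               # should print 1
kubectl -n mysql exec mysql-master-0 -- mysql -uroot -p123.com \
  -e "DROP DATABASE repl_test;"                 # clean up (the drop replicates too)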

Part 3: Redis Master-Slave

1. Create the namespace

[root@k8s-master redis]# cat namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: redis

2. Create the ConfigMap

[root@k8s-master redis]# cat redis-configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-config
  namespace: redis
data:
  redis-master.conf: |
    port 6379
    bind 0.0.0.0
    protected-mode no
    daemonize no
    timeout 0
    save ""
    appendonly no
    maxmemory 1gb
    maxmemory-policy allkeys-lru
  redis-slave.conf: |
    port 6379
    bind 0.0.0.0
    protected-mode no
    daemonize no
    timeout 0
    save ""
    appendonly no
    maxmemory 1gb
    maxmemory-policy allkeys-lru
    slaveof redis-master-0.redis-master.redis.svc.cluster.local 6379
    slave-read-only yes

3. Redis master

[root@k8s-master redis]# cat redis-master.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-master        # StatefulSet name
  namespace: redis          # namespace
spec:
  serviceName: redis-master # associated headless Service
  replicas: 1               # one master
  selector:                 # pod selector
    matchLabels:
      app: redis-master
  template:                 # pod template
    metadata:
      labels:
        app: redis-master   # pod label (must match the selector)
    spec:
      containers:
      - name: redis-master  # container name
        image: redis:6-alpine  # Alpine-based Redis image
        command: ["redis-server", "/etc/redis/redis-master.conf"]  # start with the master config
        ports:
        - containerPort: 6379  # default Redis port
        volumeMounts:
        - name: redis-config   # mount the config volume
          mountPath: /etc/redis  # mount path inside the container
      volumes:
      - name: redis-config     # volume definition
        configMap:
          name: redis-config   # references the redis-config ConfigMap
---
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  namespace: redis
spec:
  clusterIP: None
  selector:
    app: redis-master
  ports:
  - port: 6379
    targetPort: 6379

4. Redis slave

[root@k8s-master redis]# cat redis-slave.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-slave           # StatefulSet name
  namespace: redis            # namespace
spec:
  serviceName: redis-slave   # associated headless Service
  replicas: 1                 # replica count
  selector:                   # pod selector
    matchLabels:
      app: redis-slave
  template:                   # pod template
    metadata:
      labels:
        app: redis-slave     # pod label (must match the selector)
    spec:
      containers:
      - name: redis-slave    # container name
        image: redis:6-alpine # Alpine-based Redis image
        command: ["redis-server", "/etc/redis/redis-slave.conf"] # start with the slave config (not the master one)
        ports:
        - containerPort: 6379  # default Redis port
        volumeMounts:
        - name: redis-config   # config volume
          mountPath: /etc/redis # mount path inside the container
      volumes:                # volume definitions
      - name: redis-config
        configMap:            # ConfigMap as the config source
          name: redis-config  # referenced ConfigMap
---
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  namespace: redis
spec:
  clusterIP: None
  selector:
    app: redis-slave
  ports:
  - port: 6379

5. Verify

[root@k8s-master nginx]# kubectl -n redis get pod
NAME             READY   STATUS    RESTARTS   AGE
redis-master-0   1/1     Running   0          12h
redis-slave-0    1/1     Running   0          12h
[root@k8s-master nginx]# kubectl -n redis get svc
NAME           TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
redis-master   ClusterIP   None         <none>        6379/TCP   12h
redis-slave    ClusterIP   None         <none>        6379/TCP   12h
[root@k8s-master nginx]# kubectl -n redis get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                             STORAGECLASS   REASON   AGE
pvc-153c4aaa-d48f-4526-a290-381f62d421d4   10Gi       RWO            Delete           Bound    mysql/mysql-data-mysql-slave-0    nfs-sc                  11h
pvc-26c64423-bdc7-4687-9cfc-9b3ac3375e9f   10Gi       RWO            Delete           Bound    mysql/mysql-data-mysql-master-0   nfs-sc                  11h
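
Note that PVs are cluster-scoped, so the listing above just shows the MySQL volumes again; this Redis setup keeps no persistent data at all (save "" and appendonly no). Replication itself can be verified with redis-cli (the k1 key is an arbitrary test value):

kubectl -n redis exec redis-master-0 -- redis-cli SET k1 hello
kubectl -n redis exec redis-slave-0 -- redis-cli GET k1             # -> "hello"
kubectl -n redis exec redis-slave-0 -- redis-cli INFO replication   # role:slave, master_link_status:up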

Part 4: Building the Images

1.nginx-php-discuz

[root@k8s-master nginx]# ls
discuz  discuz.conf  discuz.yaml  Dockerfile  www.conf

Nginx configuration

[root@k8s-master nginx]# cat discuz.conf 
server {
    listen 80;
    server_name localhost;
    root /var/www/html;
    index index.php index.html index.htm;
 
    access_log /var/log/nginx/discuz_access.log;
    error_log /var/log/nginx/discuz_error.log;
 
    location / {
        try_files $uri $uri/ /index.php?$query_string;
    }
 
    location ~ \.php$ {
        fastcgi_pass unix:/run/php/php83-fpm.sock;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
 
    location ~ /\.ht {
        deny all;
    }
}

PHP-FPM configuration

[root@k8s-master nginx]# cat www.conf 
[www]
user = nginx
group = nginx
listen = /run/php/php83-fpm.sock
listen.owner = nginx
listen.group = nginx
listen.mode = 0660
 
pm = dynamic
pm.max_children = 5
pm.start_servers = 2
pm.min_spare_servers = 1
pm.max_spare_servers = 3
 
php_admin_value[error_log] = /var/log/php83/www-error.log
php_admin_flag[log_errors] = on

Dockerfile

[root@k8s-master nginx]# cat Dockerfile 
FROM alpine:latest
RUN  apk update && apk add --no-cache \
     php83 php83-fpm php83-mysqlnd php83-gd php83-mbstring \
     php83-curl php83-json php83-openssl php83-xml \
     php83-mysqli php83-tokenizer php83-pdo php83-pdo_mysql \
     nginx php83-redis vim 
RUN mkdir -p \
    /run/nginx \
    /var/www/html \
    /run/php &&\
    chown -R nginx:nginx /var/www/html /run/nginx && \
    chmod 755 /var/www/html
# Nginx config
COPY discuz.conf /etc/nginx/http.d/default.conf
# PHP-FPM config
COPY www.conf /etc/php83/php-fpm.d/www.conf
# expose the HTTP port
EXPOSE 80
# run php-fpm in the background and nginx in the foreground
CMD ["sh","-c","php-fpm83 --nodaemonize & nginx -g 'daemon off;'"]

Build the image

 docker build -t nginx:v1 .
#### run a container from the image
[root@k8s-master nginx]# docker run -itd nginx:v1
a902b31a6fe65fe5e7db02c68ec073407f85142fceaab1ce89be9be21fd03efc

Create test files

#### index.html info.php mysql.php redis.php

/var/www/html # cat index.html 
nginx
/var/www/html # cat info.php 
<?php
    phpinfo();
?>
/var/www/html # cat mysql.php 
<?php
error_reporting(E_ALL);
ini_set('display_errors', 1);
$host = 'mysql-master-0.mysql-master.mysql.svc.cluster.local';     // database host
$user = 'discuz';        // MySQL user
$pass = '123.com';       // MySQL password
$dbname = 'discuz';      // database to connect to
 
// try to connect to MySQL
$conn = new mysqli($host, $user, $pass, $dbname);
 
// check for connection errors
if ($conn->connect_error) {
    // abort and print the error on failure
    die('Connection failed: ' . $conn->connect_error);
}
 
// success: print the server version
echo "MySQL connected! Server version: " . $conn->server_info;
?>
/var/www/html # cat redis.php 
<?php
$redis = new Redis();
try {
    // connect to the master (replace with your actual address and port)
    $conn = $redis->connect('redis-master.redis.svc.cluster.local', 6379, 2); 
    if ($conn) {
        echo "Connected!";
        echo "Redis response: " . $redis->ping(); // check the server responds
    } else {
        echo "Connection failed (no error message)";
    }
} catch (RedisException $e) {
    // print the specific error (timeout, connection refused, auth failure, ...)
    echo "Redis connection error: " . $e->getMessage();
}
/var/www/html # exit
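
Before committing the container, the static and PHP paths can be smoke-tested from the host (a sketch; the mysql.php/redis.php pages need cluster DNS and only work once the image runs inside the cluster):

IP=$(docker inspect -f '{{.NetworkSettings.IPAddress}}' a902b31a6fe6)
curl http://$IP/index.html   # -> nginx
curl http://$IP/info.php     # phpinfo() output confirms the nginx/php-fpm wiring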

[root@k8s-master nginx]# ls
discuz.conf  Dockerfile  www.conf

Import the Discuz package

[root@k8s-master nginx]# mkdir discuz
[root@k8s-master nginx]# ls
discuz  discuz.conf  Dockerfile  www.conf
[root@k8s-master nginx]# cd discuz/
[root@k8s-master discuz]# ls
[root@k8s-master discuz]# rz
rz waiting to receive.**[root@k8s-master discuz]# 
[root@k8s-master discuz]# 
[root@k8s-master discuz]# ls
Discuz_X3.5_SC_UTF8_20250205.zip
[root@k8s-master discuz]# unzip Discuz_X3.5_SC_UTF8_20250205.zip 

[root@k8s-master discuz]# ls
Discuz_X3.5_SC_UTF8_20250205.zip  LICENSE  qqqun.png  readme  readme.html  upload  utility.html
[root@k8s-master discuz]# rm -rf Discuz_X3.5_SC_UTF8_20250205.zip 

Copy the package into the container

[root@k8s-master discuz]# docker cp ./ a902b31a6fe6:/var/www/html
Successfully copied 34.8MB to a902b31a6fe6:/var/www/html
[root@k8s-master discuz]# docker exec -it a902 /bin/sh
/ # cd /var/www/html
/var/www/html # ls
LICENSE       info.php      qqqun.png     readme.html   upload
index.html    mysql.php     readme        redis.php     utility.html
/var/www/html # cd ..
/var/www # chown -R nginx:nginx html/
/var/www # cd html
/var/www/html # ls -l

Edit the Discuz configuration

/var/www/html/upload # cd config/
/var/www/html/upload/config # ls
config_global_default.php   config_ucenter_default.php  index.htm
/var/www/html/upload/config # vim config_global_default.php 

After editing (screenshots omitted): config_global_default.php points the master DB, the slave DB, and Redis at their in-cluster Service DNS names.

/var/www/html/upload/config # vim config_ucenter_default.php

config_ucenter_default.php gets the same database connection settings (screenshot omitted).

Commit the image and distribute it to the nodes

[root@k8s-master nginx]# docker commit a902b31a6fe6 proj-nginx:latest
sha256:a4bf8e59acf9a819bb4a2ea875eb1ba6e11fc2d868213d076322b10340f294a0
[root@k8s-master nginx]# docker save proj-nginx:latest -o proj-nginx.tar
[root@k8s-master nginx]# ls
discuz  discuz.conf  Dockerfile  proj-nginx.tar  www.conf
[root@k8s-master nginx]# scp proj-nginx.tar 192.168.158.34:/root/
 
Authorized users only. All activities may be monitored and reported.
proj-nginx.tar                                                                                   100%  123MB 330.6MB/s   00:00    
[root@k8s-master nginx]# scp proj-nginx.tar 192.168.158.35:/root/
 
Authorized users only. All activities may be monitored and reported.
proj-nginx.tar                                                                                   100%  123MB 344.9MB/s   00:00 
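
The Deployment later uses imagePullPolicy: IfNotPresent with this locally built image, so the tarball must be loaded on every node (assuming Docker is the container runtime there):

[root@k8s-node1 ~]# docker load -i /root/proj-nginx.tar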

2.tomcat

[root@k8s-master tomcat]# ls
Dockerfile  shop  tomcat.yaml

Pull the base image

docker pull tomcat:8

Place the WAR file in the shop directory

[root@k8s-master tomcat]# cd shop/
[root@k8s-master shop]# ls
[root@k8s-master shop]# rz
rz waiting to receive.**[root@k8s-master shop]# 
[root@k8s-master shop]# ls
ROOT.war

Start a test container

[root@k8s-master shangcheng]# docker run -itd --name tomcat -v /root/project/tomcat/shop/:/usr/local/tomcat/webapps/  --restart=always tomcat:8
1adaadb8d33f77f4ca31bfb438471a6328c35ec4105aecdd88a71330896bca5c
### the WAR is auto-extracted
[root@k8s-master shop]# ls
ROOT  ROOT.war

Edit the Tomcat application configuration

#### the WAR was already extracted by the test container, so edit locally; no need to go into the container
[root@k8s-master classes]# pwd
/root/project/tomcat/shop/ROOT/WEB-INF/classes
##### set the MySQL Service DNS name, database name, and password
[root@k8s-master classes]# vim jdbc.properties 
jdbc.driver=com.mysql.cj.jdbc.Driver
jdbc.jdbcUrl=jdbc:mysql://mysql-master-0.mysql-master.mysql.svc.cluster.local:3306/biyesheji?useUnicode=true&characterEncoding=utf-8&allowMultiQueries=true&useSSL=false&serverTimezone=GMT%2b8&allowPublicKeyRetrieval=true
jdbc.user=tomcat
jdbc.password=123.com
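
The JDBC target can be sanity-checked from inside the cluster with a throwaway client pod (a sketch; the mysql-client pod name is arbitrary):

kubectl run mysql-client --rm -it --image=mysql:8.0 --restart=Never -- \
  mysql -h mysql-master-0.mysql-master.mysql.svc.cluster.local -utomcat -p123.com \
  -e "SHOW DATABASES LIKE 'biyesheji';"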

Write the Dockerfile

[root@k8s-master tomcat]# vim Dockerfile 
FROM tomcat:8

COPY shop/ /usr/local/tomcat/webapps/

EXPOSE 8080

CMD ["catalina.sh", "run"]

Build the image

[root@k8s-master tomcat]# docker build -t pro-tomcat:latest .

Distribute to all nodes

[root@k8s-master tomcat]# docker save pro-tomcat:latest -o pro-tomcat.tar
[root@k8s-master tomcat]# scp pro-tomcat.tar 192.168.157.101:/root

Authorized users only. All activities may be monitored and reported.
pro-tomcat.tar                                                                                        100%  481MB 105.1MB/s   00:04    
[root@k8s-master tomcat]# scp pro-tomcat.tar 192.168.157.102:/root

Authorized users only. All activities may be monitored and reported.
pro-tomcat.tar 
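
As with the Nginx image, load the tarball on every node so imagePullPolicy: IfNotPresent can find it locally:

[root@k8s-node1 ~]# docker load -i /root/pro-tomcat.tar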

Part 5: Kubernetes Deployment

Both applications are deployed in the same namespace (web).

1.tomcat-shop

[root@k8s-master tomcat]# pwd
/root/project/tomcat
[root@k8s-master tomcat]# ls
Dockerfile  pro-tomcat.tar  shop  tomcat.yaml
##########
[root@k8s-master tomcat]# cat tomcat.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: web
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-tomcat
  namespace: web 
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-tomcat
  template:
    metadata:
      labels:
        app: web-tomcat
    spec:
      containers:
      - name: tomcat
        image: pro-tomcat:latest  # the locally built image
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080  # port exposed inside the container
 
---
apiVersion: v1
kind: Service
metadata:
  name: svc-tomcat
  namespace: web  # same namespace as the Deployment
spec:
  type: NodePort
  ports:
  - port: 8080        # in-cluster Service port
    targetPort: 8080  # container port
  selector:
    app: web-tomcat  # matches the Deployment's pod labels

Apply and verify

[root@k8s-master tomcat]# kubectl apply -f tomcat.yaml 
namespace/web created
deployment.apps/dep-tomcat created
service/svc-tomcat created
[root@k8s-master tomcat]# kubectl -n web get pod
NAME                         READY   STATUS    RESTARTS   AGE
dep-tomcat-69dd5b6fd-zh9tj   1/1     Running   0          28s
[root@k8s-master tomcat]# kubectl -n web get svc
NAME         TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
svc-tomcat   NodePort   10.101.173.117   <none>        8080:30448/TCP   32s

http://192.168.157.100:30448/fore/foreIndex
(screenshot omitted: shop front page at the NodePort URL)

2.discuz

[root@k8s-master nginx]# pwd
/root/project/nginx
[root@k8s-master nginx]# ls
discuz  discuz.conf  discuz.yaml  Dockerfile  www.conf
[root@k8s-master nginx]# cat discuz.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-discuz
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-discuz
  template:
    metadata:
      labels:
        app: web-discuz
    spec:
      containers:
      - name: nginx
        image: proj-nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: svc-discuz
  namespace: web
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: web-discuz

Apply and verify

[root@k8s-master nginx]# kubectl apply -f discuz.yaml
[root@k8s-master nginx]# kubectl -n web get pod
NAME                          READY   STATUS    RESTARTS   AGE
dep-discuz-758c879dcb-6kdcf   1/1     Running   0          6h32m
[root@k8s-master nginx]# kubectl -n web get svc
NAME         TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
svc-discuz   NodePort   10.108.98.218   <none>        80:30378/TCP   6h32m

Test access

(screenshot omitted: Discuz reachable in the browser)

Test MySQL and Redis connectivity

(screenshots omitted: mysql.php and redis.php pages)

Install

(screenshot omitted: Discuz install wizard)

Log in

(screenshot omitted: forum after login)

Part 6: Domain-Based Access with Ingress

1. Copy and load the images

Copy the archives over, then load/extract the images.
###### on all nodes
[root@k8s-node1 ~]# docker load -i ingress-1.11.tar 
###### on the master node; MetalLB will act as the load balancer
[root@k8s-master ~]# unzip metallb-0.14.8.zip

2. Deploy the ingress controller

[root@k8s-master ~]# cd /root/ingress-nginx-controller-v1.11.3/deploy/static/provider/cloud/
[root@k8s-master cloud]# ls
deploy.yaml  kustomization.yaml
[root@k8s-master cloud]# vim deploy.yaml 
### edit deploy.yaml: starting around line 447, delete the @sha256 digest from the image lines (3 places) so the locally loaded images are used
 image: registry.k8s.io/ingress-nginx/controller:v1.11.3  ## delete the trailing @sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
[root@k8s-master cloud]# cat deploy.yaml | grep -n image
445:        image: registry.k8s.io/ingress-nginx/controller:v1.12.0
547:        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0
601:        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0
######### deploy #########
[root@k8s-master cloud]# kubectl apply -f deploy.yaml 
......
[root@k8s-master cloud]# kubectl -n ingress-nginx get pod
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-6qhh4        0/1     Completed   0          4m27s
ingress-nginx-admission-patch-6jnc8         0/1     Completed   1          4m27s
ingress-nginx-controller-7d7455dcf8-89krp   1/1     Running     0          4m27s

3. Switch to LoadBalancer mode

kubectl -n ingress-nginx edit svc ingress-nginx-controller
  type: LoadBalancer  ### set this field (around line 50)
status:
  loadBalancer:
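
The same change can be made non-interactively with kubectl patch (equivalent to the edit above):

kubectl -n ingress-nginx patch svc ingress-nginx-controller -p '{"spec":{"type":"LoadBalancer"}}'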

4. Deploy MetalLB

[root@k8s-master cloud]# cd /root/metallb-0.14.8/config/manifests
######### the images pull slowly, so apply this first
[root@k8s-master cloud]# kubectl apply -f metallb-native.yaml

Create the IP address pool

cat > IPAddressPool.yaml<<EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: planip-pool # this name must be referenced by the L2Advertisement's ipAddressPools list below
  namespace: metallb-system
spec:
  addresses:
  - 192.168.157.170-192.168.157.180 # pick a free range, ideally in the same subnet as the cluster nodes
EOF

Advertise the IP address pool

# advertise the IP address pool (L2 mode)
cat > L2Advertisement.yaml<<EOF
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: planip-pool
  namespace: metallb-system
spec:
  ipAddressPools:
  - planip-pool # must match the IPAddressPool name above
EOF

Apply

### wait for the MetalLB components above to be ready before applying these
kubectl apply -f IPAddressPool.yaml
kubectl apply -f L2Advertisement.yaml
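
To verify, the MetalLB pods should be Running and the ingress controller Service should now carry an EXTERNAL-IP from the pool (here the first free address, 192.168.157.170):

kubectl -n metallb-system get pod
kubectl -n ingress-nginx get svc ingress-nginx-controller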

5. Create the Ingress rules

[root@k8s-master project]# cat ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress # an Ingress resource
metadata:
  name: nginx-ingress # resource name
  namespace: web
spec:
  ingressClassName: nginx # use the nginx ingress class
  rules:
  - host: test.nginx.haochacha.com # domain for the Discuz site
    http:
      paths:
      - backend:
          service:
            name: svc-discuz # the Discuz Service; the Ingress must be in the same namespace as the Service
            port:
              number: 80 # Service port
        path: / # path to match
        pathType: Prefix # prefix match
  - host: test.tomcat.haochacha.com # domain for the Tomcat shop
    http:
      paths:
      - backend:
          service:
            name: svc-tomcat # the Tomcat Service; same namespace rule applies
            port:
              number: 8080 # Service port
        path: / # path to match
        pathType: Prefix # prefix match
#### apply
[root@k8s-master project]# kubectl apply -f ingress.yaml

[root@k8s-master project]# kubectl -n web get ingress
NAME            CLASS   HOSTS                                                ADDRESS           PORTS   AGE
nginx-ingress   nginx   test.nginx.haochacha.com,test.tomcat.haochacha.com   192.168.157.170   80      60m

[root@k8s-master project]# kubectl -n web describe ingress nginx-ingress 
Name:             nginx-ingress
Labels:           <none>
Namespace:        web
Address:          192.168.157.170
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host                       Path  Backends
  ----                       ----  --------
  test.nginx.haochacha.com   
                             /   svc-discuz:80 (10.244.169.158:80)
  test.tomcat.haochacha.com  
                             /   svc-tomcat:8080 (10.244.36.105:8080)
Annotations:                 <none>
Events:
  Type    Reason  Age                  From                      Message
  ----    ------  ----                 ----                      -------
  Normal  Sync    62s (x3 over 4m45s)  nginx-ingress-controller  Scheduled for sync       

6. Test

### add hosts entries on the test client
[root@nfs-server ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.157.170  test.nginx.haochacha.com
192.168.157.170  test.tomcat.haochacha.com
#### access Discuz
[root@nfs-server ~]# curl http://test.nginx.haochacha.com/upload/install/
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta name="renderer" content="webkit" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<title>Discuz! 安装向导</title>
<link rel="stylesheet" href="static/style.css" type="text/css" media="all" />
......
##### access the shop
[root@nfs-server ~]# curl http://test.tomcat.haochacha.com/fore/foreIndex
<!DOCTYPE html>
<html class="no-js" lang="zxx">

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <title>星味美网上订餐系统</title>
    <meta name="description" content="">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <!-- Favicon -->
    <link rel="icon" href="/assets/images/favicon.ico">
.......
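
If editing /etc/hosts is inconvenient, curl can pin the name resolution itself (same requests, no hosts entries needed):

curl --resolve test.nginx.haochacha.com:80:192.168.157.170 http://test.nginx.haochacha.com/upload/install/
curl --resolve test.tomcat.haochacha.com:80:192.168.157.170 http://test.tomcat.haochacha.com/fore/foreIndex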

Part 7: Summary

When building the images, the Nginx/Discuz configuration was edited inside a running container and then committed, while the Tomcat application configuration was edited on the host before the image build.
The Ingress resource must live in the same namespace as the Services it routes to.