Deploying a Discuz Forum and a Tomcat Shop on Kubernetes
I. Persistent storage: StorageClass + NFS
1. Create the ServiceAccount and RBAC
[root@k8s-master scnfs]# cat nfs-provisioner-rbac.yaml
# 1. ServiceAccount: identity used by the NFS provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: default        # all resources live in the default namespace; adjust as needed
---
# 2. ClusterRole: cluster-wide permissions for dynamic PV provisioning (PV/PVC/StorageClass, etc.)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-provisioner-clusterrole
rules:
  # PVs (create/delete/list)
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  # PVCs (read and update status)
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  # StorageClasses (look up the available SCs)
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  # Events (report PV creation success/failure)
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  # Services/Endpoints (needed by leader election in some versions)
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
# 3. ClusterRoleBinding: bind the cluster-wide permissions to the ServiceAccount
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nfs-provisioner-clusterrolebinding
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default       # must match the ServiceAccount's namespace
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-clusterrole
  apiGroup: rbac.authorization.k8s.io
---
# 4. Role: namespace-scoped permissions for the leader-election endpoints/leases
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nfs-provisioner-role
  namespace: default         # same namespace as the provisioner so the election permissions apply
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["coordination.k8s.io"]   # Lease objects live in coordination.k8s.io, not the core group
    resources: ["leases"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
# 5. RoleBinding: bind the namespace-scoped election permissions to the ServiceAccount
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nfs-provisioner-rolebinding
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: Role
  name: nfs-provisioner-role
  apiGroup: rbac.authorization.k8s.io
2. Create the provisioner
[root@k8s-master scnfs]# cat nfs-provisioner-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-provisioner
  namespace: default
  labels:
    app: nfs-provisioner
spec:
  replicas: 1                     # single replica for testing; production is usually run multi-replica (StatefulSet)
  selector:
    matchLabels:
      app: nfs-provisioner
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccountName: nfs-provisioner   # the ServiceAccount created above
      containers:
        - name: nfs-provisioner
          # Aliyun mirror of the image (pulls reliably inside China, replaces k8s.gcr.io)
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # core settings; PROVISIONER_NAME must match the StorageClass provisioner field
          env:
            - name: PROVISIONER_NAME
              value: cluster.local/nfs-provisioner   # referenced later by the StorageClass
            - name: NFS_SERVER
              value: 192.168.157.110                 # IP of the existing NFS server
            - name: NFS_PATH
              value: /nfs                            # exported NFS share
          # mount the NFS share into the container (fixed path where the provisioner creates PV subdirectories)
          volumeMounts:
            - name: nfs-volume
              mountPath: /persistentvolumes
      # NFS volume pointing at the server/share above
      volumes:
        - name: nfs-volume
          nfs:
            server: 192.168.157.110
            path: /nfs
3. Create the StorageClass
[root@k8s-master scnfs]# cat nfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-sc                                # StorageClass name, referenced by PVCs
provisioner: cluster.local/nfs-provisioner    # must exactly match the provisioner's PROVISIONER_NAME
parameters:
  archiveOnDelete: "true"                     # archive the NFS data when the PVC is deleted (safer; recommended for production)
reclaimPolicy: Delete                         # PV reclaim policy: Delete (remove PV/data with the PVC) or Retain (keep the data)
allowVolumeExpansion: true                    # allow PVC expansion (supported from v4.0+)
volumeBindingMode: Immediate                  # bind PVCs immediately, without waiting for pod scheduling
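Assuming the three files above all sit in the scnfs directory (filenames taken from the cat commands), they can be applied in one pass before running the checks below; a minimal sketch, adjust the paths if your layout differs:
# apply the RBAC objects, the provisioner Deployment, and the StorageClass
kubectl apply -f nfs-provisioner-rbac.yaml
kubectl apply -f nfs-provisioner-deploy.yaml
kubectl apply -f nfs-storageclass.yaml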
4. Verify
[root@k8s-master scnfs]# kubectl get sa
NAME SECRETS AGE
default 0 32h
nfs-provisioner 0 38m
[root@k8s-master scnfs]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-sc cluster.local/nfs-provisioner Delete Immediate true 39m
[root@k8s-master scnfs]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nfs-provisioner-745557fd5c-dslzv 1/1 Running 0 39m
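As an optional end-to-end check that is not part of the original run, a throwaway PVC can be pointed at nfs-sc and deleted again afterwards; the claim name test-claim is made up for this sketch:
# create a temporary claim against the nfs-sc StorageClass
cat > test-claim.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-sc
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl apply -f test-claim.yaml
kubectl get pvc test-claim          # STATUS should turn Bound within a few seconds
kubectl delete -f test-claim.yaml   # clean up; with archiveOnDelete=true the backing directory is archived on the NFS server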
II. Deploy MySQL master/slave replication
1. Create a Secret for the passwords
[root@k8s-master mysql]# cat secret.yaml
apiVersion: v1
kind: Secret                      # resource type: Secret
metadata:
  name: mysql-secrets             # Secret name
  namespace: mysql                # lives in the mysql namespace
type: Opaque                      # generic (Opaque) secret type
data:                             # values are Base64-encoded
  root-password: MTIzLmNvbQ==     # MySQL root password; decodes to 123.com
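The Base64 value can be generated and double-checked on the shell. The mysql namespace referenced by this Secret (and by all the manifests below) is assumed to exist; create it first if it does not:
kubectl create namespace mysql      # the Secret above lives in the mysql namespace
echo -n '123.com' | base64          # -> MTIzLmNvbQ==  (the value used above)
echo MTIzLmNvbQ== | base64 -d       # -> 123.com       (decode to verify)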
2. Configuration files (ConfigMap)
[root@k8s-master mysql]# cat configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-config
  namespace: mysql
data:
  master.cnf: |
    [mysqld]
    server-id=10
    log_bin=/var/lib/mysql/mysql-bin.log
    read_only=0
    bind-address=0.0.0.0
    gtid_mode=ON
    enforce_gtid_consistency=ON
    default_authentication_plugin=mysql_native_password
  slave1.cnf: |
    [mysqld]
    server-id=20
    relay_log=/var/lib/mysql/mysql-relay-bin.log
    log_bin=/var/lib/mysql/mysql-bin.log
    read_only=1
    bind-address=0.0.0.0
    gtid_mode=ON
    enforce_gtid_consistency=ON
    default_authentication_plugin=mysql_native_password
3. Initialization scripts
[root@k8s-master mysql]# cat init-scripts.yaml
apiVersion: v1
kind: ConfigMap                          # resource type: ConfigMap
metadata:
  name: mysql-init-scripts               # MySQL initialization SQL scripts
  namespace: mysql
data:
  master-init.sql: |                     # init script for the master
    CREATE USER IF NOT EXISTS 'rsyncuser'@'%' IDENTIFIED BY '123.com';   # replication user
    GRANT REPLICATION SLAVE ON *.* TO 'rsyncuser'@'%';                   # replication privilege
    CREATE DATABASE IF NOT EXISTS discuz;                                # database for Discuz
    CREATE USER IF NOT EXISTS 'discuz'@'%' IDENTIFIED BY '123.com';
    GRANT ALL PRIVILEGES ON discuz.* TO 'discuz'@'%';
    CREATE DATABASE IF NOT EXISTS biyesheji;                             # database for the Tomcat shop
    CREATE USER IF NOT EXISTS 'tomcat'@'%' IDENTIFIED BY '123.com';
    GRANT ALL PRIVILEGES ON biyesheji.* TO 'tomcat'@'%';
    FLUSH PRIVILEGES;
  slave-init.sql: |                      # init script for the slave
    CHANGE MASTER TO
      MASTER_HOST = 'mysql-master-0.mysql-master.mysql.svc.cluster.local',  # master address
      MASTER_PORT = 3306,
      MASTER_USER = 'rsyncuser',
      MASTER_PASSWORD = '123.com',
      MASTER_AUTO_POSITION = 1;          # GTID-based auto positioning
    START SLAVE;
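The Secret and both ConfigMaps can be applied together. One caveat: the official mysql image only executes the scripts under /docker-entrypoint-initdb.d during the first initialization of an empty data directory, so the init SQL is not re-run against an already-populated PVC:
kubectl apply -f secret.yaml -f configmap.yaml -f init-scripts.yaml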
4. Master: StatefulSet + headless Service
[root@k8s-master mysql]# cat master.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-master
  namespace: mysql
spec:
  serviceName: mysql-master
  replicas: 1
  selector:
    matchLabels:
      app: mysql-master
  template:
    metadata:
      labels:
        app: mysql-master
    spec:
      containers:
        - name: mysql
          image: mysql:8.0
          ports:
            - containerPort: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secrets
                  key: root-password
          volumeMounts:
            - name: mysql-data                     # persist /var/lib/mysql on the PVC declared below; without this mount the claim is created but unused
              mountPath: /var/lib/mysql
            - name: mysql-config
              mountPath: /etc/mysql/conf.d/master.cnf
              subPath: master.cnf
            - name: master-init-script             # mount only the master's init script
              mountPath: /docker-entrypoint-initdb.d/master-init.sql
              subPath: master-init.sql
      volumes:
        - name: mysql-config
          configMap:
            name: mysql-config
            items:
              - key: master.cnf
                path: master.cnf
        - name: master-init-script                 # the master script from the ConfigMap
          configMap:
            name: mysql-init-scripts
            items:
              - key: master-init.sql
                path: master-init.sql
  # dynamically provisioned data volume
  volumeClaimTemplates:
    - metadata:
        name: mysql-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "nfs-sc"                 # the StorageClass created earlier
        resources:
          requests:
            storage: 10Gi                          # adjust to your needs
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  namespace: mysql
spec:
  ports:
    - port: 3306
      name: mysql
  clusterIP: None
  selector:
    app: mysql-master
5. Slave: StatefulSet + headless Service
[root@k8s-master mysql]# cat slave.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-slave
  namespace: mysql
spec:
  serviceName: mysql-slave
  replicas: 1
  selector:
    matchLabels:
      app: mysql-slave
  template:
    metadata:
      labels:
        app: mysql-slave
    spec:
      containers:
        - name: mysql
          image: mysql:8.0
          ports:
            - containerPort: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secrets
                  key: root-password
          volumeMounts:
            - name: mysql-data                     # persist /var/lib/mysql on the PVC declared below
              mountPath: /var/lib/mysql
            - name: mysql-config
              mountPath: /etc/mysql/conf.d/slave.cnf
              subPath: slave1.cnf
            - name: init-script
              mountPath: /docker-entrypoint-initdb.d
      volumes:
        - name: mysql-config
          configMap:
            name: mysql-config
            items:
              - key: slave1.cnf
                path: slave1.cnf
        - name: init-script
          configMap:
            name: mysql-init-scripts
  volumeClaimTemplates:
    - metadata:
        name: mysql-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "nfs-sc"
        resources:
          requests:
            storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-slave
  namespace: mysql
spec:
  ports:
    - port: 3306
      name: mysql
  clusterIP: None
  selector:
    app: mysql-slave
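With the Secret and ConfigMaps in place, the two StatefulSets and their headless Services can be applied and watched until both pods are Running:
kubectl apply -f master.yaml -f slave.yaml
kubectl -n mysql get pod -w         # wait for mysql-master-0 and mysql-slave-0 to reach Running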
6. Verification
### pods created and running
[root@k8s-master nginx]# kubectl -n mysql get pod
NAME READY STATUS RESTARTS AGE
mysql-master-0 1/1 Running 0 11h
mysql-slave-0 1/1 Running 0 11h
### services created
[root@k8s-master nginx]# kubectl -n mysql get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
mysql-master ClusterIP None <none> 3306/TCP 12h
mysql-slave ClusterIP None <none> 3306/TCP 12h
### PVCs dynamically created
[root@k8s-master nginx]# kubectl -n mysql get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
mysql-data-mysql-master-0 Bound pvc-26c64423-bdc7-4687-9cfc-9b3ac3375e9f 10Gi RWO nfs-sc 11h
mysql-data-mysql-slave-0 Bound pvc-153c4aaa-d48f-4526-a290-381f62d421d4 10Gi RWO nfs-sc 11h
### PVs dynamically created
[root@k8s-master nginx]# kubectl -n mysql get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-153c4aaa-d48f-4526-a290-381f62d421d4 10Gi RWO Delete Bound mysql/mysql-data-mysql-slave-0 nfs-sc 11h
pvc-26c64423-bdc7-4687-9cfc-9b3ac3375e9f 10Gi RWO Delete Bound mysql/mysql-data-mysql-master-0 nfs-sc 11h
Verify master/slave replication
[root@k8s-master nginx]# kubectl -n mysql exec -it mysql-slave-0 -- /bin/bash
bash-5.1# mysql -uroot -p123.com
mysql> show slave status\G
......
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
......
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| biyesheji |
| discuz |
| information_schema |
| mysql |
| performance_schema |
| sys |
+--------------------+
6 rows in set (0.01 sec)
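Beyond the thread status, a quick write-on-master / read-on-slave round trip shows that data actually replicates; the repl_test database is made up for this sketch:
# write a row on the master ...
kubectl -n mysql exec mysql-master-0 -- mysql -uroot -p123.com \
  -e "CREATE DATABASE IF NOT EXISTS repl_test; CREATE TABLE IF NOT EXISTS repl_test.t (id INT PRIMARY KEY); INSERT IGNORE INTO repl_test.t VALUES (1);"
# ... and read it back on the slave a moment later
kubectl -n mysql exec mysql-slave-0 -- mysql -uroot -p123.com \
  -e "SELECT * FROM repl_test.t;"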
III. Redis master/slave
1. Create the namespace
[root@k8s-master redis]# cat namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: redis
2. Create the ConfigMap
[root@k8s-master redis]# cat redis-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-config
  namespace: redis
data:
  redis-master.conf: |
    port 6379
    bind 0.0.0.0
    protected-mode no
    daemonize no
    timeout 0
    save ""
    appendonly no
    maxmemory 1gb
    maxmemory-policy allkeys-lru
  redis-slave.conf: |
    port 6379
    bind 0.0.0.0
    protected-mode no
    daemonize no
    timeout 0
    save ""
    appendonly no
    maxmemory 1gb
    maxmemory-policy allkeys-lru
    slaveof redis-master-0.redis-master.redis.svc.cluster.local 6379
    slave-read-only yes
3. Redis master
[root@k8s-master redis]# cat redis-master.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-master                 # StatefulSet name
  namespace: redis
spec:
  serviceName: redis-master          # associated headless Service
  replicas: 1                        # a single master
  selector:
    matchLabels:
      app: redis-master
  template:
    metadata:
      labels:
        app: redis-master            # must match the selector
    spec:
      containers:
        - name: redis-master
          image: redis:6-alpine      # Alpine-based Redis image
          command: ["redis-server", "/etc/redis/redis-master.conf"]   # startup command
          ports:
            - containerPort: 6379    # default Redis port
          volumeMounts:
            - name: redis-config     # mount the config volume
              mountPath: /etc/redis
      volumes:
        - name: redis-config
          configMap:
            name: redis-config       # the redis-config ConfigMap above
---
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  namespace: redis
spec:
  clusterIP: None
  selector:
    app: redis-master
  ports:
    - port: 6379
      targetPort: 6379
4. Redis slave
[root@k8s-master redis]# cat redis-slave.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-slave                  # StatefulSet name
  namespace: redis
spec:
  serviceName: redis-slave           # associated headless Service
  replicas: 1
  selector:
    matchLabels:
      app: redis-slave
  template:
    metadata:
      labels:
        app: redis-slave             # must match the selector
    spec:
      containers:
        - name: redis-slave
          image: redis:6-alpine      # lightweight Alpine-based Redis image
          command: ["redis-server", "/etc/redis/redis-slave.conf"]   # slave config (contains the slaveof directive)
          ports:
            - containerPort: 6379
          volumeMounts:
            - name: redis-config
              mountPath: /etc/redis
      volumes:
        - name: redis-config
          configMap:
            name: redis-config
---
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  namespace: redis
spec:
  clusterIP: None
  selector:
    app: redis-slave
  ports:
    - port: 6379
5. Verification
[root@k8s-master nginx]# kubectl -n redis get pod
NAME READY STATUS RESTARTS AGE
redis-master-0 1/1 Running 0 12h
redis-slave-0 1/1 Running 0 12h
[root@k8s-master nginx]# kubectl -n redis get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
redis-master ClusterIP None <none> 6379/TCP 12h
redis-slave ClusterIP None <none> 6379/TCP 12h
[root@k8s-master nginx]# kubectl -n redis get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-153c4aaa-d48f-4526-a290-381f62d421d4 10Gi RWO Delete Bound mysql/mysql-data-mysql-slave-0 nfs-sc 11h
pvc-26c64423-bdc7-4687-9cfc-9b3ac3375e9f 10Gi RWO Delete Bound mysql/mysql-data-mysql-master-0 nfs-sc 11h
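The Redis StatefulSets use no PVCs, so the PV listing above still only shows the MySQL volumes. Replication itself can be checked with redis-cli, which ships in the redis:6-alpine image; the repl_test key is made up for this sketch:
# on the slave, role should be "slave" and master_link_status should be "up"
kubectl -n redis exec redis-slave-0 -- redis-cli info replication
# optional round trip: write on the master, read on the slave
kubectl -n redis exec redis-master-0 -- redis-cli set repl_test ok
kubectl -n redis exec redis-slave-0 -- redis-cli get repl_test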
IV. Build the images
1.nginx-php-discuz
[root@k8s-master nginx]# ls
discuz discuz.conf discuz.yaml Dockerfile proj-nginx:latest www.conf
nginx configuration
[root@k8s-master nginx]# cat discuz.conf
server {
    listen 80;
    server_name localhost;
    root /var/www/html;
    index index.php index.html index.htm;

    access_log /var/log/nginx/discuz_access.log;
    error_log /var/log/nginx/discuz_error.log;

    location / {
        try_files $uri $uri/ /index.php?$query_string;
    }

    location ~ \.php$ {
        fastcgi_pass unix:/run/php/php83-fpm.sock;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }

    location ~ /\.ht {
        deny all;
    }
}
PHP-FPM pool configuration
[root@k8s-master nginx]# cat www.conf
[www]
user = nginx
group = nginx
listen = /run/php/php83-fpm.sock
listen.owner = nginx
listen.group = nginx
listen.mode = 0660

pm = dynamic
pm.max_children = 5
pm.start_servers = 2
pm.min_spare_servers = 1
pm.max_spare_servers = 3

php_admin_value[error_log] = /var/log/php83/www-error.log
php_admin_flag[log_errors] = on
Dockerfile
[root@k8s-master nginx]# cat Dockerfile
FROM alpine:latest
RUN apk update && apk add --no-cache \
    php83 php83-fpm php83-mysqlnd php83-gd php83-mbstring \
    php83-curl php83-json php83-openssl php83-xml \
    php83-mysqli php83-tokenizer php83-pdo php83-pdo_mysql \
    nginx php83-redis vim
RUN mkdir -p \
    /run/nginx \
    /var/www/html \
    /run/php && \
    chown -R nginx:nginx /var/www/html /run/nginx && \
    chmod 755 /var/www/html
# nginx configuration
COPY discuz.conf /etc/nginx/http.d/default.conf
# PHP-FPM configuration
COPY www.conf /etc/php83/php-fpm.d/www.conf
# exposed port
EXPOSE 80
# run php-fpm in the background and nginx in the foreground
CMD ["sh","-c","php-fpm83 --nodaemonize & nginx -g 'daemon off;'"]
Build the image
docker build -t nginx:v1 .
#### run the image
[root@k8s-master nginx]# docker run -itd nginx:v1
a902b31a6fe65fe5e7db02c68ec073407f85142fceaab1ce89be9be21fd03efc
Write test files
#### index.html, info.php, mysql.php, redis.php
/var/www/html # cat index.html
nginx
/var/www/html # cat info.php
<?php
phpinfo();
?>
/var/www/html # cat mysql.php
<?php
error_reporting(E_ALL);
ini_set('display_errors', 1);
$host = 'mysql-master-0.mysql-master.mysql.svc.cluster.local'; // database host
$user = 'discuz';        // MySQL user
$pass = '123.com';       // MySQL password
$dbname = 'discuz';      // database to connect to

// try to connect to MySQL
$conn = new mysqli($host, $user, $pass, $dbname);

// check for connection errors
if ($conn->connect_error) {
    // abort and print the error on failure
    die('Connection failed: ' . $conn->connect_error);
}

// connection succeeded: print the server version
echo "MySQL connection OK! Server version: " . $conn->server_info;
?>
/var/www/html # cat redis.php
<?php
$redis = new Redis();
try {
    // connect to the master (replace with your actual address and port)
    $conn = $redis->connect('redis-master.redis.svc.cluster.local', 6379, 2);
    if ($conn) {
        echo "Connected!";
        echo "Redis says: " . $redis->ping();   // check the server responds
    } else {
        echo "Connection failed (no error message)";
    }
} catch (RedisException $e) {
    // print the concrete error (timeout, connection refused, auth failure, ...)
    echo "Redis connection error: " . $e->getMessage();
}
/var/www/html # exit
[root@k8s-master nginx]# ls
discuz.conf Dockerfile www.conf
Import the Discuz installation package
[root@k8s-master nginx]# mkdir discuz
[root@k8s-master nginx]# ls
discuz discuz.conf Dockerfile www.conf
[root@k8s-master nginx]# cd discuz/
[root@k8s-master discuz]# ls
[root@k8s-master discuz]# rz
rz waiting to receive.**[root@k8s-master discuz]#
[root@k8s-master discuz]#
[root@k8s-master discuz]# ls
Discuz_X3.5_SC_UTF8_20250205.zip
[root@k8s-master discuz]# unzip Discuz_X3.5_SC_UTF8_20250205.zip
[root@k8s-master discuz]# ls
Discuz_X3.5_SC_UTF8_20250205.zip LICENSE qqqun.png readme readme.html upload utility.html
[root@k8s-master discuz]# rm -rf Discuz_X3.5_SC_UTF8_20250205.zip
Copy the installation files into the container
[root@k8s-master discuz]# docker cp ./ a902b31a6fe6:/var/www/html
Successfully copied 34.8MB to a902b31a6fe6:/var/www/html
[root@k8s-master discuz]# docker exec -it a902 /bin/sh
/ # cd /var/www/html
/var/www/html # ls
LICENSE info.php qqqun.png readme.html upload
index.html mysql.php readme redis.php utility.html
/var/www/html # cd ..
/var/www # chown -R nginx:nginx html/
/var/www # cd html
/var/www/html # ls -l
Modify the Discuz configuration files
/var/www/html/upload # cd config/
/var/www/html/upload/config # ls
config_global_default.php config_ucenter_default.php index.htm
/var/www/html/upload/config # vim config_global_default.php
(Screenshots omitted: config_global_default.php after editing — the master DB, slave DB, and Redis settings pointed at the cluster Service DNS names.)
/var/www/html/upload/config # vim config_ucenter_default.php
Export the image and distribute it to the nodes
[root@k8s-master nginx]# docker commit a902b31a6fe6 proj-nginx:latest
sha256:a4bf8e59acf9a819bb4a2ea875eb1ba6e11fc2d868213d076322b10340f294a0
[root@k8s-master nginx]# docker save proj-nginx:latest -o pro-nginx.tar
[root@k8s-master nginx]# ls
discuz discuz.conf Dockerfile pro-nginx.tar www.conf
[root@k8s-master nginx]# scp pro-nginx.tar 192.168.158.34:/root/
pro-nginx.tar                                 100%  123MB 330.6MB/s   00:00
[root@k8s-master nginx]# scp pro-nginx.tar 192.168.158.35:/root/
pro-nginx.tar                                 100%  123MB 344.9MB/s   00:00
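scp only copies the tar file; each node still has to load it into its local image store, since the Deployment later uses imagePullPolicy: IfNotPresent and the image is never pushed to a registry:
# run on every worker node that may schedule the Discuz pod
docker load -i /root/pro-nginx.tar
docker images | grep proj-nginx     # proj-nginx:latest should now be listed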
2.tomcat
[root@k8s-master tomcat]# ls
Dockerfile shop tomcat.yaml
Pull the base image
docker pull tomcat:8
Place the WAR package in the shop directory
[root@k8s-master tomcat]# cd shop/
[root@k8s-master shop]# ls
[root@k8s-master shop]# rz
rz waiting to receive.**[root@k8s-master shop]#
[root@k8s-master shop]# ls
ROOT.war
Run a test container
[root@k8s-master shop]# docker run -itd --name tomcat -v /root/project/tomcat/shop/:/usr/local/tomcat/webapps/ --restart=always tomcat:8
1adaadb8d33f77f4ca31bfb438471a6328c35ec4105aecdd88a71330896bca5c
### Tomcat unpacks the WAR automatically
[root@k8s-master shop]# ls
ROOT ROOT.war
Modify the application configuration
#### Since the test container already unpacked the WAR, the file can be edited directly on the host; no need to go back into the container.
[root@k8s-master classes]# pwd
/root/project/tomcat/shop/ROOT/WEB-INF/classes
##### point it at the MySQL Service DNS name, database name, and password
[root@k8s-master classes]# vim jdbc.properties
jdbc.driver=com.mysql.cj.jdbc.Driver
jdbc.jdbcUrl=jdbc:mysql://mysql-master-0.mysql-master.mysql.svc.cluster.local:3306/biyesheji?useUnicode=true&characterEncoding=utf-8&allowMultiQueries=true&useSSL=false&serverTimezone=GMT%2b8&allowPublicKeyRetrieval=true
jdbc.user=tomcat
jdbc.password=123.com
Write the Dockerfile
[root@k8s-master tomcat]# vim Dockerfile
FROM tomcat:8
COPY shop/ /usr/local/tomcat/webapps/
EXPOSE 8080
CMD ["catalina.sh", "run"]
Build the image
[root@k8s-master tomcat]# docker build -t pro-tomcat:latest .
Distribute it to the nodes
[root@k8s-master tomcat]# docker save pro-tomcat:latest -o pro-tomcat.tar
[root@k8s-master tomcat]# scp pro-tomcat.tar 192.168.157.101:/root
pro-tomcat.tar                                100%  481MB 105.1MB/s   00:04
[root@k8s-master tomcat]# scp pro-tomcat.tar 192.168.157.102:/root
pro-tomcat.tar
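As with the nginx image, the tar must be loaded on every node before the Deployment can use it:
# run on every worker node
docker load -i /root/pro-tomcat.tar
docker images | grep pro-tomcat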
V. Deploy on Kubernetes
Both applications go into the same namespace (web).
1.tomcat-shop
[root@k8s-master tomcat]# pwd
/root/project/tomcat
[root@k8s-master tomcat]# ls
Dockerfile pro-tomcat.tar shop tomcat.yaml
##########
[root@k8s-master tomcat]# cat tomcat.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: web
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-tomcat
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-tomcat
  template:
    metadata:
      labels:
        app: web-tomcat
    spec:
      containers:
        - name: tomcat
          image: pro-tomcat:latest        # the image built above
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080         # port exposed inside the container
---
apiVersion: v1
kind: Service
metadata:
  name: svc-tomcat
  namespace: web                          # same namespace as the Deployment
spec:
  type: NodePort
  ports:
    - port: 8080                          # port inside the cluster
      targetPort: 8080                    # container port
  selector:
    app: web-tomcat                       # matches the Deployment's labels
Apply and verify
[root@k8s-master tomcat]# kubectl apply -f tomcat.yaml
namespace/web created
deployment.apps/dep-tomcat created
service/svc-tomcat created
[root@k8s-master tomcat]# kubectl -n web get pod
NAME READY STATUS RESTARTS AGE
dep-tomcat-69dd5b6fd-zh9tj 1/1 Running 0 28s
[root@k8s-master tomcat]# kubectl -n web get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-tomcat NodePort 10.101.173.117 <none> 8080:30448/TCP 32s
http://192.168.157.100:30448/fore/foreIndex
2.discuz
[root@k8s-master nginx]# pwd
/root/project/nginx
[root@k8s-master nginx]# ls
discuz discuz.conf discuz.yaml Dockerfile www.conf
[root@k8s-master nginx]# cat discuz.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-discuz
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-discuz
  template:
    metadata:
      labels:
        app: web-discuz
    spec:
      containers:
        - name: nginx
          image: proj-nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: svc-discuz
  namespace: web
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
  selector:
    app: web-discuz
Apply and verify
[root@k8s-master nginx]# kubectl apply -f discuz.yaml
[root@k8s-master nginx]# kubectl -n web get pod
NAME READY STATUS RESTARTS AGE
dep-discuz-758c879dcb-6kdcf 1/1 Running 0 6h32m
[root@k8s-master nginx]# kubectl -n web get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-discuz NodePort 10.108.98.218 <none> 80:30378/TCP 6h32m
(Screenshots omitted: browser access test, the Discuz install wizard's MySQL/Redis configuration step, installation, and login.)
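If no browser is at hand, the same pages can be probed with curl against the NodePorts from the svc outputs in this section (node IP and ports taken from above; both requests should return HTTP 200):
curl -s -o /dev/null -w '%{http_code}\n' http://192.168.157.100:30378/upload/install/   # Discuz install wizard
curl -s -o /dev/null -w '%{http_code}\n' http://192.168.157.100:30448/fore/foreIndex    # Tomcat shop front page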
VI. Domain-based access with Ingress
1. Copy and load the images
Copy the archives over, then extract and load the images.
###### on all nodes
[root@k8s-node1 ~]# docker load -i ingress-1.11.tar
###### on the master node (MetalLB will act as the load balancer)
[root@k8s-master ~]# unzip metallb-0.14.8.zip
2. Deploy the ingress controller
[root@k8s-master ~]# cd /root/ingress-nginx-controller-v1.11.3/deploy/static/provider/cloud/
[root@k8s-master cloud]# ls
deploy.yaml kustomization.yaml
[root@k8s-master cloud]# vim deploy.yaml
### edit the manifest: there are three image: lines (the grep below shows them at lines 445, 547 and 601); on each one delete the trailing @sha256:<digest>, e.g. turn
###   image: registry.k8s.io/ingress-nginx/controller:v1.11.3@sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
### into
###   image: registry.k8s.io/ingress-nginx/controller:v1.11.3
### so the locally loaded images (which carry no digest) are used
[root@k8s-master cloud]# cat deploy.yaml | grep -n image
445: image: registry.k8s.io/ingress-nginx/controller:v1.12.0
547: image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0
601: image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0
######### deploy #########
[root@k8s-master01 cloud]# kubectl apply -f deploy.yaml
......
[root@k8s-master cloud]# kubectl -n ingress-nginx get pod
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-6qhh4 0/1 Completed 0 4m27s
ingress-nginx-admission-patch-6jnc8 0/1 Completed 1 4m27s
ingress-nginx-controller-7d7455dcf8-89krp 1/1 Running 0 4m27s
3. Switch the controller Service to LoadBalancer
kubectl -n ingress-nginx edit svc ingress-nginx-controller
### set the type: field (around line 50 of the Service) to:
###   type: LoadBalancer
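The same change can be made non-interactively with kubectl patch instead of editing the Service by hand:
kubectl -n ingress-nginx patch svc ingress-nginx-controller -p '{"spec":{"type":"LoadBalancer"}}'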
4. Deploy MetalLB
[root@k8s-master cloud]# cd /root/metallb-0.14.8/config/manifests
######### the images are slow to pull, so apply this first
[root@k8s-master cloud]# kubectl apply -f metallb-native.yaml
Create the IP address pool
cat > IPAddressPool.yaml<<EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: planip-pool                      # must match the pool name referenced by the L2Advertisement below
  namespace: metallb-system
spec:
  addresses:
    - 192.168.157.170-192.168.157.180    # custom range, ideally in the same subnet as the cluster nodes
EOF
Associate the IP address pool
cat > L2Advertisement.yaml<<EOF
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: planip-pool
  namespace: metallb-system
spec:
  ipAddressPools:
    - planip-pool                        # must match the pool name above
EOF
Apply
### wait until the MetalLB components above are ready before applying these
kubectl apply -f IPAddressPool.yaml
kubectl apply -f L2Advertisement.yaml
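Once both resources are applied, MetalLB should hand an address from the pool to the ingress-nginx-controller Service; the EXTERNAL-IP column switching from <pending> to a pool address (192.168.157.170 in the outputs below) confirms it:
kubectl -n ingress-nginx get svc ingress-nginx-controller   # EXTERNAL-IP should come from 192.168.157.170-180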
5. Create the Ingress rules
[root@k8s-master project]# cat ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress                              # an Ingress resource
metadata:
  name: nginx-ingress                      # name of this Ingress
  namespace: web                           # must be the namespace of the backend Services
spec:
  ingressClassName: nginx                  # handled by the nginx ingress controller
  rules:
    - host: test.nginx.haochacha.com       # domain for the Discuz forum
      http:
        paths:
          - backend:
              service:
                name: svc-discuz           # the Discuz Service; must live in the same namespace as this Ingress
                port:
                  number: 80
            path: /                        # path to match
            pathType: Prefix               # prefix match
    - host: test.tomcat.haochacha.com      # domain for the Tomcat shop
      http:
        paths:
          - backend:
              service:
                name: svc-tomcat           # the Tomcat Service; same namespace as this Ingress
                port:
                  number: 8080
            path: /
            pathType: Prefix
#### apply
[root@k8s-master project]# kubectl apply -f ingress.yaml
[root@k8s-master project]# kubectl -n web get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
nginx-ingress   nginx   test.nginx.haochacha.com,test.tomcat.haochacha.com   192.168.157.170   80      60m
[root@k8s-master project]# kubectl -n web describe ingress nginx-ingress
Name: nginx-ingress
Labels: <none>
Namespace: web
Address: 192.168.157.170
Ingress Class: nginx
Default backend: <default>
Rules:
  Host                       Path  Backends
  ----                       ----  --------
  test.nginx.haochacha.com   /     svc-discuz:80 (10.244.169.158:80)
  test.tomcat.haochacha.com  /     svc-tomcat:8080 (10.244.36.105:8080)
Annotations: <none>
Events:
  Type    Reason  Age                  From                      Message
  ----    ------  ----                 ----                      -------
  Normal  Sync    62s (x3 over 4m45s)  nginx-ingress-controller  Scheduled for sync
6. Test
### add hosts entries on the client
[root@nfs-server ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.157.170 test.nginx.haochacha.com
192.168.157.170 test.tomcat.haochacha.com
#### access Discuz
[root@nfs-server ~]# curl http://test.nginx.haochacha.com/upload/install/
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta name="renderer" content="webkit" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<title>Discuz! 安裝向導</title>
<link rel="stylesheet" href="static/style.css" type="text/css" media="all" />
......
##### access the shop
[root@nfs-server ~]# curl http://test.tomcat.haochacha.com/fore/foreIndex
<!DOCTYPE html>
<html class="no-js" lang="zxx"><head><meta charset="utf-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><title>星味美網上訂餐系統</title><meta name="description" content=""><meta name="viewport" content="width=device-width, initial-scale=1"><!-- Favicon --><link rel="icon" href="/assets/images/favicon.ico">
.......
VII. Summary
When building the images, the nginx/Discuz configuration was edited inside a running container (then committed), while the Tomcat application's configuration was edited on the host before the image was built.
The Ingress resource must live in the same namespace as the Services it proxies.