书接上回【传统方式部署Ruoyi微服务】,此刻要迁移至k8s。
环境说明
31 master , 32 node1 , 33 node2
迁移思路
交付思路:
其实和交付到Linux主机上是一样的,无外乎将这些微服务都做成了Docker镜像;
1、微服务数据层: MySQL、 Redis;2、微服务治理层: NACos、sentinel、 skywalking...3、微服务组件
3.1 将微服务编译为jar包;
3.2 将其构建成Docker镜像;
3.3根据服务情况选择对应的工作负载来进行交付;Deployment、Service、Ingress:system:Deployment;auth:Deployment;gateway: Deployment、 service;monitor: Deployment、 Service、 Ingressui: Deployment、 Service、 Ingress; nginx/haproxy
01-mysql (Service、StatefulSet)
kubectl create ns dev
01-mysql-ruoyi-sts-svc.yaml
apiVersion: v1
kind: Service
metadata:name: mysql-ruoyi-svcnamespace: dev
spec:clusterIP: Noneselector:app: mysqlrole: ruoyiports:- port: 3306targetPort: 3306---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: mysql-ruoyinamespace: dev
spec:serviceName: "mysql-ruoyi-svc"replicas: 1selector:matchLabels:app: mysqlrole: ruoyitemplate:metadata:labels:app: mysqlrole: ruoyispec:containers:- name: dbimage: mysql:5.7args:- "--character-set-server=utf8"env:- name: MYSQL_ROOT_PASSWORDvalue: oldxu- name: MYSQL_DATABASEvalue: ry-cloudports:- containerPort: 3306volumeMounts:- name: datamountPath: /var/lib/mysql/volumeClaimTemplates:- metadata:name: dataspec:accessModes: ["ReadWriteMany"]storageClassName: "nfs"resources:requests:storage: 6Gi
解析mysql对应的IP
${statefulSetName}-${headlessName}.${namespace}.svc.cluster.local
[root@master01 01-mysql]# dig @10.96.0.10 mysql-ruoyi-0.mysql-ruoyi-svc.dev.svc.cluster.local +short
10.244.2.129
连接mysql,导入sql文件
yum install -y mysql
mysql -uroot -poldxu -h10.244.2.129
mysql -uroot -poldxu -h10.244.2.129 -B ry-cloud < ry_20220814.sql
02-redis/
(这里使用的是无状态部署,做缓存。按情况而定,也可参考mysql的部署方法,将redis做成有状态部署)
01-redis-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: redis-servernamespace: dev
spec:replicas: 1selector:matchLabels:app: redistemplate:metadata:labels:app: redisspec:containers:- name: cacheimage: redisports:- containerPort: 6379
02-redis-service.yaml
apiVersion: v1
kind: Service
metadata:name: redis-svcnamespace: dev
spec:selector:app: redisports:- port: 6379targetPort: 6379
验证redis
[root@master01 02-redis]# dig @10.96.0.10 redis-svc.dev.svc.cluster.local +short
10.111.240.148kubectl describe svc -n dev redis-svc
sudo yum install epel-release
sudo yum install redis
[root@master01 02-redis]# redis-cli -h 10.111.240.148
03-nacos/
官方的nacos k8s参考资料https://github.com/nacos-group/nacos-k8s/blob/master/README-CN.md
迁移思路:
安装nacos的mysql数据库
01-mysql-nacos-sts-svc.yaml
apiVersion: v1
kind: Service
metadata:name: mysql-nacos-svcnamespace: dev
spec:clusterIP: Noneselector:app: mysqlrole: nacosports:- port: 3306targetPort: 3306---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: mysql-nacos-stsnamespace: dev
spec:serviceName: "mysql-nacos-svc"replicas: 1selector:matchLabels:app: mysqlrole: nacostemplate:metadata:labels:app: mysqlrole: nacosspec:containers:- name: dbimage: mysql:5.7args:- "--character-set-server=utf8"env:- name: MYSQL_ROOT_PASSWORDvalue: oldxu- name: MYSQL_DATABASEvalue: ry-configports:- containerPort: 3306volumeMounts:- name: datamountPath: /var/lib/mysql/volumeClaimTemplates:- metadata:name: dataspec:accessModes: ["ReadWriteMany"]storageClassName: "nfs"resources:requests:storage: 6Gi
[root@master01 03-nacos]# dig @10.96.0.10 mysql-nacos-sts-0.mysql-nacos-svc.dev.svc.cluster.local +short
10.244.2.130
导入config的sql文件
mysql -uroot -poldxu -h10.244.2.130 -B ry-config < ry_config_20220510.sql
02-nacos-configmap.yaml
configmap(填写对应数据库地址、名称、端口、用户名及密码)
apiVersion: v1
kind: ConfigMap
metadata:name: nacos-cmnamespace: dev
data:mysql.host: "mysql-nacos-sts-0.mysql-nacos-svc.dev.svc.cluster.local"mysql.db.name: "ry-config"mysql.port: "3306"mysql.user: "root"mysql.password: "oldxu"
03-nacos-sts-deploy-svc.yaml
#可提前下载,因为镜像大小1GB多
docker pull nacos/nacos-peer-finder-plugin:1.1
docker pull nacos/nacos-server:v2.1.1#自动PV,引用pvc 、pod反亲和性保证每个节点部署一个pod、initContainer找到nacos集群的IP、
apiVersion: v1
kind: Service
metadata:name: nacos-svcnamespace: dev
spec:clusterIP: Noneselector:app: nacosports:- name: serverport: 8848targetPort: 8848- name: client-rpcport: 9848targetPort: 9848- name: raft-rpcport: 9849targetPort: 9849- name: old-raft-rpcport: 7848targetPort: 7848---apiVersion: apps/v1
kind: StatefulSet
metadata:name: nacosnamespace: dev
spec:serviceName: "nacos-svc"replicas: 3selector:matchLabels:app: nacostemplate:metadata:labels:app: nacosspec:affinity: # 避免Pod运行到同一个节点上了podAntiAffinity:requiredDuringSchedulingIgnoredDuringExecution:- labelSelector:matchExpressions:- key: appoperator: Invalues: ["nacos"]topologyKey: "kubernetes.io/hostname" initContainers:- name: peer-finder-plugin-installimage: nacos/nacos-peer-finder-plugin:1.1imagePullPolicy: AlwaysvolumeMounts:- name: datanmountPath: /home/nacos/plugins/peer-findersubPath: peer-findercontainers:- name: nacosimage: nacos/nacos-server:v2.1.1resources:requests:memory: "800Mi"cpu: "500m"ports:- name: client-portcontainerPort: 8848- name: client-rpccontainerPort: 9848- name: raft-rpccontainerPort: 9849- name: old-raft-rpccontainerPort: 7848env:- name: MODE value: "cluster"- name: NACOS_VERSIONvalue: 2.1.1- name: NACOS_REPLICASvalue: "3"- name: SERVICE_NAME value: "nacos-svc"- name: DOMAIN_NAME value: "cluster.local"- name: NACOS_SERVER_PORT value: "8848"- name: NACOS_APPLICATION_PORTvalue: "8848"- name: PREFER_HOST_MODEvalue: "hostname"- name: POD_NAMESPACE valueFrom:fieldRef:apiVersion: v1fieldPath: metadata.namespace- name: MYSQL_SERVICE_HOSTvalueFrom:configMapKeyRef:name: nacos-cmkey: mysql.host- name: MYSQL_SERVICE_DB_NAMEvalueFrom:configMapKeyRef:name: nacos-cmkey: mysql.db.name- name: MYSQL_SERVICE_PORTvalueFrom:configMapKeyRef:name: nacos-cmkey: mysql.port- name: MYSQL_SERVICE_USERvalueFrom:configMapKeyRef:name: nacos-cmkey: mysql.user- name: MYSQL_SERVICE_PASSWORDvalueFrom:configMapKeyRef:name: nacos-cmkey: mysql.passwordvolumeMounts:- name: datanmountPath: /home/nacos/plugins/peer-findersubPath: peer-finder- name: datanmountPath: /home/nacos/datasubPath: data- name: datanmountPath: /home/nacos/logssubPath: logsvolumeClaimTemplates:- metadata:name: datanspec:storageClassName: "nfs"accessModes: ["ReadWriteMany"]resources:requests:storage: 20Gi
访问验证:
http://nacos.oldxu.net:30080/nacos/
04-nacos-ingress.yaml
带#号的是新版本的写法
apiVersion: extensions/v1beta1
kind: Ingress
metadata:name: nacos-ingressnamespace: dev
spec:ingressClassName: "nginx"rules:- host: nacos.oldxu.nethttp:paths:- path: /pathType: Prefixbackend:serviceName: nacos-svcservicePort: 8848# service:
# name: nacos-svc
# port:
# name: server
04-sentinel/
sentinel迁移思路
Sentinel
1、编写Dockerfile 、entrypoint.sh
2、推送到Harbor镜像仓库;
3、使用Deployment就可以运行该镜像;
4、使用Service、Ingress来将其对外提供访问;
编写sentinel的Dockerfile
#下载包
wget https://linux.oldxu.net/sentinel-dashboard-1.8.5.jar
docker login harbor.oldxu.net
Dockerfile 与 entrypoint.sh
Dockerfile
# Sentinel dashboard image: small JRE base + fat jar + startup script.
FROM openjdk:8-jre-alpine
# Pin the downloaded dashboard jar to a fixed in-image path used by entrypoint.sh
COPY ./sentinel-dashboard-1.8.5.jar /sentinel-dashboard.jar
COPY ./entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# 8718 = dashboard HTTP port, 8719 = sentinel client API port (see entrypoint.sh)
EXPOSE 8718 8719
CMD ["/bin/sh","-c","/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
# Entrypoint for the Sentinel dashboard container.
# Heap size can be overridden via XMS_OPTS / XMX_OPTS env vars (default 150m).
JAVA_OPTS="-Dserver.port=8718 \
-Dcsp.sentinel.dashboard.server=localhost:8718 \
-Dproject.name=sentinel-dashboard \
-Dcsp.sentinel.api.port=8719 \
-Xms${XMS_OPTS:-150m} \
-Xmx${XMX_OPTS:-150m}"

# fix: the closing quote was fused directly onto "java" in the original,
# producing an invalid command. Also use exec so java runs as PID 1 and
# receives SIGTERM from the kubelet on pod shutdown.
exec java ${JAVA_OPTS} -jar /sentinel-dashboard.jar
[root@master01 04-sentinel]# ls
Dockerfile entrypoint.sh sentinel-dashboard-1.8.5.jardocker build -t harbor.oldxu.net/springcloud/sentinel-dashboard:v1.0 .
docker push harbor.oldxu.net/springcloud/sentinel-dashboard:v1.0
01-sentinel-deploy.yaml
kubectl create secret docker-registry harbor-admin \--docker-username=admin \--docker-password=Harbor12345 \--docker-server=harbor.oldxu.net \-n dev
apiVersion: apps/v1
kind: Deployment
metadata:name: sentinel-servernamespace: dev
# fix: image tag changed v2.0 -> v1.0 to match the image actually built and
# pushed to Harbor (docker push harbor.oldxu.net/springcloud/sentinel-dashboard:v1.0);
# a non-existent tag causes ImagePullBackOff.
spec:replicas: 1selector:matchLabels:app: sentineltemplate:metadata:labels:app: sentinelspec:imagePullSecrets:- name: harbor-admincontainers:- name: sentinelimage: harbor.oldxu.net/springcloud/sentinel-dashboard:v1.0ports:- name: servercontainerPort: 8718- name: apicontainerPort: 8719
02-sentinel-svc.yaml
apiVersion: v1
kind: Service
metadata:name: sentinel-svcnamespace: dev
spec:selector:app: sentinelports:- name: serverport: 8718targetPort: 8718- name: apiport: 8719targetPort: 8719
03-sentinel-ingress.yaml
带#号的是新版本的写法。
#apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:name: sentinel-ingressnamespace: dev
spec:ingressClassName: "nginx"rules:- host: sentinel.oldxu.nethttp:paths:- path: /pathType: Prefixbackend:serviceName: sentinel-svcservicePort: 8718#service:# name: sentinel-svc# port:# name: server
访问 http://sentinel.oldxu.net:30080/#/dashboard/metric/sentinel-dashboard
05-skywalking/
迁移思路
本次Skywalking采用内置H2作为存储,也可考虑采用ElasticSearch作为数据存储。
01-skywalking-oap-deploy.yaml
02-skywalking-ui-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: skywalking-uinamespace: dev
spec:replicas: 1selector:matchLabels:app: sky-uitemplate:metadata:labels:app: sky-uispec:containers:- name: uiimage: apache/skywalking-ui:8.9.1ports:- containerPort: 8080env:- name: SW_OAP_ADDRESSvalue: "http://skywalking-oap-svc:12800"
---
apiVersion: v1
kind: Service
metadata:name: skywalking-ui-svcnamespace: dev
spec:selector:app: sky-uiports:- name: uiport: 8080targetPort: 8080
[root@master01 05-skywalking]# dig @10.96.0.10 skywalking-oap-svc.dev.svc.cluster.local +short
10.111.30.115
03-skywalking-ingress.yaml
带#号的是新版本的写法。
apiVersion: extensions/v1beta1
#apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:name: skywalking-ingress namespace: dev
spec:ingressClassName: "nginx"rules:- host: sky.oldxu.nethttp:paths: - path: /pathType: Prefixbackend:serviceName: skywalking-ui-svcservicePort: 8080#service:# name: skywalking-ui-svc# port:# name: ui
访问sky.oldxu.net:30080
04-skywalking-agent-demo.yaml (客户端demo)
将Skywalking-agent制作为Docker镜像,后续业务容器通过sidecar 模式挂载 agent
下载agent 和 制作dockerfile ,推送镜像
wget https://linux.oldxu.net/apache-skywalking-java-agent-8.8.0.tgz
wget https://linux.oldxu.net/apache-skywalking-java-agent-8.8.0.tgz[root@master01 04-skywalking-agent-demo]# cat Dockerfile
FROM alpine
ADD ./apache-skywalking-java-agent-8.8.0.tgz /[root@master01 04-skywalking-agent-demo]# ls
apache-skywalking-java-agent-8.8.0.tgz Dockerfile
docker build -t harbor.oldxu.net/springcloud/skywalking-java-agent:8.8 .
docker push harbor.oldxu.net/springcloud/skywalking-java-agent:8.8
#使用边车模式的思想来实现 (类似的有ELK收集Pod的日志)
业务容器通过sidecar模式挂载制作好的skywalking-agent镜像
apiVersion: apps/v1
kind: Deployment
metadata:name: skywalking-agent-demonamespace: dev
spec:replicas: 1selector:matchLabels:app: demotemplate:metadata:labels:app: demospec:imagePullSecrets:- name: harbor-adminvolumes: #定义共享的存储卷- name: skywalking-agentemptyDir: {}initContainers: #初始化容器,将这个容器中的数据拷贝到共享的卷中- name: init-skywalking-agentimage: harbor.oldxu.net/springcloud/skywalking-java-agent:8.8command:- 'sh'- '-c'- 'mkdir -p /agent; cp -r /skywalking-agent/* /agent;'volumeMounts:- name: skywalking-agentmountPath: /agentcontainers:- name: webimage: nginxvolumeMounts:- name: skywalking-agentmountPath: /skywalking-agent/
06-service-all/ (ruoyi业务层面 system , auth , gateway ,monitor ,ui)
迁移思路
6.1 迁移微服务ruoyi-system
1 maven编译system项目
对应的路径及信息
cd /root/k8sFile/project/danji-ruoyi/guanWang
[root@node4 guanWang]# ls
logs note.txt RuoYi-Cloud skywalking-agent startServer.sh[root@node4 guanWang]# ls RuoYi-Cloud/
bin docker LICENSE pom.xml README.md ruoyi-api ruoyi-auth ruoyi-common ruoyi-gateway ruoyi-modules ruoyi-ui ruoyi-visual sql
[root@node4 RuoYi-Cloud]# pwd
/root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloud
[root@node4 RuoYi-Cloud]# ls
bin docker LICENSE pom.xml README.md ruoyi-api ruoyi-auth ruoyi-common ruoyi-gateway ruoyi-modules ruoyi-ui ruoyi-visual sql
[root@node4 RuoYi-Cloud]# [root@node4 RuoYi-Cloud]# mvn package -Dmaven.test.skip=true -pl ruoyi-modules/ruoyi-system/ -am
2 编写Dockerfile
vim ruoyi-modules/ruoyi-system/Dockerfile
FROM openjdk:8-jre-alpine
COPY ./target/*.jar /ruoyi-modules-system.jar
COPY ./entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 8080
CMD ["/bin/sh","-c","/entrypoint.sh"]
3 编写entrypoint.sh
在此之前回顾传统部署system运行时的指令:
#启动ruoyi-system
nohup java -javaagent:./skywalking-agent/skywalking-agent.jar \
-Dskywalking.agent.service_name=ruoyi-system \
-Dskywalking.collector.backend_service=192.168.79.35:11800 \
-Dspring.profiles.active=dev \
-Dspring.cloud.nacos.config.file-extension=yml \
-Dspring.cloud.nacos.discovery.server-addr=192.168.79.35:8848 \
-Dspring.cloud.nacos.config.server-addr=192.168.79.35:8848 \
-jar RuoYi-Cloud/ruoyi-modules/ruoyi-system/target/ruoyi-modules-system.jar &>/var/log/system.log &
#entrypoint.sh
[root@node4 ruoyi-system]# cat entrypoint.sh
#!/bin/sh
# Entrypoint for ruoyi-system. Every setting can be overridden through env
# vars injected by the Deployment: Server_Port, XMS_OPTS/XMX_OPTS,
# Nacos_Active, Nacos_Server_Addr, Sky_Server_Addr.

# HTTP listen port (default 8080)
PARAMS="--server.port=${Server_Port:-8080}"

# JVM heap settings
JAVA_OPTS="-Xms${XMS_OPTS:-150m} -Xmx${XMX_OPTS:-150m}"

# Nacos registry/config options
NACOS_OPTS=" \
-Djava.security.egd=file:/dev/./urandom \
-Dfile.encoding=utf8 \
-Dspring.profiles.active=${Nacos_Active:-dev} \
-Dspring.cloud.nacos.config.file-extension=yml \
-Dspring.cloud.nacos.discovery.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848} \
-Dspring.cloud.nacos.config.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848}"

# SkyWalking options: the sidecar initContainer copies the agent into
# /skywalking-agent inside the pod before this container starts.
SKY_OPTS="-javaagent:/skywalking-agent/skywalking-agent.jar \
-Dskywalking.agent.service_name=ruoyi-system \
-Dskywalking.collector.backend_service=${Sky_Server_Addr:-localhost:11800}"

# exec so java runs as PID 1 and receives SIGTERM on pod shutdown
exec java ${SKY_OPTS} ${NACOS_OPTS} ${JAVA_OPTS} -jar /ruoyi-modules-system.jar ${PARAMS}
#路径及文件信息
[root@node4 ruoyi-system]# ls
Dockerfile entrypoint.sh pom.xml src target
4 制作镜像和推送
docker build -t harbor.oldxu.net/springcloud/ruoyi-system:v1.0 .
docker push harbor.oldxu.net/springcloud/ruoyi-system:v1.0
5 修改system组件配置
通过Kubernetes运行system之前,先登录Nacos修改ruoyi-system-dev.yml的相关配置信息;
修改redis地址,新增sentinel字段、mysql地址
# spring配置
spring:cloud:sentinel:eager: truetransport:dashboard: sentinel-svc.dev.svc.cluster.local:8718 redis:host: redis-svc.dev.svc.cluster.localport: 6379password: datasource:druid:stat-view-servlet:enabled: trueloginUsername: adminloginPassword: 123456dynamic:druid:initial-size: 5min-idle: 5maxActive: 20maxWait: 60000timeBetweenEvictionRunsMillis: 60000minEvictableIdleTimeMillis: 300000validationQuery: SELECT 1 FROM DUALtestWhileIdle: truetestOnBorrow: falsetestOnReturn: falsepoolPreparedStatements: truemaxPoolPreparedStatementPerConnectionSize: 20filters: stat,slf4jconnectionProperties: druid.stat.mergeSql\=true;druid.stat.slowSqlMillis\=5000datasource:# 主库数据源master:driver-class-name: com.mysql.cj.jdbc.Driverurl: jdbc:mysql://mysql-ruoyi-svc.dev.svc.cluster.local:3306/ry-cloud?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8username: rootpassword: oldxu# 从库数据源# slave:# username: # password: # url: # driver-class-name: # seata: true # 开启seata代理,开启后默认每个数据源都代理,如果某个不需要代理可单独关闭# seata配置
seata:# 默认关闭,如需启用spring.datasource.dynami.seata需要同时开启enabled: false# Seata 应用编号,默认为 ${spring.application.name}application-id: ${spring.application.name}# Seata 事务组编号,用于 TC 集群名tx-service-group: ${spring.application.name}-group# 关闭自动代理enable-auto-data-source-proxy: false# 服务配置项service:# 虚拟组和分组的映射vgroup-mapping:ruoyi-system-group: defaultconfig:type: nacosnacos:serverAddr: 127.0.0.1:8848group: SEATA_GROUPnamespace:registry:type: nacosnacos:application: seata-serverserver-addr: 127.0.0.1:8848namespace:# mybatis配置
mybatis:# 搜索指定包别名typeAliasesPackage: com.ruoyi.system# 配置mapper的扫描,找到所有的mapper.xml映射文件mapperLocations: classpath:mapper/**/*.xml# swagger配置
swagger:title: 系统模块接口文档license: Powered By ruoyilicenseUrl: https://ruoyi.vip
#验证redis 、 mysql 、sentinel-svc
[root@master01 bin]# dig @10.96.0.10 redis-svc.dev.svc.cluster.local +short
10.111.240.148[root@master01 bin]# dig @10.96.0.10 sentinel-svc.dev.svc.cluster.local +short
10.111.31.36[root@master01 bin]# dig @10.96.0.10 mysql-ruoyi-svc.dev.svc.cluster.local +short
10.244.1.130
01-system-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: ruoyi-systemnamespace: dev
spec:replicas: 2selector:matchLabels:app: systemtemplate:metadata:labels:app: systemspec:imagePullSecrets:- name: harbor-adminvolumes:- name: skywalking-agentemptyDir: {}initContainers:- name: init-sky-java-agentimage: harbor.oldxu.net/springcloud/skywalking-java-agent:8.8command:- 'sh'- '-c'- 'mkdir -p /agent; cp -r /skywalking-agent/* /agent/;'volumeMounts:- name: skywalking-agentmountPath: /agentcontainers:- name: systemimage: harbor.oldxu.net/springcloud/ruoyi-system:v1.0env:- name: Nacos_Activevalue: dev- name: Nacos_Server_Addrvalue: "nacos-svc.dev.svc.cluster.local:8848"- name: Sky_Server_Addrvalue: "skywalking-oap-svc.dev.svc.cluster.local:11800"- name: XMS_OPTSvalue: 200m- name: XMX_OPTSvalue: 200mports:- containerPort: 8080livenessProbe: tcpSocket:port: 8080initialDelaySeconds: 60periodSeconds: 10timeoutSeconds: 10volumeMounts:- name: skywalking-agentmountPath: /skywalking-agent/
#验证nacos和 skywalking-oap
[root@master01 bin]# dig @10.96.0.10 nacos-svc.dev.svc.cluster.local +short
10.244.1.129
10.244.0.143
10.244.2.154[root@master01 bin]# dig @10.96.0.10 skywalking-oap-svc.dev.svc.cluster.local +short
10.111.30.115
登录nacos、sentinel、skywalking查看状态
nacos:
sentinel:
skywalking:
6.2 迁移微服务ruoyi-auth
1、编译auth项目
[root@node4 RuoYi-Cloud]# pwd
/root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloudmvn package -Dmaven.test.skip=true -pl ruoyi-auth/ -am
2、编写dockerfile和entrypoint.sh
```shell
[root@node4 ruoyi-auth]# pwd
/root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloud/ruoyi-auth
[root@node4 ruoyi-auth]# ls
Dockerfile entrypoint.sh pom.xml src target
dockerfile
```shell
# ruoyi-auth image: JRE base + fat jar + startup script.
FROM openjdk:8-jre-alpine
COPY ./target/*.jar /ruoyi-auth.jar
COPY ./entrypoint.sh /entrypoint.sh
# fix: RUN and EXPOSE were fused onto one line ("RUN chmod +x /entrypoint.shEXPOSE 8080"),
# which is invalid Dockerfile syntax — one instruction per line.
RUN chmod +x /entrypoint.sh
EXPOSE 8080
CMD ["/bin/sh","-c","/entrypoint.sh"]
entrypoint.sh
[root@node4 ruoyi-auth]# cat entrypoint.sh
#!/bin/sh
# Entrypoint for ruoyi-auth. Settings overridable via env vars: Server_Port,
# XMS_OPTS/XMX_OPTS, Nacos_Active, Nacos_Server_Addr, Sky_Server_Addr.

# HTTP listen port (default 8080)
PARAMS="--server.port=${Server_Port:-8080}"

# JVM heap settings.
# fix: defaults were "-Xms100M -Xmx100" — the Xmx value was missing its size
# unit, which makes the JVM refuse to start; both normalized to 100m.
JAVA_OPTS="-Xms${XMS_OPTS:-100m} -Xmx${XMX_OPTS:-100m}"

# Nacos registry/config options.
# fix: the Spring property is spring.profiles.active, not
# spring.profiles.active.file — the wrong key silently falls back to default.
NACOS_OPTS=" \
-Djava.security.egd=file:/dev/./urandom \
-Dfile.encoding=utf8 \
-Dspring.profiles.active=${Nacos_Active:-dev} \
-Dspring.cloud.nacos.config.file-extension=yml \
-Dspring.cloud.nacos.discovery.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848} \
-Dspring.cloud.nacos.config.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848}"

# SkyWalking options: agent jar is provided by the sidecar initContainer.
SKY_OPTS="-javaagent:/skywalking-agent/skywalking-agent.jar \
-Dskywalking.agent.service_name=ruoyi-auth \
-Dskywalking.collector.backend_service=${Sky_Server_Addr:-localhost:11800}"

# exec so java runs as PID 1 and receives SIGTERM on pod shutdown
exec java ${SKY_OPTS} ${NACOS_OPTS} ${JAVA_OPTS} -jar /ruoyi-auth.jar ${PARAMS}
3、制作镜像,推送
docker build -t harbor.oldxu.net/springcloud/ruoyi-auth:v1.0 .
docker push harbor.oldxu.net/springcloud/ruoyi-auth:v1.0
4、去nacos修改ruoyi-auth-dev.yml
使用Kubernetes运行auth之前,先通过Nacos修改对应ruoyi-auth-dev.yml相关配置;
spring:cloud:sentinel:eager: truetransport:dashboard: sentinel-svc.dev.svc.cluster.local:8718 redis:host: redis-svc.dev.svc.cluster.localport: 6379password:
5、02-auth-deploy.yaml
运行auth应用
[root@master01 06-all-service]# cat 02-auth-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: ruoyi-authnamespace: dev
spec:replicas: 2selector:matchLabels:app: authtemplate:metadata:labels:app: authspec:imagePullSecrets:- name: harbor-adminvolumes:- name: skywalking-agentemptyDir: {}initContainers:- name: init-sky-java-agentimage: harbor.oldxu.net/springcloud/skywalking-java-agent:8.8command:- 'sh'- '-c'- 'mkdir -p /agent; cp -r /skywalking-agent/* /agent/;'volumeMounts:- name: skywalking-agentmountPath: /agentcontainers:- name: authimage: harbor.oldxu.net/springcloud/ruoyi-auth:v1.0env:- name: Nacos_Activevalue: dev- name: Nacos_Server_Addrvalue: "nacos-svc.dev.svc.cluster.local:8848" - name: Sky_Server_Addrvalue: "skywalking-oap-svc.dev.svc.cluster.local:11800"- name: XMS_OPTSvalue: 200m- name: XMX_OPTSvalue: 200mports:- containerPort: 8080livenessProbe:tcpSocket:port: 8080initialDelaySeconds: 60periodSeconds: 10timeoutSeconds: 10volumeMounts:- name: skywalking-agentmountPath: /skywalking-agent/
6.3 迁移微服务ruoyi-gateway
1、编译gateway项目
[root@node4 RuoYi-Cloud]# pwd
/root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloud
[root@node4 RuoYi-Cloud]# ls
bin docker LICENSE pom.xml README.md ruoyi-api ruoyi-auth ruoyi-common ruoyi-gateway ruoyi-modules ruoyi-ui ruoyi-visual sql
mvn package -Dmaven.test.skip=true -pl ruoyi-gateway/ -am
2、编写dockerfile和entrypoint
[root@node4 ruoyi-gateway]# ls
Dockerfile entrypoint.sh pom.xml src target
Dockerfile
#如果使用alpine镜像(openjdk:8-jre-alpine),会出现[网关异常处理]请求路径:/code
FROM openjdk:8-jre
COPY ./target/*.jar /ruoyi-gateway.jar
COPY ./entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 8080
CMD ["/bin/sh","-c","/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
# Entrypoint for ruoyi-gateway. Settings overridable via env vars: Server_Port,
# XMS_OPTS/XMX_OPTS, Nacos_Active, Nacos_Server_Addr, Sky_Server_Addr.

# HTTP listen port (default 8080)
PARAMS="--server.port=${Server_Port:-8080}"

# JVM heap settings (fix: the original left a stray newline inside the quoted
# value before the closing quote)
JAVA_OPTS="-Xms${XMS_OPTS:-100m} -Xmx${XMX_OPTS:-100m}"

# Nacos registry/config options
NACOS_OPTS=" \
-Djava.security.egd=file:/dev/./urandom \
-Dfile.encoding=utf8 \
-Dspring.profiles.active=${Nacos_Active:-dev} \
-Dspring.cloud.nacos.config.file-extension=yml \
-Dspring.cloud.nacos.discovery.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848} \
-Dspring.cloud.nacos.config.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848}"

# SkyWalking options: agent jar is provided by the sidecar initContainer.
SKY_OPTS="-javaagent:/skywalking-agent/skywalking-agent.jar \
-Dskywalking.agent.service_name=ruoyi-gateway \
-Dskywalking.collector.backend_service=${Sky_Server_Addr:-localhost:11800}"

# exec so java runs as PID 1 and receives SIGTERM on pod shutdown
exec java ${SKY_OPTS} ${NACOS_OPTS} ${JAVA_OPTS} -jar /ruoyi-gateway.jar ${PARAMS}
3、制作镜像并推送仓库
docker build -t harbor.oldxu.net/springcloud/ruoyi-gateway:v1.0 .
docker push harbor.oldxu.net/springcloud/ruoyi-gateway:v1.0
4、修改gateway组件配置(ruoyi-gateway-dev.yml)
使用Kubernetes运行gateway之前,先通过Nacos修改对应ruoyi-gateway-dev.yml的相关配置;
spring:redis:host: redis-svc.dev.svc.cluster.localport: 6379password: sentinel:eager: truetransport:dashboard: sentinel-svc.dev.svc.cluster.local:8718datasource:ds1:nacos:server-addr: nacos-svc.dev.svc.cluster:8848dataId: sentinel-ruoyi-gatewaygroupId: DEFAULT_GROUPdata-type: jsonrule-type: flow cloud:nacos:discovery:server-addr: nacos-svc.dev.svc.cluster.local:8848config:server-addr: nacos-svc.dev.svc.cluster.local:8848gateway:discovery:locator:lowerCaseServiceId: trueenabled: trueroutes:# 认证中心- id: ruoyi-authuri: lb://ruoyi-authpredicates:- Path=/auth/**filters:# 验证码处理- CacheRequestFilter- ValidateCodeFilter- StripPrefix=1# 代码生成- id: ruoyi-genuri: lb://ruoyi-genpredicates:- Path=/code/**filters:- StripPrefix=1# 定时任务- id: ruoyi-joburi: lb://ruoyi-jobpredicates:- Path=/schedule/**filters:- StripPrefix=1# 系统模块- id: ruoyi-systemuri: lb://ruoyi-systempredicates:- Path=/system/**filters:- StripPrefix=1# 文件服务- id: ruoyi-fileuri: lb://ruoyi-filepredicates:- Path=/file/**filters:- StripPrefix=1# 安全配置
security:# 验证码captcha:enabled: truetype: math# 防止XSS攻击xss:enabled: trueexcludeUrls:- /system/notice# 不校验白名单ignore:whites:- /auth/logout- /auth/login- /auth/register- /*/v2/api-docs- /csrf
5、 03-gateway-deploy-svc.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: ruoyi-gatewaynamespace: dev
# fixes: image tag v2.0 -> v1.0 to match the image actually pushed to Harbor
# (docker push .../ruoyi-gateway:v1.0); XMX_OPTS "500M" -> "500m" for
# consistency with XMS_OPTS and the lowercase unit used elsewhere.
spec:replicas: 2selector:matchLabels:app: gatewaytemplate:metadata:labels:app: gatewayspec:imagePullSecrets:- name: harbor-adminvolumes:- name: skywalking-agentemptyDir: {}initContainers:- name: init-sky-java-agentimage: harbor.oldxu.net/springcloud/skywalking-java-agent:8.8command:- 'sh'- '-c'- 'mkdir -p /agent; cp -r /skywalking-agent/* /agent/;'volumeMounts:- name: skywalking-agentmountPath: /agentcontainers:- name: gatewayimage: harbor.oldxu.net/springcloud/ruoyi-gateway:v1.0env:- name: Nacos_Activevalue: dev- name: Nacos_Server_Addrvalue: "nacos-svc.dev.svc.cluster.local:8848"- name: Sky_Server_Addrvalue: "skywalking-oap-svc.dev.svc.cluster.local:11800"- name: XMS_OPTSvalue: 500m- name: XMX_OPTSvalue: 500mports:- containerPort: 8080livenessProbe:tcpSocket:port: 8080initialDelaySeconds: 60periodSeconds: 10timeoutSeconds: 10volumeMounts:- name: skywalking-agentmountPath: /skywalking-agent/ ---
apiVersion: v1
kind: Service
metadata:name: gateway-svcnamespace: dev
spec:selector:app: gatewayports:- port: 8080targetPort: 8080
6.4 迁移微服务ruoyi-monitor
1 编译monitor项目
[root@node4 RuoYi-Cloud]# pwd
/root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloud[root@node4 RuoYi-Cloud]# ls
bin docker LICENSE pom.xml README.md ruoyi-api ruoyi-auth ruoyi-common ruoyi-gateway ruoyi-modules ruoyi-ui ruoyi-visual sql[root@node4 RuoYi-Cloud]# mvn package -Dmaven.test.skip=true -pl ruoyi-visual/ruoyi-monitor/ -am
2 编写dockerfile 和 entrypoint.sh
cd /root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloud/ruoyi-visual/ruoyi-monitor[root@node4 ruoyi-monitor]# ls
Dockerfile entrypoint.sh pom.xml src target
dockerfile
FROM openjdk:8-jre-alpine
COPY ./target/*.jar /ruoyi-monitor.jar
COPY ./entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 8080
CMD ["/bin/sh", "-c", "/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
# Entrypoint for ruoyi-monitor. Settings overridable via env vars: Server_Port,
# XMS_OPTS/XMX_OPTS, Nacos_Active, Nacos_Server_Addr, Sky_Server_Addr.

# HTTP listen port (default 8080)
PARAMS="--server.port=${Server_Port:-8080}"

# JVM heap settings
JAVA_OPTS="-Xms${XMS_OPTS:-100m} -Xmx${XMX_OPTS:-100m}"

# Nacos registry/config options
NACOS_OPTS=" \
-Djava.security.egd=file:/dev/./urandom \
-Dfile.encoding=utf8 \
-Dspring.profiles.active=${Nacos_Active:-dev} \
-Dspring.cloud.nacos.config.file-extension=yml \
-Dspring.cloud.nacos.discovery.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848} \
-Dspring.cloud.nacos.config.server-addr=${Nacos_Server_Addr:-127.0.0.1:8848}"

# SkyWalking options.
# fixes: the agent jar extracted from the agent tarball (and used by every
# other service's entrypoint) is skywalking-agent.jar, not sky-java-agent.jar;
# the backend default was ":-11800" (port only) and was missing the host —
# normalized to localhost:11800 like the other services.
SKY_OPTS="-javaagent:/skywalking-agent/skywalking-agent.jar \
-Dskywalking.agent.service_name=ruoyi-monitor \
-Dskywalking.collector.backend_service=${Sky_Server_Addr:-localhost:11800}"

# exec so java runs as PID 1 and receives SIGTERM on pod shutdown
exec java ${SKY_OPTS} ${NACOS_OPTS} ${JAVA_OPTS} -jar /ruoyi-monitor.jar ${PARAMS}
3 制作镜像并推送仓库
docker build -t harbor.oldxu.net/springcloud/ruoyi-monitor:v1.0 .
docker push harbor.oldxu.net/springcloud/ruoyi-monitor:v1.0
4 修改monitor组件配置(ruoyi-monitor-dev.yml)
使用Kubernetes运行monitor之前,先通过Nacos修改对应ruoyi-monitor-dev.yml的相关配置;
# spring
spring:cloud:sentinel:eager: truetransport:dashboard: sentinel-svc.dev.svc.cluster.local:8718 security:user:name: ruoyipassword: 123456boot:admin:ui:title: 若依服务状态监控
5、04-monitor-deploy-svc-ingress.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: ruoyi-monitornamespace: dev
# fix: image tag v3.0 -> v1.0 to match the image actually pushed to Harbor
# (docker push .../ruoyi-monitor:v1.0); a non-existent tag causes ImagePullBackOff.
spec:replicas: 1selector:matchLabels:app: monitortemplate:metadata:labels:app: monitorspec:imagePullSecrets:- name: harbor-adminvolumes:- name: skywalking-agentemptyDir: {}initContainers:- name: init-sky-java-agentimage: harbor.oldxu.net/springcloud/skywalking-java-agent:8.8command:- 'sh'- '-c'- 'mkdir -p /agent; cp -r /skywalking-agent/* /agent/;'volumeMounts:- name: skywalking-agentmountPath: /agentcontainers:- name: monitorimage: harbor.oldxu.net/springcloud/ruoyi-monitor:v1.0env:- name: Nacos_Activevalue: dev- name: Nacos_Server_Addrvalue: "nacos-svc.dev.svc.cluster.local:8848"- name: Sky_Server_Addrvalue: "skywalking-oap-svc.dev.svc.cluster.local:11800"- name: XMS_OPTSvalue: 200m- name: XMX_OPTSvalue: 200mports:- containerPort: 8080livenessProbe:tcpSocket:port: 8080initialDelaySeconds: 60periodSeconds: 10timeoutSeconds: 10volumeMounts:- name: skywalking-agentmountPath: /skywalking-agent/---
apiVersion: v1
kind: Service
metadata:name: monitor-svcnamespace: dev
spec:selector:app: monitorports:- port: 8080targetPort: 8080---
#apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:name: monitor-ingressnamespace: dev
spec:ingressClassName: "nginx"rules:- host: "monitor.oldxu.net"http:paths:- path: /pathType: Prefixbackend:serviceName: monitor-svcservicePort: 8080#service:# name: monitor-svc# port:# number: 8080
有个蛇皮问题。(monitor.oldxu.net:30080 会重定向到 monitor.oldxu.net/login),这是monitor程序的哪里搞了重定向?
还有静态文件js 、css文件会走80端口,不是走30080端口。
6.5 迁移微服务ruoyi-ui 前端
1 修改前端配置ruoyi-ui/vue.config.js
[root@node4 ruoyi-ui]# pwd
/root/k8sFile/project/danji-ruoyi/guanWang/RuoYi-Cloud/ruoyi-ui
修改网关的地址
devServer: {host: '0.0.0.0',port: port,open: true,proxy: {// detail: https://cli.vuejs.org/config/#devserver-proxy[process.env.VUE_APP_BASE_API]: {target: `http://gateway-svc.dev.svc.cluster.local:8080`,changeOrigin: true,pathRewrite: {['^' + process.env.VUE_APP_BASE_API]: ''}}},disableHostCheck: true},css: {
[root@master01 06-all-service]# dig @10.96.0.10 gateway-svc.dev.svc.cluster.local +short
10.97.133.31
2 编译前端项目
npm install --registry=https://registry.npmmirror.com
npm run build:prod
3 编写Dockerfile
[root@node4 ruoyi-ui]# ls
babel.config.js bin build dist Dockerfile node_modules package.json package-lock.json public README.md src vue.config.js vue.config.js.bak vue.config.js-danji
[root@node4 ruoyi-ui]#
[root@node4 ruoyi-ui]# cat Dockerfile
FROM nginx
COPY ./dist /code/
4 制作镜像并推送仓库
docker build -t harbor.oldxu.net/springcloud/ruoyi-ui:v1.0 .
docker push harbor.oldxu.net/springcloud/ruoyi-ui:v1.0
5 创建ConfigMap ( ruoyi.oldxu.net.conf)
ruoyi.oldxu.net.conf
server {listen 80;server_name ruoyi.oldxu.net;charset utf-8;root /code;location / {try_files $uri $uri/ /index.html;index index.html index.htm;}location /prod-api/ {proxy_set_header Host $http_host;proxy_set_header X-Real-IP $remote_addr;proxy_set_header REMOTE-HOST $remote_addr;proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;proxy_pass http://gateway-svc.dev.svc.cluster.local:8080/;}
}
kubectl create configmap ruoyi-ui-conf --from-file=ruoyi.oldxu.net.conf -n dev
6、 05-ui-dp-svc-ingress.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: ruoyi-uinamespace: dev
spec:replicas: 2selector:matchLabels:app: uitemplate:metadata:labels:app: uispec:imagePullSecrets:- name: harbor-admincontainers:- name: uiimage: harbor.oldxu.net/springcloud/ruoyi-ui:v1.0ports:- containerPort: 80readinessProbe:tcpSocket:port: 80initialDelaySeconds: 60periodSeconds: 10timeoutSeconds: 10livenessProbe:tcpSocket:port: 80initialDelaySeconds: 60periodSeconds: 10timeoutSeconds: 10volumeMounts:- name: ngxconfsmountPath: /etc/nginx/conf.d/ volumes:- name: ngxconfsconfigMap:name: ruoyi-ui-conf---
apiVersion: v1
kind: Service
metadata:name: ui-svcnamespace: dev
spec:selector:app: uiports:- port: 80targetPort: 80---
#apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:name: ui-ingressnamespace: dev
spec:ingressClassName: "nginx" rules:- host: "ruoyi.oldxu.net"http:paths:- path: /pathType: Prefixbackend:serviceName: ui-svcservicePort: 80#service:# name: ui-svc# port:# number: 80
访问:
END
其他/迁移小结