01 - Integrating Common Applications with Kubernetes

Agenda:

  • Install Ratel, a one-click Kubernetes resource platform, into the Kubernetes cluster
  • Install a Redis cluster into the Kubernetes cluster
  • Install a RabbitMQ cluster into the Kubernetes cluster
  • Install Kafka into the Kubernetes cluster
  • Install ZooKeeper into the Kubernetes cluster

Installing Ratel, a one-click Kubernetes resource platform, into the Kubernetes cluster

mkdir /root/ratel

git clone https://github.com/dotbalo/ratel-doc.git

Create the Secret

servers.yaml

- serverName: 'study'
  serverAddress: 'https://192.168.1.100:8443' # obtain from `kubectl cluster-info`
  #serverAdminUser: 'xxx'
  #serverAdminPassword: 'xxx#'
  serverAdminToken: 'null'
  serverDashboardUrl: https://192.168.1.100:30831/# # dashboard address
  production: 'false'
  kubeConfigPath: "/mnt/study.config" # mount path inside the container
  # harborConfig: "HarborUrl, HarborUsername, HarborPassword, HarborEmail" # internal image registry, used in production

cp -a ~/.kube/config /root/ratel/study.config

kubectl create secret generic ratel-config --from-file=study.config  --from-file=servers.yaml -n kube-system

Create the RBAC

Create the namespace used for permission management

kubectl create ns kube-users

Then add the following ClusterRoles (the ClusterRoleBinding is created afterwards):

vim ratel-rbac.yaml

apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
      rbac.authorization.k8s.io/aggregate-to-edit: "true"
    name: ratel-namespace-readonly
  rules:
  - apiGroups:
    - ""
    resources:
    - namespaces
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - metrics.k8s.io
    resources:
    - pods
    verbs:
    - get
    - list
    - watch
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ratel-pod-delete
  rules:
  - apiGroups:
    - ""
    resources:
    - pods
    verbs:
    - get
    - list
    - delete
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ratel-pod-exec
  rules:
  - apiGroups:
    - ""
    resources:
    - pods
    - pods/log
    verbs:
    - get
    - list
  - apiGroups:
    - ""
    resources:
    - pods/exec
    verbs:
    - create
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    name: ratel-resource-edit
  rules:
  - apiGroups:
    - ""
    resources:
    - configmaps
    - persistentvolumeclaims
    - services
    - services/proxy
    verbs:
    - patch
    - update
  - apiGroups:
    - apps
    resources:
    - daemonsets
    - deployments
    - deployments/rollback
    - deployments/scale
    - statefulsets
    - statefulsets/scale
    verbs:
    - patch
    - update
  - apiGroups:
    - autoscaling
    resources:
    - horizontalpodautoscalers
    verbs:
    - patch
    - update
  - apiGroups:
    - batch
    resources:
    - cronjobs
    - jobs
    verbs:
    - patch
    - update
  - apiGroups:
    - extensions
    resources:
    - daemonsets
    - deployments
    - deployments/rollback
    - deployments/scale
    - ingresses
    - networkpolicies
    verbs:
    - patch
    - update
  - apiGroups:
    - networking.k8s.io
    resources:
    - ingresses
    - networkpolicies
    verbs:
    - patch
    - update
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ratel-resource-readonly
  rules:
  - apiGroups:
    - ""
    resources:
    - configmaps
    - endpoints
    - persistentvolumeclaims
    - pods
    - replicationcontrollers
    - replicationcontrollers/scale
    - serviceaccounts
    - services
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - ""
    resources:
    - bindings
    - events
    - limitranges
    - namespaces/status
    - pods/log
    - pods/status
    - replicationcontrollers/status
    - resourcequotas
    - resourcequotas/status
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - ""
    resources:
    - namespaces
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - apps
    resources:
    - controllerrevisions
    - daemonsets
    - deployments
    - deployments/scale
    - replicasets
    - replicasets/scale
    - statefulsets
    - statefulsets/scale
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - autoscaling
    resources:
    - horizontalpodautoscalers
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - batch
    resources:
    - cronjobs
    - jobs
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - extensions
    resources:
    - daemonsets
    - deployments
    - deployments/scale
    - ingresses
    - networkpolicies
    - replicasets
    - replicasets/scale
    - replicationcontrollers/scale
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - policy
    resources:
    - poddisruptionbudgets
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - networking.k8s.io
    resources:
    - networkpolicies
    - ingresses
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - metrics.k8s.io
    resources:
    - pods
    verbs:
    - get
    - list
    - watch
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""

kubectl create -f ratel-rbac.yaml

vim ratel-rbac-binding.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ratel-namespace-readonly-sa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ratel-namespace-readonly
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:serviceaccounts:kube-users

kubectl create -f ratel-rbac-binding.yaml

vim ratel-deploy.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ratel
  name: ratel
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratel
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: ratel
    spec:
      containers:
        - command:
            - sh
            - -c
            - ./ratel -c /mnt/servers.yaml
          env:
            - name: TZ
              value: Asia/Shanghai
            - name: LANG
              value: C.UTF-8
            - name: ProRunMode
              value: prod
            - name: ADMIN_USERNAME
              value: admin
            - name: ADMIN_PASSWORD
              value: ratel_password
          image: registry.cn-beijing.aliyuncs.com/dotbalo/ratel:latest
          imagePullPolicy: Always
          livenessProbe:
            failureThreshold: 2
            initialDelaySeconds: 10
            periodSeconds: 60
            successThreshold: 1
            tcpSocket:
              port: 8888
            timeoutSeconds: 2
          name: ratel
          ports:
            - containerPort: 8888
              name: web
              protocol: TCP
          readinessProbe:
            failureThreshold: 2
            initialDelaySeconds: 10
            periodSeconds: 60
            successThreshold: 1
            tcpSocket:
              port: 8888
            timeoutSeconds: 2
          resources:
            limits:
              cpu: 500m
              memory: 512Mi
            requests:
              cpu: 500m
              memory: 512Mi
          volumeMounts:
            - mountPath: /mnt
              name: ratel-config
      dnsPolicy: ClusterFirst
      imagePullSecrets:
        - name: myregistrykey
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - name: ratel-config
          secret:
            defaultMode: 420
            secretName: ratel-config
# kubectl label nodes k8s-node01 app=ratel

kubectl get pod -n kube-system

vi ratel-svc.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    app: ratel
  name: ratel
  namespace: kube-system
spec:
  ports:
    - name: container-1-web-1
      port: 8888
      protocol: TCP
      targetPort: 8888
  selector:
    app: ratel
  type: NodePort

kubectl apply -f ratel-svc.yaml

Check the status:

kubectl  get pod -n kube-system

Pod status, the Service, and the logs:
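
For example, to see the Service (note the NodePort of the web UI on port 8888) and to tail the logs:

kubectl get svc ratel -n kube-system
kubectl logs -f deploy/ratel -n kube-system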

Log in (the username and password are defined in the Deployment's env vars: admin / ratel_password).

Reference: https://github.com/dotbalo/ratel-doc/blob/master/cluster/Install.md

On the Nodes page you can cordon (disable scheduling) and drain nodes to take them down for maintenance.

In the Deployment settings you can update, roll back, and view the YAML.

The next step auto-generates the affinity configuration.

The next step configures the volume mounts.

The next step configures the container and its health-check parameters.

The next step configures the Service.

Finally, configure the Ingress and click Create.

Permission-related configuration:

Modify permissions and use a token.

To add more clusters, see: https://github.com/dotbalo/ratel-doc/blob/master/cluster/addCluster.md

Installing a Redis cluster into the Kubernetes cluster

Installing a single Redis instance on Kubernetes

Find the official image on Docker Hub (the OFFICIAL IMAGE is recommended):

https://hub.docker.com/search?q=redis

The supported tags are listed; opening a tag shows its Dockerfile.

The Tags page shows image sizes and exact versions; the alpine variants are the smallest.

Suppose we run Redis on the alpine image; we can mount our own configuration file from a ConfigMap.

Get the configuration file from upstream: https://raw.githubusercontent.com/antirez/redis/4.0/redis.conf

egrep -v "#|^$" redis.conf

redis.conf template:

bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/tmp/redis.log"
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble no
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

Deploy Redis

Create the ConfigMap:
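
The ConfigMap was created through the Ratel UI in the screenshots; an equivalent kubectl sketch, assuming the ratel-test1 namespace used by the manifests below:

kubectl create ns ratel-test1   # if it does not already exist
kubectl create configmap redis-conf --from-file=redis.conf -n ratel-test1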

Create the Service and Deployment:
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: redis-single-node
  name: redis-single-node
  namespace: ratel-test1
spec:
  ports:
  - name: container-1-web-1
    port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis-single-node
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: redis-single-node
  name: redis-single-node
  namespace: ratel-test1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis-single-node
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: redis-single-node
    spec:
      affinity: {}
      containers:
      - command:
        - sh
        - -c
        - redis-server /mnt/redis.conf
        env:
        - name: TZ
          value: Asia/Shanghai
        - name: LANG
          value: C.UTF-8
        image: redis:alpine
        imagePullPolicy: IfNotPresent
        lifecycle: {}
        livenessProbe:
          failureThreshold: 2
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 6379
          timeoutSeconds: 2
        name: redis-single-node
        ports:
        - containerPort: 6379
          name: web
          protocol: TCP
        resources:
          limits:
            cpu: 100m
            memory: 306Mi
          requests:
            cpu: 10m
            memory: 10Mi
        startupProbe:
          failureThreshold: 2
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 6379
          timeoutSeconds: 2
        volumeMounts:
        - mountPath: /usr/share/zoneinfo/Asia/Shanghai
          name: tz-config
        - mountPath: /etc/localtime
          name: tz-config
        - mountPath: /etc/timezone
          name: timezone
        - mountPath: /mnt
          name: redis-conf
          readOnly: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      securityContext: {}
      tolerations:
      - effect: NoExecute
        key: node.kubernetes.io/unreachable
        operator: Exists
        tolerationSeconds: 30
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
        tolerationSeconds: 30
      volumes:
      - hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
        name: tz-config
      - hostPath:
          path: /etc/timezone
          type: ""
        name: timezone
      - configMap:
          name: redis-conf
        name: redis-conf
status: {}
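
If you prefer applying the manifest directly instead of using the Ratel wizard, a sketch (assuming it is saved as redis-single-node.yaml):

kubectl apply -f redis-single-node.yaml
kubectl get pod,svc -n ratel-test1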

Exec into the container to verify:
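
For example (the names come from the Deployment above):

kubectl -n ratel-test1 exec -it deploy/redis-single-node -- sh
# inside the container:
redis-cli ping       # expect PONG
redis-cli set k1 v1
redis-cli get k1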

In practice Redis is rarely deployed as a single instance. If Redis is shared by several business teams, it is recommended to deploy it in a dedicated namespace for shared services. Redis is usually accessed through the Service name rather than the Service IP.

If the client is not in the same namespace, append the namespace (and optionally the cluster-domain) suffix to the Service name, as in the sketch below.
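
A sketch of connecting from another Pod, assuming the default cluster domain cluster.local:

redis-cli -h redis-single-node -p 6379 ping                                # same namespace
redis-cli -h redis-single-node.ratel-test1 -p 6379 ping                    # different namespace
redis-cli -h redis-single-node.ratel-test1.svc.cluster.local -p 6379 ping  # fully qualified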

Installing a Redis cluster

Project: https://github.com/ucloud/redis-cluster-operator

Get the project:

Deploy it following the project documentation:

Deploy the redis-cluster-operator

kubectl create -f deploy/crds/redis.kun_distributedredisclusters_crd.yaml
kubectl create -f deploy/crds/redis.kun_redisclusterbackups_crd.yaml

A namespace-scoped operator watches and manages resources in a single namespace, while a cluster-scoped operator watches and manages resources across the entire cluster. You can choose to run the operator either namespace-scoped or cluster-scoped.

# cluster-scoped
kubectl create -f deploy/service_account.yaml
kubectl create -f deploy/cluster/cluster_role.yaml
kubectl create -f deploy/cluster/cluster_role_binding.yaml
kubectl create -f deploy/cluster/operator.yaml

# namespace-scoped
kubectl create -f deploy/service_account.yaml
kubectl create -f deploy/namespace/role.yaml
kubectl create -f deploy/namespace/role_binding.yaml
kubectl create -f deploy/namespace/operator.yaml

Installing via Helm chart (not used in this lab)

Add Helm repository

helm repo add ucloud-operator https://ucloud.github.io/redis-cluster-operator/
helm repo update

Install chart

helm install --generate-name ucloud-operator/redis-cluster-operator

Verify that the redis-cluster-operator is up and running:

$ kubectl get deployment

Deploy a simple Redis Cluster

Redis cluster architecture:

mkdir /root/install-some-app

cd /root/install-some-app/redis-cluster-operator-master

kubectl apply -f deploy/example/custom-resources.yaml

custom-resources.yaml

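The file contents were shown only as a screenshot; a minimal sketch of such a DistributedRedisCluster resource (the exact fields and values are assumptions; check deploy/example/custom-resources.yaml in the repo):

apiVersion: redis.kun/v1alpha1
kind: DistributedRedisCluster
metadata:
  name: example-distributedrediscluster
spec:
  masterSize: 3        # number of master shards
  clusterReplicas: 1   # replicas per master, so 6 Pods in total
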
kubectl get distributedrediscluster

kubectl get all -l redis.kun/name=example-distributedrediscluster


Because this cluster has no persistent storage, it is recommended to use /root/install-some-app/redis-cluster-operator-master/deploy/example/persistent.yaml to configure a StorageClass-backed persistent volume.
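
A sketch of the storage section of such a CR, following the operator's persistent.yaml example (the StorageClass name is an assumption for your environment):

spec:
  masterSize: 3
  clusterReplicas: 1
  storage:
    type: persistent-claim
    size: 1Gi
    class: my-storage-class   # replace with a StorageClass that exists in your cluster
    deleteClaim: true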

Verify:

kubectl exec -it drc-example-distributedrediscluster-0-0 -- sh

Follow the slot routing (MOVED redirects) to locate the data:
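
For example, inside one of the cluster Pods (the -c flag makes redis-cli follow MOVED redirects between shards):

redis-cli -c
set foo bar          # may be redirected to the shard that owns the hash slot
get foo
cluster nodes        # shows which node owns which slot range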

Note: a problem encountered along the way and its fix.
Error: (error) CLUSTERDOWN Hash slot not served

Troubleshooting reference: https://cloud.tencent.com/developer/article/1919678

Scaling the Redis cluster out and in

https://github.com/ucloud/redis-cluster-operator


Scale out:

Edit the custom resource (increase masterSize):

kubectl edit DistributedRedisCluster example-distributedrediscluster


Scale in:

kubectl edit DistributedRedisCluster example-distributedrediscluster

Just change masterSize back to 3.
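
Equivalently, without opening an editor (a sketch using kubectl patch):

kubectl patch distributedrediscluster example-distributedrediscluster --type merge -p '{"spec":{"masterSize":4}}'   # scale out to 4 masters
kubectl patch distributedrediscluster example-distributedrediscluster --type merge -p '{"spec":{"masterSize":3}}'   # scale back in to 3 masters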

Custom configuration:
custom-config.yaml

Custom username and password:

The password is stored base64-encoded (encoding, not encryption).
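
For example, encoding and decoding a value by hand (the password here is made up):

echo -n 'MyRedisPass' | base64        # -> TXlSZWRpc1Bhc3M=
echo 'TXlSZWRpc1Bhc3M=' | base64 -d   # -> MyRedisPass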

Clean up the cluster:

kubectl delete -f deploy/example/redis.kun_v1alpha1_distributedrediscluster_cr.yaml

kubectl delete -f deploy/cluster/operator.yaml

kubectl delete -f deploy/cluster/cluster_role_binding.yaml

kubectl delete -f deploy/cluster/cluster_role.yaml

kubectl delete -f deploy/service_account.yaml

kubectl delete -f deploy/crds/redis.kun_redisclusterbackups_crd.yaml

kubectl delete -f deploy/crds/redis.kun_distributedredisclusters_crd.yaml

Installing a RabbitMQ cluster:

StatefulSet-based installation: https://github.com/dotbalo/k8s/tree/master/k8s-rabbitmq-cluster

mkdir rabbitmq && cd rabbitmq

kubectl create ns public-service

vim rabbitmq-configmap.yaml

kind: ConfigMap
apiVersion: v1
metadata:
  name: rmq-cluster-config
  namespace: public-service
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
    enabled_plugins: |
      [rabbitmq_management,rabbitmq_peer_discovery_k8s].
    rabbitmq.conf: |
      loopback_users.guest = false
      default_user = RABBITMQ_USER
      default_pass = RABBITMQ_PASS
      ## Clustering
      cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
      cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
      cluster_formation.k8s.address_type = hostname
      #################################################
      # public-service is rabbitmq-cluster's namespace#
      #################################################
      cluster_formation.k8s.hostname_suffix = .rmq-cluster.public-service.svc.cluster.local
      cluster_formation.node_cleanup.interval = 10
      cluster_formation.node_cleanup.only_log_warning = true
      cluster_partition_handling = autoheal
      ## queue master locator
      queue_master_locator=min-masters

vim rabbitmq-secret.yaml

kind: Secret
apiVersion: v1
metadata:
  name: rmq-cluster-secret
  namespace: public-service
stringData:
  cookie: ERLANG_COOKIE
  password: RABBITMQ_PASS
  url: amqp://RABBITMQ_USER:RABBITMQ_PASS@rmq-cluster-balancer
  username: RABBITMQ_USER
type: Opaque

vim rabbitmq-service-cluster.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    app: rmq-cluster
  name: rmq-cluster
  namespace: public-service
spec:
  clusterIP: None
  ports:
  - name: amqp
    port: 5672
    targetPort: 5672
  selector:
    app: rmq-cluster

vim rabbitmq-service-lb.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    app: rmq-cluster
    type: LoadBalancer
  name: rmq-cluster-balancer
  namespace: public-service
spec:
  ports:
  - name: http
    port: 15672
    protocol: TCP
    targetPort: 15672
  - name: amqp
    port: 5672
    protocol: TCP
    targetPort: 5672
  selector:
    app: rmq-cluster
  type: NodePort

vim rabbitmq-rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: rmq-cluster
  namespace: public-service
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rmq-cluster
  namespace: public-service
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rmq-cluster
  namespace: public-service
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rmq-cluster
subjects:
- kind: ServiceAccount
  name: rmq-cluster
  namespace: public-service

vim rabbitmq-sts.yaml

kind: StatefulSet
apiVersion: apps/v1
metadata:
  labels:
    app: rmq-cluster
  name: rmq-cluster
  namespace: public-service
spec:
  replicas: 3
  selector:
    matchLabels:
      app: rmq-cluster
  serviceName: rmq-cluster
  template:
    metadata:
      labels:
        app: rmq-cluster
    spec:
      containers:
      - args:
        - -c
        - cp -v /etc/rabbitmq/rabbitmq.conf ${RABBITMQ_CONFIG_FILE}; exec docker-entrypoint.sh
          rabbitmq-server
        command:
        - sh
        env:
        - name: RABBITMQ_DEFAULT_USER
          valueFrom:
            secretKeyRef:
              key: username
              name: rmq-cluster-secret
        - name: RABBITMQ_DEFAULT_PASS
          valueFrom:
            secretKeyRef:
              key: password
              name: rmq-cluster-secret
        - name: RABBITMQ_ERLANG_COOKIE
          valueFrom:
            secretKeyRef:
              key: cookie
              name: rmq-cluster-secret
        - name: K8S_SERVICE_NAME
          value: rmq-cluster
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: RABBITMQ_USE_LONGNAME
          value: "true"
        - name: RABBITMQ_NODENAME
          value: rabbit@$(POD_NAME).rmq-cluster.$(POD_NAMESPACE).svc.cluster.local
        - name: RABBITMQ_CONFIG_FILE
          value: /var/lib/rabbitmq/rabbitmq.conf
        image: registry.cn-beijing.aliyuncs.com/dotbalo/rabbitmq:3.7-management 
        imagePullPolicy: IfNotPresent
        livenessProbe:
          exec:
            command:
            - rabbitmqctl
            - status
          initialDelaySeconds: 30
          timeoutSeconds: 10
        name: rabbitmq
        ports:
        - containerPort: 15672
          name: http
          protocol: TCP
        - containerPort: 5672
          name: amqp
          protocol: TCP
        readinessProbe:
          exec:
            command:
            - rabbitmqctl
            - status
          initialDelaySeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        - mountPath: /etc/rabbitmq
          name: config-volume
          readOnly: false
#        - mountPath: /var/lib/rabbitmq
#          name: rabbitmq-storage
#          readOnly: false
      serviceAccountName: rmq-cluster
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          items:
          - key: rabbitmq.conf
            path: rabbitmq.conf
          - key: enabled_plugins
            path: enabled_plugins
          name: rmq-cluster-config
        name: config-volume
#  volumeClaimTemplates:
#  - metadata:
#      name: rabbitmq-storage
#    spec:
#      accessModes:
#      - ReadWriteMany
#      storageClassName: "rmq-storage-class"
#      resources:
#        requests:
#          storage: 4Gi
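
Apply the manifests in order (the file names created above) and wait for the Pods to become Ready:

kubectl create -f rabbitmq-configmap.yaml
kubectl create -f rabbitmq-secret.yaml
kubectl create -f rabbitmq-rbac.yaml
kubectl create -f rabbitmq-service-cluster.yaml
kubectl create -f rabbitmq-service-lb.yaml
kubectl create -f rabbitmq-sts.yaml
kubectl get pod -n public-service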

kubectl get svc -n public-service


kubectl -n public-service exec -it rmq-cluster-0 -- bash

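Inside the Pod, check the cluster membership, for example:

rabbitmqctl cluster_status   # should list all three cluster nodes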

kubectl get svc -n public-service && kubectl -n public-service get endpoints


Log in to the management UI with the credentials defined in the ConfigMap:

default_user = RABBITMQ_USER
default_pass = RABBITMQ_PASS

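The management UI is exposed on port 15672 through the rmq-cluster-balancer Service; find its NodePort with, for example:

kubectl get svc rmq-cluster-balancer -n public-service   # note the NodePort mapped to 15672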

Scaling RabbitMQ out and in

kubectl -n public-service scale sts rmq-cluster --replicas=4
