03 - Helm Hands-On Practice

Converting rabbitmq-cluster to a Helm chart

Convert the RabbitMQ YAML provided at https://www.ljh.cool/37543.html into a Helm chart.

helm create rabbitmq-cluster

Remove NOTES.txt from the templates/ directory.
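For reference, helm create scaffolds a layout roughly like this (trimmed; the generated example deployment/service/ingress templates can also be deleted, since we replace them with the RabbitMQ templates below):

rabbitmq-cluster/
├── Chart.yaml
├── values.yaml
├── charts/
└── templates/
    ├── _helpers.tpl
    ├── NOTES.txt        <- removed
    └── ...generated example templates...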


_helpers.tpl

{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "rabbitmq-cluster.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "rabbitmq-cluster.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "rabbitmq-cluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "rabbitmq-cluster.labels" -}}
helm.sh/chart: {{ include "rabbitmq-cluster.chart" . }}
{{ include "rabbitmq-cluster.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "rabbitmq-cluster.selectorLabels" -}}
app.kubernetes.io/name: {{ include "rabbitmq-cluster.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "rabbitmq-cluster.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "rabbitmq-cluster.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

rabbitmq-sts.yaml

kind: StatefulSet
apiVersion: apps/v1
metadata:
  {{- if .Values.labels }}
  labels:
    {{- with .Values.labels }}
      {{- toYaml . | nindent 4 }}
    {{- end  }}
  {{- else }}
  labels:
    {{- include "rabbitmq-cluster.labels" . | nindent 4 }}
  {{- end }}
  name: {{ .Chart.Name }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- with .Values.labels }}
        {{- toYaml . | nindent 6 }}
      {{- end  }}
  serviceName: {{ .Values.service.headless.name }}
  template:
    metadata:
      labels:
        {{- with .Values.labels }}
          {{- toYaml . | nindent 8 }}
        {{- end  }}
    spec:
      containers:
      - args:
        - -c
        - cp -v /etc/rabbitmq/rabbitmq.conf ${RABBITMQ_CONFIG_FILE}; exec docker-entrypoint.sh
          rabbitmq-server
        command:
        - sh
        env:
        - name: RABBITMQ_DEFAULT_USER
          valueFrom:
            secretKeyRef:
              key: username
              name: {{ .Values.secret.name }}
        - name: RABBITMQ_DEFAULT_PASS
          valueFrom:
            secretKeyRef:
              key: password
              name: {{ .Values.secret.name }}
        - name: RABBITMQ_ERLANG_COOKIE
          valueFrom:
            secretKeyRef:
              key: cookie
              name: {{ .Values.secret.name }}
        - name: K8S_SERVICE_NAME
          value: {{ .Values.service.headless.name }}
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: RABBITMQ_USE_LONGNAME
          value: "true"
        - name: RABBITMQ_NODENAME
          value: rabbit@$(POD_NAME).{{ .Values.service.headless.name }}.$(POD_NAMESPACE).svc.cluster.local
        - name: RABBITMQ_CONFIG_FILE
          value: /var/lib/rabbitmq/rabbitmq.conf
        image: {{ quote .Values.image.repository }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        livenessProbe:
          exec:
            command:
            - rabbitmqctl
            - status
          initialDelaySeconds: 30
          timeoutSeconds: 10
        name: rabbitmq
        ports:
        - containerPort: 15672
          name: http
          protocol: TCP
        - containerPort: 5672
          name: amqp
          protocol: TCP
        readinessProbe:
          exec:
            command:
            - rabbitmqctl
            - status
          initialDelaySeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        - mountPath: /etc/rabbitmq
          name: config-volume
          readOnly: false
      #  - mountPath: /var/lib/rabbitmq
      #    name: rabbitmq-storage
      #    readOnly: false
      # (uncomment the three lines above when storage.storageClass.use is true, so the PVC is actually mounted)
      serviceAccountName: {{ .Values.serviceAccount.name }}
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          items:
          - key: rabbitmq.conf
            path: rabbitmq.conf
          - key: enabled_plugins
            path: enabled_plugins
          name: {{ .Values.configmap.name }}
        name: config-volume
{{- if .Values.storage.storageClass.use  }}
  volumeClaimTemplates:
  - metadata:
      name: rabbitmq-storage
    spec:
      accessModes:
        {{- with .Values.storage.storageClass.accessModes }}
          {{- toYaml . | nindent 6 }}
        {{- end }}
      storageClassName: {{ .Values.storage.storageClass.name }}
      resources:
        requests:
          storage: {{ .Values.storage.storageClass.storage }}
{{- else }}
      - name: rabbitmq-storage
        emptyDir: {}
{{- end }}
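Before installing, it is worth rendering the chart locally to confirm that the conditional storage block produces valid YAML in both branches:

helm lint .
helm template rabbitmq-cluster . --set storage.storageClass.use=false
helm template rabbitmq-cluster . --set storage.storageClass.use=true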

rabbitmq-service-cluster.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    {{- with .Values.labels }}
      {{- toYaml . | nindent 4 }}
    {{- end  }}
  name: {{ .Values.service.headless.name }}
spec:
  clusterIP: None
  ports:
  - name: amqp
    port: 5672
    targetPort: 5672
  selector:
    {{- with .Values.labels }}
      {{- toYaml . | nindent 4 }}
    {{- end  }}

rabbitmq-service-lb.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    {{- with .Values.labels }}
      {{- toYaml . | nindent 4 }}
    {{- end  }}
    type: {{ .Values.service.loadbalancer.type }}
  name: {{ .Values.service.loadbalancer.name }}
spec:
  ports:
  - name: http
    port: 15672
    protocol: TCP
    targetPort: 15672
  - name: amqp
    port: 5672
    protocol: TCP
    targetPort: 5672
  selector:
    {{- with .Values.labels }}
      {{- toYaml . | nindent 4 }}
    {{- end  }}
  type: {{ .Values.service.loadbalancer.type }}

rabbitmq-configmap.yaml

kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ .Values.configmap.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
    enabled_plugins: |
      [rabbitmq_management,rabbitmq_peer_discovery_k8s].
    rabbitmq.conf: |
      loopback_users.guest = false
      default_user = {{ .Values.username }}
      default_pass = {{ .Values.password }}
      ## Clustering
      cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
      cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
      cluster_formation.k8s.address_type = hostname
      # the hostname suffix below uses the release namespace (public-service in this deployment)
      cluster_formation.k8s.hostname_suffix = .{{ .Values.service.headless.name }}.{{ .Release.Namespace }}.svc.cluster.local
      cluster_formation.node_cleanup.interval = 10
      cluster_formation.node_cleanup.only_log_warning = true
      cluster_partition_handling = autoheal
      ## queue master locator
      queue_master_locator=min-masters

rabbitmq-secret.yaml

kind: Secret
apiVersion: v1
metadata:
  name: {{ .Values.secret.name }}
stringData:
  cookie: ERLANG_COOKIE
  password: {{ .Values.password }}
  url: amqp://{{- .Values.username -}}:{{- .Values.password -}}@{{- .Values.service.loadbalancer.name }}
  username: {{ .Values.username }}
type: Opaque
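Because the Secret is declared via stringData, Kubernetes stores the values base64-encoded; to read back the rendered AMQP url later (assuming the secret is named rmq-cluster-secret, as in the sample values below):

kubectl -n public-service get secret rmq-cluster-secret -o jsonpath='{.data.url}' | base64 -d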

rabbitmq-rbac.yaml

{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Values.serviceAccount.name }}
{{- end }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rmq-cluster
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rmq-cluster
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rmq-cluster
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceAccount.name }}
  namespace: {{ .Release.Namespace }}
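values.yaml

The templates above reference a number of values that helm create does not generate. A values.yaml along the following lines satisfies them; every name and default here is an illustrative assumption, so adjust for your environment:

replicaCount: 3
username: admin            # demo credentials; change for production
password: changeme
labels:                    # must be non-empty: the StatefulSet selector is built from it
  app: rmq-cluster
image:
  repository: rabbitmq:3-management   # the template quotes this value as the full image reference
  pullPolicy: IfNotPresent
secret:
  name: rmq-cluster-secret
configmap:
  name: rmq-cluster-config
serviceAccount:
  create: true
  name: rmq-cluster
service:
  headless:
    name: rmq-cluster-headless
  loadbalancer:
    name: rmq-cluster-balancer
    type: NodePort
storage:
  storageClass:
    use: false             # true: volumeClaimTemplate with the class below; false: emptyDir
    name: ""
    accessModes:
      - ReadWriteOnce
    storage: 10Gi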

Test with a dry run: helm install rabbitmq-cluster --dry-run .

Run:
helm install rabbitmq-cluster --namespace public-service --set replicaCount=3 .
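Note that the target namespace must already exist. Create it first, or let Helm create it:

kubectl create ns public-service
# or: helm install rabbitmq-cluster --namespace public-service --create-namespace --set replicaCount=3 .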


kubectl get pods,svc,cm,secret,role -n public-service


Access the RabbitMQ management UI via the NodePort: http://192.168.1.100:31211/


Deploying a ZooKeeper + Kafka cluster with Helm

Reference: Bitnami documentation: https://docs.bitnami.com/tutorials/deploy-scalable-kafka-zookeeper-cluster-kubernetes

With an online install, the ZooKeeper pods would not start and stayed in Pending because of PVC volume-mount problems, so instead we download the zookeeper and kafka chart packages, modify their configuration files, and install offline.

Add the Bitnami Helm repository:

helm repo add bitnami https://charts.bitnami.com/bitnami

Create the namespace

kubectl create ns public-service

Download the offline packages

# Create a directory for the chart packages
cd ~ && mkdir zookeeper-kafka && cd zookeeper-kafka

# Pull the chart packages
helm pull bitnami/zookeeper --version=11.4.10
helm pull bitnami/kafka --version=22.1.2

[root@k8s-master01 zookeeper-kafka]# ls
kafka-22.1.2.tgz  zookeeper-11.4.10.tgz

# Extract
tar -zvxf kafka-22.1.2.tgz
tar -zvxf zookeeper-11.4.10.tgz

Deploy the ZooKeeper cluster

Edit the configuration file values.yaml

  • Main changes: in the persistence block, set enabled to false, and comment out storageClass: "" and existingClaim: "" (see the snippet after these commands)
[root@k8s-master01 zookeeper-kafka]# cd /root/zookeeper-kafka/zookeeper
[root@k8s-master01 zookeeper]# vim values.yaml
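After editing, the relevant part of values.yaml looks roughly like this (field layout follows the Bitnami chart; exact positions vary by chart version):

persistence:
  enabled: false
  # storageClass: ""
  # existingClaim: ""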

Install the ZooKeeper cluster

helm install zookeeper --set replicaCount=3 -n public-service --set auth.enabled=false --set allowAnonymousLogin=true .

Wait a few minutes until the chart is deployed, and note the service name shown in the output; you will need it in a later step.
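If you need to look the service name up again later:

kubectl -n public-service get svc zookeeper

Its in-cluster FQDN, used in the Kafka install step, is zookeeper.public-service.svc.cluster.local.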


Check the pod status:

[root@k8s-master01 zookeeper]# kubectl -n public-service get pods

Edit the Kafka cluster configuration file

cd ~/zookeeper-kafka/kafka
vim values.yaml

Main changes: in the persistence block, set enabled to false, and comment out storageClass: "" and existingClaim: ""; also set kraft.enabled to false (see the snippet below).
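Roughly, after editing (layout per the Bitnami chart):

persistence:
  enabled: false
  # storageClass: ""
  # existingClaim: ""
kraft:
  enabled: false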


Install the Kafka cluster

cd ~/zookeeper-kafka/kafka

helm install kafka -n public-service --set zookeeper.enabled=false --set replicaCount=3 --set externalZookeeper.servers=ZOOKEEPER-SERVICE-NAME .

Replace ZOOKEEPER-SERVICE-NAME with the Apache ZooKeeper service name obtained at the end of the previous step, i.e. zookeeper.public-service.svc.cluster.local.
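With the name substituted, the full command is:

helm install kafka -n public-service --set zookeeper.enabled=false --set replicaCount=3 --set externalZookeeper.servers=zookeeper.public-service.svc.cluster.local .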


kubectl -n public-service get pods


Verify that Kafka is bound to ZooKeeper

kubectl -n public-service logs -f kafka-0 | grep socket


Test the cluster

# Enter the kafka pod and create a topic
[root@k8s-master01 kafka]# kubectl -n public-service exec -it kafka-0 -- bash
I have no name!@kafka-0:/$ kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic testtopic
Created topic testtopic.

# Start a consumer
[root@k8s-master01 ~]# kubectl -n public-service exec -it kafka-0 -- bash
I have no name!@kafka-0:/$ kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic testtopic


# Open a new window, enter the kafka pod, start a producer, and type messages; the consumer will receive them
[root@k8s-master01 ~]# kubectl -n public-service exec -it kafka-0 -- bash
I have no name!@kafka-0:/$ kafka-console-producer.sh --bootstrap-server localhost:9092 --topic testtopic
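For a non-interactive smoke test you can also pipe a message straight into the producer (this reuses the testtopic created above):

kubectl -n public-service exec -it kafka-0 -- bash -c 'echo hello | kafka-console-producer.sh --bootstrap-server localhost:9092 --topic testtopic'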

Notes:

For persistent storage, the storageClass parameter in values.yaml is left unset and no affinity rules are configured; a test environment doesn't need them, but configure them as required for production.
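For example, to enable persistence against a real StorageClass (the class name here is a placeholder for whatever your cluster provides):

helm install zookeeper -n public-service --set replicaCount=3 --set persistence.enabled=true --set persistence.storageClass=<your-storageclass> .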

Uninstall the applications

helm -n public-service uninstall zookeeper
helm -n public-service uninstall kafka

Published by LJH. Please credit the source when reposting: https://www.ljh.cool/37676.html
