k8s搭建redis集群

273 阅读4分钟

k8s搭建redis集群

所需环境

  • k8s集群
  • kubernetes Dashboard 可视化界面(非必须)
  • local-path-provisioner动态本地存储
  • 安装redis集群三主三从

kubernetes Dashboard 可视化界面

参考博客: mac搭建k8s-docker方式

k8s集群

参考博客: mac搭建k8s-docker方式

local-path-provisioner动态本地存储

local-path-provisioner是一个 Kubernetes(K8s)的本地存储动态存储卷供应器,它允许用户在本地节点的文件系统上动态创建存储卷,用于存储应用程序的数据。

编写配置文件

vim local-path-storage.yaml
# All resources for Rancher's local-path dynamic provisioner.
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage

---
# Identity the provisioner Deployment runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
# Namespaced permissions: the provisioner creates/deletes helper pods
# in its own namespace to set up and tear down volume directories.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]

---
# Cluster-wide permissions: watch nodes/PVCs/storageclasses, manage PVs,
# and emit events.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["nodes", "persistentvolumeclaims", "configmaps", "pods", "pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]

---
# Bind the namespaced Role to the provisioner's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage

---
# Bind the ClusterRole to the same ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage

---
# The provisioner controller itself; reads its settings from the
# local-path-config ConfigMap mounted at /etc/config/.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
        - name: local-path-provisioner
          image: rancher/local-path-provisioner:v0.0.30
          imagePullPolicy: IfNotPresent
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CONFIG_MOUNT_PATH
              value: /etc/config/
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config

---
# StorageClass that PVCs reference (storageClassName: local-path).
# WaitForFirstConsumer delays binding until a pod is scheduled, so the
# volume is created on the node the pod actually lands on.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete

---
# Provisioner configuration.
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  # Host directory used for volumes on every node not listed explicitly.
  config.json: |-
    {
            "nodePathMap":[
            {
                    "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
                    "paths":["/opt/local-path-provisioner"]
            }
            ]
    }
  # Script run by the helper pod to create a volume directory.
  setup: |-
    #!/bin/sh
    set -eu
    mkdir -m 0777 -p "$VOL_DIR"
  # Script run by the helper pod to remove a volume directory on delete.
  teardown: |-
    #!/bin/sh
    set -eu
    rm -rf "$VOL_DIR"
  # Pod template for the setup/teardown helper; the provisioner injects
  # the command and VOL_DIR itself.
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      priorityClassName: system-node-critical
      tolerations:
        - key: node.kubernetes.io/disk-pressure
          operator: Exists
          effect: NoSchedule
      containers:
      - name: helper-pod
        image: busybox
        imagePullPolicy: IfNotPresent

执行配置文件

kubectl apply -f local-path-storage.yaml

查看创建结果

image.png

安装redis集群三主三从

资源规划

编辑配置文件

vim redis-cluster-StatefulSet.yaml
# Namespace for all Redis cluster resources.
apiVersion: v1
kind: Namespace
metadata:
  name: redis-cluster-namespace
---
# redis.conf shared by every pod in the StatefulSet.
# cluster-config-file nodes.conf is written by Redis itself under the
# working directory (/data), so it persists across pod restarts.
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster-config
  namespace: redis-cluster-namespace
data:
  redis.conf: |
    cluster-enabled yes
    cluster-config-file nodes.conf
    cluster-node-timeout 5000
    appendonly yes
    protected-mode no
    port 6379
---
# Governing Service referenced by the StatefulSet's serviceName.
# Made headless (clusterIP: None) so each pod gets a stable DNS record
# (redis-cluster-N.redis-cluster.redis-cluster-namespace.svc), which is
# the expected setup for a StatefulSet governing service; a virtual
# ClusterIP is not useful for Redis Cluster, whose clients must reach
# individual nodes rather than a load-balanced VIP.
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster
  namespace: redis-cluster-namespace
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    app: redis-cluster
  ports:
  - name: client
    port: 6379
    targetPort: 6379
  - name: gossip
    port: 16379
    targetPort: 16379
---
# Optional external entry point for clients, exposing only the client
# port via a LoadBalancer.
# NOTE(review): a cluster-aware client connecting through this VIP will
# still receive MOVED redirects pointing at pod IPs, which are usually
# not routable from outside the cluster — fine for quick testing,
# verify before using as a production access path.
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster-client
  namespace: redis-cluster-namespace
spec:
  type: LoadBalancer
  selector:
    app: redis-cluster
  ports:
  - name: client
    port: 6379
    targetPort: 6379
---
# Six pods: redis-cli --cluster create with --cluster-replicas 1 later
# turns them into 3 masters + 3 replicas.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
  namespace: redis-cluster-namespace
spec:
  serviceName: redis-cluster  # governing Service defined earlier in this file
  replicas: 6
  selector:
    matchLabels:
      app: redis-cluster
  template:
    metadata:
      labels:
        app: redis-cluster
    spec:
      containers:
      - name: redis
        image: redis:6.2.6
        ports:
        - containerPort: 6379   # client connections
          name: client
        - containerPort: 16379  # cluster bus (gossip) = client port + 10000
          name: gossip
        volumeMounts:
        - name: config
          mountPath: /usr/local/etc/redis  # redis.conf from the ConfigMap
        - name: data
          mountPath: /data                 # AOF and nodes.conf live here
        command:
        - redis-server
        - /usr/local/etc/redis/redis.conf
      volumes:
      - name: config
        configMap:
          name: redis-cluster-config
  # One local-path PVC per pod; persisting /data keeps nodes.conf so a
  # restarted pod can rejoin the cluster.
  # NOTE(review): cluster-announce-ip is not set, so nodes advertise
  # their pod IPs — recovery after an IP change relies on nodes.conf;
  # confirm this behaves as expected on your cluster.
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "local-path"
      resources:
        requests:
          storage: 1Gi

执行配置文件

kubectl apply -f redis-cluster-StatefulSet.yaml

查看结果

image.png

image.png

image.png

加入集群

kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli --cluster create --cluster-replicas 1 $(kubectl get pods -n redis-cluster-namespace -l app=redis-cluster -o jsonpath='{range .items[*]}{.status.podIP}:6379 {end}')

查看集群状态

kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli -c cluster info

image.png

kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli -c cluster nodes

image.png

使用key值和数据验证

#注意,redis-cli参数:
# -c : 自动重定向到对应节点获取信息,如果不加,只会返回重定向信息,不会得到值

#不加 -c
[root@ src]# kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli
127.0.0.1:6379> set a a
(error) MOVED 15495 10.244.1.183:6379

#加上 -c
[root@ src]# kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli -c
127.0.0.1:6379> set a a
-> Redirected to slot [15495] located at 10.244.1.183:6379    #自动跳到槽位15495所在的节点
OK
10.244.1.183:6379> get a    #可以成功get到a的值
"a"

扩容

kubectl scale statefulset redis-cluster --replicas=10 -n redis-cluster-namespace

再次查看集群状态

扩容后重新加入集群

kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli --cluster add-node 10.244.1.191:6379 10.244.1.181:6379

给新加入的主节点分配hash槽(新节点初始没有槽位,rebalance默认跳过空节点,需加 --cluster-use-empty-masters)

kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli --cluster rebalance --cluster-use-empty-masters 10.244.1.181:6379

平衡哈希槽

kubectl exec -it redis-cluster-0 -n redis-cluster-namespace -- redis-cli --cluster rebalance 10.244.1.181:6379

设置节点为从节点

kubectl exec -it redis-cluster-8 -n redis-cluster-namespace -- redis-cli cluster replicate b4cf315a4d00b28e96d48981c6ae17d0c4a35b6f