16-Kubernetes-TiDB-NFS Single-Node Deployment

[root@master simple]# tree
.
├── 0-tidb-namespace.yaml
├── 1-tidb-crd.yaml
├── 2-tidb-pv.yaml
├── 3-tidb-pvc.yaml
├── 4-tidb-storageclass.yaml
├── 5-tidb-cluster.yaml
└── 6-tidb-monitor.yaml

TiDB opens a large number of file descriptors by default, so the ulimit of the worker nodes and of the Docker processes running on them must be set to at least 1048576.

[root@master /]# sudo vim /etc/security/limits.conf

root            soft    nofile          1048576
root            hard    nofile          1048576
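
limits.conf only raises the limit for the root user's login sessions; to make sure the containers inherit a high enough limit as well, the Docker daemon's default ulimits can be raised too. A minimal sketch, assuming Docker is the container runtime and /etc/docker/daemon.json has no other settings yet (merge the key in if it does):

sudo vim /etc/docker/daemon.json

{
  "default-ulimits": {
    "nofile": {
      "Name": "nofile",
      "Hard": 1048576,
      "Soft": 1048576
    }
  }
}

sudo systemctl restart docker

Note that restarting the Docker daemon restarts the containers on the node unless live-restore is enabled.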

Create the namespace

[root@master simple]# cat 0-tidb-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: tidb
  labels:
    xincan.kubernetes.io/company: xincan.cn
    xincan.kubernetes.io/version: 0.0.1
    xincan.kubernetes.io/product: component
[root@master simple]#
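
Apply the manifest and confirm the namespace exists:

kubectl apply -f 0-tidb-namespace.yaml
kubectl get namespace tidb --show-labels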

Create the CRDs

wget -O 1-tidb-crd.yaml https://raw.githubusercontent.com/pingcap/tidb-operator/master/manifests/crd.yaml
[root@master simple]# kubectl apply -f 1-tidb-crd.yaml
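
This registers the Operator's custom resources; a quick check:

kubectl get crd | grep pingcap.com

The list should include tidbclusters.pingcap.com and tidbmonitors.pingcap.com, which the later manifests rely on.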

Deploy the Operator

[root@master /]# helm install --namespace tidb tidb-operator pingcap/tidb-operator --version v1.2.1 \
    --set operatorImage=registry.cn-beijing.aliyuncs.com/tidb/tidb-operator:v1.2.1 \
    --set tidbBackupManagerImage=registry.cn-beijing.aliyuncs.com/tidb/tidb-backup-manager:v1.2.1 \
    --set scheduler.kubeSchedulerImageName=registry.aliyuncs.com/google_containers/kube-scheduler
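
The chart comes from PingCAP's Helm repository; if it is not registered on the host yet, it needs to be added before the install above. Afterwards, confirm the Operator pods (tidb-controller-manager and tidb-scheduler) reach Running:

helm repo add pingcap https://charts.pingcap.org/
helm repo update
kubectl get pods -n tidb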

Create the PVs

[root@master simple]# cat 2-tidb-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolumes are cluster-scoped, so no namespace is needed
  name: basic-pd-0
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: tidb-pd-storage
  nfs:
    server: 192.168.1.80
    path: /hatech/nfs/data/xincan/tidb

---


apiVersion: v1
kind: PersistentVolume
metadata:
  name: basic-tikv-0
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: tidb-pd-storage
  nfs:
    server: 192.168.1.80
    path: /hatech/nfs/data/xincan/tidb
[root@master simple]#
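
Both PVs point at the same NFS export, so PD and TiKV data will end up in the same directory; separate subdirectories per component can be used if that is a concern. Apply the PVs and check that they show as Available (they bind once the matching PVCs exist):

kubectl apply -f 2-tidb-pv.yaml
kubectl get pv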

Create the PVCs

[root@master simple]# cat 3-tidb-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pd-basic-pd-0
  namespace: tidb
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: tidb-pd-storage
  resources:
    requests:
      storage: 1Gi

---


apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tikv-basic-tikv-0
  namespace: tidb
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: tidb-pd-storage
  resources:
    requests:
      storage: 1Gi
[root@master simple]#
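
These claim names match the ones the Operator's StatefulSets expect for a cluster named basic (pd-basic-pd-0, tikv-basic-tikv-0), so the pre-created volumes above get reused instead of relying on dynamic provisioning. Apply them and confirm the binding:

kubectl apply -f 3-tidb-pvc.yaml
kubectl get pvc -n tidb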

Create the StorageClasses

[root@master simple]# cat 4-tidb-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  # StorageClasses are cluster-scoped, so no namespace is set
  name: tidb-pd-storage
# the PVs above are provisioned statically on NFS, so no dynamic provisioner is needed
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: Immediate

---


apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: Immediate
[root@master simple]#
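
Apply the classes, then double-check that the PVs and PVCs report Bound before moving on:

kubectl apply -f 4-tidb-storageclass.yaml
kubectl get sc
kubectl get pv,pvc -n tidb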

Create the TiDB cluster

[root@master simple]# cat 5-tidb-cluster.yaml
# IT IS NOT SUITABLE FOR PRODUCTION USE.
# This YAML describes a basic TiDB cluster with minimum resource requirements,
# which should be able to run in any Kubernetes cluster with storage support.
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: basic
  namespace: tidb
spec:
  version: v5.1.1
  timezone: UTC
  pvReclaimPolicy: Retain
  enableDynamicConfiguration: true
  configUpdateStrategy: RollingUpdate
  discovery: {}
  pd:
    baseImage: uhub.service.ucloud.cn/pingcap/pd
    replicas: 1
    # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
    # storageClassName: local-storage
    requests:
      storage: "1Gi"
    config: {}
  tikv:
    baseImage: uhub.service.ucloud.cn/pingcap/tikv
    replicas: 1
    # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
    # storageClassName: local-storage
    requests:
      storage: "1Gi"
    config:
      storage:
        # In basic examples, we set this to avoid using too much storage.
        reserve-space: "0MB"
      rocksdb:
        # In basic examples, we set this to avoid the following error in some Kubernetes clusters:
        # "the maximum number of open file descriptors is too small, got 1024, expect greater or equal to 82920"
        max-open-files: 256
      raftdb:
        max-open-files: 256
  tidb:
    baseImage: uhub.service.ucloud.cn/pingcap/tidb
    replicas: 1
    service:
      type: ClusterIP
    config: {}
[root@master simple]#
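
Apply the cluster definition and wait for the PD, TiKV, and TiDB pods to reach Running; the first image pulls can take a few minutes:

kubectl apply -f 5-tidb-cluster.yaml
kubectl get tidbcluster -n tidb
kubectl get pods -n tidb -o wide -w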

Create the TiDB monitor

[root@master simple]# cat 6-tidb-monitor.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbMonitor
metadata:
  name: basic
  namespace: tidb
spec:
  clusters:
  - name: basic
  kubePrometheusURL: http://basic-prometheus.tidb:9090
  alertmanagerURL: alertmanager-main.monitoring:9093
  prometheus:
    baseImage: prom/prometheus
    version: v2.18.1
  grafana:
    baseImage: grafana/grafana
    version: 7.5.7
  initializer:
    baseImage: uhub.service.ucloud.cn/pingcap/tidb-monitor-initializer
    version: v5.1.1
  reloader:
    baseImage: uhub.service.ucloud.cn/pingcap/tidb-monitor-reloader
    version: v1.0.1
  imagePullPolicy: IfNotPresent
[root@master simple]#
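
Apply the monitor and, once its pod is ready, Grafana can be reached through a port-forward. This assumes the basic-grafana service name generated by the Operator (<monitor name>-grafana) and Grafana's default admin/admin credentials:

kubectl apply -f 6-tidb-monitor.yaml
kubectl get pods -n tidb | grep monitor
kubectl port-forward -n tidb svc/basic-grafana 3000:3000
# then open http://127.0.0.1:3000 in a browser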

Log in to the Dashboard

Username: root. Default password: none (it is empty), just press Enter.
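
A sketch for reaching the cluster from the master node, assuming the service names the Operator generates for a cluster called basic: basic-pd serves the built-in TiDB Dashboard on port 2379, and basic-tidb speaks the MySQL protocol on port 4000.

# TiDB Dashboard (served by PD)
kubectl port-forward -n tidb svc/basic-pd 2379:2379
# then open http://127.0.0.1:2379/dashboard and log in as root with an empty password

# SQL access with any MySQL client
kubectl port-forward -n tidb svc/basic-tidb 4000:4000
mysql -h 127.0.0.1 -P 4000 -u root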