1. Deploy TiDB Operator

1.1 Download the TiDB Cluster CRD manifest
wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/manifests/crd.yaml

1.2 Create the TiDB Cluster CRDs
kubectl apply -f crd.yaml
customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created

1.3 Check CRD status
[root@k8s-master ~]# kubectl get crd | grep ping
backups.pingcap.com                  2022-03-28T07:46:10Z
backupschedules.pingcap.com          2022-03-28T07:46:10Z
dmclusters.pingcap.com               2022-03-28T07:46:11Z
restores.pingcap.com                 2022-03-28T07:46:11Z
tidbclusterautoscalers.pingcap.com   2022-03-28T07:46:11Z
tidbclusters.pingcap.com             2022-03-28T07:46:11Z
tidbinitializers.pingcap.com         2022-03-28T07:46:12Z
tidbmonitors.pingcap.com             2022-03-28T07:46:12Z
tidbngmonitorings.pingcap.com        2022-03-28T07:46:12Z
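If you script these steps, you can also wait for the CRDs to be fully registered before continuing. A minimal sketch using kubectl wait (the CRD names follow the apply output above):

# block until each pingcap.com CRD reports the Established condition
for crd in tidbclusters backups restores backupschedules tidbmonitors tidbinitializers tidbclusterautoscalers; do
  kubectl wait --for condition=established --timeout=60s crd/${crd}.pingcap.com
done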
2. Install and configure TiDB Operator

Method 1: via YAML

2.1 Download the TiDB Operator Docker images

docker pull pingcap/tidb-operator:v1.1.7
docker pull pingcap/tidb-backup-manager:v1.1.7
docker pull pingcap/advanced-statefulset:v0.3.3
mkdir -p /opt/soft/docker-image
docker save -o tidb-backup-manager.tar pingcap/tidb-backup-manager
docker save -o tidb-operator.tar pingcap/tidb-operator
docker save -o advanced-statefulset.tar pingcap/advanced-statefulset
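If the worker nodes cannot pull from Docker Hub directly, the saved tarballs can be copied to each node and loaded there. A minimal sketch, assuming the tarballs sit under the /opt/soft/docker-image directory created above and that the nodes are k8s-node1 and k8s-node2 (the node names that appear later in this walkthrough):

for node in k8s-node1 k8s-node2; do
  scp /opt/soft/docker-image/*.tar ${node}:/tmp/                  # copy the image tarballs to the node
  ssh ${node} 'for f in /tmp/*.tar; do docker load -i $f; done'   # load them into the local Docker daemon
done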
2.2 Create the tidb-operator deployment file

cat tidb-operator-deploy.yaml
# Source: tidb-operator/templates/scheduler-policy-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tidb-scheduler-policy
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
data:
  policy.cfg: |-
    {
      "kind": "Policy",
      "apiVersion": "v1",
      "predicates": [
        {"name": "NoVolumeZoneConflict"},
        {"name": "MaxEBSVolumeCount"},
        {"name": "MaxAzureDiskVolumeCount"},
        {"name": "NoDiskConflict"},
        {"name": "GeneralPredicates"},
        {"name": "PodToleratesNodeTaints"},
        {"name": "CheckVolumeBinding"},
        {"name": "MaxGCEPDVolumeCount"},
        {"name": "MatchInterPodAffinity"},
        {"name": "CheckVolumeBinding"}
      ],
      "priorities": [
        {"name": "SelectorSpreadPriority", "weight": 1},
        {"name": "InterPodAffinityPriority", "weight": 1},
        {"name": "LeastRequestedPriority", "weight": 1},
        {"name": "BalancedResourceAllocation", "weight": 1},
        {"name": "NodePreferAvoidPodsPriority", "weight": 1},
        {"name": "NodeAffinityPriority", "weight": 1},
        {"name": "TaintTolerationPriority", "weight": 1}
      ],
      "extenders": [
        {
          "urlPrefix": "http://127.0.0.1:10262/scheduler",
          "filterVerb": "filter",
          "preemptVerb": "preempt",
          "weight": 1,
          "httpTimeout": 30000000000,
          "enableHttps": false
        }
      ]
    }
---
# Source: tidb-operator/templates/controller-manager-rbac.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
rules:
- apiGroups: [""]
  resources:
  - services
  - events
  verbs: ["*"]
- apiGroups: [""]
  resources: ["endpoints", "configmaps"]
  verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: [""]
  resources: ["serviceaccounts"]
  verbs: ["create", "get", "update", "delete"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create", "update", "get", "list", "watch", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch", "update", "delete"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments", "controllerrevisions"]
  verbs: ["*"]
- apiGroups: ["extensions"]
  resources: ["ingresses"]
  verbs: ["*"]
- apiGroups: ["apps.pingcap.com"]
  resources: ["statefulsets", "statefulsets/status"]
  verbs: ["*"]
- apiGroups: ["pingcap.com"]
  resources: ["*"]
  verbs: ["*"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["rbac.authorization.k8s.io"]
  resources: ["clusterroles", "roles"]
  verbs: ["escalate", "create", "get", "update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
  resources: ["rolebindings", "clusterrolebindings"]
  verbs: ["create", "get", "update", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-controller-manager
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: tidb-operator:tidb-controller-manager
  apiGroup: rbac.authorization.k8s.io
---
# Source: tidb-operator/templates/scheduler-rbac.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
rules:
# ConfigMap permission for --policy-configmap
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list"]
- apiGroups: ["pingcap.com"]
  resources: ["tidbclusters"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "update"]
# Extra permissions for endpoints other than kube-scheduler
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["delete", "get", "patch", "update"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["tidb-scheduler"]
  verbs: ["get", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: tidb-operator:tidb-scheduler
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:kube-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: system:kube-scheduler
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:volume-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: system:volume-scheduler
  apiGroup: rbac.authorization.k8s.io
---
# Source: tidb-operator/templates/controller-manager-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-operator
      app.kubernetes.io/instance: tidb-operator
      app.kubernetes.io/component: controller-manager
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tidb-operator
        app.kubernetes.io/instance: tidb-operator
        app.kubernetes.io/component: controller-manager
    spec:
      serviceAccount: tidb-controller-manager
      containers:
      - name: tidb-operator
        image: pingcap/tidb-operator:v1.1.7
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 80m
            memory: 50Mi
        command:
        - /usr/local/bin/tidb-controller-manager
        - -tidb-backup-manager-image=pingcap/tidb-backup-manager:v1.1.7
        - -tidb-discovery-image=pingcap/tidb-operator:v1.1.7
        - -cluster-scoped=true
        - -auto-failover=true
        - -pd-failover-period=5m
        - -tikv-failover-period=5m
        - -tiflash-failover-period=5m
        - -tidb-failover-period=5m
        - -v=2
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: TZ
          value: UTC
---
# Source: tidb-operator/templates/scheduler-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-operator
      app.kubernetes.io/instance: tidb-operator
      app.kubernetes.io/component: scheduler
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tidb-operator
        app.kubernetes.io/instance: tidb-operator
        app.kubernetes.io/component: scheduler
    spec:
      serviceAccount: tidb-scheduler
      containers:
      - name: tidb-scheduler
        image: pingcap/tidb-operator:v1.1.7
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 250m
            memory: 150Mi
          requests:
            cpu: 80m
            memory: 50Mi
        command:
        - /usr/local/bin/tidb-scheduler
        - -v=2
        - -port=10262
      - name: kube-scheduler
        image: k8s.gcr.io/kube-scheduler:v1.14.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 250m
            memory: 150Mi
          requests:
            cpu: 80m
            memory: 50Mi
        command:
        - kube-scheduler
        - --port=10261
        - --leader-elect=true
        - --lock-object-name=tidb-scheduler
        - --lock-object-namespace=tidb-admin
        - --scheduler-name=tidb-scheduler
        - --v=2
        - --policy-configmap=tidb-scheduler-policy
        - --policy-configmap-namespace=tidb-admin
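The kube-scheduler image version in this file (v1.14.0) is normally chosen to match the Kubernetes version of the cluster itself, so adjust it if needed. Before applying, it can also be worth checking that the reassembled file parses cleanly; a minimal sketch using a client-side dry run (older kubectl releases use the bare --dry-run flag instead):

kubectl apply --dry-run=client -f tidb-operator-deploy.yaml -n tidb-admin   # validate only, nothing is created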
2.3 Create tidb-operator

# create the tidb-admin namespace
[root@r21 soft]# kubectl create namespace tidb-admin

# create tidb-operator
[root@r21 soft]# kubectl apply -f tidb-operator-deploy.yaml -n tidb-admin

2.4 Check tidb-operator status
[root@k8s-master tidb-operator]# kubectl get pods -n tidb-admin
NAME                                       READY   STATUS    RESTARTS   AGE
tidb-controller-manager-6fb99fdb64-5zssz   1/1     Running   28         13d
tidb-scheduler-9f9c785c5-x42b4             2/2     Running   17         29d
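If either pod is not Running, the operator logs are usually the quickest way to find out why; a minimal sketch:

kubectl -n tidb-admin logs deployment/tidb-controller-manager --tail=50             # controller-manager logs
kubectl -n tidb-admin logs deployment/tidb-scheduler -c tidb-scheduler --tail=50    # tidb-scheduler extender logs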
Method 2: via Helm

2.1 Add the PingCAP repository

helm repo add pingcap https://charts.pingcap.org/

2.2 Create the namespace
kubectl create namespace tidb-admin

2.3 Install tidb-operator
helm install --namespace tidb-admin tidb-operator pingcap/tidb-operator --version v1.3.3 \
  --set operatorImage=registry.cn-beijing.aliyuncs.com/tidb/tidb-operator:v1.3.3 \
  --set tidbBackupManagerImage=registry.cn-beijing.aliyuncs.com/tidb/tidb-backup-manager:v1.3.3 \
  --set scheduler.kubeSchedulerImageName=registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler

2.4 Check that tidb-operator is running
[root@k8s-master tidb-operator]# kubectl get pods --namespace tidb-admin -l app.kubernetes.io/instance=tidb-operator
NAME                                       READY   STATUS    RESTARTS   AGE
tidb-controller-manager-6fb99fdb64-5zssz   1/1     Running   28         13d
tidb-scheduler-9f9c785c5-x42b4             2/2     Running   17         29d
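The Helm release itself can be inspected as well; a minimal sketch:

helm list -n tidb-admin                   # confirm the tidb-operator release is listed as deployed
helm status tidb-operator -n tidb-admin   # show release status and notes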
3. Create PVs for the TiDB Cluster

There are two ways to create PVs: one is manual PV management, known as Static Provisioning; the other is Local PV, using the local-volume-provisioner program from the local-static-provisioner project to create the local storage objects.

Method 1: Manual (Static Provisioning)

3.1 Create the PVs
1. Create the directories on the worker nodes; run this on every compute node:

for i in $(seq 9); do mkdir -p /home/data/pv0$i; done

2. Run the PV creation statements on the K8S master node:

[root@r22 disks]# for i in $(seq 9); do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r22-pv0${i}
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /mnt/disks/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 192.168.10.22
EOF
done
[root@r22 disks]# for i in $(seq 9); do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r23-pv0${i}
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /mnt/disks/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 192.168.10.23
EOF
done
[root@r22 disks]# for i in $(seq 9); do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r23-pv0${i}
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /mnt/disks/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 192.168.10.24
EOF
done

3.2 Check PV status
[root@r22 soft]# kubectl get pv
NAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS    REASON   AGE
tidb-cluster-r22-pv01   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv02   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv03   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv04   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv05   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv06   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv07   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r22-pv08   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r22-pv09   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r23-pv01   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv02   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv03   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv04   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv05   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv06   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv07   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv08   5Gi        RWO            Delete           Available           local-storage            32s
tidb-cluster-r23-pv09   5Gi        RWO            Delete           Available           local-storage            32s
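The PVs above reference storageClassName: local-storage. With static provisioning nothing creates that StorageClass automatically, so if it does not exist yet it can be defined as a plain no-provisioner class (the same definition used by the local-volume-provisioner method below); a minimal sketch:

cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF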
Method 2: via local-volume-provisioner

3.1 Prepare local storage

## Run the following on each K8S compute node in turn.

1. Create the directories:

mkdir /home/data/{data1,data2,data3}

2. Bind-mount each directory onto itself:

mount --bind /home/data/data1/ /home/data/data1/
mount --bind /home/data/data2/ /home/data/data2/
mount --bind /home/data/data3/ /home/data/data3/

### /home/data/data1, /home/data/data2 and /home/data/data3 above are the discovery directories used by local-volume-provisioner; it creates one PV for every subdirectory of a discovery directory.
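Bind mounts made this way do not survive a reboot. If the discovery directories are meant to be permanent, they are usually persisted in /etc/fstab as well; a sketch, assuming the same three directories:

cat >> /etc/fstab <<EOF
/home/data/data1 /home/data/data1 none bind 0 0
/home/data/data2 /home/data/data2 none bind 0 0
/home/data/data3 /home/data/data3 none bind 0 0
EOF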
3.2 Download and configure local-volume-provisioner

1. Download:

wget https://raw.githubusercontent.com/pingcap/tidb-operator/master/examples/local-pv/local-volume-provisioner.yaml
2. Adjust the paths. If your discovery directory differs from the path used in the previous step, modify the ConfigMap and DaemonSet definitions accordingly.

# cat local-volume-provisioner.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: "local-storage"
provisioner: "kubernetes.io/no-provisioner"
volumeBindingMode: "WaitForFirstConsumer"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config
  namespace: kube-system
data:
  setPVOwnerRef: "true"
  nodeLabelsForPV: |
    - kubernetes.io/hostname
  storageClassMap: |
    local-storage:
      hostDir: /home/data
      mountDir: /data
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-volume-provisioner
  namespace: kube-system
  labels:
    app: local-volume-provisioner
spec:
  selector:
    matchLabels:
      app: local-volume-provisioner
  template:
    metadata:
      labels:
        app: local-volume-provisioner
    spec:
      serviceAccountName: local-storage-admin
      containers:
      - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
        name: provisioner
        securityContext:
          privileged: true
        env:
        - name: MY_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: MY_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: JOB_CONTAINER_IMAGE
          value: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - mountPath: /etc/provisioner/config
          name: provisioner-config
          readOnly: true
        - mountPath: /data
          name: local-disks
          mountPropagation: "HostToContainer"
      volumes:
      - name: provisioner-config
        configMap:
          name: local-provisioner-config
      - name: local-disks
        hostPath:
          path: /home/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-storage-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-storage-provisioner-node-clusterrole
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: local-storage-provisioner-node-clusterrole
  apiGroup: rbac.authorization.k8s.io

3.3 Deploy and check local-volume-provisioner
1. Deploy:

[root@k8s-master tidb]# kubectl apply -f local-volume-provisioner.yaml
storageclass.storage.k8s.io/local-storage unchanged
configmap/local-provisioner-config unchanged
daemonset.apps/local-volume-provisioner unchanged
serviceaccount/local-storage-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-pv-binding unchanged
clusterrole.rbac.authorization.k8s.io/local-storage-provisioner-node-clusterrole unchanged
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-node-binding unchanged
2. Check the PV and pod status:

[root@k8s-master tidb]# kubectl get po -n kube-system -l app=local-volume-provisioner && kubectl get pv
NAME                             READY   STATUS    RESTARTS   AGE
local-volume-provisioner-9gp9x   1/1     Running   0          29h
local-volume-provisioner-kghc7   1/1     Running   0          29h
local-volume-provisioner-v2vvt   1/1     Running   0          29h
NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS        CLAIM                        STORAGECLASS    REASON   AGE
local-pv-264b0ff0   446Gi      RWO            Retain           Bound         tidb/tikv-mycluster-tikv-0   local-storage            28h
local-pv-27bc7b00   446Gi      RWO            Retain           Bound         tidb/pd-mycluster-pd-1       local-storage            28h
local-pv-4653df42   446Gi      RWO            Delete           Available                                  local-storage            28h
local-pv-993f4e47   446Gi      RWO            Retain           Bound         tidb/tikv-mycluster-tikv-2   local-storage            28h
local-pv-ad7b1fa4   446Gi      RWO            Delete           Available                                  local-storage            28h
local-pv-b9e5d531   446Gi      RWO            Retain           Bound         tidb/pd-mycluster-pd-0       local-storage            28h
local-pv-bfe87b7    446Gi      RWO            Retain           Bound         tidb/pd-mycluster-pd-2       local-storage            28h
local-pv-dc8fa7ee   446Gi      RWO            Retain           Bound         tidb/tikv-mycluster-tikv-1   local-storage            28h
local-pv-f12d96bb   446Gi      RWO            Delete           Available                                  local-storage            28h
pv-volume           50Gi       RWO            Recycle          Bound         default/www-web-0            slow                     17d
pv-volume1          8Gi        RWO            Recycle          Terminating   default/www-web-1            slow                     17d
task-pv-volume      50Gi       RWO            Retain           Bound         default/pvc-claim            manual
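To confirm that a discovered PV really points at the expected directory and node, kubectl describe shows its local path and node affinity; for example, with one of the PV names above:

kubectl describe pv local-pv-264b0ff0   # check the local path (a subdirectory of /home/data) and Node Affinity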
4. Deploy and test the TiDB Cluster

4.1 Download the required Docker images

docker pull pingcap/pd:v5.4.1
docker pull pingcap/tikv:v5.4.1
docker pull pingcap/tidb:v5.4.1
docker pull pingcap/tidb-binlog:v5.4.1
docker pull pingcap/ticdc:v5.4.1
docker pull pingcap/tiflash:v5.4.1
docker pull pingcap/tidb-monitor-reloader:v1.0.1
docker pull pingcap/tidb-monitor-initializer:v5.4.1
docker pull grafana/grafana:6.0.1
docker pull prom/prometheus:v2.18.1
docker pull busybox:1.26.2
docker save -o pd-v5.4.1.tar pingcap/pd:v5.4.1
docker save -o tikv-v5.4.1.tar pingcap/tikv:v5.4.1
docker save -o tidb-v5.4.1.tar pingcap/tidb:v5.4.1
docker save -o tidb-binlog-v5.4.1.tar pingcap/tidb-binlog:v5.4.1
docker save -o ticdc-v5.4.1.tar pingcap/ticdc:v5.4.1
docker save -o tiflash-v5.4.1.tar pingcap/tiflash:v5.4.1
docker save -o tidb-monitor-reloader-v1.0.1.tar pingcap/tidb-monitor-reloader:v1.0.1
docker save -o tidb-monitor-initializer-v5.4.1.tar pingcap/tidb-monitor-initializer:v5.4.1
docker save -o grafana-6.0.1.tar grafana/grafana:6.0.1
docker save -o prometheus-v2.18.1.tar prom/prometheus:v2.18.1
docker save -o busybox-1.26.2.tar busybox:1.26.2

4.2 Download the TiDB deployment YAML file
wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/examples/advanced/tidb-cluster.yaml

4.3 Edit the relevant configuration
[root@k8s-master tidb]# cat 2.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: mycluster
  namespace: tidb
spec:
  version: "v4.0.8"
  timezone: Asia/Shanghai
  hostNetwork: false
  imagePullPolicy: IfNotPresent
  enableDynamicConfiguration: true
  pd:
    baseImage: pingcap/pd
    config: {}
    replicas: 3
    requests:
      cpu: "100m"
      storage: 12Gi
    mountClusterClientSecret: false
    storageClassName: "local-storage"
  tidb:
    baseImage: pingcap/tidb
    replicas: 2
    requests:
      cpu: "100m"
    config: {}
    service:
      externalTrafficPolicy: Cluster
      type: NodePort
      mysqlNodePort: 30020
      statusNodePort: 30040
  tikv:
    baseImage: pingcap/tikv
    config: {}
    replicas: 3
    requests:
      cpu: "100m"
      storage: 12Gi
    mountClusterClientSecret: false
    storageClassName: "local-storage"
  enablePVReclaim: false
  pvReclaimPolicy: Retain
  tlsCluster: {}
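The monitoring images pulled in step 4.1 (Prometheus, Grafana, tidb-monitor-initializer/reloader) are consumed by a TidbMonitor object, which this file does not define. If monitoring is wanted, a minimal TidbMonitor sketch along these lines could be applied in the same namespace (field names follow the tidbmonitors.pingcap.com CRD installed in step 1; the versions simply match the images pulled above):

cat <<EOF | kubectl apply -n tidb -f -
apiVersion: pingcap.com/v1alpha1
kind: TidbMonitor
metadata:
  name: mycluster
spec:
  clusters:
  - name: mycluster
  prometheus:
    baseImage: prom/prometheus
    version: v2.18.1
  grafana:
    baseImage: grafana/grafana
    version: 6.0.1
  initializer:
    baseImage: pingcap/tidb-monitor-initializer
    version: v5.4.1
  reloader:
    baseImage: pingcap/tidb-monitor-reloader
    version: v1.0.1
  imagePullPolicy: IfNotPresent
EOF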
4.4 Create the TiDB cluster and check its status

[root@k8s-master tidb]# kubectl create namespace tidb
[root@k8s-master tidb]# kubectl apply -f tidb-cluster-sample.yaml
[root@k8s-master tidb]# kubectl get pods -ntidb -o wide
NAME                                 READY   STATUS    RESTARTS   AGE   IP             NODE        NOMINATED NODE   READINESS GATES
mycluster-discovery-9d4fd98f-vjmxr   1/1     Running   0          29h   10.244.2.166   k8s-node2   <none>           <none>
mycluster-pd-0                       1/1     Running   0          29h   10.244.3.49    k8s-node1   <none>           <none>
mycluster-pd-1                       1/1     Running   1          28h   10.244.3.51    k8s-node1   <none>           <none>
mycluster-pd-2                       1/1     Running   0          28h   10.244.3.52    k8s-node1   <none>           <none>
mycluster-tidb-0                     2/2     Running   0          28h   10.244.2.170   k8s-node2   <none>           <none>
mycluster-tidb-1                     2/2     Running   0          28h   10.244.3.50    k8s-node1   <none>           <none>
mycluster-tikv-0                     1/1     Running   0          28h   10.244.2.167   k8s-node2   <none>           <none>
mycluster-tikv-1                     1/1     Running   0          28h   10.244.2.168   k8s-node2   <none>           <none>
mycluster-tikv-2                     1/1     Running   0          28h   10.244.2.169   k8s-node2   <none>           <none>
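Besides the pods, the TidbCluster object itself reports per-component status; a quick check:

kubectl get tidbcluster mycluster -n tidb -o wide   # summarizes PD/TiKV/TiDB readiness for the cluster object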
4.5 Find the TiDB connection address

[root@k8s-master ~]# kubectl get svc -n tidb
NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                          AGE
mycluster-discovery   ClusterIP   10.109.245.112   <none>        10261/TCP,10262/TCP              29h
mycluster-pd          ClusterIP   10.110.11.225    <none>        2379/TCP                         29h
mycluster-pd-peer     ClusterIP   None             <none>        2380/TCP,2379/TCP                29h
mycluster-tidb        NodePort    10.107.15.116    <none>        4000:30020/TCP,10080:30040/TCP   29h
mycluster-tidb-peer   ClusterIP   None             <none>        10080/TCP                        29h
mycluster-tikv-peer   ClusterIP   None             <none>        20160/TCP                        29h
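The login below goes through the NodePort (30020) on a node IP. If NodePort access is inconvenient, a port-forward to the mycluster-tidb service works as well; a minimal sketch:

kubectl port-forward svc/mycluster-tidb 4000:4000 -n tidb &   # forward local port 4000 to the TiDB service
mysql -h127.0.0.1 -P4000 -uroot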
[root@k8s-master tidb]# mysql -uroot -h172.16.4.169 -P30020
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MySQL connection id is 10455
Server version: 5.7.25-TiDB-v4.0.8 TiDB Server (Apache License 2.0) Community Edition, MySQL 5.7 compatible
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MySQL [(none)]>
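As a final smoke test, a couple of statements confirm the cluster is serving SQL (a sketch; the test_k8s database name is arbitrary):

mysql -uroot -h172.16.4.169 -P30020 -e "SELECT tidb_version()\G"
mysql -uroot -h172.16.4.169 -P30020 -e "CREATE DATABASE IF NOT EXISTS test_k8s; CREATE TABLE IF NOT EXISTS test_k8s.t1 (id INT PRIMARY KEY, msg VARCHAR(32)); INSERT INTO test_k8s.t1 VALUES (1, 'hello tidb'); SELECT * FROM test_k8s.t1;"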