From 8b8d20f15843315680718d633cc38c5872855a56 Mon Sep 17 00:00:00 2001
From: Keke Yi <40977455+yikeke@users.noreply.github.com>
Date: Mon, 20 Apr 2020 12:15:32 +0800
Subject: [PATCH] tiup: fix some details in tiup deployment doc (#2291)

---
 how-to/deploy/orchestrated/tiup.md | 601 ++++++++++++++---------------
 1 file changed, 288 insertions(+), 313 deletions(-)

diff --git a/how-to/deploy/orchestrated/tiup.md b/how-to/deploy/orchestrated/tiup.md
index b75beb200bd12..9471b084275f4 100644
--- a/how-to/deploy/orchestrated/tiup.md
+++ b/how-to/deploy/orchestrated/tiup.md
@@ -47,7 +47,8 @@ The software and hardware recommendations for the **target machines** are as fol
 - Under AMD64 architecture, it is recommended to use CentOS 7.3 or above as the operating system.
 - Under ARM architecture, it is recommended to use CentOS 7.6 1810 as the operating system.
 - For the file system of TiKV data files, it is recommended to use EXT4 format. (refer to [Step 3](#step-3-mount-the-data-disk-ext4-filesystem-with-options-on-the-target-machines-that-deploy-tikv)) You can also use CentOS default XFS format.
-- The target machines can communicate with each other on the Intranet. (It is recommended to disable the firewall `firewalld`, or open the required ports between the nodes of the TiDB cluster.)
+- The target machines can communicate with each other on the Intranet. (It is recommended to [disable the firewall `firewalld`](#how-to-stop-the-firewall-service-of-deployment-machines), or enable the required ports between the nodes of the TiDB cluster.)
+- If you need to bind CPU cores, [install the `numactl` tool](#how-to-install-the-numactl-tool).
 - If you need to bind CPU cores, install the `numactl` tool.
 
 For other software and hardware recommendations, refer to [TiDB Software and Hardware Recommendations](/how-to/deploy/hardware-recommendations.md).
@@ -330,26 +331,44 @@ cat topology.yaml
 ```
 
 ```yaml
-# Global variables are applied to all deployments and as the default value of
-# them if the specific deployment value missing.
+# # Global variables are applied to all deployments and as the default value of
+# # them if the specific deployment value missing.
+ global: user: "tidb" ssh_port: 22 deploy_dir: "/tidb-deploy" data_dir: "/tidb-data" +# # Monitored variables are used to all the machine monitored: - deploy_dir: "/tidb-deploy/monitored-9100" - data_dir: "/tidb-data/monitored-9100" - log_dir: "/tidb-deploy/monitored-9100/log" + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" +# # Server configs are used to specify the runtime configuration of TiDB components +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # ^ ^ +# # You can overwrite this configuration via instance-level `config` field server_configs: tidb: log.slow-threshold: 300 - log.level: warn binlog.enable: false binlog.ignore-error: false tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 readpool.storage.use-unified-pool: true readpool.coprocessor.use-unified-pool: true pd: @@ -357,7 +376,10 @@ server_configs: schedule.region-schedule-limit: 2048 schedule.replica-schedule-limit: 64 replication.enable-placement-rules: true - + tiflash: + logger.level: "info" + # pump: + # gc: 7 pd_servers: - host: 10.0.1.4 @@ -365,9 +387,9 @@ pd_servers: # name: "pd-1" # client_port: 2379 # peer_port: 2380 - # deploy_dir: "deploy/pd-2379" - # data_dir: "data/pd-2379" - # log_dir: "deploy/pd-2379/log" + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.pd` values # config: @@ -375,63 +397,112 @@ pd_servers: # schedule.max-merge-region-keys: 200000 - host: 10.0.1.5 - host: 10.0.1.6 + tidb_servers: - host: 10.0.1.7 # ssh_port: 22 # port: 4000 # status_port: 10080 - # deploy_dir: "deploy/tidb-4000" - # log_dir: "deploy/tidb-4000/log" + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.tidb` values # config: - # log.level: warn # log.slow-query-file: tidb-slow-overwrited.log - host: 10.0.1.8 - host: 10.0.1.9 + tikv_servers: - host: 10.0.1.1 # ssh_port: 22 # port: 20160 # status_port: 20180 - # deploy_dir: "deploy/tikv-20160" - # data_dir: "data/tikv-20160" - # log_dir: "deploy/tikv-20160/log" + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.tikv` values - # config: - # server.labels: - # zone: sh - # dc: sha - # rack: rack1 - # host: host1 + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - host: 10.0.1.2 - host: 10.0.1.3 + tiflash_servers: - host: 10.0.1.10 - # ssh_port: 22 - # tcp_port: 9000 - # http_port: 8123 - # flash_service_port: 3930 - # flash_proxy_port: 20170 - # flash_proxy_status_port: 20292 - # metrics_port: 8234 - # deploy_dir: 
deploy/tiflash-9000 - # data_dir: deploy/tiflash-9000/data - # log_dir: deploy/tiflash-9000/log - # numa_node: "0,1" - # # Config is used to overwrite the `server_configs.tiflash` values - # config: - # logger: - # level: "info" - # learner_config: - # log-level: "info" + # ssh_port: 22 + # tcp_port: 9000 + # http_port: 8123 + # flash_service_port: 3930 + # flash_proxy_port: 20170 + # flash_proxy_status_port: 20292 + # metrics_port: 8234 + # deploy_dir: /tidb-deploy/tiflash-9000 + # data_dir: /tidb-data/tiflash-9000 + # log_dir: /tidb-deploy/tiflash-9000/log + # numa_node: "0,1" + # # Config is used to overwrite the `server_configs.tiflash` values + # config: + # logger.level: "info" + # learner_config: + # log-level: "info" + # - host: 10.0.1.15 + # - host: 10.0.1.16 + +# pump_servers: +# - host: 10.0.1.17 +# ssh_port: 22 +# port: 8250 +# deploy_dir: "/tidb-deploy/pump-8249" +# data_dir: "/tidb-data/pump-8249" +# log_dir: "/tidb-deploy/pump-8249/log" +# numa_node: "0,1" +# # Config is used to overwrite the `server_configs.drainer` values +# config: +# gc: 7 +# - host: 10.0.1.18 +# - host: 10.0.1.19 +# drainer_servers: +# - host: 10.0.1.17 +# port: 8249 +# data_dir: "/tidb-data/drainer-8249" +# # if drainer doesn't have checkpoint, use initial commitTS to initial checkpoint +# # will get a latest timestamp from pd if setting to be -1 (default -1) +# commit_ts: -1 +# deploy_dir: "/tidb-deploy/drainer-8249" +# log_dir: "/tidb-deploy/drainer-8249/log" +# numa_node: "0,1" +# # Config is used to overwrite the `server_configs.drainer` values +# config: +# syncer.db-type: "mysql" +# syncer.to.host: "127.0.0.1" +# syncer.to.user: "root" +# syncer.to.password: "" +# syncer.to.port: 3306 +# - host: 10.0.1.19 + monitoring_servers: - host: 10.0.1.4 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + grafana_servers: - host: 10.0.1.4 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + alertmanager_servers: - host: 10.0.1.4 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" + ``` ### Scenario 2: Single machine with multiple instances @@ -452,22 +523,39 @@ You need to fill in the result in the configuration file (as described in the St - Configuration optimization for TiKV - - Make `readpool` thread pool self-adaptive. Configure the `readpool.unified.max-thread-count` parameter to make `readpool.storage` and `readpool.coprocessor` share a unified thread pool, and also enable self-adaptive switches for them. The calculation formula is as follows: + - Make `readpool` thread pool self-adaptive. Configure the `readpool.unified.max-thread-count` parameter to make `readpool.storage` and `readpool.coprocessor` share a unified thread pool, and also enable self-adaptive switches for them. + + - Enable `readpool.storage` and `readpool.coprocessor`: + + ```yaml + readpool.storage.use-unified-pool: true + readpool.coprocessor.use-unified-pool: true + ``` + + - The calculation formula is as follows: ``` readpool.unified.max-thread-count = cores * 0.8 / the number of TiKV instances ``` - - Make storage CF (all RocksDB column families) memory self-adaptive. Configure the `storage.block-cache.capacity` parameter to automatically balance memory usage among CFs. 
The calculation formula is as follows:
-
-    ```
-    storage.block-cache.capacity = (MEM_TOTAL * 0.5 / the number of TiKV instances)
-    ```
+  - Make storage CF (all RocksDB column families) memory self-adaptive. Configure the `storage.block-cache.capacity` parameter to automatically balance memory usage among CFs.
+
+    - The default setting of the `storage.block-cache` parameter is CF self-adaptive. You do not need to modify this configuration:
-  - If multiple TiKV instances are deployed on the same physical disk, you need to modify the `capacity` parameter in `conf/tikv.yml`:
+
+      ```yaml
+      storage.block-cache.shared: true
+      ```
+
+    - The calculation formula is as follows:
+
+      ```
+      storage.block-cache.capacity = (MEM_TOTAL * 0.5 / the number of TiKV instances)
+      ```
+
+  - If multiple TiKV instances are deployed on the same physical disk, you need to add the `capacity` parameter in the TiKV configuration:
 
     ```
-    raftstore.capactiy = the total disk capacity / the number of TiKV instances
+    raftstore.capacity = the total disk capacity / the number of TiKV instances
     ```
 
 - Label scheduling configuration
@@ -514,7 +602,8 @@ You need to fill in the result in the configuration file (as described in the St
 
 > **Note:**
 >
-> You do not need to manually create the `tidb` user, because the TiUP cluster component will automatically create the `tidb` user on the target machines. You can customize the user or keep it the same as the user of the Control Machine.
+> - You do not need to manually create the `tidb` user, because the TiUP cluster component will automatically create the `tidb` user on the target machines. You can customize the user or keep it the same as the user of the Control Machine.
+> - By default, `deploy_dir` of each component uses `<deploy_dir>/<component_name>-<port>` of the global configuration. For example, if you specify the `tidb` port as `4001`, then the TiDB component's default `deploy_dir` is `tidb-deploy/tidb-4001`. Therefore, when you specify non-default ports in multi-instance scenarios, you do not need to specify `deploy_dir` again.
 
 > **Note:**
 >
@@ -531,8 +620,8 @@ cat topology.yaml
 ```
 
 ```yaml
-# # Global variables are applied to all deployments and as the default value of
-# # them if the specific deployment value missing.
+# # Global variables are applied to all deployments and as the default value of
+# # them if the specific deployment value missing.
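+# # A worked example of the calculation formulas described in the section above,
+# # assuming a hypothetical machine with 32 CPU cores, 128 GB of memory,
+# # and 2 TiKV instances deployed on it:
+# #   readpool.unified.max-thread-count = 32 * 0.8 / 2 = 12 (rounded down)
+# #   storage.block-cache.capacity = 128 GB * 0.5 / 2 = 64GB per instance... = "32GB" per instance
+# #   raftstore.capacity = the total disk capacity / 2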
global: user: "tidb" @@ -553,7 +642,7 @@ server_configs: readpool.storage.use-unified-pool: true readpool.coprocessor.use-unified-pool: true storage.block-cache.capacity: "" - raftstore.capactiy: "" + raftstore.capacity: "" pd: replication.location-labels: ["host"] replication.enable-placement-rules: true @@ -610,8 +699,7 @@ tikv_servers: log_dir: "/tidb-deploy/tikv-20160/log" numa_node: "0" config: - server.labels: - host: tikv1 + server.labels: { host: "tikv1" } - host: 10.0.1.1 port: 20161 status_port: 20181 @@ -620,8 +708,7 @@ tikv_servers: log_dir: "/tidb-deploy/tikv-20161/log" numa_node: "1" config: - server.labels: - host: tikv1 + server.labels: { host: "tikv1" } - host: 10.0.1.2 port: 20160 status_port: 20180 @@ -630,8 +717,7 @@ tikv_servers: log_dir: "/tidb-deploy/tikv-20160/log" numa_node: "0" config: - server.labels: - host: tikv2 + server.labels: { host: "tikv2" } - host: 10.0.1.2 port: 20161 status_port: 20181 @@ -640,8 +726,7 @@ tikv_servers: log_dir: "/tidb-deploy/tikv-20161/log" numa_node: "1" config: - server.labels: - host: tikv2 + server.labels: { host: "tikv2" } - host: 10.0.1.3 port: 20160 status_port: 20180 @@ -650,8 +735,7 @@ tikv_servers: log_dir: "/tidb-deploy/tikv-20160/log" numa_node: "0" config: - server.labels: - host: tikv3 + server.labels: { host: "tikv3" } - host: 10.0.1.3 port: 20161 status_port: 20181 @@ -660,8 +744,7 @@ tikv_servers: log_dir: "/tidb-deploy/tikv-20161/log" numa_node: "1" config: - server.labels: - host: tikv3 + server.labels: { host: "tikv3" } tiflash_servers: - host: 10.0.1.10 data_dir: /data1/tiflash/data @@ -725,8 +808,9 @@ cat topology.yaml ``` ```yaml -# Global variables are applied to all deployments and as the default value of -# them if the specific deployment value missing. +# # Global variables are applied to all deployments and as the default value of +# # them if the specific deployment value missing. + global: user: "tidb" ssh_port: 22 @@ -833,7 +917,7 @@ Usage: Flags: -h, --help help for deploy -i, --identity_file string The path of the SSH identity file. If specified, public key authentication will be used. - --user string The user name to login via SSH. The user must has root (or sudo) privilege. (default "root") + --user string The user name to login via SSH. The user must have root (or sudo) privilege. (default "root") -y, --yes Skip confirming the topology ``` @@ -849,78 +933,17 @@ Flags: {{< copyable "shell-regular" >}} ```shell -tiup cluster deploy tidb-test v4.0.0-beta.2 ./topology.yaml --user root -i /home/root/.ssh/gcp_rsa +tiup cluster deploy tidb-test v4.0.0-rc ./topology.yaml --user root -i /home/root/.ssh/gcp_rsa ``` In the above command: - The name of the TiDB cluster deployed through TiUP cluster is `tidb-test`. -- The deployment version is `v4.0.0-beta.2`. +- The deployment version is `v4.0.0-rc`. For other supported versions, see [How to view the TiDB versions supported by TiUP](#how-to-view-the-tidb-versions-supported-by-tiup). - The initialization configuration file is `topology.yaml`. -- Log in to the target machine through the `root` key to complete the cluster deployment, or you can use other users with `ssh` and `sudo` permissions to complete the deployment - -Expected output will include `Started cluster tidb-test successfully`: +- Log in to the target machine through the `root` key to complete the cluster deployment, or you can use other users with `ssh` and `sudo` privileges to complete the deployment. 
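+For example, to deploy as a non-root user instead of `root`, the command might look like the following sketch. The `tidb` user, its passwordless `sudo` privilege on the target machines, and the key path `/home/tidb/.ssh/id_rsa` are assumptions for illustration only; the `--user` and `-i` flags are the ones shown in the help output above:
+
+{{< copyable "shell-regular" >}}
+
+```shell
+tiup cluster deploy tidb-test v4.0.0-rc ./topology.yaml --user tidb -i /home/tidb/.ssh/id_rsa
+```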
-```log -Starting /home/tidb/.tiup/components/cluster/v0.0.9/cluster deploy tidb-test v4.0.0-beta.2 ./topology.yaml --user root --identity_file /home/root/.ssh/gcp_rsa -Please confirm your topology: -TiDB Cluster: tidb-test -TiDB Version: v4.0.0-beta.2 -Type Host Ports Directories ----- ---- ----- ----------- -pd 10.0.1.4 2379/2380 /tidb-deploy/pd-2379,/tidb-data/pd-2379 -pd 10.0.1.5 2379/2380 /tidb-deploy/pd-2379,/tidb-data/pd-2379 -pd 10.0.1.6 2379/2380 /tidb-deploy/pd-2379,/tidb-data/pd-2379 -tikv 10.0.1.1 2060/20080 /tidb-deploy/tikv-2060,/tidb-data/tikv-2060 -tikv 10.0.1.2 2060/20080 /tidb-deploy/tikv-2060,/tidb-data/tikv-2060 -tikv 10.0.1.3 2060/20080 /tidb-deploy/tikv-2060,/tidb-data/tikv-2060 -tidb 10.0.1.7 4000/10080 /tidb-deploy/tidb-4000 -tidb 10.0.1.8 4000/10080 /tidb-deploy/tidb-4000 -tidb 10.0.1.9 4000/10080 /tidb-deploy/tidb-4000 -prometheus 10.0.1.4 9090 /tidb-deploy/prometheus-9090,/tidb-data/prometheus-9090 -grafana 10.0.1.4 3000 /tidb-deploy/grafana-3000 -alertmanager 10.0.1.4 9104/9105 /tidb-deploy/alertmanager-9104,/tidb-data/alertmanager-9104 -Attention: - 1. If the topology is not what you expected, check your yaml file. - 2. Please confirm there is no port/directory conflicts in same host. -Do you want to continue? [y/N]: y -Input SSH password: -+ [ Serial ] - SSHKeyGen: path=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa -+ [Parallel] - Download: component=blackbox_exporter, version=v0.12.0 -+ [Parallel] - Download: component=pd, version=v4.0.0-beta.2 -+ [Parallel] - Download: component=node_exporter, version=v0.17.0 -+ [Parallel] - Download: component=tikv, version=v4.0.0-beta.2 -+ [Parallel] - Download: component=tidb, version=v4.0.0-beta.2 -+ [Parallel] - Download: component=alertmanager, version=v0.17.0 -+ [Parallel] - Download: component=prometheus, version=v2.8.1 -+ [Parallel] - Download: component=grafana, version=v6.1.6 - -......Some log ignored...... - -Checking service state of pd - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 09:54:22 CST; 50s ago -Checking service state of tikv - 10.0.1.1 - Active: active (running) since Saturday 2020-04-04 09:54:35 CST; 38s ago - 10.0.1.2 - Active: active (running) since Saturday 2020-04-04 09:54:38 CST; 37s ago - 10.0.1.3 - Active: active (running) since Saturday 2020-04-04 09:54:41 CST; 35s ago -Checking service state of tidb - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 09:54:56 CST; 22s ago -Checking service state of prometheus - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 09:55:03 CST; 16s ago -Checking service state of grafana - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 09:55:05 CST; 16s ago -Checking service state of alertmanager - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 09:55:08 CST; 14s ago -Started cluster `tidb-test` successfully -``` +At the end of the output log, you will see ```Deployed cluster `tidb-test` successfully```. This indicates that the deployment is successful. ## 4. 
Verify the deployment status of the cluster @@ -958,7 +981,7 @@ Expected output will include the name, deployment user, version, and secret key Starting /home/tidb/.tiup/components/cluster/v0.4.3/cluster list Name User Version Path PrivateKey ---- ---- ------- ---- ---------- -tidb-test tidb v4.0.0-beta.2 /home/tidb/.tiup/storage/cluster/clusters/tidb-test /home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa +tidb-test tidb v4.0.0-rc /home/tidb/.tiup/storage/cluster/clusters/tidb-test /home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa ``` ### Step 7: Check the status of `tidb-test` @@ -969,26 +992,27 @@ tidb-test tidb v4.0.0-beta.2 /home/tidb/.tiup/storage/cluster/clusters tiup cluster display tidb-test ``` -Expected output will include the instance ID, role, host, listening port, and status (started, so the status is `Down`/`inactive`), and directory information: +Expected output will include the instance ID, role, host, listening port, and status (because the cluster is not started yet, so the status is `Down`/`inactive`), and directory information: ```log Starting /home/tidb/.tiup/components/cluster/v0.4.3/cluster display tidb-test TiDB Cluster: tidb-test -TiDB Version: v4.0.0-beta.2 -ID Role Host Ports Status Data Dir Deploy Dir --- ---- ---- ----- ------ -------- ---------- -10.0.1.4:9104 alertmanager 10.0.1.4 9104/9105 inactive /tidb-data/alertmanager-9104 /tidb-deploy/alertmanager-9104 -10.0.1.4:3000 grafana 10.0.1.4 3000 inactive - /tidb-deploy/grafana-3000 -10.0.1.4:2379 pd 10.0.1.4 2379/2380 Down /tidb-data/pd-2379 /tidb-deploy/pd-2379 -10.0.1.5:2379 pd 10.0.1.5 2379/2380 Down /tidb-data/pd-2379 /tidb-deploy/pd-2379 -10.0.1.6:2379 pd 10.0.1.6 2379/2380 Down /tidb-data/pd-2379 /tidb-deploy/pd-2379 -10.0.1.4:9090 prometheus 10.0.1.4 9090 inactive /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090 -10.0.1.7:4000 tidb 10.0.1.7 4000/10080 Down - /tidb-deploy/tidb-4000 -10.0.1.8:4000 tidb 10.0.1.8 4000/10080 Down - /tidb-deploy/tidb-4000 -10.0.1.9:4000 tidb 10.0.1.9 4000/10080 Down - /tidb-deploy/tidb-4000 -10.0.1.1:20160 tikv 10.0.1.1 20160/20180 Down /tidb-data/tikv-20160 /tidb-deploy/tikv-2060 -10.0.1.2:20160 tikv 10.0.1.2 20160/20180 Down /tidb-data/tikv-20160 /tidb-deploy/tikv-2060 -10.0.1.3:20160 tikv 10.0.1.4 20160/20180 Down /tidb-data/tikv-20160 /tidb-deploy/tikv-2060 +TiDB Version: v4.0.0-rc +ID Role Host Ports Status Data Dir Deploy Dir +-- ---- ---- ----- ------ -------- ---------- +10.0.1.4:9104 alertmanager 10.0.1.4 9104/9105 inactive /tidb-data/alertmanager-9104 /tidb-deploy/alertmanager-9104 +10.0.1.4:3000 grafana 10.0.1.4 3000 inactive - /tidb-deploy/grafana-3000 +10.0.1.4:2379 pd 10.0.1.4 2379/2380 Down /tidb-data/pd-2379 /tidb-deploy/pd-2379 +10.0.1.5:2379 pd 10.0.1.5 2379/2380 Down /tidb-data/pd-2379 /tidb-deploy/pd-2379 +10.0.1.6:2379 pd 10.0.1.6 2379/2380 Down /tidb-data/pd-2379 /tidb-deploy/pd-2379 +10.0.1.4:9090 prometheus 10.0.1.4 9090 inactive /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090 +10.0.1.7:4000 tidb 10.0.1.7 4000/10080 Down - /tidb-deploy/tidb-4000 +10.0.1.8:4000 tidb 10.0.1.8 4000/10080 Down - /tidb-deploy/tidb-4000 +10.0.1.9:4000 tidb 10.0.1.9 4000/10080 Down - /tidb-deploy/tidb-4000 +10.0.1.10:9000 tiflash 10.0.1.4 9000/8123/3930/20170/20292/8234 Down /tidb-data-lzs/tiflash-10000 /tidb-deploy-lzs/tiflash-10000 +10.0.1.1:20160 tikv 10.0.1.1 20160/20180 Down /tidb-data/tikv-20160 /tidb-deploy/tikv-2060 +10.0.1.2:20160 tikv 10.0.1.2 20160/20180 Down /tidb-data/tikv-20160 /tidb-deploy/tikv-2060 +10.0.1.3:20160 tikv 
10.0.1.4 20160/20180 Down /tidb-data/tikv-20160 /tidb-deploy/tikv-2060 ``` ## 5. Start the cluster @@ -1001,53 +1025,7 @@ ID Role Host Ports Status Data Dir tiup cluster start tidb-test ``` -If the expected output returns `Started cluster tidb-test successfully`, it means that the startup is successful: - -```log -Starting /home/tidb/.tiup/components/cluster/v0.4.3/cluster start tidb-test -+ [ Serial ] - SSHKeySet: privateKey=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa, publicKey=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa.pub -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.1 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.2 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [ Serial ] - ClusterOperate: operation=StartOperation, options={Roles:[] Nodes:[] Force:false} -Starting component pd - Starting instance pd 10.0.1.4:2379 - Start pd 10.0.1.4:2379 success -Starting component node_exporter - Starting instance 10.0.1.4 - Start 10.0.1.4 success - -......Some log ignored...... - - Checking service state of pd - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 01:08:04 CST; 43s ago -Checking service state of tikv - 10.0.1.1 - Active: active (running) since Saturday 2020-04-04 01:08:15 CST; 33s ago - 10.0.1.2 - Active: active (running) since Saturday 2020-04-04 01:08:18 CST; 31s ago - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 01:08:21 CST; 29s ago -Checking service state of tidb - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 01:08:36 CST; 16s ago -Checking service state of prometheus - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 01:08:39 CST; 15s ago -Checking service state of grafana - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 01:08:41 CST; 14s ago -Checking service state of alertmanager - 10.0.1.4 - Active: active (running) since Saturday 2020-04-04 01:08:44 CST; 12s ago -Started cluster `tidb-test` successfully -``` +If the output log includes ```Started cluster `tidb-test` successfully```, it means that the startup is successful. ## 6. 
Verify the running status of the cluster @@ -1064,28 +1042,29 @@ Expected output (if the `Status` is `Up`, the cluster status is normal): ```log Starting /home/tidb/.tiup/components/cluster/v0.4.3/cluster display tidb-test TiDB Cluster: tidb-test -TiDB Version: v4.0.0-beta.2 -ID Role Host Ports Status Data Dir Deploy Dir --- ---- ---- ----- ------ -------- ---------- -10.0.1.4:9104 alertmanager 10.0.1.4 9104/9105 Up /tidb-data/alertmanager-9104 /tidb-deploy/alertmanager-9104 -10.0.1.4:3000 grafana 10.0.1.4 3000 Up - /tidb-deploy/grafana-3000 -10.0.1.4:2379 pd 10.0.1.4 2379/2380 Healthy|L /tidb-data/pd-2379 /tidb-deploy/pd-2379 -10.0.1.5:2379 pd 10.0.1.5 2379/2380 Healthy /tidb-data/pd-2379 /tidb-deploy/pd-2379 -10.0.1.6:2379 pd 10.0.1.6 2379/2380 Healthy /tidb-data/pd-2379 /tidb-deploy/pd-2379 -10.0.1.4:9090 prometheus 10.0.1.4 9090 Up /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090 -10.0.1.7:4000 tidb 10.0.1.7 4000/10080 Up - /tidb-deploy/tidb-4000 -10.0.1.8:4000 tidb 10.0.1.8 4000/10080 Up - /tidb-deploy/tidb-4000 -10.0.1.9:4000 tidb 10.0.1.9 4000/10080 Up - /tidb-deploy/tidb-4000 -10.0.1.1:2060 tikv 10.0.1.1 2060/20080 Up /tidb-data/tikv-2060 /tidb-deploy/tikv-2060 -10.0.1.2:2060 tikv 10.0.1.2 2060/20080 Up /tidb-data/tikv-2060 /tidb-deploy/tikv-2060 -10.0.1.3:2060 tikv 10.0.1.4 2060/20080 Up /tidb-data/tikv-2060 /tidb-deploy/tikv-2060 +TiDB Version: v4.0.0-rc +ID Role Host Ports Status Data Dir Deploy Dir +-- ---- ---- ----- ------ -------- ---------- +10.0.1.4:9104 alertmanager 10.0.1.4 9104/9105 Up /tidb-data/alertmanager-9104 /tidb-deploy/alertmanager-9104 +10.0.1.4:3000 grafana 10.0.1.4 3000 Up - /tidb-deploy/grafana-3000 +10.0.1.4:2379 pd 10.0.1.4 2379/2380 Healthy|L /tidb-data/pd-2379 /tidb-deploy/pd-2379 +10.0.1.5:2379 pd 10.0.1.5 2379/2380 Healthy /tidb-data/pd-2379 /tidb-deploy/pd-2379 +10.0.1.6:2379 pd 10.0.1.6 2379/2380 Healthy /tidb-data/pd-2379 /tidb-deploy/pd-2379 +10.0.1.4:9090 prometheus 10.0.1.4 9090 Up /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090 +10.0.1.7:4000 tidb 10.0.1.7 4000/10080 Up - /tidb-deploy/tidb-4000 +10.0.1.8:4000 tidb 10.0.1.8 4000/10080 Up - /tidb-deploy/tidb-4000 +10.0.1.9:4000 tidb 10.0.1.9 4000/10080 Up - /tidb-deploy/tidb-4000 +10.0.1.10:9000 tiflash 10.0.1.4 9000/8123/3930/20170/20292/8234 Up /tidb-data-lzs/tiflash-9000 /tidb-deploy-lzs/tiflash-9000 +10.0.1.1:2060 tikv 10.0.1.1 2060/20080 Up /tidb-data/tikv-2060 /tidb-deploy/tikv-2060 +10.0.1.2:2060 tikv 10.0.1.2 2060/20080 Up /tidb-data/tikv-2060 /tidb-deploy/tikv-2060 +10.0.1.3:2060 tikv 10.0.1.4 2060/20080 Up /tidb-data/tikv-2060 /tidb-deploy/tikv-2060 ``` ### Step 10: Check TiDB cluster status through TiDB Dashboard and Grafana #### Check TiDB cluster status through TiDB Dashboard -Log in to TiDB Dashboard via `{pd-leader-ip}:2379/dashboard`: +Log in to TiDB Dashboard via `{pd-leader-ip}:2379/dashboard` with the `root` user and password (empty by default) of the TiDB database. If you have modified the password of the `root` user, then enter the modified password. ![TiDB-Dashboard](/media/tiup/tidb-dashboard.png) @@ -1103,7 +1082,7 @@ Click Overview monitoring page to check TiDB port and load information: ![Grafana-overview](/media/tiup/grafana-overview.png) -### Log in to the database to execute simple SQL statements +### Step 11: Log in to the database to execute simple SQL statements > **Note:** > @@ -1209,50 +1188,7 @@ To stop the `tidb-test` cluster, run the following command: tiup cluster stop tidb-test ``` -The expected output is as follows. 
`Stopped cluster tidb-test successfully` indicates the cluster is successfully stopped. - -```log -Starting /home/tidb/.tiup/components/cluster/v0.4.3/cluster stop tidb-test -+ [ Serial ] - SSHKeySet: privateKey=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa, publicKey=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa.pub -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.5 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.5 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.2 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.1 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [ Serial ] - ClusterOperate: operation=StopOperation, options={Roles:[] Nodes:[] Force:false} -Stopping component alertmanager - Stopping instance 10.0.1.4 - Stop alertmanager 10.0.1.4:9104 success - -...... Some log ignored ...... - -Checking service state of pd - 10.0.1.5 - Active: inactive (dead) since Saturday 2020-04-04 15:35:42 CST; 15s ago -Checking service state of tikv - 10.0.1.1 - Active: inactive (dead) since Saturday 2020-04-04 15:35:21 CST; 38s ago - 10.0.1.2 - Active: inactive (dead) since Saturday 2020-04-04 15:35:23 CST; 37s ago - 10.0.1.3 - Active: inactive (dead) since Saturday 2020-04-04 15:35:24 CST; 37s ago -Checking service state of tidb - 10.0.1.5 - Active: inactive (dead) since Saturday 2020-04-04 15:35:15 CST; 49s ago -Checking service state of prometheus - 10.0.1.4 - Active: inactive (dead) since Saturday 2020-04-04 15:35:12 CST; 53s ago -Checking service state of grafana - 10.0.1.4 - Active: inactive (dead) since Saturday 2020-04-04 15:35:10 CST; 56s ago -Checking service state of alertmanager - 10.0.1.4 - Active: inactive (dead) since Saturday 2020-04-04 15:35:09 CST; 59s ago -Stopped cluster `tidb-test` successfully -``` +If the output log includes ```Stopped cluster `tidb-test` successfully```, then the cluster is successfully stopped. ## Destroy a TiDB cluster using TiUP @@ -1268,48 +1204,7 @@ To destroy the `tidb-test` cluster, including data and services, run the followi tiup cluster destroy tidb-test ``` -The expected output is as follows. `Destroy cluster tidb-test successfully` indicates the cluster is successfully destroyed. - -```log -Starting /home/tidb/.tiup/components/cluster/v0.4.3/cluster destroy tidb-test -This operation will destroy TiDB v4.0.0-beta.2 cluster tidb-test and its data. -Do you want to continue? [y/N]: y -Destroying cluster... -+ [ Serial ] - SSHKeySet: privateKey=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa, publicKey=/home/tidb/.tiup/storage/cluster/clusters/tidb-test/ssh/id_rsa.pub -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.2 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.1 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [Parallel] - UserSSH: user=tidb, host=10.0.1.4 -+ [ Serial ] - ClusterOperate: operation=StopOperation, options={Roles:[] Nodes:[] Force:false} -Stopping component alertmanager - Stopping instance 10.0.1.4 - Stop alertmanager 10.0.1.4:9104 success - -...... Some log ignored ...... 
- - Destroy monitored on 10.0.1.1 success -Destroying monitored 10.0.1.2 -Destroying monitored - Destroying instance 10.0.1.2 -Destroy monitored on 10.0.1.2 success -Destroying monitored 10.0.1.4 -Destroying monitored - Destroying instance 10.0.1.4 -Destroy monitored on 10.0.1.4 success -Destroying component pd -Destroying instance 10.0.1.4 -Deleting paths on 10.0.1.4: /tidb-data/pd-2379 /tidb-deploy/pd-2379 /tidb-deploy/pd-2379/log /etc/systemd/system/pd-2379.service -Destroy 10.0.1.4 success -Destroying monitored 10.0.1.4 -Destroying monitored - Destroying instance 10.0.1.4 -Destroy monitored on 10.0.1.4 success -Destroyed cluster `tidb-test` successfully -``` +If the output log includes ```Destroy cluster `tidb-test` successfully```, then the cluster is successfully destroyed. ## TiUP Deployment FAQs @@ -1385,7 +1280,6 @@ tidb_servers: numa_node: "0,1" # Config is used to overwrite the `server_configs.tidb` values config: - log.level: warn log.slow-query-file: tidb-slow-overwritten.log ``` @@ -1406,21 +1300,23 @@ tidb_servers: ```yaml server_configs: tidb: - binlog.enable: false - binlog.ignore-error: false + log.slow-threshold: 300 + binlog.enable: false + binlog.ignore-error: false tikv: - readpool.storage.low-concurrency: 8 - server.labels: - zone: sh - dc: sha - rack: rack1 - host: host1 + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: true + readpool.coprocessor.use-unified-pool: true pd: - replication.enable-placement-rules: true - label-property: - reject-leader: - - key: "dc" - value: "bja" + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + replication.enable-placement-rules: true pump: gc: 7 ``` @@ -1437,6 +1333,85 @@ tidb_servers: log_dir: "deploy/monitored-9100/log" ``` +### How to view the TiDB versions supported by TiUP + +Execute the following command to view the TiDB versions that TiUP supports: + +```shell +tiup list tidb --refresh +``` + +In the following output: + +- `Version` is the supported TiDB version +- `Installed` is the currently installed version +- `Release` is the release time +- `Platforms` is the supported platform + +```log +Available versions for tidb (Last Modified: 2020-02-26T15:20:35+08:00): +Version Installed Release: Platforms +------- --------- -------- --------- +master 2020-03-18T08:39:11.753360611+08:00 linux/amd64,darwin/amd64 +v3.0.1 2020-04-07T17:53:00+08:00 linux/amd64,darwin/amd64 +v3.0.2 2020-04-08T23:38:37+08:00 linux/amd64,darwin/amd64 +v3.0.3 2020-03-27T22:41:16.279411145+08:00 linux/amd64,darwin/amd64 +v3.0.4 2020-03-27T22:43:35.362550386+08:00 linux/amd64,darwin/amd64 +v3.0.5 2020-03-27T22:46:01.016467032+08:00 linux/amd64,darwin/amd64 +v3.0.6 2020-03-13T11:55:17.941641963+08:00 linux/amd64,darwin/amd64 +v3.0.7 2020-03-13T12:02:22.538128662+08:00 linux/amd64,darwin/amd64 +v3.0.8 2020-03-17T14:03:29.575448277+08:00 linux/amd64,darwin/amd64 +v3.0.9 2020-03-13T13:02:15.947260351+08:00 linux/amd64,darwin/amd64 +v3.0.10 2020-03-13T14:11:53.774527401+08:00 linux/amd64,darwin/amd64 +v3.0.11 2020-03-13T15:31:06.94547891+08:00 linux/amd64,darwin/amd64 +v3.0.12 2020-03-20T11:36:28.18950808+08:00 linux/amd64,darwin/amd64 +v3.1.0-beta.2 2020-03-19T00:48:48.266468238+08:00 linux/amd64,darwin/amd64 +v3.1.0-rc 2020-04-02T23:43:17.456327834+08:00 linux/amd64,darwin/amd64 +v4.0.0-beta 
2020-03-13T12:43:55.508190493+08:00 linux/amd64,darwin/amd64 +v4.0.0-beta.1 2020-03-13T12:30:08.913759828+08:00 linux/amd64,darwin/amd64 +v4.0.0-beta.2 2020-03-18T22:52:00.830626492+08:00 linux/amd64,darwin/amd64 +v4.0.0-rc YES 2020-04-09T00:10:32+08:00 linux/amd64,darwin/amd64 +nightly 2020-04-10T08:42:23+08:00 darwin/amd64,linux/amd64 +``` + +### How to view the TiDB components supported by TiUP + +Execute the following command to view the TiDB components that TiUP supports: + +```shell +tiup list +``` + +In the following output: + +- `Name` is the supported component name +- `Installed` is whether or not the component is installed +- `Platforms` is the supported platform +- `Description` is the component description + +```log +Available components (Last Modified: 2020-02-27T15:20:35+08:00): +Name Installed Platforms Description +---- --------- --------- ----------- +tidb YES(v4.0.0-rc) darwin/amd64,linux/amd64 TiDB is an open source distributed HTAP database compatible with the MySQL protocol +tikv YES(v4.0.0-rc) darwin/amd64,linux/amd64 Distributed transactional key-value database, originally created to complement TiDB +pd YES(v4.0.0-rc) darwin/amd64,linux/amd64 PD is the abbreviation for Placement Driver. It is used to manage and schedule the TiKV cluster +playground YES(v0.0.5) darwin/amd64,linux/amd64 Bootstrap a local TiDB cluster +client darwin/amd64,linux/amd64 A simple mysql client to connect TiDB +prometheus darwin/amd64,linux/amd64 The Prometheus monitoring system and time series database. +tpc darwin/amd64,linux/amd64 A toolbox to benchmark workloads in TPC +package darwin/amd64,linux/amd64 A toolbox to package tiup component +grafana linux/amd64,darwin/amd64 Grafana is the open source analytics & monitoring solution for every database +alertmanager darwin/amd64,linux/amd64 Prometheus alertmanager +blackbox_exporter darwin/amd64,linux/amd64 Blackbox prober exporter +node_exporter darwin/amd64,linux/amd64 Exporter for machine metrics +pushgateway darwin/amd64,linux/amd64 Push acceptor for ephemeral and batch jobs +tiflash linux/amd64 The TiFlash Columnar Storage Engine +drainer linux/amd64 The drainer componet of TiDB binlog service +pump linux/amd64 The pump componet of TiDB binlog service +cluster YES(v0.4.6) linux/amd64,darwin/amd64 Deploy a TiDB cluster for production +``` + ### How to check whether the NTP service is normal 1. Run the following command. If it returns `running`, then the NTP service is running.