diff --git a/config-templates/complex-cdc.yaml b/config-templates/complex-cdc.yaml new file mode 100644 index 0000000000000..e4267e371912a --- /dev/null +++ b/config-templates/complex-cdc.yaml @@ -0,0 +1,126 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. 
+ +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. 
+ # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + + - host: 10.0.1.8 + - host: 10.0.1.9 + +cdc_servers: + - host: 10.0.1.1 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + - host: 10.0.1.2 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + - host: 10.0.1.3 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/complex-mini.yaml b/config-templates/complex-mini.yaml new file mode 100644 index 0000000000000..4a4807867a2ac --- /dev/null +++ b/config-templates/complex-mini.yaml @@ -0,0 +1,113 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. 
+# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + binlog.enable: false + binlog.ignore-error: false + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. 
+ # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/complex-multi-instance.yaml b/config-templates/complex-multi-instance.yaml new file mode 100644 index 0000000000000..2513d0b3869ca --- /dev/null +++ b/config-templates/complex-multi-instance.yaml @@ -0,0 +1,150 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. 
+global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + deploy_dir: "/tidb-deploy/monitored-9100" + data_dir: "/tidb-data-monitored-9100" + log_dir: "/tidb-deploy/monitored-9100/log" + +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + readpool.unified.max-thread-count: + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + storage.block-cache.capacity: "" + raftstore.capacity: "" + pd: + replication.location-labels: ["host"] + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.1 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + - host: 10.0.1.2 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.2 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + - host: 10.0.1.3 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.3 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + +tikv_servers: + - host: 10.0.1.7 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.7 + port: 20161 + status_port: 20181 + 
deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.8 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.8 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.9 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv3" } + - host: 10.0.1.9 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv3" } + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/complex-tidb-binlog.yaml b/config-templates/complex-tidb-binlog.yaml new file mode 100644 index 0000000000000..36db4f2ca451b --- /dev/null +++ b/config-templates/complex-tidb-binlog.yaml @@ -0,0 +1,152 @@ +# # Global variables are applied to all deployments and used as the 
default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + binlog.enable: true + binlog.ignore-error: true + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. 
+ # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + gc: 7 + - host: 10.0.1.2 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + gc: 7 + - host: 10.0.1.3 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + gc: 7 +drainer_servers: + - host: 10.0.1.12 + port: 8249 + data_dir: "/tidb-data/drainer-8249" + # If drainer doesn't have a checkpoint, use initial commitTS as the initial checkpoint. + # Will get a latest timestamp from pd if commit_ts is set to -1 (the default value). 
+ commit_ts: -1 + deploy_dir: "/tidb-deploy/drainer-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + syncer.db-type: "tidb" + syncer.to.host: "10.0.1.12" + syncer.to.user: "root" + syncer.to.password: "" + syncer.to.port: 4000 + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/complex-tiflash.yaml b/config-templates/complex-tiflash.yaml new file mode 100644 index 0000000000000..99cf55e0f52b5 --- /dev/null +++ b/config-templates/complex-tiflash.yaml @@ -0,0 +1,131 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. 
+# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + replication.enable-placement-rules: true + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. 
+ # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.8 + - host: 10.0.1.9 +tikv_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.2 + - host: 10.0.1.3 + +tiflash_servers: + - host: 10.0.1.11 + data_dir: /tidb-data/tiflash-9000 + deploy_dir: /tidb-deploy/tiflash-9000 + # ssh_port: 22 + # tcp_port: 9000 + # http_port: 8123 + # flash_service_port: 3930 + # flash_proxy_port: 20170 + # flash_proxy_status_port: 20292 + # metrics_port: 8234 + # deploy_dir: /tidb-deploy/tiflash-9000 + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tiflash` values. + # config: + # logger.level: "info" + # learner_config: + # log-level: "info" + # - host: 10.0.1.12 + # - host: 10.0.1.13 + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/geo-redundancy-deployment.yaml b/config-templates/geo-redundancy-deployment.yaml new file mode 100644 index 0000000000000..9f18ac900a60c --- /dev/null +++ b/config-templates/geo-redundancy-deployment.yaml @@ -0,0 +1,108 @@ +# Tip: PD priority needs to be 
manually set using the PD-ctl client tool. such as, member Leader_priority PD-name numbers. +# Global variables are applied to all deployments and used as the default value of +# the deployments if a specific deployment value is missing. + +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + deploy_dir: "/tidb-deploy/monitored-9100" +server_configs: + tidb: + log.level: debug + log.slow-query-file: tidb-slow.log + tikv: + server.grpc-compression-type: gzip + readpool.storage.use-unified-pool: true + readpool.storage.low-concurrency: 8 + pd: + replication.location-labels: ["zone","dc","rack","host"] + replication.max-replicas: 5 + label-property: + reject-leader: + - key: "dc" + value: "sha" +pd_servers: + - host: 10.0.1.6 + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + - host: 10.0.1.10 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + - host: 10.0.1.4 + - host: 10.0.1.5 +tikv_servers: + - host: 10.0.1.11 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host1 + - host: 10.0.1.12 + ssh_port: 22 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host2 + - host: 10.0.1.13 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: bj + dc: bjb + rack: rack1 + host: host1 + - host: 10.0.1.14 + ssh_port: 22 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + config: + server.labels: + zone: bj + dc: bjb + rack: rack1 + host: host2 + - host: 10.0.1.15 + ssh_port: 22 + port: 20160 + deploy_dir: 
"/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: sh + dc: sha + rack: rack1 + host: host1 + readpool.storage.use-unified-pool: true + readpool.storage.low-concurrency: 10 + raftstore.raft-min-election-timeout-ticks: 1000 + raftstore.raft-max-election-timeout-ticks: 1020 +monitoring_servers: + - host: 10.0.1.16 +grafana_servers: + - host: 10.0.1.16 \ No newline at end of file diff --git a/config-templates/simple-cdc.yaml b/config-templates/simple-cdc.yaml new file mode 100644 index 0000000000000..4d820082de20f --- /dev/null +++ b/config-templates/simple-cdc.yaml @@ -0,0 +1,36 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +cdc_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-mini.yaml b/config-templates/simple-mini.yaml new file mode 100644 index 0000000000000..57e48fd2d7416 --- /dev/null +++ b/config-templates/simple-mini.yaml @@ -0,0 +1,31 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. 
+global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-multi-instance.yaml b/config-templates/simple-multi-instance.yaml new file mode 100644 index 0000000000000..735b3be1af61a --- /dev/null +++ b/config-templates/simple-multi-instance.yaml @@ -0,0 +1,95 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + tikv: + readpool.unified.max-thread-count: + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + storage.block-cache.capacity: "" + raftstore.capacity: "" + pd: + replication.location-labels: ["host"] + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.1 + port: 4001 + status_port: 10081 + numa_node: "1" + - host: 10.0.1.2 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.2 + port: 4001 + status_port: 10081 + numa_node: "1" + - host: 10.0.1.3 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.3 + port: 4001 + status_port: 10081 + numa_node: "1" + +tikv_servers: + - host: 10.0.1.7 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.7 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.8 + port: 
20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.8 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.9 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv3" } + - host: 10.0.1.9 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv3" } + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-tidb-binlog.yaml b/config-templates/simple-tidb-binlog.yaml new file mode 100644 index 0000000000000..558023e003561 --- /dev/null +++ b/config-templates/simple-tidb-binlog.yaml @@ -0,0 +1,47 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + tidb: + binlog.enable: true + binlog.ignore-error: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +drainer_servers: + - host: 10.0.1.12 + config: + syncer.db-type: "tidb" + syncer.to.host: "10.0.1.12" + syncer.to.user: "root" + syncer.to.password: "" + syncer.to.port: 4000 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-tiflash.yaml b/config-templates/simple-tiflash.yaml new file mode 100644 index 0000000000000..1e20244408541 --- /dev/null +++ b/config-templates/simple-tiflash.yaml @@ -0,0 +1,40 @@ +# # Global 
variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + pd: + replication.enable-placement-rules: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +tikv_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tiflash_servers: + - host: 10.0.1.11 + data_dir: /tidb-data/tiflash-9000 + deploy_dir: /tidb-deploy/tiflash-9000 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md new file mode 100644 index 0000000000000..84ccc5ffe860f --- /dev/null +++ b/geo-distributed-deployment-topology.md @@ -0,0 +1,87 @@ +--- +title: Geo-distributed Deployment topology +summary: Learn the geo-distributed deployment topology of TiDB. +category: how-to +--- + +# Geo-distributed Deployment topology + +This document takes the typical architecture of three data centers (DC) in two cities as an example, and introduces the geo-distributed deployment architecture and the key configuration. + +## Topology information + +| Instance | Count | Physical machine configuration | BJ IP | SH IP | Configuration | +| :-- | :-- | :-- | :-- | :-- | :-- | +| TiDB | 3 | 16 VCore 32GB * 1 | 10.0.1.1
10.0.1.2
10.0.1.3
10.0.1.4 | 10.0.1.5 | Default port
Global directory configuration | +| PD | 5 | 4 VCore 8GB * 1 | 10.0.1.6
10.0.1.7
10.0.1.8
10.0.1.9 | 10.0.1.10 | Default port
Global directory configuration | +| TiKV | 5 | 16 VCore 32GB 2TB (nvme ssd) * 1 | 10.0.1.11
10.0.1.12
10.0.1.13
10.0.1.14 | 10.0.1.15 | Default port
Global directory configuration | +| Monitoring & Grafana | 1 | 4 VCore 8GB * 1 500GB (ssd) | 10.0.1.16 | | Default port
Global directory configuration | + +### Topology templates + +- [The geo-distributed topology template](https://github.com/pingcap/docs/blob/master/config-templates/geo-redundancy-deployment.yaml) + +### Key parameters + +This section describes the key parameter configuration of the TiDB geo-distributed deployment. + +#### TiKV parameters + +- The gRPC compression format (`none` by default): + + To increase the transmission speed of gRPC packages between geo-distributed target nodes, set this parameter to `gzip`. + + ```yaml + server.grpc-compression-type: gzip + ``` + +- The label configuration: + + Since TiKV is deployed across different data centers, if the physical machines go down, the Region Group might lose three of the default five replicas, which causes the cluster unavailability. To address this issue, you can configure the labels to enable the smart scheduling of PD, which ensures that the Region Group does not allow three replicas to be located in TiKV instances on the same machine in the same cabinet of the same data center. + +- The TiKV configuration: + + The same host-level label information is configured for the same physical machine. + + ```yaml + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host2 + ``` + +- To prevent remote TiKV nodes from launching unnecessary Raft elections, it is required to increase the minimum and maximum number of ticks that the remote TiKV nodes need to launch an election. The two parameters are set to `0` by default. + + ```yaml + raftstore.raft-min-election-timeout-ticks: 1000 + raftstore.raft-max-election-timeout-ticks: 1020 + ``` + +#### PD parameters + +- The PD metadata information records the topology of the TiKV cluster. 
PD schedules the Raft Group replicas on the following four dimensions: + + ```yaml + replication.location-labels: ["zone","dc","rack","host"] + ``` + +- To ensure high availability of the cluster, adjust the number of Raft Group replicas to be `5`: + + ```yaml + replication.max-replicas: 5 + ``` + +- Forbid the remote TiKV Raft replica being elected as Leader: + + ```yaml + label-property: + reject-leader: + - key: "dc" + value: "sha" + ``` + +> **Note:** +> +> You do not need to manually create the `tidb` user in the configuration file. The TiUP cluster component automatically creates the `tidb` user on the target machines. You can customize the user, or keep the user consistent with the control machine. diff --git a/hybrid-deployment-topology.md b/hybrid-deployment-topology.md new file mode 100644 index 0000000000000..49c8f289d1b7b --- /dev/null +++ b/hybrid-deployment-topology.md @@ -0,0 +1,105 @@ +--- +title: Hybrid Deployment Topology +summary: Learn the hybrid deployment topology of TiDB clusters. +category: how-to +--- + +# Hybrid Deployment Topology + +This document describes the topology and key parameters of the TiKV and TiDB hybrid deployment. + +The hybrid deployment is usually used in the following scenario: + +The deployment machine has multiple CPU processors with sufficient memory. To improve the utilization rate of the physical machine resources, multiple instances can be deployed on a single machine, that is, TiDB and TiKV's CPU resources are isolated through NUMA node bindings. PD and Prometheus are deployed together, but their data directories need to use separate file systems. + +## Topology information + +| Instance | Count | Physical machine configuration | IP | Configuration | +| :-- | :-- | :-- | :-- | :-- | +| TiDB | 6 | 32 VCore 64GB | 10.0.1.1
10.0.1.2
10.0.1.3 | Configure NUMA to bind CPU cores | +| PD | 3 | 16 VCore 32 GB | 10.0.1.4
10.0.1.5
10.0.1.6 | Configure the `location_labels` parameter | +| TiKV | 6 | 32 VCore 64GB | 10.0.1.7<br/>
10.0.1.8
10.0.1.9 | 1. Separate the instance-level port and status_port;
2. Configure the global parameters `readpool`, `storage` and `raftstore`;
3. Configure labels of the instance-level host;
4. Configure NUMA to bind CPU cores | +| Monitoring & Grafana | 1 | 4 VCore 8GB * 1 500GB (ssd) | 10.0.1.10 | Default configuration | + +### Topology templates + +- [The simple template for the hybrid deployment](https://github.com/pingcap/docs/blob/master/config-templates/simple-multi-instance.yaml) +- [The complex template for the hybrid deployment](https://github.com/pingcap/docs/blob/master/config-templates/complex-multi-instance.yaml) + +### Key parameters + +This section introduces the key parameters when you deploy multiple instances on a single machine, which is mainly used in scenarios when multiple instances of TiDB and TiKV are deployed on a single machine. You need to fill in the results into the configuration template according to the calculation methods provided below. + +- Optimize the configuration of TiKV + + - To configure `readpool` to be self-adaptive to the thread pool. By configuring the `readpool.unified.max-thread-count` parameter, you can make `readpool.storage` and `readpool.coprocessor` share a unified thread pool, and set the self-adaptive switch respectively. + + - Enable `readpool.storage` and `readpool.coprocessor`: + + ```yaml + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + ``` + + - The calculation method: + + ``` + readpool.unified.max-thread-count = cores * 0.8 / the number of TiKV instances + ``` + + - To configure the storage CF (all RocksDB column families) to be self-adaptive to memory. By configuring the `storage.block-cache.capacity` parameter, you can make CF automatically balance the memory usage. + + - `storage.block-cache` enables the CF self-adaptation by default. You do not need to modify it. 
+ + ```yaml + storage.block-cache.shared: true + ``` + + - The calculation method: + + ``` + storage.block-cache.capacity = (MEM_TOTAL * 0.5 / the number of TiKV instances) + ``` + + - If multiple TiKV instances are deployed on the same physical disk, add the `capacity` parameter in the TiKV configuration: + + ``` + raftstore.capacity = disk total capacity / the number of TiKV instances + ``` + +- The label scheduling configuration + + Since multiple instances of TiKV are deployed on a single machine, if the physical machines go down, the Region Group might lose two of the default three replicas, which causes the cluster unavailability. To address this issue, you can use the label to enable the smart scheduling of PD, which ensures that the Region Group has more than two replicas in multiple TiKV instances on the same machine. + + - The TiKV configuration + + The same host-level label information is configured for the same physical machine: + + ```yml + config: + server.labels: + host: tikv1 + ``` + + - The PD configuration + + To enable PD to identify and schedule Regions, configure the labels type for PD: + + ```yml + pd: + replication.location-labels: ["host"] + ``` + +- `numa_node` core binding + + - In the instance parameter module, configure the corresponding `numa_node` parameter and add the number of CPU cores. + + - Before using NUMA to bind cores, make sure that the numactl tool is installed, and confirm the information of CPUs in the physical machines. After that, configure the parameters. + + - The `numa_node` parameter corresponds to the `numactl --membind` configuration. + +> **Note:** +> +> - When editing the configuration file template, modify the required parameters, IP, port, and directory. +> - Each component uses the global `<deploy_dir>/<component_name>-<port>` as their `deploy_dir` by default. For example, if TiDB specifies the `4001` port, its `deploy_dir` is `/tidb-deploy/tidb-4001` by default. 
Therefore, in multi-instance scenarios, when specifying a non-default port, you do not need to specify the directory again. +> - You do not need to manually create the `tidb` user in the configuration file. The TiUP cluster component automatically creates the `tidb` user on the target machines. You can customize the user, or keep the user consistent with the control machine. diff --git a/minimal-deployment-topology.md b/minimal-deployment-topology.md new file mode 100644 index 0000000000000..ac07d6991202d --- /dev/null +++ b/minimal-deployment-topology.md @@ -0,0 +1,27 @@ +--- +title: Minimal Deployment Topology +summary: Learn the minimal deployment topology of TiDB clusters. +category: how-to +--- + +# Minimal Deployment Topology + +This document describes the minimal deployment topology of TiDB clusters. + +## Topology information + +| Instance | Count | Physical machine configuration | IP | Configuration | +| :-- | :-- | :-- | :-- | :-- | +| TiDB | 3 | 16 VCore 32GB * 1 | 10.0.1.1
10.0.1.2
10.0.1.3 | Default port
Global directory configuration | +| PD | 3 | 4 VCore 8GB * 1 |10.0.1.4
10.0.1.5
10.0.1.6 | Default port
Global directory configuration | +| TiKV | 3 | 16 VCore 32GB 2TB (nvme ssd) * 1 | 10.0.1.7
10.0.1.8
10.0.1.9 | Default port
Global directory configuration | +| Monitoring & Grafana | 1 | 4 VCore 8GB * 1 500GB (ssd) | 10.0.1.11 | Default port
Global directory configuration | + +### Topology templates + +- [The simple template for the minimal topology](https://github.com/pingcap/docs/blob/master/config-templates/simple-mini.yaml) +- [The complex template for the minimal topology](https://github.com/pingcap/docs/blob/master/config-templates/complex-mini.yaml) + +> **Note:** +> +> You do not need to manually create the `tidb` user in the configuration file. The TiUP cluster component automatically creates the `tidb` user on the target machines. You can customize the user, or keep the user consistent with the control machine. diff --git a/ticdc-deployment-topology.md b/ticdc-deployment-topology.md new file mode 100644 index 0000000000000..8d87ae2ff924e --- /dev/null +++ b/ticdc-deployment-topology.md @@ -0,0 +1,34 @@ +--- +title: TiCDC Deployment Topology +summary: Learn the deployment topology of TiCDC based on the minimal TiDB topology. +category: how-to +--- + +# TiCDC Deployment Topology + +> **Note:** +> +> TiCDC is an experimental feature. It is **NOT** recommended to use TiCDC in production environments. + +This document describes the deployment topology of TiCDC based on the minimal cluster topology. + +TiCDC is a tool for replicating the incremental data of TiDB, introduced in TiDB 4.0. It supports multiple downstream platforms, such as TiDB, MySQL, and MQ. Compared with TiDB Binlog, TiCDC has lower latency and native high availability. + +## Topology information + +| Instance | Count | Physical machine configuration | IP | Configuration | +| :-- | :-- | :-- | :-- | :-- | +| TiDB | 3 | 16 VCore 32GB * 1 | 10.0.1.1
10.0.1.2
10.0.1.3 | Default port
Global directory configuration | +| PD | 3 | 4 VCore 8GB * 1 | 10.0.1.4
10.0.1.5
10.0.1.6 | Default port
Global directory configuration | +| TiKV | 3 | 16 VCore 32GB 2TB (nvme ssd) * 1 | 10.0.1.7
10.0.1.8
10.0.1.9 | Default port
Global directory configuration | +| CDC | 3 | 8 VCore 16GB * 1 | 10.0.1.11
10.0.1.12
10.0.1.13 | Default port
Global directory configuration | +| Monitoring & Grafana | 1 | 4 VCore 8GB * 1 500GB (ssd) | 10.0.1.11 | Default port
Global directory configuration | + +### Topology templates + +- [The simple template for the TiCDC topology](https://github.com/pingcap/docs/blob/master/config-templates/simple-cdc.yaml) +- [The complex template for the TiCDC topology](https://github.com/pingcap/docs/blob/master/config-templates/complex-cdc.yaml) + +> **Note:** +> +> You do not need to manually create the `tidb` user in the configuration file. The TiUP cluster component automatically creates the `tidb` user on the target machines. You can customize the user, or keep the user consistent with the control machine. diff --git a/tidb-binlog-deployment-topology.md b/tidb-binlog-deployment-topology.md new file mode 100644 index 0000000000000..44e66a5d318e3 --- /dev/null +++ b/tidb-binlog-deployment-topology.md @@ -0,0 +1,46 @@ +--- +title: TiDB Binlog Deployment Topology +summary: Learn the deployment topology of TiDB Binlog based on the minimal TiDB topology. +category: how-to +--- + +# TiDB Binlog Deployment Topology + +This document describes the deployment topology of TiDB Binlog based on the minimal TiDB topology. + +TiDB Binlog is the widely used component for replicating incremental data. It provides near real-time backup and replication. + +## Topology information + +| Instance | Count | Physical machine configuration | IP | Configuration | +| :-- | :-- | :-- | :-- | :-- | +| TiDB | 3 | 16 VCore 32 GB | 10.0.1.1
10.0.1.2
10.0.1.3 | Default port configuration;
Enable `enable_binlog`;
Enable `ignore-error` | +| PD | 3 | 4 VCore 8 GB | 10.0.1.4
10.0.1.5
10.0.1.6 | Default port configuration | +| TiKV | 3 | 16 VCore 32 GB | 10.0.1.7
10.0.1.8
10.0.1.9 | Default port configuration | +| Pump| 3 | 8 VCore 16GB | 10.0.1.1
10.0.1.7
10.0.1.8 | Default port configuration;
Set GC time to 7 days | +| Drainer | 1 | 8 VCore 16GB | 10.0.1.12 | Default port configuration;
Set the default initialization commitTS -1 as the latest timestamp;
Configure the downstream target TiDB as `10.0.1.12:4000` | + +### Topology templates + +- [The simple template for the TiDB Binlog topology](https://github.com/pingcap/docs/blob/master/config-templates/simple-tidb-binlog.yaml) +- [The complex template for the TiDB Binlog topology](https://github.com/pingcap/docs/blob/master/config-templates/complex-tidb-binlog.yaml) + +### Key parameters + +The key parameters in the topology configuration templates are as follows: + +- `binlog.enable: true` + + - Enables the binlog service. + - Default value: `false`. + +- `binlog.ignore-error: true` + + - It is recommended to enable this configuration in high availability scenarios. + - If set to `true`, when an error occurs, TiDB stops writing data into binlog, and adds `1` to the value of the `tidb_server_critical_error_total` monitoring metric. + - If set to `false`, when TiDB fails to write data into binlog, the whole TiDB service is stopped. + +> **Note:** +> +> - When editing the configuration file template, if you do not need custom ports or directories, modify the IP only. +> - You do not need to manually create the `tidb` user in the configuration file. The TiUP cluster component automatically creates the `tidb` user on the target machines. You can customize the user, or keep the user consistent with the control machine. diff --git a/tiflash-topology-deployment.md b/tiflash-topology-deployment.md new file mode 100644 index 0000000000000..d57a783a13067 --- /dev/null +++ b/tiflash-topology-deployment.md @@ -0,0 +1,36 @@ +--- +title: TiFlash Deployment Topology +summary: Learn the deployment topology of TiFlash based on the minimal TiDB topology. +category: how-to +--- + +# TiFlash Deployment Topology + +This document describes the deployment topology of TiFlash based on the minimal TiDB topology. + +TiFlash is a columnar storage engine, and gradually becomes the standard cluster topology. It is suitable for real-time HTAP applications. 
+ +## Topology information + +| Instance | Count | Physical machine configuration | IP | Configuration | +| :-- | :-- | :-- | :-- | :-- | +| TiDB | 3 | 16 VCore 32GB * 1 | 10.0.1.1
10.0.1.2
10.0.1.3 | Default port
Global directory configuration | +| PD | 3 | 4 VCore 8GB * 1 | 10.0.1.4
10.0.1.5
10.0.1.6 | Default port
Global directory configuration | +| TiKV | 3 | 16 VCore 32GB 2TB (nvme ssd) * 1 | 10.0.1.1
10.0.1.2
10.0.1.3 | Default port
Global directory configuration | +| TiFlash | 1 | 32 VCore 64 GB 2TB (nvme ssd) * 1 | 10.0.1.10 | Default port
Global directory configuration | +| Monitoring & Grafana | 1 | 4 VCore 8GB * 1 500GB (ssd) | 10.0.1.10 | Default port
Global directory configuration | + +### Topology templates + +- [The simple template for the TiFlash topology](https://github.com/pingcap/docs/blob/master/config-templates/simple-tiflash.yaml) +- [The complex template for the TiFlash topology](https://github.com/pingcap/docs/blob/master/config-templates/complex-tiflash.yaml) + +### Key parameters + +- To enable the [Placement Rules](/configure-placement-rules.md) feature of PD, set the value of `replication.enable-placement-rules` in the configuration template to `true`. +- The instance level `"-host"` configuration in `tiflash_servers` only supports IP, not domain name. +- For detailed TiFlash parameter description, see [TiFlash Configuration](/tiflash/tiflash-configuration.md). + +> **Note:** +> +> You do not need to manually create the `tidb` user in the configuration file. The TiUP cluster component automatically creates the `tidb` user on the target machines. You can customize the user, or keep the user consistent with the control machine.