From 7d0dad03d3abd632d41663ee3aa49bf1283a2e58 Mon Sep 17 00:00:00 2001 From: superlzs0476 Date: Sat, 23 May 2020 17:11:36 +0800 Subject: [PATCH 1/4] add topology YAML template --- complex-cdc.yaml | 46 ++++++++++++ complex-mini.yaml | 113 +++++++++++++++++++++++++++++ complex-multi-instant.yaml | 129 +++++++++++++++++++++++++++++++++ complex-tidb-binlog.yaml | 78 ++++++++++++++++++++ complex-tiflash.yaml | 49 +++++++++++++ geo-redundancy-deployment.yaml | 108 +++++++++++++++++++++++++++ simple-cdc.yaml | 30 ++++++++ simple-mini.yaml | 31 ++++++++ simple-multi-instant.yaml | 92 +++++++++++++++++++++++ simple-tidb-binlog.yaml | 46 ++++++++++++ simple-tiflash.yaml | 33 +++++++++ 11 files changed, 755 insertions(+) create mode 100644 complex-cdc.yaml create mode 100644 complex-mini.yaml create mode 100644 complex-multi-instant.yaml create mode 100644 complex-tidb-binlog.yaml create mode 100644 complex-tiflash.yaml create mode 100644 geo-redundancy-deployment.yaml create mode 100644 simple-cdc.yaml create mode 100644 simple-mini.yaml create mode 100644 simple-multi-instant.yaml create mode 100644 simple-tidb-binlog.yaml create mode 100644 simple-tiflash.yaml diff --git a/complex-cdc.yaml b/complex-cdc.yaml new file mode 100644 index 000000000000..c050867159b3 --- /dev/null +++ b/complex-cdc.yaml @@ -0,0 +1,46 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. 
+global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + pd: + replication.enable-placement-rules: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 +cdc_servers: + - host: 10.0.1.1 + ssh_port: 22 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + - host: 10.0.1.2 + ssh_port: 22 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + - host: 10.0.1.3 + ssh_port: 22 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/complex-mini.yaml b/complex-mini.yaml new file mode 100644 index 000000000000..4a4807867a2a --- /dev/null +++ b/complex-mini.yaml @@ -0,0 +1,113 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. 
+# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + binlog.enable: false + binlog.ignore-error: false + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. 
+ # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/complex-multi-instant.yaml b/complex-multi-instant.yaml new file mode 100644 index 000000000000..06eebd2e25fc --- /dev/null +++ b/complex-multi-instant.yaml @@ -0,0 +1,129 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. 
+global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + deploy_dir: "/tidb-deploy/monitored-9100" + data_dir: "/tidb-data-monitored-9100" + log_dir: "/tidb-deploy/monitored-9100/log" + +server_configs: + tikv: + readpool.unified.max-thread-count: <取值参考上文计算公式的结果> + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + storage.block-cache.capacity: "<取值参考上文计算公式的结果>" + raftstore.capacity: "<取值参考上文计算公式的结果>" + pd: + replication.location-labels: ["host"] + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.1 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + - host: 10.0.1.2 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.2 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + - host: 10.0.1.3 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.3 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + +tikv_servers: + - host: 10.0.1.7 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.7 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: 
"/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.8 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.8 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.9 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv3" } + - host: 10.0.1.9 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv3" } +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/complex-tidb-binlog.yaml b/complex-tidb-binlog.yaml new file mode 100644 index 000000000000..a21669a93e40 --- /dev/null +++ b/complex-tidb-binlog.yaml @@ -0,0 +1,78 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. 
+global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" +monitored: + node_exporter_port: 9122 + blackbox_exporter_port: 9137 + deploy_dir: "/tidb-deploy/monitored-9100" + data_dir: "/tidb-data/monitored-9100" + log_dir: "/tidb-deploy/monitored-9100/log" + +server_configs: + tidb: + binlog.enable: true + binlog.ignore-error: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + gc: 7 + - host: 10.0.1.2 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + gc: 7 + - host: 10.0.1.3 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + gc: 7 +drainer_servers: + - host: 10.0.1.12 + port: 8249 + data_dir: "/tidb-data/drainer-8249" + # If drainer doesn't have a checkpoint, use initial commitTS as the initial checkpoint. + # Will get a latest timestamp from pd if commit_ts is set to -1 (the default value). + commit_ts: -1 + deploy_dir: "/tidb-deploy/drainer-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. 
+ config: + syncer.db-type: "tidb" + syncer.to.host: "10.0.1.12" + syncer.to.user: "root" + syncer.to.password: "" + syncer.to.port: 4000 +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/complex-tiflash.yaml b/complex-tiflash.yaml new file mode 100644 index 000000000000..d9ce16dac9db --- /dev/null +++ b/complex-tiflash.yaml @@ -0,0 +1,49 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + pd: + replication.enable-placement-rules: true +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 +tikv_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tiflash_servers: + - host: 10.0.1.11 + data_dir: /tidb-data/tiflash-9000 + deploy_dir: /tidb-deploy/tiflash-9000 + # ssh_port: 22 + # tcp_port: 9000 + # http_port: 8123 + # flash_service_port: 3930 + # flash_proxy_port: 20170 + # flash_proxy_status_port: 20292 + # metrics_port: 8234 + # deploy_dir: /tidb-deploy/tiflash-9000 + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tiflash` values. + # config: + # logger.level: "info" + # learner_config: + # log-level: "info" + # - host: 10.0.1.12 + # - host: 10.0.1.13 +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/geo-redundancy-deployment.yaml b/geo-redundancy-deployment.yaml new file mode 100644 index 000000000000..9f18ac900a60 --- /dev/null +++ b/geo-redundancy-deployment.yaml @@ -0,0 +1,108 @@ +# Tip: PD priority needs to be manually set using the PD-ctl client tool. 
such as, member Leader_priority PD-name numbers. +# Global variables are applied to all deployments and used as the default value of +# the deployments if a specific deployment value is missing. + +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + deploy_dir: "/tidb-deploy/monitored-9100" +server_configs: + tidb: + log.level: debug + log.slow-query-file: tidb-slow.log + tikv: + server.grpc-compression-type: gzip + readpool.storage.use-unified-pool: true + readpool.storage.low-concurrency: 8 + pd: + replication.location-labels: ["zone","dc","rack","host"] + replication.max-replicas: 5 + label-property: + reject-leader: + - key: "dc" + value: "sha" +pd_servers: + - host: 10.0.1.6 + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + - host: 10.0.1.10 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + - host: 10.0.1.4 + - host: 10.0.1.5 +tikv_servers: + - host: 10.0.1.11 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host1 + - host: 10.0.1.12 + ssh_port: 22 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host2 + - host: 10.0.1.13 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: bj + dc: bjb + rack: rack1 + host: host1 + - host: 10.0.1.14 + ssh_port: 22 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + config: + server.labels: + zone: bj + dc: bjb + rack: rack1 + host: host2 + - host: 10.0.1.15 + ssh_port: 22 + port: 20160 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: 
"/tidb-data/tikv-20160" + config: + server.labels: + zone: sh + dc: sha + rack: rack1 + host: host1 + readpool.storage.use-unified-pool: true + readpool.storage.low-concurrency: 10 + raftstore.raft-min-election-timeout-ticks: 1000 + raftstore.raft-max-election-timeout-ticks: 1020 +monitoring_servers: + - host: 10.0.1.16 +grafana_servers: + - host: 10.0.1.16 \ No newline at end of file diff --git a/simple-cdc.yaml b/simple-cdc.yaml new file mode 100644 index 000000000000..b7d7fa10110c --- /dev/null +++ b/simple-cdc.yaml @@ -0,0 +1,30 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 +cdc_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/simple-mini.yaml b/simple-mini.yaml new file mode 100644 index 000000000000..57e48fd2d741 --- /dev/null +++ b/simple-mini.yaml @@ -0,0 +1,31 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. 
+global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/simple-multi-instant.yaml b/simple-multi-instant.yaml new file mode 100644 index 000000000000..f24405b414ec --- /dev/null +++ b/simple-multi-instant.yaml @@ -0,0 +1,92 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + tikv: + readpool.unified.max-thread-count: <取值参考上文计算公式的结果> + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + storage.block-cache.capacity: "<取值参考上文计算公式的结果>" + raftstore.capacity: "<取值参考上文计算公式的结果>" + pd: + replication.location-labels: ["host"] + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.1 + port: 4001 + status_port: 10081 + numa_node: "1" + - host: 10.0.1.2 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.2 + port: 4001 + status_port: 10081 + numa_node: "1" + - host: 10.0.1.3 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.3 + port: 4001 + status_port: 10081 + numa_node: "1" + +tikv_servers: + - host: 10.0.1.7 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.7 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.8 + port: 20160 + 
status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.8 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.9 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv3" } + - host: 10.0.1.9 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv3" } +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/simple-tidb-binlog.yaml b/simple-tidb-binlog.yaml new file mode 100644 index 000000000000..a63e2f51a77e --- /dev/null +++ b/simple-tidb-binlog.yaml @@ -0,0 +1,46 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + tidb: + binlog.enable: true + binlog.ignore-error: true + pd: + replication.enable-placement-rules: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +drainer_servers: + - host: 10.0.1.12 + config: + syncer.db-type: "tidb" + syncer.to.host: "10.0.1.12" + syncer.to.user: "root" + syncer.to.password: "" + syncer.to.port: 4000 +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file diff --git a/simple-tiflash.yaml b/simple-tiflash.yaml new file mode 100644 index 000000000000..1ed6c02c69d6 --- /dev/null +++ b/simple-tiflash.yaml @@ -0,0 +1,33 @@ +# # Global variables are applied to all deployments and used as the default value of +# # 
the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + pd: + replication.enable-placement-rules: true +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 +tikv_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tiflash_servers: + - host: 10.0.1.11 + data_dir: /tidb-data/tiflash-9000 + deploy_dir: /tidb-deploy/tiflash-9000 +monitoring_servers: + - host: 10.0.1.10 +grafana_servers: + - host: 10.0.1.10 +alertmanager_servers: + - host: 10.0.1.10 \ No newline at end of file From 6a071871d20e069bac265d37e17611bebdaa7eb6 Mon Sep 17 00:00:00 2001 From: superlzs0476 Date: Sat, 23 May 2020 20:51:04 +0800 Subject: [PATCH 2/4] fix topology YAML template --- complex-cdc.yaml | 90 +++++++++++++++++++++++++++++++++++--- complex-multi-instant.yaml | 29 ++++++++++-- complex-tidb-binlog.yaml | 85 ++++++++++++++++++++++++++++++++--- complex-tiflash.yaml | 84 ++++++++++++++++++++++++++++++++++- simple-cdc.yaml | 6 +++ simple-multi-instant.yaml | 3 ++ simple-tidb-binlog.yaml | 5 ++- simple-tiflash.yaml | 7 +++ 8 files changed, 291 insertions(+), 18 deletions(-) diff --git a/complex-cdc.yaml b/complex-cdc.yaml index c050867159b3..e4267e371912 100644 --- a/complex-cdc.yaml +++ b/complex-cdc.yaml @@ -6,41 +6,121 @@ global: deploy_dir: "/tidb-deploy" data_dir: "/tidb-data" +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. 
+# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + server_configs: + tidb: + log.slow-threshold: 300 + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true pd: - replication.enable-placement-rules: true + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 pd_servers: - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 - host: 10.0.1.5 - host: 10.0.1.6 + tidb_servers: - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. 
+ # config: + # log.slow-query-file: tidb-slow-overwrited.log - host: 10.0.1.2 - host: 10.0.1.3 + tikv_servers: - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 - host: 10.0.1.9 + cdc_servers: - host: 10.0.1.1 - ssh_port: 22 port: 8300 deploy_dir: "/tidb-deploy/cdc-8300" log_dir: "/tidb-deploy/cdc-8300/log" - host: 10.0.1.2 - ssh_port: 22 port: 8300 deploy_dir: "/tidb-deploy/cdc-8300" log_dir: "/tidb-deploy/cdc-8300/log" - host: 10.0.1.3 - ssh_port: 22 port: 8300 deploy_dir: "/tidb-deploy/cdc-8300" log_dir: "/tidb-deploy/cdc-8300/log" + monitoring_servers: - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + grafana_servers: - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/complex-multi-instant.yaml b/complex-multi-instant.yaml index 06eebd2e25fc..09424c130929 100644 --- a/complex-multi-instant.yaml +++ b/complex-multi-instant.yaml @@ -14,14 +14,19 @@ monitored: log_dir: "/tidb-deploy/monitored-9100/log" server_configs: + tidb: + log.slow-threshold: 300 tikv: - readpool.unified.max-thread-count: <取值参考上文计算公式的结果> + readpool.unified.max-thread-count: <取值参考部署文档的计算公式结果> readpool.storage.use-unified-pool: false 
readpool.coprocessor.use-unified-pool: true - storage.block-cache.capacity: "<取值参考上文计算公式的结果>" - raftstore.capacity: "<取值参考上文计算公式的结果>" + storage.block-cache.capacity: "<取值参考部署文档的计算公式结果>" + raftstore.capacity: "<取值参考部署文档的计算公式结果>" pd: replication.location-labels: ["host"] + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 pd_servers: - host: 10.0.1.4 @@ -121,9 +126,25 @@ tikv_servers: numa_node: "1" config: server.labels: { host: "tikv3" } + monitoring_servers: - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + grafana_servers: - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/complex-tidb-binlog.yaml b/complex-tidb-binlog.yaml index a21669a93e40..553884cb8a08 100644 --- a/complex-tidb-binlog.yaml +++ b/complex-tidb-binlog.yaml @@ -5,28 +5,86 @@ global: ssh_port: 22 deploy_dir: "/tidb-deploy" data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. monitored: - node_exporter_port: 9122 - blackbox_exporter_port: 9137 - deploy_dir: "/tidb-deploy/monitored-9100" - data_dir: "/tidb-data/monitored-9100" - log_dir: "/tidb-deploy/monitored-9100/log" + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. 
+# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. server_configs: tidb: + log.slow-threshold: 300 binlog.enable: true binlog.ignore-error: true + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 pd_servers: - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 - host: 10.0.1.5 - host: 10.0.1.6 tidb_servers: - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. 
+ # config: + # log.slow-query-file: tidb-slow-overwrited.log - host: 10.0.1.2 - host: 10.0.1.3 tikv_servers: - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - host: 10.0.1.8 - host: 10.0.1.9 @@ -72,7 +130,22 @@ drainer_servers: syncer.to.port: 4000 monitoring_servers: - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + grafana_servers: - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/complex-tiflash.yaml b/complex-tiflash.yaml index d9ce16dac9db..99cf55e0f52b 100644 --- a/complex-tiflash.yaml +++ b/complex-tiflash.yaml @@ -6,21 +6,87 @@ global: deploy_dir: "/tidb-deploy" data_dir: "/tidb-data" +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. 
+# # All configuration items can be found in TiDB docs:
+# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/
+# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/
+# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/
+# # All configuration items use points to represent the hierarchy, e.g:
+# # readpool.storage.use-unified-pool
+# #
+# # You can overwrite this configuration via the instance-level `config` field.
+
 server_configs:
+  tidb:
+    log.slow-threshold: 300
+  tikv:
+    # server.grpc-concurrency: 4
+    # raftstore.apply-pool-size: 2
+    # raftstore.store-pool-size: 2
+    # rocksdb.max-sub-compactions: 1
+    # storage.block-cache.capacity: "16GB"
+    # readpool.unified.max-thread-count: 12
+    readpool.storage.use-unified-pool: false
+    readpool.coprocessor.use-unified-pool: true
   pd:
+    schedule.leader-schedule-limit: 4
+    schedule.region-schedule-limit: 2048
+    schedule.replica-schedule-limit: 64
     replication.enable-placement-rules: true
+
 pd_servers:
   - host: 10.0.1.4
+    # ssh_port: 22
+    # name: "pd-1"
+    # client_port: 2379
+    # peer_port: 2380
+    # deploy_dir: "/tidb-deploy/pd-2379"
+    # data_dir: "/tidb-data/pd-2379"
+    # log_dir: "/tidb-deploy/pd-2379/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.pd` values.
+    # config:
+    #   schedule.max-merge-region-size: 20
+    #   schedule.max-merge-region-keys: 200000
   - host: 10.0.1.5
   - host: 10.0.1.6
 tidb_servers:
   - host: 10.0.1.7
+    # ssh_port: 22
+    # port: 4000
+    # status_port: 10080
+    # deploy_dir: "/tidb-deploy/tidb-4000"
+    # log_dir: "/tidb-deploy/tidb-4000/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tidb` values.
+    # config:
+    #   log.slow-query-file: tidb-slow-overwrited.log
   - host: 10.0.1.8
   - host: 10.0.1.9
 tikv_servers:
   - host: 10.0.1.1
+    # ssh_port: 22
+    # port: 20160
+    # status_port: 20180
+    # deploy_dir: "/tidb-deploy/tikv-20160"
+    # data_dir: "/tidb-data/tikv-20160"
+    # log_dir: "/tidb-deploy/tikv-20160/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tikv` values.
+    # config:
+    #   server.grpc-concurrency: 4
+    #   server.labels: { zone: "zone1", dc: "dc1", host: "host1" }
   - host: 10.0.1.2
   - host: 10.0.1.3
+
 tiflash_servers:
   - host: 10.0.1.11
     data_dir: /tidb-data/tiflash-9000
@@ -41,9 +107,25 @@ tiflash_servers:
     # log-level: "info"
   # - host: 10.0.1.12
   # - host: 10.0.1.13
+
 monitoring_servers:
   - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-8249"
+    # data_dir: "/tidb-data/prometheus-8249"
+    # log_dir: "/tidb-deploy/prometheus-8249/log"
+
 grafana_servers:
   - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
 alertmanager_servers:
-  - host: 10.0.1.10
\ No newline at end of file
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
+    # data_dir: "/tidb-data/alertmanager-9093"
+    # log_dir: "/tidb-deploy/alertmanager-9093/log"
\ No newline at end of file
diff --git a/simple-cdc.yaml b/simple-cdc.yaml
index b7d7fa10110c..4d820082de20 100644
--- a/simple-cdc.yaml
+++ b/simple-cdc.yaml
@@ -10,21 +10,27 @@ pd_servers:
   - host: 10.0.1.4
   - host: 10.0.1.5
   - host: 10.0.1.6
+
 tidb_servers:
   - host: 10.0.1.1
   - host: 10.0.1.2
   - host: 10.0.1.3
+
 tikv_servers:
   - host: 10.0.1.7
   - host: 10.0.1.8
   - host: 10.0.1.9
+
 cdc_servers:
   - host: 10.0.1.7
   - host: 10.0.1.8
   - host: 10.0.1.9
+
 monitoring_servers:
   - host: 10.0.1.10
+
 grafana_servers:
   - host: 10.0.1.10
+
 alertmanager_servers:
   - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-multi-instant.yaml b/simple-multi-instant.yaml
index f24405b414ec..dcabeb242194 100644
--- a/simple-multi-instant.yaml
+++ b/simple-multi-instant.yaml
@@ -84,9 +84,12 @@ tikv_servers:
     numa_node: "1"
     config:
       server.labels: { host: "tikv3" }
+
 monitoring_servers:
   - host: 10.0.1.10
+
 grafana_servers:
   - host: 10.0.1.10
+
 alertmanager_servers:
   - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-tidb-binlog.yaml b/simple-tidb-binlog.yaml
index a63e2f51a77e..558023e00356 100644
--- a/simple-tidb-binlog.yaml
+++ b/simple-tidb-binlog.yaml
@@ -10,8 +10,6 @@ server_configs:
   tidb:
     binlog.enable: true
     binlog.ignore-error: true
-  pd:
-    replication.enable-placement-rules: true
 
 pd_servers:
   - host: 10.0.1.4
@@ -38,9 +36,12 @@ drainer_servers:
       syncer.to.user: "root"
       syncer.to.password: ""
       syncer.to.port: 4000
+
 monitoring_servers:
   - host: 10.0.1.10
+
 grafana_servers:
   - host: 10.0.1.10
+
 alertmanager_servers:
   - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-tiflash.yaml b/simple-tiflash.yaml
index 1ed6c02c69d6..1e2024440854 100644
--- a/simple-tiflash.yaml
+++ b/simple-tiflash.yaml
@@ -9,25 +9,32 @@ global:
 server_configs:
   pd:
     replication.enable-placement-rules: true
+
 pd_servers:
   - host: 10.0.1.4
   - host: 10.0.1.5
   - host: 10.0.1.6
+
 tidb_servers:
   - host: 10.0.1.7
   - host: 10.0.1.8
   - host: 10.0.1.9
+
 tikv_servers:
   - host: 10.0.1.1
   - host: 10.0.1.2
   - host: 10.0.1.3
+
 tiflash_servers:
   - host: 10.0.1.11
     data_dir: /tidb-data/tiflash-9000
     deploy_dir: /tidb-deploy/tiflash-9000
+
 monitoring_servers:
   - host: 10.0.1.10
+
 grafana_servers:
   - host: 10.0.1.10
+
 alertmanager_servers:
   - host: 10.0.1.10
\ No newline at end of file

From 7e18507ea437b67bc3c16f179df7641be2b048d8 Mon Sep 17 00:00:00 2001
From: superlzs0476
Date: Sat, 23 May 2020 20:52:10 +0800
Subject: [PATCH 3/4] fix topology YAML template

---
 complex-tidb-binlog.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/complex-tidb-binlog.yaml b/complex-tidb-binlog.yaml
index 553884cb8a08..36db4f2ca451 100644
--- a/complex-tidb-binlog.yaml
+++ b/complex-tidb-binlog.yaml
@@ -128,6 +128,7 @@ drainer_servers:
       syncer.to.user: "root"
       syncer.to.password: ""
       syncer.to.port: 4000
+
 monitoring_servers:
   - host: 10.0.1.10
     # ssh_port: 22

From 76c21827e9d35551119ccde67eb4263a79560c90 Mon Sep 17 00:00:00 2001
From: superlzs0476
Date: Mon, 25 May 2020 20:53:13 +0800
Subject: [PATCH 4/4] update topology YAML template

---
 complex-multi-instant.yaml | 6 +++---
 simple-multi-instant.yaml  | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/complex-multi-instant.yaml b/complex-multi-instant.yaml
index 09424c130929..2513d0b3869c 100644
--- a/complex-multi-instant.yaml
+++ b/complex-multi-instant.yaml
@@ -17,11 +17,11 @@ server_configs:
   tidb:
     log.slow-threshold: 300
   tikv:
-    readpool.unified.max-thread-count: <取值参考部署文档的计算公式结果>
+    readpool.unified.max-thread-count:
     readpool.storage.use-unified-pool: false
     readpool.coprocessor.use-unified-pool: true
-    storage.block-cache.capacity: "<取值参考部署文档的计算公式结果>"
-    raftstore.capacity: "<取值参考部署文档的计算公式结果>"
+    storage.block-cache.capacity: ""
+    raftstore.capacity: ""
   pd:
     replication.location-labels: ["host"]
     schedule.leader-schedule-limit: 4
diff --git a/simple-multi-instant.yaml b/simple-multi-instant.yaml
index dcabeb242194..735b3be1af61 100644
--- a/simple-multi-instant.yaml
+++ b/simple-multi-instant.yaml
@@ -8,11 +8,11 @@ global:
 
 server_configs:
   tikv:
-    readpool.unified.max-thread-count: <取值参考上文计算公式的结果>
+    readpool.unified.max-thread-count:
     readpool.storage.use-unified-pool: false
     readpool.coprocessor.use-unified-pool: true
-    storage.block-cache.capacity: "<取值参考上文计算公式的结果>"
-    raftstore.capacity: "<取值参考上文计算公式的结果>"
+    storage.block-cache.capacity: ""
+    raftstore.capacity: ""
   pd:
     replication.location-labels: ["host"]