diff --git a/complex-cdc.yaml b/complex-cdc.yaml
new file mode 100644
index 000000000000..e4267e371912
--- /dev/null
+++ b/complex-cdc.yaml
@@ -0,0 +1,126 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+# # Monitored variables are applied to all the machines.
+monitored:
+  node_exporter_port: 9100
+  blackbox_exporter_port: 9115
+  # deploy_dir: "/tidb-deploy/monitored-9100"
+  # data_dir: "/tidb-data/monitored-9100"
+  # log_dir: "/tidb-deploy/monitored-9100/log"
+
+# # Server configs are used to specify the runtime configuration of TiDB components.
+# # All configuration items can be found in TiDB docs:
+# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/
+# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/
+# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/
+# # All configuration items use dots (`.`) to represent the hierarchy, e.g.:
+# #   readpool.storage.use-unified-pool
+# #
+# # You can overwrite this configuration via the instance-level `config` field.
+
+server_configs:
+  tidb:
+    log.slow-threshold: 300
+  tikv:
+    # server.grpc-concurrency: 4
+    # raftstore.apply-pool-size: 2
+    # raftstore.store-pool-size: 2
+    # rocksdb.max-sub-compactions: 1
+    # storage.block-cache.capacity: "16GB"
+    # readpool.unified.max-thread-count: 12
+    readpool.storage.use-unified-pool: false
+    readpool.coprocessor.use-unified-pool: true
+  pd:
+    schedule.leader-schedule-limit: 4
+    schedule.region-schedule-limit: 2048
+    schedule.replica-schedule-limit: 64
+
+pd_servers:
+  - host: 10.0.1.4
+    # ssh_port: 22
+    # name: "pd-1"
+    # client_port: 2379
+    # peer_port: 2380
+    # deploy_dir: "/tidb-deploy/pd-2379"
+    # data_dir: "/tidb-data/pd-2379"
+    # log_dir: "/tidb-deploy/pd-2379/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.pd` values.
+    # config:
+    #   schedule.max-merge-region-size: 20
+    #   schedule.max-merge-region-keys: 200000
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.1
+    # ssh_port: 22
+    # port: 4000
+    # status_port: 10080
+    # deploy_dir: "/tidb-deploy/tidb-4000"
+    # log_dir: "/tidb-deploy/tidb-4000/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tidb` values.
+    # config:
+    #   log.slow-query-file: tidb-slow-overwritten.log
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tikv_servers:
+  - host: 10.0.1.7
+    # ssh_port: 22
+    # port: 20160
+    # status_port: 20180
+    # deploy_dir: "/tidb-deploy/tikv-20160"
+    # data_dir: "/tidb-data/tikv-20160"
+    # log_dir: "/tidb-deploy/tikv-20160/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tikv` values.
+    # config:
+    #   server.grpc-concurrency: 4
+    #   server.labels: { zone: "zone1", dc: "dc1", host: "host1" }
+
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+cdc_servers:
+  - host: 10.0.1.1
+    port: 8300
+    deploy_dir: "/tidb-deploy/cdc-8300"
+    log_dir: "/tidb-deploy/cdc-8300/log"
+  - host: 10.0.1.2
+    port: 8300
+    deploy_dir: "/tidb-deploy/cdc-8300"
+    log_dir: "/tidb-deploy/cdc-8300/log"
+  - host: 10.0.1.3
+    port: 8300
+    deploy_dir: "/tidb-deploy/cdc-8300"
+    log_dir: "/tidb-deploy/cdc-8300/log"
+
+monitoring_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-9090"
+    # data_dir: "/tidb-data/prometheus-9090"
+    # log_dir: "/tidb-deploy/prometheus-9090/log"
+
+grafana_servers:
+  - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
+alertmanager_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
+    # data_dir: "/tidb-data/alertmanager-9093"
+    # log_dir: "/tidb-deploy/alertmanager-9093/log"
\ No newline at end of file
diff --git a/complex-mini.yaml b/complex-mini.yaml
new file mode 100644
index 000000000000..4a4807867a2a
--- /dev/null
+++ b/complex-mini.yaml
@@ -0,0 +1,113 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+# # Monitored variables are applied to all the machines.
+monitored:
+  node_exporter_port: 9100
+  blackbox_exporter_port: 9115
+  # deploy_dir: "/tidb-deploy/monitored-9100"
+  # data_dir: "/tidb-data/monitored-9100"
+  # log_dir: "/tidb-deploy/monitored-9100/log"
+
+# # Server configs are used to specify the runtime configuration of TiDB components.
+# # All configuration items can be found in TiDB docs:
+# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/
+# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/
+# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/
+# # All configuration items use dots (`.`) to represent the hierarchy, e.g.:
+# #   readpool.storage.use-unified-pool
+# #
+# # You can overwrite this configuration via the instance-level `config` field.
+
+server_configs:
+  tidb:
+    log.slow-threshold: 300
+    binlog.enable: false
+    binlog.ignore-error: false
+  tikv:
+    # server.grpc-concurrency: 4
+    # raftstore.apply-pool-size: 2
+    # raftstore.store-pool-size: 2
+    # rocksdb.max-sub-compactions: 1
+    # storage.block-cache.capacity: "16GB"
+    # readpool.unified.max-thread-count: 12
+    readpool.storage.use-unified-pool: false
+    readpool.coprocessor.use-unified-pool: true
+  pd:
+    schedule.leader-schedule-limit: 4
+    schedule.region-schedule-limit: 2048
+    schedule.replica-schedule-limit: 64
+
+pd_servers:
+  - host: 10.0.1.4
+    # ssh_port: 22
+    # name: "pd-1"
+    # client_port: 2379
+    # peer_port: 2380
+    # deploy_dir: "/tidb-deploy/pd-2379"
+    # data_dir: "/tidb-data/pd-2379"
+    # log_dir: "/tidb-deploy/pd-2379/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.pd` values.
+    # config:
+    #   schedule.max-merge-region-size: 20
+    #   schedule.max-merge-region-keys: 200000
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.1
+    # ssh_port: 22
+    # port: 4000
+    # status_port: 10080
+    # deploy_dir: "/tidb-deploy/tidb-4000"
+    # log_dir: "/tidb-deploy/tidb-4000/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tidb` values.
+    # config:
+    #   log.slow-query-file: tidb-slow-overwritten.log
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tikv_servers:
+  - host: 10.0.1.7
+    # ssh_port: 22
+    # port: 20160
+    # status_port: 20180
+    # deploy_dir: "/tidb-deploy/tikv-20160"
+    # data_dir: "/tidb-data/tikv-20160"
+    # log_dir: "/tidb-deploy/tikv-20160/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tikv` values.
+    # config:
+    #   server.grpc-concurrency: 4
+    #   server.labels: { zone: "zone1", dc: "dc1", host: "host1" }
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+monitoring_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-9090"
+    # data_dir: "/tidb-data/prometheus-9090"
+    # log_dir: "/tidb-deploy/prometheus-9090/log"
+
+grafana_servers:
+  - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
+alertmanager_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
+    # data_dir: "/tidb-data/alertmanager-9093"
+    # log_dir: "/tidb-deploy/alertmanager-9093/log"
\ No newline at end of file
diff --git a/complex-multi-instant.yaml b/complex-multi-instant.yaml
new file mode 100644
index 000000000000..2513d0b3869c
--- /dev/null
+++ b/complex-multi-instant.yaml
@@ -0,0 +1,150 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+monitored:
+  node_exporter_port: 9100
+  blackbox_exporter_port: 9115
+  deploy_dir: "/tidb-deploy/monitored-9100"
+  data_dir: "/tidb-data/monitored-9100"
+  log_dir: "/tidb-deploy/monitored-9100/log"
+
+server_configs:
+  tidb:
+    log.slow-threshold: 300
+  tikv:
+    readpool.unified.max-thread-count:   # Suggested: cores * 0.8 / number of TiKV instances on the host.
+    readpool.storage.use-unified-pool: false
+    readpool.coprocessor.use-unified-pool: true
+    storage.block-cache.capacity: ""     # Suggested: total memory * 0.5 / number of TiKV instances, e.g. "8GB".
+    raftstore.capacity: ""               # Suggested: disk capacity / number of TiKV instances.
+  pd:
+    replication.location-labels: ["host"]
+    schedule.leader-schedule-limit: 4
+    schedule.region-schedule-limit: 2048
+    schedule.replica-schedule-limit: 64
+
+pd_servers:
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.1
+    port: 4000
+    status_port: 10080
+    deploy_dir: "/tidb-deploy/tidb-4000"
+    log_dir: "/tidb-deploy/tidb-4000/log"
+    numa_node: "0"
+  - host: 10.0.1.1
+    port: 4001
+    status_port: 10081
+    deploy_dir: "/tidb-deploy/tidb-4001"
+    log_dir: "/tidb-deploy/tidb-4001/log"
+    numa_node: "1"
+  - host: 10.0.1.2
+    port: 4000
+    status_port: 10080
+    deploy_dir: "/tidb-deploy/tidb-4000"
+    log_dir: "/tidb-deploy/tidb-4000/log"
+    numa_node: "0"
+  - host: 10.0.1.2
+    port: 4001
+    status_port: 10081
+    deploy_dir: "/tidb-deploy/tidb-4001"
+    log_dir: "/tidb-deploy/tidb-4001/log"
+    numa_node: "1"
+  - host: 10.0.1.3
+    port: 4000
+    status_port: 10080
+    deploy_dir: "/tidb-deploy/tidb-4000"
+    log_dir: "/tidb-deploy/tidb-4000/log"
+    numa_node: "0"
+  - host: 10.0.1.3
+    port: 4001
+    status_port: 10081
+    deploy_dir: "/tidb-deploy/tidb-4001"
+    log_dir: "/tidb-deploy/tidb-4001/log"
+    numa_node: "1"
+
+tikv_servers:
+  - host: 10.0.1.7
+    port: 20160
+    status_port: 20180
+    deploy_dir: "/tidb-deploy/tikv-20160"
+    data_dir: "/tidb-data/tikv-20160"
+    log_dir: "/tidb-deploy/tikv-20160/log"
+    numa_node: "0"
+    config:
+      server.labels: { host: "tikv1" }
+  - host: 10.0.1.7
+    port: 20161
+    status_port: 20181
+    deploy_dir: "/tidb-deploy/tikv-20161"
+    data_dir: "/tidb-data/tikv-20161"
+    log_dir: "/tidb-deploy/tikv-20161/log"
+    numa_node: "1"
+    config:
+      server.labels: { host: "tikv1" }
+  - host: 10.0.1.8
+    port: 20160
+    status_port: 20180
+    deploy_dir: "/tidb-deploy/tikv-20160"
+    data_dir: "/tidb-data/tikv-20160"
+    log_dir: "/tidb-deploy/tikv-20160/log"
+    numa_node: "0"
+    config:
+      server.labels: { host: "tikv2" }
+  - host: 10.0.1.8
+    port: 20161
+    status_port: 20181
+    deploy_dir: "/tidb-deploy/tikv-20161"
+    data_dir: "/tidb-data/tikv-20161"
+    log_dir: "/tidb-deploy/tikv-20161/log"
+    numa_node: "1"
+    config:
+      server.labels: { host: "tikv2" }
+  - host: 10.0.1.9
+    port: 20160
+    status_port: 20180
+    deploy_dir: "/tidb-deploy/tikv-20160"
+    data_dir: "/tidb-data/tikv-20160"
+    log_dir: "/tidb-deploy/tikv-20160/log"
+    numa_node: "0"
+    config:
+      server.labels: { host: "tikv3" }
+  - host: 10.0.1.9
+    port: 20161
+    status_port: 20181
+    deploy_dir: "/tidb-deploy/tikv-20161"
+    data_dir: "/tidb-data/tikv-20161"
+    log_dir: "/tidb-deploy/tikv-20161/log"
+    numa_node: "1"
+    config:
+      server.labels: { host: "tikv3" }
+
+monitoring_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-9090"
+    # data_dir: "/tidb-data/prometheus-9090"
+    # log_dir: "/tidb-deploy/prometheus-9090/log"
+
+grafana_servers:
+  - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
+alertmanager_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
"/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/complex-tidb-binlog.yaml b/complex-tidb-binlog.yaml new file mode 100644 index 000000000000..36db4f2ca451 --- /dev/null +++ b/complex-tidb-binlog.yaml @@ -0,0 +1,152 @@ +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ +# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ +# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + binlog.enable: true + binlog.ignore-error: true + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8249" + data_dir: "/tidb-data/pump-8249" + # The following configs are used to overwrite the `server_configs.drainer` values. 
+    config:
+      gc: 7
+  - host: 10.0.1.2
+    ssh_port: 22
+    port: 8250
+    deploy_dir: "/tidb-deploy/pump-8250"
+    data_dir: "/tidb-data/pump-8250"
+    # The following configs are used to overwrite the `server_configs.pump` values.
+    config:
+      gc: 7
+  - host: 10.0.1.3
+    ssh_port: 22
+    port: 8250
+    deploy_dir: "/tidb-deploy/pump-8250"
+    data_dir: "/tidb-data/pump-8250"
+    # The following configs are used to overwrite the `server_configs.pump` values.
+    config:
+      gc: 7
+drainer_servers:
+  - host: 10.0.1.12
+    port: 8249
+    data_dir: "/tidb-data/drainer-8249"
+    # If drainer doesn't have a checkpoint, use the initial commitTS as the initial checkpoint.
+    # The latest timestamp is fetched from PD if commit_ts is set to -1 (the default value).
+    commit_ts: -1
+    deploy_dir: "/tidb-deploy/drainer-8249"
+    # The following configs are used to overwrite the `server_configs.drainer` values.
+    config:
+      syncer.db-type: "tidb"
+      syncer.to.host: "10.0.1.12"
+      syncer.to.user: "root"
+      syncer.to.password: ""
+      syncer.to.port: 4000
+
+monitoring_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-9090"
+    # data_dir: "/tidb-data/prometheus-9090"
+    # log_dir: "/tidb-deploy/prometheus-9090/log"
+
+grafana_servers:
+  - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
+alertmanager_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
+    # data_dir: "/tidb-data/alertmanager-9093"
+    # log_dir: "/tidb-deploy/alertmanager-9093/log"
\ No newline at end of file
diff --git a/complex-tiflash.yaml b/complex-tiflash.yaml
new file mode 100644
index 000000000000..99cf55e0f52b
--- /dev/null
+++ b/complex-tiflash.yaml
@@ -0,0 +1,131 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+# # Monitored variables are applied to all the machines.
+monitored:
+  node_exporter_port: 9100
+  blackbox_exporter_port: 9115
+  # deploy_dir: "/tidb-deploy/monitored-9100"
+  # data_dir: "/tidb-data/monitored-9100"
+  # log_dir: "/tidb-deploy/monitored-9100/log"
+
+# # Server configs are used to specify the runtime configuration of TiDB components.
+# # All configuration items can be found in TiDB docs:
+# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/
+# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/
+# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/
+# # All configuration items use dots (`.`) to represent the hierarchy, e.g.:
+# #   readpool.storage.use-unified-pool
+# #
+# # You can overwrite this configuration via the instance-level `config` field.
+
+server_configs:
+  tidb:
+    log.slow-threshold: 300
+  tikv:
+    # server.grpc-concurrency: 4
+    # raftstore.apply-pool-size: 2
+    # raftstore.store-pool-size: 2
+    # rocksdb.max-sub-compactions: 1
+    # storage.block-cache.capacity: "16GB"
+    # readpool.unified.max-thread-count: 12
+    readpool.storage.use-unified-pool: false
+    readpool.coprocessor.use-unified-pool: true
+  pd:
+    schedule.leader-schedule-limit: 4
+    schedule.region-schedule-limit: 2048
+    schedule.replica-schedule-limit: 64
+    replication.enable-placement-rules: true
+
+pd_servers:
+  - host: 10.0.1.4
+    # ssh_port: 22
+    # name: "pd-1"
+    # client_port: 2379
+    # peer_port: 2380
+    # deploy_dir: "/tidb-deploy/pd-2379"
+    # data_dir: "/tidb-data/pd-2379"
+    # log_dir: "/tidb-deploy/pd-2379/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.pd` values.
+    # config:
+    #   schedule.max-merge-region-size: 20
+    #   schedule.max-merge-region-keys: 200000
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+tidb_servers:
+  - host: 10.0.1.7
+    # ssh_port: 22
+    # port: 4000
+    # status_port: 10080
+    # deploy_dir: "/tidb-deploy/tidb-4000"
+    # log_dir: "/tidb-deploy/tidb-4000/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tidb` values.
+    # config:
+    #   log.slow-query-file: tidb-slow-overwritten.log
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+tikv_servers:
+  - host: 10.0.1.1
+    # ssh_port: 22
+    # port: 20160
+    # status_port: 20180
+    # deploy_dir: "/tidb-deploy/tikv-20160"
+    # data_dir: "/tidb-data/tikv-20160"
+    # log_dir: "/tidb-deploy/tikv-20160/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tikv` values.
+    # config:
+    #   server.grpc-concurrency: 4
+    #   server.labels: { zone: "zone1", dc: "dc1", host: "host1" }
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tiflash_servers:
+  - host: 10.0.1.11
+    data_dir: /tidb-data/tiflash-9000
+    deploy_dir: /tidb-deploy/tiflash-9000
+    # ssh_port: 22
+    # tcp_port: 9000
+    # http_port: 8123
+    # flash_service_port: 3930
+    # flash_proxy_port: 20170
+    # flash_proxy_status_port: 20292
+    # metrics_port: 8234
+    # deploy_dir: /tidb-deploy/tiflash-9000
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tiflash` values.
+    # config:
+    #   logger.level: "info"
+    # learner_config:
+    #   log-level: "info"
+  # - host: 10.0.1.12
+  # - host: 10.0.1.13
+
+monitoring_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-9090"
+    # data_dir: "/tidb-data/prometheus-9090"
+    # log_dir: "/tidb-deploy/prometheus-9090/log"
+
+grafana_servers:
+  - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
+alertmanager_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
+    # data_dir: "/tidb-data/alertmanager-9093"
+    # log_dir: "/tidb-deploy/alertmanager-9093/log"
\ No newline at end of file
diff --git a/geo-redundancy-deployment.yaml b/geo-redundancy-deployment.yaml
new file mode 100644
index 000000000000..9f18ac900a60
--- /dev/null
+++ b/geo-redundancy-deployment.yaml
@@ -0,0 +1,108 @@
+# Tip: PD leader priorities need to be set manually with the pd-ctl client tool, e.g.: member leader_priority <pd-name> <priority>.
+# Global variables are applied to all deployments and used as the default value of
+# the deployments if a specific deployment value is missing.
+
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+monitored:
+  node_exporter_port: 9100
+  blackbox_exporter_port: 9115
+  deploy_dir: "/tidb-deploy/monitored-9100"
+server_configs:
+  tidb:
+    log.level: debug
+    log.slow-query-file: tidb-slow.log
+  tikv:
+    server.grpc-compression-type: gzip
+    readpool.storage.use-unified-pool: true
+    readpool.storage.low-concurrency: 8
+  pd:
+    replication.location-labels: ["zone","dc","rack","host"]
+    replication.max-replicas: 5
+    label-property:
+      reject-leader:
+        - key: "dc"
+          value: "sha"
+pd_servers:
+  - host: 10.0.1.6
+  - host: 10.0.1.7
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+  - host: 10.0.1.10
+tidb_servers:
+  - host: 10.0.1.1
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+tikv_servers:
+  - host: 10.0.1.11
+    ssh_port: 22
+    port: 20160
+    status_port: 20180
+    deploy_dir: "/tidb-deploy/tikv-20160"
+    data_dir: "/tidb-data/tikv-20160"
+    config:
+      server.labels:
+        zone: bj
+        dc: bja
+        rack: rack1
+        host: host1
+  - host: 10.0.1.12
+    ssh_port: 22
+    port: 20161
+    status_port: 20181
+    deploy_dir: "/tidb-deploy/tikv-20161"
+    data_dir: "/tidb-data/tikv-20161"
+    config:
+      server.labels:
+        zone: bj
+        dc: bja
+        rack: rack1
+        host: host2
+  - host: 10.0.1.13
+    ssh_port: 22
+    port: 20160
+    status_port: 20180
+    deploy_dir: "/tidb-deploy/tikv-20160"
+    data_dir: "/tidb-data/tikv-20160"
+    config:
+      server.labels:
+        zone: bj
+        dc: bjb
+        rack: rack1
+        host: host1
+  - host: 10.0.1.14
+    ssh_port: 22
+    port: 20161
+    status_port: 20181
+    deploy_dir: "/tidb-deploy/tikv-20161"
+    data_dir: "/tidb-data/tikv-20161"
+    config:
+      server.labels:
+        zone: bj
+        dc: bjb
+        rack: rack1
+        host: host2
+  - host: 10.0.1.15
+    ssh_port: 22
+    port: 20160
+    deploy_dir: "/tidb-deploy/tikv-20160"
+    data_dir: "/tidb-data/tikv-20160"
+    config:
+      server.labels:
+        zone: sh
+        dc: sha
+        rack: rack1
+        host: host1
+      readpool.storage.use-unified-pool: true
+      readpool.storage.low-concurrency: 10
+      raftstore.raft-min-election-timeout-ticks: 1000
+      raftstore.raft-max-election-timeout-ticks: 1020
+monitoring_servers:
+  - host: 10.0.1.16
+grafana_servers:
+  - host: 10.0.1.16
\ No newline at end of file
diff --git a/simple-cdc.yaml b/simple-cdc.yaml
new file mode 100644
index 000000000000..4d820082de20
--- /dev/null
+++ b/simple-cdc.yaml
@@ -0,0 +1,36 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+pd_servers:
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.1
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tikv_servers:
+  - host: 10.0.1.7
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+cdc_servers:
+  - host: 10.0.1.7
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+monitoring_servers:
+  - host: 10.0.1.10
+
+grafana_servers:
+  - host: 10.0.1.10
+
+alertmanager_servers:
+  - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-mini.yaml b/simple-mini.yaml
new file mode 100644
index 000000000000..57e48fd2d741
--- /dev/null
+++ b/simple-mini.yaml
@@ -0,0 +1,31 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+pd_servers:
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.1
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tikv_servers:
+  - host: 10.0.1.7
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+monitoring_servers:
+  - host: 10.0.1.10
+
+grafana_servers:
+  - host: 10.0.1.10
+
+alertmanager_servers:
+  - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-multi-instant.yaml b/simple-multi-instant.yaml
new file mode 100644
index 000000000000..735b3be1af61
--- /dev/null
+++ b/simple-multi-instant.yaml
@@ -0,0 +1,95 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+server_configs:
+  tikv:
+    readpool.unified.max-thread-count:   # Suggested: cores * 0.8 / number of TiKV instances on the host.
+    readpool.storage.use-unified-pool: false
+    readpool.coprocessor.use-unified-pool: true
+    storage.block-cache.capacity: ""     # Suggested: total memory * 0.5 / number of TiKV instances, e.g. "8GB".
+    raftstore.capacity: ""               # Suggested: disk capacity / number of TiKV instances.
+  pd:
+    replication.location-labels: ["host"]
+
+pd_servers:
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.1
+    port: 4000
+    status_port: 10080
+    numa_node: "0"
+  - host: 10.0.1.1
+    port: 4001
+    status_port: 10081
+    numa_node: "1"
+  - host: 10.0.1.2
+    port: 4000
+    status_port: 10080
+    numa_node: "0"
+  - host: 10.0.1.2
+    port: 4001
+    status_port: 10081
+    numa_node: "1"
+  - host: 10.0.1.3
+    port: 4000
+    status_port: 10080
+    numa_node: "0"
+  - host: 10.0.1.3
+    port: 4001
+    status_port: 10081
+    numa_node: "1"
+
+tikv_servers:
+  - host: 10.0.1.7
+    port: 20160
+    status_port: 20180
+    numa_node: "0"
+    config:
+      server.labels: { host: "tikv1" }
+  - host: 10.0.1.7
+    port: 20161
+    status_port: 20181
+    numa_node: "1"
+    config:
+      server.labels: { host: "tikv1" }
+  - host: 10.0.1.8
+    port: 20160
+    status_port: 20180
+    numa_node: "0"
+    config:
+      server.labels: { host: "tikv2" }
+  - host: 10.0.1.8
+    port: 20161
+    status_port: 20181
+    numa_node: "1"
+    config:
+      server.labels: { host: "tikv2" }
+  - host: 10.0.1.9
+    port: 20160
+    status_port: 20180
+    numa_node: "0"
+    config:
+      server.labels: { host: "tikv3" }
+  - host: 10.0.1.9
+    port: 20161
+    status_port: 20181
+    numa_node: "1"
+    config:
+      server.labels: { host: "tikv3" }
+
+monitoring_servers:
+  - host: 10.0.1.10
+
+grafana_servers:
+  - host: 10.0.1.10
+
+alertmanager_servers:
+  - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-tidb-binlog.yaml b/simple-tidb-binlog.yaml
new file mode 100644
index 000000000000..558023e00356
--- /dev/null
+++ b/simple-tidb-binlog.yaml
@@ -0,0 +1,47 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+server_configs:
+  tidb:
+    binlog.enable: true
+    binlog.ignore-error: true
+
+pd_servers:
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+tidb_servers:
+  - host: 10.0.1.1
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+tikv_servers:
+  - host: 10.0.1.7
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+pump_servers:
+  - host: 10.0.1.1
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+drainer_servers:
+  - host: 10.0.1.12
+    config:
+      syncer.db-type: "tidb"
+      syncer.to.host: "10.0.1.12"
+      syncer.to.user: "root"
+      syncer.to.password: ""
+      syncer.to.port: 4000
+
+monitoring_servers:
+  - host: 10.0.1.10
+
+grafana_servers:
+  - host: 10.0.1.10
+
+alertmanager_servers:
+  - host: 10.0.1.10
\ No newline at end of file
diff --git a/simple-tiflash.yaml b/simple-tiflash.yaml
new file mode 100644
index 000000000000..1e2024440854
--- /dev/null
+++ b/simple-tiflash.yaml
@@ -0,0 +1,40 @@
+# # Global variables are applied to all deployments and used as the default value of
+# # the deployments if a specific deployment value is missing.
+global:
+  user: "tidb"
+  ssh_port: 22
+  deploy_dir: "/tidb-deploy"
+  data_dir: "/tidb-data"
+
+server_configs:
+  pd:
+    replication.enable-placement-rules: true
+
+pd_servers:
+  - host: 10.0.1.4
+  - host: 10.0.1.5
+  - host: 10.0.1.6
+
+tidb_servers:
+  - host: 10.0.1.7
+  - host: 10.0.1.8
+  - host: 10.0.1.9
+
+tikv_servers:
+  - host: 10.0.1.1
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tiflash_servers:
+  - host: 10.0.1.11
+    data_dir: /tidb-data/tiflash-9000
+    deploy_dir: /tidb-deploy/tiflash-9000
+
+monitoring_servers:
+  - host: 10.0.1.10
+
+grafana_servers:
+  - host: 10.0.1.10
+
+alertmanager_servers:
+  - host: 10.0.1.10
\ No newline at end of file
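
A minimal usage sketch, assuming the standard `tiup cluster` CLI; the cluster name
"tidb-test" and version "v4.0.0" are illustrative placeholders, and any of the
topology files above can be substituted for simple-mini.yaml:

    tiup cluster check ./simple-mini.yaml --user tidb
    tiup cluster deploy tidb-test v4.0.0 ./simple-mini.yaml --user tidb
    tiup cluster start tidb-test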