
README, docker: add quick start #181

Merged 7 commits on Mar 7, 2020
1 change: 1 addition & 0 deletions .dockerignore
2 changes: 2 additions & 0 deletions .gitignore
@@ -7,3 +7,5 @@ backupmeta
*.ngo
*.coverprofile
coverage.txt
docker/data/
docker/logs/
35 changes: 35 additions & 0 deletions README.md
@@ -36,6 +36,41 @@ Notice that BR requires `Go >= 1.13` to build

When BR is built successfully, you can find the binary in the `bin` directory.
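
For a quick sanity check of the result, something like the following should work (a sketch: the `build` target is an assumption; `make release`, used in the quick start below, also places `br` under `bin/`):

```sh
# Sanity-check the build; the `build` target is an assumption, and
# `make release` works as well.
make build && bin/br --help
```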

## Quick start

```sh
# Start TiDB cluster
docker-compose -f docker-compose.yaml rm -s -v && \
docker-compose -f docker-compose.yaml build && \
docker-compose -f docker-compose.yaml up --remove-orphans

# Attach to the control container to run BR
docker exec -it br_control_1 bash

# Load test data into TiDB
go-ycsb load mysql -p workload=core \
    -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \
    -p recordcount=100000 -p threadcount=100

# How many rows do we get? 100000 rows.
mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable"

# Build BR and backup!
make release && \
bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \
    --log-file "/logs/br_backup.log"

# Let's drop the database.
mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;"

# Restore!
bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \
    --log-file "/logs/br_restore.log"

# How many rows do we get again? Expected to be 100000 rows.
mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable"
```
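
When you are done, a minimal teardown sketch: `docker/data` and `docker/logs` are the bind-mounted (and git-ignored) state directories, so removing them reclaims the disk space.

```sh
# Tear down the cluster, then remove the bind-mounted data and log directories.
docker-compose -f docker-compose.yaml down --remove-orphans && \
rm -rf docker/data docker/logs
```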

## Contributing

Contributions are welcome and greatly appreciated. See [CONTRIBUTING](./CONTRIBUTING.md)
16 changes: 10 additions & 6 deletions cmd/cmd.go
@@ -86,16 +86,20 @@ func Init(cmd *cobra.Command) (err error) {
 		err = e
 		return
 	}
+	tidbLogCfg := logutil.LogConfig{}
 	if len(slowLogFilename) != 0 {
-		slowCfg := logutil.LogConfig{SlowQueryFile: slowLogFilename}
-		e = logutil.InitLogger(&slowCfg)
-		if e != nil {
-			err = e
-			return
-		}
+		tidbLogCfg.SlowQueryFile = slowLogFilename
 	} else {
 		// Hack! Discard slow log by setting log level to PanicLevel
 		logutil.SlowQueryLogger.SetLevel(logrus.PanicLevel)
+		// Disable annoying TiDB log output.
+		// TODO: some error logs are output randomly; we need to fix them in TiDB.
+		tidbLogCfg.Level = "fatal"
 	}
+	e = logutil.InitLogger(&tidbLogCfg)
+	if e != nil {
+		err = e
+		return
+	}

 	// Initialize the pprof server.
194 changes: 194 additions & 0 deletions docker-compose.yaml
@@ -0,0 +1,194 @@
---
# Source: tidb-docker-compose/templates/docker-compose.yml
version: '2.1'

services:
  control:
    image: control:latest
    build:
      context: .
      dockerfile: ./docker/Dockerfile
    volumes:
      - ./docker/data:/data
      - ./docker/logs:/logs
    command: -c "/usr/bin/tail -f /dev/null"
    depends_on:
      - "tidb"
    restart: on-failure

  pd0:
    image: pingcap/pd:latest
    ports:
      - "2379"
    volumes:
      - ./docker/config/pd.toml:/pd.toml:ro
      - ./docker/data:/data
      - ./docker/logs:/logs
    command:
      - --name=pd0
      - --client-urls=http://0.0.0.0:2379
      - --peer-urls=http://0.0.0.0:2380
      - --advertise-client-urls=http://pd0:2379
      - --advertise-peer-urls=http://pd0:2380
      - --initial-cluster=pd0=http://pd0:2380
      - --data-dir=/data/pd0
      - --config=/pd.toml
      - --log-file=/logs/pd0.log
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tikv0:
    image: pingcap/tikv:latest
    volumes:
      - ./docker/config/tikv.toml:/tikv.toml:ro
      - ./docker/data:/data
      - ./docker/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv0:20160
      - --data-dir=/data/tikv0
      - --pd=pd0:2379
      - --config=/tikv.toml
      - --log-file=/logs/tikv0.log
    depends_on:
      - "pd0"
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tikv1:
    image: pingcap/tikv:latest
    volumes:
      - ./docker/config/tikv.toml:/tikv.toml:ro
      - ./docker/data:/data
      - ./docker/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv1:20160
      - --data-dir=/data/tikv1
      - --pd=pd0:2379
      - --config=/tikv.toml
      - --log-file=/logs/tikv1.log
    depends_on:
      - "pd0"
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tikv2:
    image: pingcap/tikv:latest
    volumes:
      - ./docker/config/tikv.toml:/tikv.toml:ro
      - ./docker/data:/data
      - ./docker/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv2:20160
      - --data-dir=/data/tikv2
      - --pd=pd0:2379
      - --config=/tikv.toml
      - --log-file=/logs/tikv2.log
    depends_on:
      - "pd0"
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tikv3:
    image: pingcap/tikv:latest
    volumes:
      - ./docker/config/tikv.toml:/tikv.toml:ro
      - ./docker/data:/data
      - ./docker/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv3:20160
      - --data-dir=/data/tikv3
      - --pd=pd0:2379
      - --config=/tikv.toml
      - --log-file=/logs/tikv3.log
    depends_on:
      - "pd0"
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tikv4:
    image: pingcap/tikv:latest
    volumes:
      - ./docker/config/tikv.toml:/tikv.toml:ro
      - ./docker/data:/data
      - ./docker/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv4:20160
      - --data-dir=/data/tikv4
      - --pd=pd0:2379
      - --config=/tikv.toml
      - --log-file=/logs/tikv4.log
    depends_on:
      - "pd0"
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tidb:
    image: pingcap/tidb:latest
    ports:
      - "4000:4000"
      - "10080:10080"
    volumes:
      - ./docker/config/tidb.toml:/tidb.toml:ro
      - ./docker/logs:/logs
    command:
      - --store=tikv
      - --path=pd0:2379
      - --config=/tidb.toml
      - --log-file=/logs/tidb.log
      - --advertise-address=tidb
    depends_on:
      - "tikv0"
      - "tikv1"
      - "tikv2"
      - "tikv3"
      - "tikv4"
    # sysctls:
    #   net.core.somaxconn: 32768
    # ulimits:
    #   nofile:
    #     soft: 1000000
    #     hard: 1000000
    restart: on-failure

  tidb-vision:
    image: pingcap/tidb-vision:latest
    environment:
      PD_ENDPOINT: pd0:2379
    ports:
      - "8010:8010"
    restart: on-failure
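
Only `tidb` and `tidb-vision` publish fixed host ports (4000/10080 and 8010); `pd0` maps 2379 to an ephemeral host port. A sketch for poking at the cluster from the host, assuming the mappings above:

```sh
# Connect to TiDB through the published 4000 port.
mysql -h 127.0.0.1 -P 4000 -u root

# TiDB's status endpoint on the published 10080 port.
curl http://127.0.0.1:10080/status

# Find the ephemeral host port that docker-compose assigned to pd0's 2379.
docker-compose port pd0 2379

# tidb-vision is served at http://localhost:8010 in a browser.
```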
24 changes: 24 additions & 0 deletions docker/Dockerfile
@@ -0,0 +1,24 @@
FROM golang:1.13.8-buster as builder

# For loading data to TiDB
WORKDIR /go/src/github.com/pingcap/
RUN git clone https://github.com/pingcap/go-ycsb.git && \
    cd go-ycsb && \
    make

FROM golang:1.13.8-buster

RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        curl \
        vim \
        less \
        default-mysql-client \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /go/src/github.com/pingcap/br
COPY . .

COPY --from=builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb

ENTRYPOINT ["/bin/bash"]
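
The compose file builds this image with the repository root as build context. To try the image outside docker-compose, a minimal sketch (the `br-control` tag is an arbitrary choice):

```sh
# Build the control image from the repository root and open a shell in it;
# the bash ENTRYPOINT drops straight into an interactive shell.
docker build -f docker/Dockerfile -t br-control .
docker run -it --rm br-control
```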
18 changes: 18 additions & 0 deletions docker/config/pd.toml
@@ -0,0 +1,18 @@
# PD Configuration.
[schedule]
# Disable Region Merge
max-merge-region-size = 0
max-merge-region-key = 0
merge-schedule-limit = 0

max-snapshot-count = 10
max-pending-peer-count = 32
max-store-down-time = "30m"
leader-schedule-limit = 4
region-schedule-limit = 4
replica-schedule-limit = 8
tolerant-size-ratio = 5.0

[replication]
# The number of replicas for each region.
max-replicas = 3
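
To confirm PD picked up these overrides, one option is PD's HTTP API, queried from the control container (which has `curl` installed); the merge-related limits above should come back as 0. A sketch:

```sh
# Dump PD's effective configuration and check the schedule section.
curl http://pd0:2379/pd/api/v1/config
```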
9 changes: 9 additions & 0 deletions docker/config/tidb.toml
@@ -0,0 +1,9 @@
# Run ddl worker on this tidb-server.
run-ddl = true

# Schema lease duration. It is very dangerous to change, so do so only if you
# know what you are doing.
lease = "360s"

# When creating a table, split a separate region for it. It is recommended to
# turn off this option if a large number of tables will be created.
split-table = true
22 changes: 22 additions & 0 deletions docker/config/tikv.toml
@@ -0,0 +1,22 @@
[raftstore]
# true (the default value) for high reliability; this prevents data loss on
# power failure.
sync-log = true

[coprocessor]
# Make region split more aggressive.
region-max-keys = 100
region-split-keys = 80

[rocksdb]
# Number of open files that can be used by the DB. You may need to
# increase this if your database has a large working set. A value of -1 means
# that opened files are always kept open. You can estimate the number of
# files based on target_file_size_base and target_file_size_multiplier for
# level-based compaction.
# If max-open-files = -1, RocksDB will prefetch index and filter blocks into
# the block cache at startup, so if your database has a large working set, it
# will take several minutes to open the DB.
max-open-files = 1024

[raftdb]
max-open-files = 1024
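
Given the aggressive split thresholds above, the 100000 rows loaded in the quick start should leave `test.usertable` spread over many small regions. A quick check, assuming TiDB's `SHOW TABLE ... REGIONS` statement (available since TiDB 3.0):

```sh
# With region-split-keys = 80, expect the table to span many regions.
mysql -uroot -htidb -P4000 -E -e "SHOW TABLE test.usertable REGIONS;"
```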