From 3c9559f2dd38e05cf45130ad6f55cb3abfc46103 Mon Sep 17 00:00:00 2001 From: ian Date: Mon, 8 Apr 2019 19:58:03 +0800 Subject: [PATCH 01/29] chore: make bors-changelog.py python3 compatible --- devtools/git/bors-changelog.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/devtools/git/bors-changelog.py b/devtools/git/bors-changelog.py index 8068b57375..2379dac955 100755 --- a/devtools/git/bors-changelog.py +++ b/devtools/git/bors-changelog.py @@ -5,16 +5,23 @@ import subprocess from collections import namedtuple, OrderedDict + +def _str(s): + if sys.version_info >= (3, 0): + return s.decode('utf-8') + return s + + if len(sys.argv) > 1: since = sys.argv[1] else: - tag_rev = subprocess.check_output( - ['git', 'rev-list', '--tags', '--max-count=1']).strip() - since = subprocess.check_output( - ['git', 'describe', '--tags', tag_rev]).strip() + tag_rev = _str(subprocess.check_output( + ['git', 'rev-list', '--tags', '--max-count=1']).strip()) + since = _str(subprocess.check_output( + ['git', 'describe', '--tags', tag_rev]).strip()) -logs = subprocess.check_output( - ['git', 'log', '--merges', '{}...HEAD'.format(since)]) +logs = _str(subprocess.check_output( + ['git', 'log', '--merges', '{}...HEAD'.format(since)])) START_RE = re.compile(r'\s+(\d+): (?:(\w+)(\([^\)]+\))?: )?(.*r=.*)') END_RE = re.compile(r'\s+Co-authored-by:') From 4a541d47cfb276f72e8f34ce92e6cb217d982e58 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Mon, 8 Apr 2019 19:23:13 +0800 Subject: [PATCH 02/29] fix: clear tx verfy cache when chain reorg --- shared/src/chain_state.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/shared/src/chain_state.rs b/shared/src/chain_state.rs index e1c1065254..03f20a50c1 100644 --- a/shared/src/chain_state.rs +++ b/shared/src/chain_state.rs @@ -263,6 +263,10 @@ impl ChainState { attached.extend(blk.commit_transactions().iter().skip(1).cloned()) } + if !detached.is_empty() { + self.txs_verify_cache.borrow_mut().clear(); + } + let retain: Vec<&Transaction> = detached.difference(&attached).collect(); for tx in retain { From e1b28e1606522c7aebf62d9f971623a7189071c8 Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 2 Apr 2019 15:37:01 +0800 Subject: [PATCH 03/29] docs: split README --- README.md | 127 ++----------------------------------------- docs/build.md | 51 +++++++++++++++++ docs/ckb-core-dev.md | 31 +++++++++++ docs/quick-start.md | 64 ++++++++++++++++++++++ 4 files changed, 151 insertions(+), 122 deletions(-) create mode 100644 docs/build.md create mode 100644 docs/ckb-core-dev.md create mode 100644 docs/quick-start.md diff --git a/README.md b/README.md index 20ff4122f5..bc79558860 100644 --- a/README.md +++ b/README.md @@ -34,126 +34,9 @@ The contribution workflow is described in [CONTRIBUTING.md](CONTRIBUTING.md), an --- -## Build dependencies +## Documentations -CKB is currently tested mainly with `stable-1.33.0` on Linux and Mac OSX. - -We recommend installing Rust through [rustup](https://www.rustup.rs/) - -```bash -# Get rustup from rustup.rs, then in your `ckb` folder: -rustup override set 1.33.0 -rustup component add rustfmt -rustup component add clippy -``` - -Report new breakage is welcome. 
- -You also need to get the following packages: - -* Ubuntu and Debian: - -```shell -sudo apt-get install git gcc libc6-dev pkg-config libssl-dev libclang-dev clang -``` - -* Arch Linux - -```shell -sudo pacman -Sy git gcc pkgconf clang -``` - -* macOS: - -```shell -brew install autoconf libtool -``` - ---- - -## Build from source & testing - -```bash -# get ckb source code -git clone https://github.com/nervosnetwork/ckb.git -cd ckb - -# build in release mode -make build -``` - -You can run the full test suite, or just run a specific package test: - -```bash -# Run the full suite -cargo test --all -# Run a specific package test -cargo test -p ckb-chain -``` - ---- - -## Quick Start - -### Start Node - -Create the default runtime directory: - -```shell -cp -r nodes_template/ nodes -``` - -Use the config file to start the node - -```shell -target/release/ckb run -``` - -It searches config file `ckb.toml`, `nodes/default.toml` in the shell -working directory in that order. Alternatively, the argument `-c` can specify -the config file used to start the node. - -The default config file saves data in `nodes/default/`. - -### Use RPC - -Find RPC port in the log output, the following command assumes 8114 is used: - -```shell -curl -d '{"id": 1, "jsonrpc": "2.0", "method":"get_tip_header","params": []}' \ - -H 'content-type:application/json' 'http://localhost:8114' -``` - -### Run Miner - -Run miner, gets a block template to mine. - -```shell -target/release/ckb miner -``` - -### Advanced - -Run multiple nodes in different data directories. - -Create the config file for new nodes, for example: - -```shell -cp nodes/default.toml nodes/node2.toml -``` - -Update `data_dir` configuration in config file to a different directory. - -``` -data_dir = "node2" -``` - -Then start the new node using the new config file - -```shell -target/release/ckb run -c nodes/node2.toml -``` - -The option `ckb.chain` configures the chain spec. It accepts a path to the spec toml file. The directory `nodes_template/spec` has all the pre-defined specs. Please note that nodes with different chain specs may fail to connect with each other. - -The chain spec can switch between different PoW engines. Wiki has the [instructions](https://github.com/nervosnetwork/ckb/wiki/PoW-Engines) about how to configure it. +- [Build CKB](docs/build.md) +- [Quick Start](docs/quick-start.md) +- [Configure CKB](docs/configure.md) +- [CKB Core Development](docs/ckb-core-dev.md) diff --git a/docs/build.md b/docs/build.md new file mode 100644 index 0000000000..1883ca7db5 --- /dev/null +++ b/docs/build.md @@ -0,0 +1,51 @@ +# Build CKB + +## Build dependencies + +CKB is currently tested mainly with `stable-1.33.0` on Linux and macOS. + +We recommend installing Rust through [rustup](https://www.rustup.rs/) + +```bash +# Get rustup from rustup.rs, then in your `ckb` folder: +rustup override set 1.33.0 +rustup component add rustfmt +rustup component add clippy +``` + +Report new breakage is welcome. + +You also need to get the following packages: + +#### Ubuntu and Debian + +```shell +sudo apt-get install git gcc libc6-dev pkg-config libssl-dev libclang-dev clang +``` + +#### Arch Linux + +```shell +sudo pacman -Sy git gcc pkgconf clang +``` + +#### macOS + +```shell +brew install autoconf libtool +``` + +--- + +## Build from source + +```bash +# get ckb source code +git clone https://github.com/nervosnetwork/ckb.git +cd ckb + +# build in release mode +make build +``` + +This will build the executable `target/release/ckb`. 
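A quick way to confirm the build succeeded is to invoke the freshly built executable directly. This is a minimal sanity-check sketch, assuming the binary exposes the usual `--version` flag:

```bash
# Print the version of the binary produced by `make build`
target/release/ckb --version
```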
diff --git a/docs/ckb-core-dev.md b/docs/ckb-core-dev.md new file mode 100644 index 0000000000..3a645aca99 --- /dev/null +++ b/docs/ckb-core-dev.md @@ -0,0 +1,31 @@ +# CKB Core Development + +## Running Test + +Install dependencies + +``` +rustup component add rustfmt +rustup component add clippy +``` + +Run tests + +``` +make ci +``` + +Run acceptance integration tests + + +``` +cargo build +cd test && cargo run ../target/debug/ckb +``` + +## Chain Spec + +The subcommand `ckb init` has an option `--export-spec` to export spec files +as well, which allows editing the chain spec for development. + +The chain spec can switch between different PoW engines. Wiki has the [instructions](https://github.com/nervosnetwork/ckb/wiki/PoW-Engines) about how to configure it.- diff --git a/docs/quick-start.md b/docs/quick-start.md new file mode 100644 index 0000000000..3b06c3921e --- /dev/null +++ b/docs/quick-start.md @@ -0,0 +1,64 @@ +# Quick Start + +## Start Node + +Create the default runtime directory: + +```shell +cp -r nodes_template/ nodes +``` + +Use the config file to start the node + +```shell +target/release/ckb run +``` + +It searches config file `ckb.toml`, `nodes/default.toml` in the shell +working directory in that order. Alternatively, the argument `-c` can specify +the config file used to start the node. + +The default config file saves data in `nodes/default/`. + +## Use RPC + +Find RPC port in the log output, the following command assumes 8114 is used: + +```shell +curl -d '{"id": 1, "jsonrpc": "2.0", "method":"get_tip_header","params": []}' \ + -H 'content-type:application/json' 'http://localhost:8114' +``` + +## Run Miner + +Run miner, gets a block template to mine. + +```shell +target/release/ckb miner +``` + +## Run Multiple Nodes + +Run multiple nodes in different data directories. + +Create the config file for new nodes, for example: + +```shell +cp nodes/default.toml nodes/node2.toml +``` + +Update `data_dir` configuration in config file to a different directory. + +``` +data_dir = "node2" +``` + +Then start the new node using the new config file + +```shell +target/release/ckb run -c nodes/node2.toml +``` + +The option `ckb.chain` configures the chain spec. It accepts a path to the spec toml file. The directory `nodes_template/spec` has all the pre-defined specs. Please note that nodes with different chain specs may fail to connect with each other. + +The chain spec can switch between different PoW engines. Wiki has the [instructions](https://github.com/nervosnetwork/ckb/wiki/PoW-Engines) about how to configure it. From 9849169281deda203c9f9255dfff386d04d6ac3d Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 2 Apr 2019 16:35:38 +0800 Subject: [PATCH 04/29] docs: how to configure CKB --- docs/build.md | 17 ++++++---- docs/configure.md | 81 +++++++++++++++++++++++++++++++++++++++++++++ docs/quick-start.md | 47 ++++---------------------- 3 files changed, 97 insertions(+), 48 deletions(-) create mode 100644 docs/configure.md diff --git a/docs/build.md b/docs/build.md index 1883ca7db5..2049440fdf 100644 --- a/docs/build.md +++ b/docs/build.md @@ -1,6 +1,6 @@ # Build CKB -## Build dependencies +## Install Build Dependencies CKB is currently tested mainly with `stable-1.33.0` on Linux and macOS. @@ -9,8 +9,6 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/) ```bash # Get rustup from rustup.rs, then in your `ckb` folder: rustup override set 1.33.0 -rustup component add rustfmt -rustup component add clippy ``` Report new breakage is welcome. 
@@ -35,9 +33,7 @@ sudo pacman -Sy git gcc pkgconf clang brew install autoconf libtool ``` ---- - -## Build from source +## Build from Source ```bash # get ckb source code @@ -48,4 +44,11 @@ cd ckb make build ``` -This will build the executable `target/release/ckb`. +This will build the executable `target/release/ckb`. Please add the directory +to `PATH` or copy/link the file into a directory already in the `PATH`. + +```base +export PATH="$(pwd)/target/release:$PATH" +# or +# ln -snf "$(pwd)/target/release/ckb" /usr/local/bin/ckb +``` diff --git a/docs/configure.md b/docs/configure.md new file mode 100644 index 0000000000..a4c02da79e --- /dev/null +++ b/docs/configure.md @@ -0,0 +1,81 @@ +# Configure CKB + +## How CKB Locates Config File + +CKB looks for configuration files in ``, which is the current working directory by default. Different subcommands use different config file names: + +- `ckb run`: `ckb.toml` +- `ckb miner`: `ckb-miner.toml` +- `ckb import`: `ckb.toml` +- `ckb export`: `ckb.toml` +- `ckb cli`: no config file required yet + +Command line argument `-C ` sets the value of `` to ``, which must come before subcommand. + +If configuration file is missing, the default config files bundled in the executable will be used. + +Some config file may refer to other files, for example, `chain.spec` in +`ckb.toml` and `system_cells` in chain spec file. The file is referred via +either absolute path, or a path relative to the directory containing the +config file currently being parsed. Take following directory hierarchy as an +example: + +``` +ckb.toml +specs/dev.toml +specs/cells/always_success +``` + +Then `ckb.toml` refers `dev.toml` as `specs/dev.toml`, while +`specs/dev.toml` refers `always_success` as `cells/always_success`. + +For security reason, there is a limitation of the file reference. The bundled +file can only refer to bundled files, while a file located in the file system +can either refer to another file in the file system or a bundled one. + +## How to Change Config + +First export the bundled config files into current directory using subcommand `init`. + +``` +ckb init +``` + +Then edit the generated config files according to the in-line comments. + +## Chain Spec + +The option `ckb.chain` configures the chain spec, which controls which kind of chain to run. +This option is set to a spec used for development by default. + +The subcommand `init` supports exporting the default options for different +chain specs. The following command lists all supported chain specs. + +``` +ckb init --list-specs +``` + +Here is an example to export config files for testnet. + +``` +ckb init --spec testnet +``` + +Nodes running different chain specs cannot synchronize with each other, so be carefully when editing this option. + +## How to Run Multiple Nodes + +Each node requires its own ``. Since the default ports will conflict, please export the config files and edit the listen ports in the config files. + +``` +mkdir node1 node2 +ckb -C node1 init +ckb -C node2 init +# Change listen ports 8114/8115 to 8116/8117 in node2/ckb.toml. +# Change `rpc_url` in node2/ckb.toml to use 8116. +# You may also want to add each other as a boot node in the configuration file. 
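# (Boot nodes are configured via the `bootnodes` list under the [network] section of each ckb.toml.)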
+# start node1 +ckb -C node1 run +# start node2 +ckb -C node2 run +``` diff --git a/docs/quick-start.md b/docs/quick-start.md index 3b06c3921e..206f9a3ee6 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -1,24 +1,15 @@ # Quick Start -## Start Node - -Create the default runtime directory: - -```shell -cp -r nodes_template/ nodes -``` +Following steps will assume that the shell can find the executable `ckb`, see +how to [build CKB from source](build.md). -Use the config file to start the node +## Start Node ```shell -target/release/ckb run +ckb run ``` -It searches config file `ckb.toml`, `nodes/default.toml` in the shell -working directory in that order. Alternatively, the argument `-c` can specify -the config file used to start the node. - -The default config file saves data in `nodes/default/`. +It will start a node using the default configurations and store files in `data/dev` in current directory. ## Use RPC @@ -34,31 +25,5 @@ curl -d '{"id": 1, "jsonrpc": "2.0", "method":"get_tip_header","params": []}' \ Run miner, gets a block template to mine. ```shell -target/release/ckb miner +ckb miner ``` - -## Run Multiple Nodes - -Run multiple nodes in different data directories. - -Create the config file for new nodes, for example: - -```shell -cp nodes/default.toml nodes/node2.toml -``` - -Update `data_dir` configuration in config file to a different directory. - -``` -data_dir = "node2" -``` - -Then start the new node using the new config file - -```shell -target/release/ckb run -c nodes/node2.toml -``` - -The option `ckb.chain` configures the chain spec. It accepts a path to the spec toml file. The directory `nodes_template/spec` has all the pre-defined specs. Please note that nodes with different chain specs may fail to connect with each other. - -The chain spec can switch between different PoW engines. Wiki has the [instructions](https://github.com/nervosnetwork/ckb/wiki/PoW-Engines) about how to configure it. 
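Taken together, the commands documented above suggest the following end-to-end sketch for bringing up a node on the testnet spec in its own directory (the directory name `testnet-node` is only illustrative):

```shell
mkdir testnet-node
# Export the bundled config files, selecting the testnet chain spec
ckb -C testnet-node init --spec testnet
# Start the node from that directory's configuration
ckb -C testnet-node run
```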
From 96b367d7cfe88780086744aa01960460e268279e Mon Sep 17 00:00:00 2001 From: ian Date: Fri, 5 Apr 2019 12:46:33 +0800 Subject: [PATCH 05/29] feat: bundle app config --- .gitignore | 5 +- Cargo.lock | 115 +------ Cargo.toml | 7 +- core/src/script.rs | 2 +- db/src/config.rs | 1 + docker/hub/Dockerfile | 3 - network/src/config.rs | 1 + nodes_template/default.toml | 56 --- nodes_template/miner.toml | 19 - resource/Cargo.toml | 19 + resource/build.rs | 26 ++ resource/ckb-miner.toml | 41 +++ resource/ckb.toml | 80 +++++ .../specs}/cells/always_success | Bin .../specs}/cells/secp256k1_blake2b_lock | Bin .../spec => resource/specs}/dev.toml | 0 .../specs/integration.toml | 0 .../testnet => resource/specs}/testnet.toml | 0 resource/src/lib.rs | 324 ++++++++++++++++++ resource/src/template.rs | 108 ++++++ shared/src/shared.rs | 22 +- spec/Cargo.toml | 8 +- spec/build.rs | 8 - spec/src/lib.rs | 218 +++++------- src/cli/args.rs | 93 ----- src/cli/export.rs | 24 -- src/cli/import.rs | 31 -- src/cli/miner.rs | 102 ------ src/cli/mod.rs | 13 - src/helper.rs | 36 -- src/main.rs | 61 ++-- src/setup.rs | 280 --------------- src/setup/app_config.rs | 190 ++++++++++ src/setup/args.rs | 41 +++ src/setup/cli.rs | 139 ++++++++ src/setup/exit_code.rs | 45 +++ src/setup/mod.rs | 159 +++++++++ src/{cli => setup}/sentry_config.rs | 4 +- src/subcommand/cli.rs | 9 + src/subcommand/export.rs | 18 + src/subcommand/import.rs | 25 ++ src/subcommand/init.rs | 26 ++ src/subcommand/miner.rs | 24 ++ src/subcommand/mod.rs | 12 + src/{cli/run_impl.rs => subcommand/run.rs} | 35 +- src/system.rs | 16 + test/fixtures/nodes_template/default.toml | 54 --- .../nodes_template/spec/cells/always_success | Bin 344 -> 0 bytes test/src/main.rs | 1 + test/src/node.rs | 46 +-- util/dir/Cargo.toml | 6 - util/dir/src/lib.rs | 20 -- util/logger/src/lib.rs | 7 +- 53 files changed, 1490 insertions(+), 1090 deletions(-) delete mode 100644 nodes_template/default.toml delete mode 100644 nodes_template/miner.toml create mode 100644 resource/Cargo.toml create mode 100644 resource/build.rs create mode 100644 resource/ckb-miner.toml create mode 100644 resource/ckb.toml rename {nodes_template/spec => resource/specs}/cells/always_success (100%) rename {spec/chainspecs/testnet => resource/specs}/cells/secp256k1_blake2b_lock (100%) rename {nodes_template/spec => resource/specs}/dev.toml (100%) rename test/fixtures/nodes_template/spec/integration_test.toml => resource/specs/integration.toml (100%) rename {spec/chainspecs/testnet => resource/specs}/testnet.toml (100%) create mode 100644 resource/src/lib.rs create mode 100644 resource/src/template.rs delete mode 100644 spec/build.rs delete mode 100644 src/cli/args.rs delete mode 100644 src/cli/export.rs delete mode 100644 src/cli/import.rs delete mode 100644 src/cli/miner.rs delete mode 100644 src/cli/mod.rs delete mode 100644 src/helper.rs delete mode 100644 src/setup.rs create mode 100644 src/setup/app_config.rs create mode 100644 src/setup/args.rs create mode 100644 src/setup/cli.rs create mode 100644 src/setup/exit_code.rs create mode 100644 src/setup/mod.rs rename src/{cli => setup}/sentry_config.rs (91%) create mode 100644 src/subcommand/cli.rs create mode 100644 src/subcommand/export.rs create mode 100644 src/subcommand/import.rs create mode 100644 src/subcommand/init.rs create mode 100644 src/subcommand/miner.rs create mode 100644 src/subcommand/mod.rs rename src/{cli/run_impl.rs => subcommand/run.rs} (76%) create mode 100644 src/system.rs delete mode 100644 test/fixtures/nodes_template/default.toml 
delete mode 100755 test/fixtures/nodes_template/spec/cells/always_success delete mode 100644 util/dir/Cargo.toml delete mode 100644 util/dir/src/lib.rs diff --git a/.gitignore b/.gitignore index d7dc17eed2..bc75674a4d 100644 --- a/.gitignore +++ b/.gitignore @@ -39,4 +39,7 @@ tags .vagrant # runtime folder -/nodes/ +/ckb.toml +/ckb-miner.toml +/data +/specs diff --git a/Cargo.lock b/Cargo.lock index c245f47096..aa2b3ee70a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -316,17 +316,16 @@ dependencies = [ "ckb-network 0.9.0-pre", "ckb-notify 0.9.0-pre", "ckb-pow 0.9.0-pre", + "ckb-resource 0.9.0-pre", "ckb-rpc 0.9.0-pre", "ckb-shared 0.9.0-pre", "ckb-sync 0.9.0-pre", "ckb-traits 0.9.0-pre", "ckb-util 0.9.0-pre", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "config 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "crypto 0.9.0-pre", "ctrlc 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "dir 0.9.0-pre", "futures 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.9.0-pre", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -392,11 +391,9 @@ version = "0.9.0-pre" dependencies = [ "ckb-core 0.9.0-pre", "ckb-pow 0.9.0-pre", - "includedir 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "includedir_codegen 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ckb-resource 0.9.0-pre", "numext-fixed-hash 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "numext-fixed-uint 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "phf 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -570,6 +567,17 @@ dependencies = [ "siphasher 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ckb-resource" +version = "0.9.0-pre" +dependencies = [ + "includedir 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "includedir_codegen 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "phf 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ckb-rpc" version = "0.9.0-pre" @@ -776,21 +784,6 @@ dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "config" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rust-ini 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde-hjson 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "console" version = "0.7.5" @@ -1065,10 +1058,6 @@ dependencies = [ 
"generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "dir" -version = "0.9.0-pre" - [[package]] name = "dtoa" version = "0.4.3" @@ -1556,11 +1545,6 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "lazy_static" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "lazy_static" version = "1.3.0" @@ -1606,15 +1590,6 @@ dependencies = [ "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "linked-hash-map" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_test 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "linked-hash-map" version = "0.5.1" @@ -1850,14 +1825,6 @@ dependencies = [ "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "num-traits" version = "0.2.6" @@ -2467,11 +2434,6 @@ dependencies = [ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rust-ini" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "rustc-demangle" version = "0.1.13" @@ -2630,11 +2592,6 @@ dependencies = [ "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "serde" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "serde" version = "1.0.89" @@ -2643,18 +2600,6 @@ dependencies = [ "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "serde-hjson" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "serde_bytes" version = "0.10.5" @@ -2683,14 +2628,6 @@ dependencies = [ "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "serde_test" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "serde_urlencoded" version = "0.5.4" @@ -3204,14 +3141,6 @@ dependencies = [ "tokio 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "toml" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "toml" version = "0.5.0" @@ -3515,14 +3444,6 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "yaml-rust" -version = "0.4.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [metadata] "checksum MacTypes-sys 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eaf9f0d0b1cc33a4d2aee14fb4b2eac03462ef4db29c8ac4057327d8a71ad86f" "checksum adler32 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7e522997b529f05601e05166c07ed17789691f562762c7f3b987263d2dedee5c" @@ -3566,7 +3487,6 @@ dependencies = [ "checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" "checksum clicolors-control 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "73abfd4c73d003a674ce5d2933fca6ce6c42480ea84a5ffe0a2dc39ed56300f9" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum config 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3e82d07fac0a5eeaa9d959b5194d01bb66e414665f547416958d2b430f8f4852" "checksum console 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2bf3720d3f3fc30b721ef1ae54e13af3264af4af39dc476a8de56a6ee1e2184b" "checksum core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "286e0b41c3a20da26536c6000a280585d519fd07b3956b43aed8a79e9edce980" "checksum core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "716c271e8613ace48344f723b60b900a93150271e5be206212d052bbc0883efa" @@ -3647,14 +3567,12 @@ dependencies = [ "checksum jsonrpc-http-server 10.0.1 (git+https://github.com/nervosnetwork/jsonrpc?branch=http_remake)" = "" "checksum jsonrpc-server-utils 10.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c9527f01ef25f251d64082cbefc0c6d6f367349afe6848ef908a674e06b2bdd3" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" "checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" "checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" "checksum libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)" = "aab692d7759f5cd8c859e169db98ae5b52c924add2af5fbbca11d12fefb567c1" "checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2" "checksum librocksdb-sys 5.17.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7dfb546562f9b450237bb8df7a31961849ee9fb1186d9e356db1d7a6b7609ff2" "checksum libsqlite3-sys 0.11.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3567bc1a0c84e2c0d71eeb4a1f08451babf7843babd733158777d9c686dad9f3" -"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" "checksum linked-hash-map 0.5.1 (git+https://github.com/nervosnetwork/linked-hash-map?rev=df27f21)" = "" "checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" "checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" @@ -3680,7 +3598,6 @@ dependencies = [ "checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" "checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" "checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea" -"checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" "checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1" "checksum num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1a23f0ed30a54abaa0c7e83b1d2d87ada7c3c23078d1d87815af3e3b6385fbba" "checksum number_prefix 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "dbf9993e59c894e3c08aa1c2712914e9e6bf1fcbfc6bef283e2183df345a4fee" @@ -3743,7 +3660,6 @@ dependencies = [ "checksum ring 0.14.6 (registry+https://github.com/rust-lang/crates.io-index)" = "426bc186e3e95cac1e4a4be125a4aca7e84c2d616ffc02244eef36e2a60a093c" "checksum rocksdb 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3eca7dfb97566985090e6bc4a529af42d0adda683d346a024104ee1b1932e340" "checksum rusqlite 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6381ddfe91dbb659b4b132168da15985bc84162378cf4fcdc4eb99c857d063e2" -"checksum rust-ini 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" "checksum rustc-demangle 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "adacaae16d02b6ec37fdc7acfcddf365978de76d1983d3ee22afc260e1ca9619" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum rusty-fork 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9591f190d2852720b679c21f66ad929f9f1d7bb09d1193c26167586029d8489c" @@ -3761,13 +3677,10 @@ dependencies = [ "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" "checksum sentry 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "00dc2223b4c6de017ea99ce854c35019f92d2775e4b734b39342ff6cb35aced5" "checksum sentry-types 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b23e3d9c8c6e4a1523f24df6753c4088bfe16c44a73c8881c1d23c70f28ae280" -"checksum serde 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)" = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" "checksum serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)" = "92514fb95f900c9b5126e32d020f5c6d40564c27a5ea6d1d7d9f157a96623560" -"checksum serde-hjson 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0b833c5ad67d52ced5f5938b2980f32a9c1c5ef047f0b4fb3127e7a423c76153" "checksum serde_bytes 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)" = "defbb8a83d7f34cc8380751eeb892b825944222888aff18996ea7901f24aec88" 
"checksum serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "58fc82bec244f168b23d1963b45c8bf5726e9a15a9d146a067f9081aeed2de79" "checksum serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)" = "5a23aa71d4a4d43fdbfaac00eff68ba8a06a51759a89ac3304323e800c4dd40d" -"checksum serde_test 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)" = "110b3dbdf8607ec493c22d5d947753282f3bae73c0f56d322af1e8c78e4c23d5" "checksum serde_urlencoded 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d48f9f99cd749a2de71d29da5f948de7f2764cc5a9d7f3c97e3514d4ee6eabf2" "checksum sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" "checksum sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b4d8bfd0e469f417657573d8451fb33d16cfe0989359b93baf3a1ffc639543d" @@ -3817,7 +3730,6 @@ dependencies = [ "checksum tokio-udp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "66268575b80f4a4a710ef83d087fdfeeabdce9b74c797535fbac18a2cb906e92" "checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" "checksum tokio-yamux 0.1.4 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" -"checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" "checksum toml 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "87c5890a989fa47ecdc7bcb4c63a77a82c18f306714104b1decfd722db17b39e" "checksum trust-dns 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "65096825b064877da37eeeb9a83390bd23433eabfc503a6476dc5b1949034aa7" "checksum trust-dns-proto 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09144f0992b0870fa8d2972cc069cbf1e3c0fda64d1f3d45c4d68d0e0b52ad4e" @@ -3857,4 +3769,3 @@ dependencies = [ "checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" "checksum winutil 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7daf138b6b14196e3830a588acf1e86966c694d3e8fb026fb105b8b5dca07e6e" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -"checksum yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d" diff --git a/Cargo.toml b/Cargo.toml index b6cd1ee1fb..f0f1539c6e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,6 @@ toml = "0.5" log = "0.4" futures = "0.1" crossbeam-channel = "0.3" -config-tool = { package= "config", version = "0.9" } ckb-util = { path = "util" } ckb-core = { path = "core" } ckb-chain = { path = "chain" } @@ -26,10 +25,10 @@ ckb-db = { path = "db" } ckb-pow = { path = "pow" } ckb-network = { path = "network"} ckb-rpc = { path = "rpc"} +ckb-resource = { path = "resource"} logger = { path = "util/logger" } numext-fixed-hash = { version = "0.1", features = ["support_rand", "support_heapsize", "support_serde"] } numext-fixed-uint = { version = "0.1", features = ["support_rand", "support_heapsize", "support_serde"] } -dir = { path = "util/dir" } ctrlc = { version = "3.1", features = ["termination"] } ckb-sync = { path = "sync"} crypto = { path = "util/crypto"} @@ -52,7 +51,6 @@ 
members = [ "util/merkle-tree", "util/jsonrpc-types", "util/crypto", - "util/dir", "util/instrument", "util/build-info", "util/occupied-capacity", @@ -72,7 +70,8 @@ members = [ "verification", "script", "pow", - "benches" + "benches", + "resource", ] [profile.release] diff --git a/core/src/script.rs b/core/src/script.rs index f9449151d1..5524e4676e 100644 --- a/core/src/script.rs +++ b/core/src/script.rs @@ -110,7 +110,7 @@ mod tests { #[test] fn always_success_script_hash() { - let always_success = include_bytes!("../../nodes_template/spec/cells/always_success"); + let always_success = include_bytes!("../../resource/specs/cells/always_success"); let always_success_hash: H256 = (&blake2b_256(&always_success[..])).into(); let script = Script::new(0, vec![], always_success_hash); diff --git a/db/src/config.rs b/db/src/config.rs index fff1abc9d3..6c9bea5df8 100644 --- a/db/src/config.rs +++ b/db/src/config.rs @@ -4,6 +4,7 @@ use std::path::PathBuf; #[derive(Clone, Debug, Deserialize, Default)] pub struct DBConfig { + #[serde(default)] pub path: PathBuf, pub options: Option>, } diff --git a/docker/hub/Dockerfile b/docker/hub/Dockerfile index fe41e6d67b..e071deecd3 100644 --- a/docker/hub/Dockerfile +++ b/docker/hub/Dockerfile @@ -72,9 +72,6 @@ COPY --from=ckb-builder /ckb/target/release/ckb /bin/ckb RUN echo "#!/bin/bash \n ckb \$@" > ./entrypoint.sh RUN chmod +x ./entrypoint.sh -# copy "nodes_template" to `WORKDIR`/nodes/ -COPY --chown=ckb:ckb nodes_template/ nodes/ - #switch use USER ckb diff --git a/network/src/config.rs b/network/src/config.rs index 981aa5056c..1a544ee063 100644 --- a/network/src/config.rs +++ b/network/src/config.rs @@ -20,6 +20,7 @@ pub struct NetworkConfig { pub reserved_only: bool, pub max_peers: u32, pub max_outbound_peers: u32, + #[serde(default)] pub path: PathBuf, pub ping_interval_secs: u64, pub ping_timeout_secs: u64, diff --git a/nodes_template/default.toml b/nodes_template/default.toml deleted file mode 100644 index e1e60f22a8..0000000000 --- a/nodes_template/default.toml +++ /dev/null @@ -1,56 +0,0 @@ -data_dir = "default" - -[db] -path = "default/db" - -[chain] -spec = { Local = "spec/dev.toml" } - -[logger] -file = "ckb.log" -filter = "info" -color = true - -[network] -path = "default/network" -listen_addresses = ["/ip4/0.0.0.0/tcp/8115"] -public_addresses = [] -bootnodes = [] -reserved_peers = [] -reserved_only = false -max_peers = 125 -max_outbound_peers = 30 -ping_interval_secs = 15 -ping_timeout_secs = 20 -connect_outbound_interval_secs = 15 - -[rpc] -listen_address = "0.0.0.0:8114" - -# Default is 10MiB = 10 * 1024 * 1024 -max_request_body_size = 10485760 - -# List of API modules: ["Net", "Pool", "Miner", "Chain", "Trace"] -modules = ["Net", "Pool", "Miner", "Chain"] - -[sync] -verification_level = "Full" -orphan_block_limit = 1024 - -[tx_pool] -max_pool_size = 10000 -max_orphan_size = 10000 -max_proposal_size = 10000 -max_cache_size = 1000 -max_pending_size = 10000 -trace = 100 -txs_verify_cache_size = 100000 - -[block_assembler] -# value is set as always success binary hash -binary_hash = "0x0000000000000000000000000000000000000000000000000000000000000001" -args = [] - -[sentry] -# set to blank to disable sentry error collection -dsn = "https://48c6a88d92e246478e2d53b5917a887c@sentry.io/1422795" diff --git a/nodes_template/miner.toml b/nodes_template/miner.toml deleted file mode 100644 index 9f7da13c15..0000000000 --- a/nodes_template/miner.toml +++ /dev/null @@ -1,19 +0,0 @@ -data_dir = "default" -chain = { Local = "spec/dev.toml" } -rpc_url = 
"http://127.0.0.1:8114/" -cycles_limit = 100000000 -bytes_limit = 10000000 -max_version = 0 -block_on_submit = true - -# block template polling interval in milliseconds -poll_interval = 1000 - -[logger] -file = "miner.log" -filter = "info" -color = true - -[sentry] -# set to blank to disable sentry error collection -dsn = "https://48c6a88d92e246478e2d53b5917a887c@sentry.io/1422795" diff --git a/resource/Cargo.toml b/resource/Cargo.toml new file mode 100644 index 0000000000..6e25e8de36 --- /dev/null +++ b/resource/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "ckb-resource" +version = "0.9.0-pre" +license = "MIT" +authors = ["Nervos Core Dev "] +edition = "2018" +build = "build.rs" +include = ["/specs", "/ckb.toml", "/ckb-miner.toml"] + +[dependencies] +phf = "0.7.21" +includedir = "0.5.0" + +[dev-dependencies] +tempfile = "3.0" + +[build-dependencies] +includedir_codegen = "0.5.0" +walkdir = "2.1.4" diff --git a/resource/build.rs b/resource/build.rs new file mode 100644 index 0000000000..5072d3aeb8 --- /dev/null +++ b/resource/build.rs @@ -0,0 +1,26 @@ +use includedir_codegen::Compression; +use walkdir::WalkDir; + +fn main() { + let mut bundled = includedir_codegen::start("BUNDLED"); + + for f in &["ckb.toml", "ckb-miner.toml"] { + bundled + .add_file(f, Compression::Gzip) + .expect("add files to resource bundle"); + } + for entry in WalkDir::new("specs").follow_links(true).into_iter() { + match entry { + Ok(ref e) + if !e.file_type().is_dir() && !e.file_name().to_string_lossy().starts_with(".") => + { + bundled + .add_file(e.path(), Compression::Gzip) + .expect("add files to resource bundle"); + } + _ => (), + } + } + + bundled.build("bundled.rs").expect("build resource bundle"); +} diff --git a/resource/ckb-miner.toml b/resource/ckb-miner.toml new file mode 100644 index 0000000000..0862e266f1 --- /dev/null +++ b/resource/ckb-miner.toml @@ -0,0 +1,41 @@ +# Config generated by `ckb init` # {{ +# see => resource/src/template.rs +# testnet => # Config generated by `ckb init --spec testnet` +# }} + +data_dir = "data" + +[chain] +# Choose the kind of chains to run, possible values: +# - specs/dev.toml +# - specs/testnet.toml +spec = "specs/dev.toml" # {{ +# testnet => spec = "specs/testnet.toml" +# integration => spec = "specs/integration.toml" +# }} + +[logger] +filter = "info" # {{ +# integration => filter = "info,network=trace,rpc=debug,sync=debug,relay=debug" +# }} +color = true +# Print logs to stdout as well +copy_to_stdout = true + +[sentry] +# set to blank to disable sentry error collection +dsn = "https://48c6a88d92e246478e2d53b5917a887c@sentry.io/1422795" # {{ +# integration => dsn = "" +# }} + +[miner] +rpc_url = "http://127.0.0.1:8114/" # {{ +# _ => rpc_url = "http://127.0.0.1:{rpc_port}/" +# }} +cycles_limit = 100000000 +bytes_limit = 10000000 +max_version = 0 +block_on_submit = true + +# block template polling interval in milliseconds +poll_interval = 1000 diff --git a/resource/ckb.toml b/resource/ckb.toml new file mode 100644 index 0000000000..9341151275 --- /dev/null +++ b/resource/ckb.toml @@ -0,0 +1,80 @@ +# Config generated by `ckb init` # {{ +# see => resource/src/template.rs +# testnet => # Config generated by `ckb init --spec testnet` +# }} + +data_dir = "data" + +[chain] +# Choose the kind of chains to run, possible values: +# - specs/dev.toml +# - specs/testnet.toml +spec = "specs/dev.toml" # {{ +# testnet => spec = "specs/testnet.toml" +# integration => spec = "specs/integration.toml" +# }} + +[logger] +filter = "info" # {{ +# integration => filter = 
"info,network=trace,rpc=debug,sync=debug,relay=debug" +# }} +color = true +# Print logs to stdout as well +copy_to_stdout = true + +[sentry] +# set to blank to disable sentry error collection +dsn = "https://48c6a88d92e246478e2d53b5917a887c@sentry.io/1422795" # {{ +# integration => dsn = "" +# }} + +[network] +listen_addresses = ["/ip4/0.0.0.0/tcp/8115"] # {{ +# _ => listen_addresses = ["/ip4/0.0.0.0/tcp/{p2p_port}"] +# }} +public_addresses = [] + +# Node connects to nodes listed here to discovery other peers when there's no local stored peers. +# When chain.spec is changed, this usually should also be changed to the bootnodes in the new chain. +bootnodes = [] + +reserved_peers = [] +reserved_only = false +max_peers = 125 +max_outbound_peers = 30 +ping_interval_secs = 15 +ping_timeout_secs = 20 +connect_outbound_interval_secs = 15 # {{ +# integration => connect_outbound_interval_secs = 1 +# }} + +[rpc] +listen_address = "0.0.0.0:8114" # {{ +# _ => listen_address = "0.0.0.0:{rpc_port}" +# }} + +# Default is 10MiB = 10 * 1024 * 1024 +max_request_body_size = 10485760 + +# List of API modules: ["Net", "Pool", "Miner", "Chain", "Trace"] +modules = ["Net", "Pool", "Miner", "Chain"] # {{ +# integration => modules = ["Net", "Pool", "Miner", "Chain", "Trace", "IntegrationTest"] +# }} + +[sync] +verification_level = "Full" +orphan_block_limit = 1024 + +[tx_pool] +max_pool_size = 10000 +max_orphan_size = 10000 +max_proposal_size = 10000 +max_cache_size = 1000 +max_pending_size = 10000 +trace = 100 +txs_verify_cache_size = 100000 + +[block_assembler] +# value is set as always success binary hash +binary_hash = "0x0000000000000000000000000000000000000000000000000000000000000001" +args = [] diff --git a/nodes_template/spec/cells/always_success b/resource/specs/cells/always_success similarity index 100% rename from nodes_template/spec/cells/always_success rename to resource/specs/cells/always_success diff --git a/spec/chainspecs/testnet/cells/secp256k1_blake2b_lock b/resource/specs/cells/secp256k1_blake2b_lock similarity index 100% rename from spec/chainspecs/testnet/cells/secp256k1_blake2b_lock rename to resource/specs/cells/secp256k1_blake2b_lock diff --git a/nodes_template/spec/dev.toml b/resource/specs/dev.toml similarity index 100% rename from nodes_template/spec/dev.toml rename to resource/specs/dev.toml diff --git a/test/fixtures/nodes_template/spec/integration_test.toml b/resource/specs/integration.toml similarity index 100% rename from test/fixtures/nodes_template/spec/integration_test.toml rename to resource/specs/integration.toml diff --git a/spec/chainspecs/testnet/testnet.toml b/resource/specs/testnet.toml similarity index 100% rename from spec/chainspecs/testnet/testnet.toml rename to resource/specs/testnet.toml diff --git a/resource/src/lib.rs b/resource/src/lib.rs new file mode 100644 index 0000000000..37628eb263 --- /dev/null +++ b/resource/src/lib.rs @@ -0,0 +1,324 @@ +// Shields clippy errors in generated bundled.rs +#![allow(clippy::unreadable_literal)] + +mod template; + +pub use self::template::{ + TemplateContext, AVAILABLE_SPECS, DEFAULT_P2P_PORT, DEFAULT_RPC_PORT, DEFAULT_SPEC, +}; +pub use std::io::{Error, Result}; + +use self::template::Template; +use std::borrow::Cow; +use std::fs; +use std::io::{self, BufReader, Read}; +use std::path::{Path, PathBuf}; + +include!(concat!(env!("OUT_DIR"), "/bundled.rs")); + +pub const CKB_CONFIG_FILE_NAME: &str = "ckb.toml"; +pub const MINER_CONFIG_FILE_NAME: &str = "ckb-miner.toml"; +const SPECS_RESOURCE_DIR_NAME: &str = "specs/"; + 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Resource { + Bundled(String), + FileSystem(PathBuf), +} + +impl Resource { + /// Gets resource content + pub fn get(&self) -> Result> { + match self { + Resource::Bundled(key) => BUNDLED.get(key), + Resource::FileSystem(path) => { + let mut file = BufReader::new(fs::File::open(path)?); + let mut data = Vec::new(); + file.read_to_end(&mut data)?; + Ok(Cow::Owned(data)) + } + } + } + + /// Gets resource input stream + pub fn read(&self) -> Result> { + match self { + Resource::Bundled(key) => BUNDLED.read(key), + Resource::FileSystem(path) => Ok(Box::new(BufReader::new(fs::File::open(path)?))), + } + } +} + +pub struct ResourceLocator { + root_dir: PathBuf, +} + +impl ResourceLocator { + pub fn root_dir(&self) -> &Path { + self.root_dir.as_path() + } + + /// Creates a ResourceLocator using `path` as root directory. + /// + /// It returns error if the directory does not exists and current user has no permission to create it. + pub fn with_root_dir(root_dir: PathBuf) -> Result { + fs::create_dir_all(&root_dir)?; + + root_dir + .canonicalize() + .map(|root_dir| ResourceLocator { root_dir }) + } + + pub fn current_dir() -> Result { + let root_dir = ::std::env::current_dir()?; + root_dir + .canonicalize() + .map(|root_dir| ResourceLocator { root_dir }) + } + + pub fn ckb(&self) -> Resource { + self.resolve(PathBuf::from(CKB_CONFIG_FILE_NAME)).unwrap() + } + + pub fn miner(&self) -> Resource { + self.resolve(PathBuf::from(MINER_CONFIG_FILE_NAME)).unwrap() + } + + /// Resolves a resource using a path. + /// + /// The path may be absolute or relative. This function tries the file system first. If the file + /// is absent in the file system and it is relative, the function will search in the bundled files. + /// + /// The relative path is relative to the resource root directory. + /// + /// All the bundled files are assumed in the resource root directory. + /// + /// It returns None when no resource with the path is found. + pub fn resolve(&self, path: PathBuf) -> Option { + if path.is_absolute() { + return file_system(path); + } + + file_system(self.root_dir.join(&path)).or_else(|| bundled(path)) + } + + /// Resolves a resource using a path as the path is refered in the resource `relative_to`. + /// + /// This function is similar to [`ResourceLocator::resolve`]. The difference is how to resolve a relative path. + /// + /// [`ResourceLocator::resolve`]: struct.ResourceLocator.html#method.open + /// + /// The relative path is relative to the directory containing the resource `relative_to`. + /// + /// For security reason, when `relative_to` is `Resource::Bundled`, the return value is either + /// `Some(Resource::Bundled)` or `None`. A bundled file is forbidden to reference a file in the + /// file system. + pub fn resolve_relative_to(&self, path: PathBuf, relative_to: &Resource) -> Option { + match relative_to { + Resource::Bundled(key) => { + // Bundled file can only refer to bundled files. 
+ let relative_start_dir = parent_dir(PathBuf::from(key)).join(&path); + bundled(relative_start_dir) + } + Resource::FileSystem(relative_to_path) => { + if path.is_absolute() { + return file_system(path); + } + + let start_dir = parent_dir(relative_to_path.clone()); + file_system(start_dir.join(&path)).or_else(|| { + start_dir + .strip_prefix(&self.root_dir) + .ok() + .and_then(|relative_start_dir| bundled(relative_start_dir.join(path))) + }) + } + } + } + + pub fn export_ckb<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { + let ckb = Resource::Bundled(CKB_CONFIG_FILE_NAME.to_string()); + let template = Template::new(from_utf8(ckb.get()?)?); + let mut out = fs::File::create(self.root_dir.join(CKB_CONFIG_FILE_NAME))?; + template.write_to(&mut out, context) + } + + pub fn export_miner<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { + let miner = Resource::Bundled(MINER_CONFIG_FILE_NAME.to_string()); + let template = Template::new(from_utf8(miner.get()?)?); + let mut out = fs::File::create(self.root_dir.join(MINER_CONFIG_FILE_NAME))?; + template.write_to(&mut out, context) + } + + pub fn export_specs(&self) -> Result<()> { + for name in BUNDLED.file_names() { + if name.starts_with(SPECS_RESOURCE_DIR_NAME) { + let path = self.root_dir.join(name); + fs::create_dir_all(path.parent().unwrap())?; + let mut out = fs::File::create(path)?; + io::copy(&mut BUNDLED.read(name)?, &mut out)?; + } + } + + Ok(()) + } +} + +#[cfg(windows)] +fn path_as_key(path: &PathBuf) -> Cow { + Cow::Owned(path.to_string_lossy().replace("\\", "/")) +} + +#[cfg(not(windows))] +fn path_as_key(path: &PathBuf) -> Cow { + path.to_string_lossy() +} + +fn file_system(path: PathBuf) -> Option { + path.canonicalize().ok().map(Resource::FileSystem) +} + +fn bundled(path: PathBuf) -> Option { + let key = path_as_key(&path); + if BUNDLED.is_available(&key) { + Some(Resource::Bundled(key.into_owned())) + } else { + None + } +} + +fn parent_dir(mut path: PathBuf) -> PathBuf { + path.pop(); + path +} + +fn from_utf8(data: Cow<[u8]>) -> Result { + String::from_utf8(data.to_vec()).map_err(|err| Error::new(io::ErrorKind::Other, err)) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use std::path::{Path, PathBuf}; + + fn mkdir() -> tempfile::TempDir { + tempfile::Builder::new() + .prefix("ckb_resoruce_test") + .tempdir() + .unwrap() + } + + fn touch>(path: P) -> PathBuf { + fs::create_dir_all(path.as_ref().parent().unwrap()).expect("create dir in test"); + fs::OpenOptions::new() + .create(true) + .append(true) + .open(&path) + .expect("touch file in test"); + + path.as_ref().canonicalize().expect("touch file in test") + } + + #[test] + fn test_resource_locator_resolve() { + let dir = mkdir(); + let spec_dev_path = touch(dir.path().join("specs/dev.toml")); + + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()) + .expect("resource root dir exists"); + + assert_eq!( + locator.resolve("ckb.toml".into()), + Some(Resource::Bundled("ckb.toml".into())) + ); + + assert_eq!( + locator.resolve("specs/testnet.toml".into()), + Some(Resource::Bundled("specs/testnet.toml".into())) + ); + assert_eq!( + locator.resolve("specs/dev.toml".into()), + Some(Resource::FileSystem(spec_dev_path.clone())) + ); + + assert_eq!(locator.resolve(dir.path().join("ckb.toml")), None); + assert_eq!(locator.resolve("x.toml".into()), None); + } + + #[test] + fn test_resource_locator_resolve_relative_to() { + let dir = mkdir(); + let spec_dev_path = touch(dir.path().join("specs/dev.toml")); + let always_success_path = 
touch(dir.path().join("specs/cells/always_success")); + + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()) + .expect("resource root dir exists"); + + // Relative to Bundled("ckb.toml") + { + let ckb = Resource::Bundled("ckb.toml".into()); + + assert_eq!( + locator.resolve_relative_to("specs/dev.toml".into(), &ckb), + Some(Resource::Bundled("specs/dev.toml".into())) + ); + assert_eq!( + locator.resolve_relative_to("specs/testnet.toml".into(), &ckb), + Some(Resource::Bundled("specs/testnet.toml".into())) + ); + assert_eq!(locator.resolve_relative_to("x".into(), &ckb), None); + assert_eq!( + locator.resolve_relative_to(spec_dev_path.clone(), &ckb), + None, + ); + } + + // Relative to Bundled("specs/dev.toml") + { + let ckb = Resource::Bundled("specs/dev.toml".into()); + + assert_eq!( + locator.resolve_relative_to("cells/always_success".into(), &ckb), + Some(Resource::Bundled("specs/cells/always_success".into())) + ); + assert_eq!( + locator.resolve_relative_to("cells/secp256k1_blake2b_lock".into(), &ckb), + Some(Resource::Bundled( + "specs/cells/secp256k1_blake2b_lock".into() + )) + ); + assert_eq!(locator.resolve_relative_to("x".into(), &ckb), None); + assert_eq!( + locator.resolve_relative_to(always_success_path.clone(), &ckb), + None, + ); + } + + // Relative to FileSystem("specs/dev.toml") + { + let spec_dev = Resource::FileSystem(spec_dev_path.clone()); + + assert_eq!( + locator.resolve_relative_to("cells/always_success".into(), &spec_dev), + Some(Resource::FileSystem(always_success_path.clone())) + ); + assert_eq!( + locator.resolve_relative_to("cells/secp256k1_blake2b_lock".into(), &spec_dev), + Some(Resource::Bundled( + "specs/cells/secp256k1_blake2b_lock".into() + )) + ); + assert_eq!(locator.resolve_relative_to("x".into(), &spec_dev), None); + + assert_eq!( + locator.resolve_relative_to(always_success_path.clone(), &spec_dev), + Some(Resource::FileSystem(always_success_path.clone())), + ); + assert_eq!( + locator.resolve_relative_to(dir.path().join("ckb.toml"), &spec_dev), + None, + ); + } + } +} diff --git a/resource/src/template.rs b/resource/src/template.rs new file mode 100644 index 0000000000..cf036f470d --- /dev/null +++ b/resource/src/template.rs @@ -0,0 +1,108 @@ +pub const DEFAULT_SPEC: &str = "dev"; +pub const AVAILABLE_SPECS: &[&str] = &["dev", "testnet"]; +pub const DEFAULT_RPC_PORT: &str = "8114"; +pub const DEFAULT_P2P_PORT: &str = "8115"; + +const START_MARKER: &str = " # {{"; +const END_MAKER: &str = "# }}"; +const WILDCARD_BRANCH: &str = "# _ => "; + +use std::io; + +pub struct Template(T); + +pub struct TemplateContext<'a> { + pub spec: &'a str, + pub rpc_port: &'a str, + pub p2p_port: &'a str, +} + +impl<'a> Default for TemplateContext<'a> { + fn default() -> Self { + TemplateContext { + spec: DEFAULT_SPEC, + rpc_port: DEFAULT_RPC_PORT, + p2p_port: DEFAULT_P2P_PORT, + } + } +} + +impl Template { + pub fn new(content: T) -> Self { + Template(content) + } +} + +fn writeln(w: &mut W, s: &str, context: &TemplateContext) -> io::Result<()> { + writeln!( + w, + "{}", + s.replace("{rpc_port}", context.rpc_port) + .replace("{p2p_port}", context.p2p_port) + ) +} + +#[derive(Debug)] +pub enum TemplateState<'a> { + SearchStartMarker, + MatchBranch(&'a str), + SearchEndMarker, +} + +impl Template +where + T: AsRef, +{ + pub fn write_to<'c, W: io::Write>( + &self, + w: &mut W, + context: &TemplateContext<'c>, + ) -> io::Result<()> { + let spec_branch = format!("# {} => ", context.spec).to_string(); + + let mut state = TemplateState::SearchStartMarker; + 
for line in self.0.as_ref().lines() { + // dbg!((line, &state)); + match state { + TemplateState::SearchStartMarker => { + if line.ends_with(START_MARKER) { + state = TemplateState::MatchBranch(line); + } else { + writeln!(w, "{}", line)?; + } + } + TemplateState::MatchBranch(start_line) => { + if line == END_MAKER { + writeln!( + w, + "{}", + &start_line[..(start_line.len() - START_MARKER.len())], + )?; + state = TemplateState::SearchStartMarker; + } else if line.starts_with(&spec_branch) { + writeln(w, &line[spec_branch.len()..], context)?; + state = TemplateState::SearchEndMarker; + } else if line.starts_with(WILDCARD_BRANCH) { + writeln(w, &line[WILDCARD_BRANCH.len()..], context)?; + state = TemplateState::SearchEndMarker; + } + } + TemplateState::SearchEndMarker => { + if line == END_MAKER { + state = TemplateState::SearchStartMarker; + } + } + } + } + + if let TemplateState::MatchBranch(start_line) = state { + writeln!( + w, + "{}", + &start_line[..(start_line.len() - START_MARKER.len())], + )?; + } + + Ok(()) + } +} diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 1c9b522bac..e72a2c326d 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -42,12 +42,7 @@ impl ::std::clone::Clone for Shared { } impl Shared { - pub fn new( - store: CI, - consensus: Consensus, - txs_verify_cache_size: usize, - tx_pool_config: TxPoolConfig, - ) -> Self { + pub fn new(store: CI, consensus: Consensus, tx_pool_config: TxPoolConfig) -> Self { let store = Arc::new(store); let chain_state = { // check head in store or save the genesis block as head @@ -73,6 +68,7 @@ impl Shared { .expect("block_ext stored") .total_difficulty; + let txs_verify_cache_size = tx_pool_config.txs_verify_cache_size; Arc::new(Mutex::new(ChainState::new( &store, header, @@ -378,22 +374,10 @@ impl SharedBuilder { self } - pub fn txs_verify_cache_size(mut self, value: usize) -> Self { - if let Some(c) = self.tx_pool_config.as_mut() { - c.txs_verify_cache_size = value; - }; - self - } - pub fn build(self) -> Shared> { let store = ChainKVStore::new(self.db.unwrap()); let consensus = self.consensus.unwrap_or_else(Consensus::default); let tx_pool_config = self.tx_pool_config.unwrap_or_else(Default::default); - Shared::new( - store, - consensus, - tx_pool_config.txs_verify_cache_size, - tx_pool_config, - ) + Shared::new(store, consensus, tx_pool_config) } } diff --git a/spec/Cargo.toml b/spec/Cargo.toml index 25ad4254d3..ecdaf9d174 100644 --- a/spec/Cargo.toml +++ b/spec/Cargo.toml @@ -4,8 +4,6 @@ version = "0.9.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2018" -build = "build.rs" -include = ["/chainspecs"] [dependencies] serde = "1.0" @@ -15,8 +13,4 @@ numext-fixed-hash = { version = "0.1", features = ["support_rand", "support_heap numext-fixed-uint = { version = "0.1", features = ["support_rand", "support_heapsize", "support_serde"] } ckb-core = { path = "../core" } ckb-pow = { path = "../pow" } -includedir = "0.5.0" -phf = "0.7.21" - -[build-dependencies] -includedir_codegen = "0.5.0" +ckb-resource = { path = "../resource" } diff --git a/spec/build.rs b/spec/build.rs deleted file mode 100644 index 69b7c92dbb..0000000000 --- a/spec/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -use includedir_codegen::Compression; - -fn main() { - includedir_codegen::start("FILES") - .dir("chainspecs", Compression::Gzip) - .build("chainspecs.rs") - .unwrap(); -} diff --git a/spec/src/lib.rs b/spec/src/lib.rs index 76f599871d..c3a7427671 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -5,9 +5,6 @@ //! 
In order to run a chain different to the official public one, //! with a config file specifying chain = "path" under [ckb]. -// Shields clippy errors in generated chainspecs.rs file. -#![allow(clippy::unreadable_literal)] - use crate::consensus::Consensus; use ckb_core::block::BlockBuilder; use ckb_core::header::HeaderBuilder; @@ -15,74 +12,29 @@ use ckb_core::script::Script; use ckb_core::transaction::{CellOutput, Transaction, TransactionBuilder}; use ckb_core::{Capacity, Cycle}; use ckb_pow::{Pow, PowEngine}; +use ckb_resource::{Resource, ResourceLocator}; use numext_fixed_hash::H256; use numext_fixed_uint::U256; use serde_derive::Deserialize; use std::error::Error; -use std::fs::File; -use std::io::Read; -use std::path::{Display, Path, PathBuf}; +use std::fmt; +use std::path::PathBuf; use std::sync::Arc; pub mod consensus; -include!(concat!(env!("OUT_DIR"), "/chainspecs.rs")); - -#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] -pub enum SpecPath { - Testnet, - Local(PathBuf), -} - -impl SpecPath { - pub fn display(&self) -> Display { - match self { - SpecPath::Testnet => Path::new("Testnet").display(), - SpecPath::Local(path) => path.display(), - } - } - - pub fn expand_path>(&self, base: P) -> Self { - match self { - SpecPath::Testnet => SpecPath::Testnet, - SpecPath::Local(path) => { - if path.is_relative() { - SpecPath::Local(base.as_ref().join(path)) - } else { - SpecPath::Local(path.to_path_buf()) - } - } - } - } - - fn path(&self) -> PathBuf { - match self { - SpecPath::Testnet => PathBuf::from("testnet/testnet.toml"), - SpecPath::Local(path) => PathBuf::from(path), - } - } - - fn load_file>(&self, path: P) -> Result, Box> { - match self { - SpecPath::Testnet => { - let s = path.as_ref().to_str().expect("chain spec path"); - Ok(FILES - .get(&format!("chainspecs/{}", s)) - .expect("hardcoded spec") - .to_vec()) - } - SpecPath::Local(_) => { - let mut file = File::open(&path)?; - let mut data = Vec::new(); - file.read_to_end(&mut data)?; - Ok(data) - } - } - } +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ChainSpec { + pub resource: Resource, + pub name: String, + pub genesis: Genesis, + pub params: Params, + pub system_cells: Vec, + pub pow: Pow, } -#[derive(Clone, PartialEq, Eq, Debug, Deserialize)] -pub struct ChainSpec { +#[derive(Deserialize)] +struct ChainSpecConfig { pub name: String, pub genesis: Genesis, pub params: Params, @@ -119,38 +71,80 @@ pub struct SystemCell { pub path: PathBuf, } -pub(self) fn build_system_cell_transaction( - cells: &[SystemCell], - spec_path: &SpecPath, -) -> Result> { - let mut outputs = Vec::new(); - for system_cell in cells { - let data = spec_path.load_file(&system_cell.path)?; - - // TODO: we should provide a proper lock script here so system cells - // can be updated. 
- let output = CellOutput::new(data.len() as Capacity, data, Script::default(), None); - outputs.push(output); +#[derive(Debug)] +pub struct FileNotFoundError; + +impl FileNotFoundError { + fn boxed() -> Box { + Box::new(FileNotFoundError) } +} - Ok(TransactionBuilder::default().outputs(outputs).build()) +impl Error for FileNotFoundError {} + +impl fmt::Display for FileNotFoundError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ChainSpec: file not found") + } } impl ChainSpec { - pub fn read_from_file(spec_path: &SpecPath) -> Result> { - let config_bytes = spec_path.load_file(spec_path.path())?; - let config_str = String::from_utf8(config_bytes)?; - let mut spec: Self = toml::from_str(&config_str)?; - spec.resolve_paths(spec_path.path().parent().expect("chain spec path resolve")); - - Ok(spec) + pub fn resolve_relative_to( + locator: &ResourceLocator, + spec_path: PathBuf, + config_file: &Resource, + ) -> Result> { + let resource = match locator.resolve_relative_to(spec_path, config_file) { + Some(r) => r, + None => return Err(FileNotFoundError::boxed()), + }; + let config_bytes = resource.get()?; + let spec_config: ChainSpecConfig = toml::from_slice(&config_bytes)?; + + let system_cells_result: Result, FileNotFoundError> = spec_config + .system_cells + .into_iter() + .map(|c| { + locator + .resolve_relative_to(c.path, &resource) + .ok_or(FileNotFoundError) + }) + .collect(); + + Ok(ChainSpec { + resource, + system_cells: system_cells_result?, + name: spec_config.name, + genesis: spec_config.genesis, + params: spec_config.params, + pow: spec_config.pow, + }) } pub fn pow_engine(&self) -> Arc { self.pow.engine() } - pub fn to_consensus(&self, spec_path: &SpecPath) -> Result> { + fn build_system_cell_transaction(&self) -> Result> { + let outputs_result: Result, _> = self + .system_cells + .iter() + .map(|c| { + c.get().map(|data| { + let data = data.into_owned(); + // TODO: we should provide a proper lock script here so system cells + // can be updated. + CellOutput::new(data.len() as u64, data, Script::default(), None) + }) + }) + .collect(); + + let outputs = outputs_result?; + + Ok(TransactionBuilder::default().outputs(outputs).build()) + } + + pub fn to_consensus(&self) -> Result> { let header = HeaderBuilder::default() .version(self.genesis.version) .parent_hash(self.genesis.parent_hash.clone()) @@ -164,10 +158,7 @@ impl ChainSpec { .build(); let genesis_block = BlockBuilder::default() - .commit_transaction(build_system_cell_transaction( - &self.system_cells, - &spec_path, - )?) + .commit_transaction(self.build_system_cell_transaction()?) 
.header(header) .build(); @@ -180,14 +171,6 @@ impl ChainSpec { Ok(consensus) } - - fn resolve_paths(&mut self, base: &Path) { - for mut cell in &mut self.system_cells { - if cell.path.is_relative() { - cell.path = base.join(&cell.path); - } - } - } } #[cfg(test)] @@ -196,36 +179,19 @@ pub mod test { #[test] fn test_chain_spec_load() { - println!( - "{:?}", - Path::new(env!("CARGO_MANIFEST_DIR")) - .join("../nodes_template/spec/dev.toml") - .display() - ); - let dev = ChainSpec::read_from_file(&SpecPath::Local( - Path::new(env!("CARGO_MANIFEST_DIR")).join("../nodes_template/spec/dev.toml"), - )); + let locator = ResourceLocator::current_dir().unwrap(); + let ckb = locator.ckb(); + let dev = ChainSpec::resolve_relative_to(&locator, PathBuf::from("specs/dev.toml"), &ckb); assert!(dev.is_ok(), format!("{:?}", dev)); - for cell in &dev.unwrap().system_cells { - assert!(cell.path.exists()); - } } #[test] fn always_success_type_hash() { - let spec_path = SpecPath::Local( - Path::new(env!("CARGO_MANIFEST_DIR")).join("../nodes_template/spec/dev.toml"), - ); - let always_success_path = Path::new(env!("CARGO_MANIFEST_DIR")) - .join("../nodes_template/spec/cells/always_success"); - - let tx = build_system_cell_transaction( - &[SystemCell { - path: always_success_path, - }], - &spec_path, - ) - .unwrap(); + let locator = ResourceLocator::current_dir().unwrap(); + let ckb = locator.ckb(); + let dev = ChainSpec::resolve_relative_to(&locator, PathBuf::from("specs/dev.toml"), &ckb) + .unwrap(); + let tx = dev.build_system_cell_transaction().unwrap(); // Tx and Output hash will be used in some test cases directly, assert here for convenience assert_eq!( @@ -248,12 +214,14 @@ pub mod test { #[test] fn test_testnet_chain_spec_load() { - let spec_path = SpecPath::Testnet; - let result = ChainSpec::read_from_file(&spec_path); - assert!(result.is_ok(), format!("{:?}", result)); - let chain_spec = result.unwrap(); - - let result = build_system_cell_transaction(&chain_spec.system_cells, &spec_path); + let locator = ResourceLocator::current_dir().unwrap(); + let ckb = locator.ckb(); + let testnet = + ChainSpec::resolve_relative_to(&locator, PathBuf::from("specs/testnet.toml"), &ckb); + assert!(testnet.is_ok(), format!("{:?}", testnet)); + let chain_spec = testnet.unwrap(); + + let result = chain_spec.build_system_cell_transaction(); assert!(result.is_ok(), format!("{:?}", result)); let tx = result.unwrap(); diff --git a/src/cli/args.rs b/src/cli/args.rs deleted file mode 100644 index b7434bb86c..0000000000 --- a/src/cli/args.rs +++ /dev/null @@ -1,93 +0,0 @@ -// use build_info::Version; -use build_info::{get_version, Version}; -use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; - -const CKB_CONFIG_HELP: &str = "Specify the configuration file PATH. Tries ckb.toml, nodes/default.toml in working directory when omitted."; -const MINER_CONFIG_HELP: &str = "Specify the configuration file PATH. 
Tries miner.toml, nodes/miner.toml in working directory when omitted."; - -pub fn get_matches() -> ArgMatches<'static> { - let version = get_version!(); - - App::new("ckb") - .author("Nervos Core Dev ") - .about("Nervos CKB - The Common Knowledge Base") - .version(version.short().as_str()) - .long_version(version.long().as_str()) - .setting(AppSettings::SubcommandRequiredElseHelp) - .subcommand(run()) - .subcommand(miner()) - .subcommand(export()) - .subcommand(import()) - .subcommand(cli()) - .get_matches() -} - -fn run() -> App<'static, 'static> { - SubCommand::with_name("run") - .arg(arg_config_with_help(CKB_CONFIG_HELP)) - .about("Running ckb node") -} - -fn miner() -> App<'static, 'static> { - SubCommand::with_name("miner") - .arg(arg_config_with_help(MINER_CONFIG_HELP)) - .about("Running ckb miner") -} - -fn arg_config_with_help(help: &'static str) -> Arg<'static, 'static> { - Arg::with_name("config") - .short("c") - .long("config") - .value_name("CONFIG") - .takes_value(true) - .help(help) -} - -fn arg_format() -> Arg<'static, 'static> { - Arg::with_name("format") - .short("f") - .long("format") - .value_name("FORMAT") - .required(true) - .takes_value(true) - .help("Specify the format.") -} - -fn export() -> App<'static, 'static> { - SubCommand::with_name("export") - .about("Export ckb data") - .arg(arg_format()) - .arg(arg_config_with_help(CKB_CONFIG_HELP)) - .arg( - Arg::with_name("target") - .short("t") - .long("target") - .value_name("PATH") - .required(true) - .index(1) - .help("Specify the export target path."), - ) -} - -fn import() -> App<'static, 'static> { - SubCommand::with_name("import") - .about("Import ckb data") - .arg(arg_config_with_help(CKB_CONFIG_HELP)) - .arg(arg_format()) - .arg( - Arg::with_name("source") - .short("s") - .long("source") - .value_name("PATH") - .required(true) - .index(1) - .help("Specify the exported data path."), - ) -} - -fn cli() -> App<'static, 'static> { - SubCommand::with_name("cli") - .about("Running ckb cli") - .setting(AppSettings::SubcommandRequiredElseHelp) - .subcommand(SubCommand::with_name("keygen").about("Generate new key")) -} diff --git a/src/cli/export.rs b/src/cli/export.rs deleted file mode 100644 index be2bc9a1ae..0000000000 --- a/src/cli/export.rs +++ /dev/null @@ -1,24 +0,0 @@ -use super::super::setup::Setup; -use ckb_db::diskdb::RocksDB; -use ckb_instrument::{Export, Format}; -use ckb_shared::cachedb::CacheDB; -use ckb_shared::shared::SharedBuilder; -use clap::{value_t, ArgMatches}; - -pub fn export(setup: &Setup, matches: &ArgMatches) { - let format = value_t!(matches.value_of("format"), Format).unwrap_or_else(|e| e.exit()); - let target = value_t!(matches.value_of("target"), String).unwrap_or_else(|e| e.exit()); - - let shared = SharedBuilder::>::default() - .consensus( - setup - .chain_spec - .to_consensus(&setup.configs.chain.spec) - .unwrap(), - ) - .db(&setup.configs.db) - .build(); - Export::new(shared, format, target.into()) - .execute() - .unwrap_or_else(|e| panic!("Export error {:?} ", e)); -} diff --git a/src/cli/import.rs b/src/cli/import.rs deleted file mode 100644 index c4c12fca24..0000000000 --- a/src/cli/import.rs +++ /dev/null @@ -1,31 +0,0 @@ -use super::super::setup::Setup; -use ckb_chain::chain::ChainBuilder; -use ckb_db::diskdb::RocksDB; -use ckb_instrument::{Format, Import}; -use ckb_notify::NotifyService; -use ckb_shared::cachedb::CacheDB; -use ckb_shared::shared::SharedBuilder; -use clap::{value_t, ArgMatches}; - -pub fn import(setup: &Setup, matches: &ArgMatches) { - let format = 
value_t!(matches.value_of("format"), Format).unwrap_or_else(|e| e.exit()); - let source = value_t!(matches.value_of("source"), String).unwrap_or_else(|e| e.exit()); - - let shared = SharedBuilder::>::default() - .consensus( - setup - .chain_spec - .to_consensus(&setup.configs.chain.spec) - .unwrap(), - ) - .db(&setup.configs.db) - .build(); - - let notify = NotifyService::default().start::<&str>(None); - let chain_service = ChainBuilder::new(shared.clone(), notify).build(); - let chain_controller = chain_service.start::<&str>(Some("ImportChainService")); - - Import::new(chain_controller, format, source.into()) - .execute() - .unwrap_or_else(|e| panic!("Import error {:?} ", e)); -} diff --git a/src/cli/miner.rs b/src/cli/miner.rs deleted file mode 100644 index de4bc5554f..0000000000 --- a/src/cli/miner.rs +++ /dev/null @@ -1,102 +0,0 @@ -use crate::cli::SentryConfig; -use crate::helper::{require_path_exists, to_absolute_path}; -use ckb_chain_spec::{ChainSpec, SpecPath}; -use ckb_miner::{Client, Miner, MinerConfig}; -use ckb_util::Mutex; -use clap::ArgMatches; -use crossbeam_channel::unbounded; -use dir::Directories; -use logger::{self, Config as LogConfig}; -use serde_derive::Deserialize; -use std::error::Error; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use std::thread; - -const DEFAULT_CONFIG_PATHS: &[&str] = &["miner.toml", "nodes/miner.toml"]; - -#[derive(Clone, Debug, Deserialize)] -struct Config { - pub logger: LogConfig, - #[serde(flatten)] - pub miner: MinerConfig, - pub chain: SpecPath, - pub data_dir: PathBuf, - pub sentry: SentryConfig, -} - -impl Config { - fn resolve_paths(&mut self, base: &Path) { - self.chain = self.chain.expand_path(base); - - if self.data_dir.is_relative() { - self.data_dir = base.join(&self.data_dir); - } - - let dirs = Directories::new(&self.data_dir); - if let Some(ref file) = self.logger.file { - let path = dirs.join("logs"); - self.logger.file = Some(path.join(file)); - } - } - - pub fn read_from_file>(path: P) -> Result> { - let config_str = std::fs::read_to_string(path.as_ref())?; - let mut config: Self = toml::from_str(&config_str)?; - config.resolve_paths(path.as_ref().parent().unwrap_or_else(|| { - eprintln!("Invalid config file path {:?}", path.as_ref()); - ::std::process::exit(1); - })); - Ok(config) - } -} - -pub fn miner(matches: &ArgMatches) { - let config_path = get_config_path(matches); - - let config = Config::read_from_file(config_path).unwrap_or_else(|e| { - eprintln!("Invalid config file {:?}", e); - ::std::process::exit(1); - }); - - let _logger_guard = logger::init(config.logger.clone()).expect("Init Logger"); - let _sentry_guard = config.sentry.clone().init(); - - let chain_spec = ChainSpec::read_from_file(&config.chain).expect("Load chain spec"); - - let (new_work_tx, new_work_rx) = unbounded(); - - let work = Arc::new(Mutex::new(None)); - - let client = Client::new(Arc::clone(&work), new_work_tx, config.miner); - - let miner = Miner::new(work, chain_spec.pow_engine(), new_work_rx, client.clone()); - - thread::Builder::new() - .name("client".to_string()) - .spawn(move || client.poll_block_template()) - .expect("Start client failed!"); - - miner.run() -} - -fn find_default_config_path() -> Option { - DEFAULT_CONFIG_PATHS - .iter() - .map(PathBuf::from) - .find(|p| p.exists()) -} - -pub fn get_config_path(matches: &ArgMatches) -> PathBuf { - to_absolute_path( - matches - .value_of("config") - .map_or_else(find_default_config_path, |v| { - require_path_exists(PathBuf::from(v)) - }) - .unwrap_or_else(|| { - eprintln!("Miner 
config file not found!"); - ::std::process::exit(1); - }), - ) -} diff --git a/src/cli/mod.rs b/src/cli/mod.rs deleted file mode 100644 index a24e7f2d70..0000000000 --- a/src/cli/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod args; -mod export; -mod import; -mod miner; -mod run_impl; -mod sentry_config; - -pub use self::args::get_matches; -pub use self::export::export; -pub use self::import::import; -pub use self::miner::miner; -pub use self::run_impl::{keygen, run}; -pub use self::sentry_config::SentryConfig; diff --git a/src/helper.rs b/src/helper.rs deleted file mode 100644 index 63a0e2636b..0000000000 --- a/src/helper.rs +++ /dev/null @@ -1,36 +0,0 @@ -use ckb_util::{Condvar, Mutex}; -use ctrlc; -use std::path::PathBuf; -use std::sync::Arc; - -pub fn wait_for_exit() { - let exit = Arc::new((Mutex::new(()), Condvar::new())); - - // Handle possible exits - let e = Arc::<(Mutex<()>, Condvar)>::clone(&exit); - let _ = ctrlc::set_handler(move || { - e.1.notify_all(); - }); - - // Wait for signal - let mut l = exit.0.lock(); - exit.1.wait(&mut l); -} - -pub fn require_path_exists(path: PathBuf) -> Option { - if path.exists() { - Some(path) - } else { - None - } -} - -pub fn to_absolute_path(path: PathBuf) -> PathBuf { - if path.is_absolute() { - path - } else { - let mut absulute_path = ::std::env::current_dir().expect("get current_dir"); - absulute_path.push(path); - absulute_path - } -} diff --git a/src/main.rs b/src/main.rs index 33008d22c0..4fe998233a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,44 +1,41 @@ -mod cli; -mod helper; mod setup; +mod subcommand; +mod system; -use crate::setup::{get_config_path, Setup}; -use clap::ArgMatches; -use log::info; +use setup::{cli, ExitCode, Setup}; -fn main() { +fn run_app() -> Result<(), ExitCode> { // Always print backtrace on panic. 
::std::env::set_var("RUST_BACKTRACE", "full"); - let matches = cli::get_matches(); - - match matches.subcommand() { - ("cli", Some(cli_matches)) => match cli_matches.subcommand() { - ("keygen", _) => cli::keygen(), - _ => unreachable!(), - }, - ("run", Some(run_matches)) => { - let setup = setup(&run_matches); - let _logger_guard = logger::init(setup.configs.logger.clone()).expect("Init Logger"); - let _sentry_guard = setup.configs.sentry.clone().init(); - cli::run(setup); + let app_matches = cli::get_matches(); + match app_matches.subcommand() { + (cli::CMD_INIT, Some(matches)) => return subcommand::init(Setup::init(&matches)?), + (cli::CMD_CLI, Some(matches)) => { + return match matches.subcommand() { + (cli::CMD_KEYGEN, _) => subcommand::cli::keygen(), + _ => unreachable!(), + }; } - ("miner", Some(miner_matches)) => cli::miner(&miner_matches), - ("export", Some(export_matches)) => cli::export(&setup(&export_matches), export_matches), - ("import", Some(import_matches)) => cli::import(&setup(&import_matches), import_matches), + _ => { + // continue + } + } + + let setup = Setup::from_matches(&app_matches)?; + let _guard = setup.setup_app(); + + match app_matches.subcommand() { + (cli::CMD_RUN, _) => subcommand::run(setup.run()?), + (cli::CMD_MINER, _) => subcommand::miner(setup.miner()?), + (cli::CMD_EXPORT, Some(matches)) => subcommand::export(setup.export(&matches)?), + (cli::CMD_IMPORT, Some(matches)) => subcommand::import(setup.import(&matches)?), _ => unreachable!(), } } -fn setup(matches: &ArgMatches<'static>) -> Setup { - let config_path = get_config_path(matches); - info!(target: "main", "Setup with config {}", config_path.display()); - Setup::setup(&config_path).unwrap_or_else(|e| { - eprintln!( - "Failed to setup with config {}, cause err: {:?}", - config_path.display(), - e - ); - ::std::process::exit(1); - }) +fn main() { + if let Some(exit_code) = run_app().err() { + ::std::process::exit(exit_code.into()); + } } diff --git a/src/setup.rs b/src/setup.rs deleted file mode 100644 index 7a53c04106..0000000000 --- a/src/setup.rs +++ /dev/null @@ -1,280 +0,0 @@ -use crate::cli::SentryConfig; -use crate::helper::{require_path_exists, to_absolute_path}; -use ckb_chain_spec::{ChainSpec, SpecPath}; -use ckb_db::DBConfig; -use ckb_miner::BlockAssemblerConfig; -use ckb_network::NetworkConfig; -use ckb_rpc::Config as RpcConfig; -use ckb_shared::tx_pool::TxPoolConfig; -use ckb_sync::Config as SyncConfig; -use clap::ArgMatches; -use config_tool::{Config as ConfigTool, ConfigError, File}; -use dir::Directories; -use logger::Config as LogConfig; -use serde_derive::Deserialize; -use std::error::Error; -use std::path::{Path, PathBuf}; - -const DEFAULT_CONFIG_PATHS: &[&str] = &["ckb.toml", "nodes/default.toml"]; - -#[derive(Clone, Debug)] -pub struct Setup { - pub configs: Configs, - pub chain_spec: ChainSpec, - pub dirs: Directories, -} - -#[derive(Clone, Debug, Deserialize)] -pub struct ChainConfig { - pub spec: SpecPath, -} - -#[derive(Clone, Debug, Deserialize)] -pub struct Configs { - pub data_dir: PathBuf, - pub db: DBConfig, - pub chain: ChainConfig, - pub logger: LogConfig, - pub network: NetworkConfig, - pub rpc: RpcConfig, - pub block_assembler: BlockAssemblerConfig, - pub sync: SyncConfig, - pub tx_pool: TxPoolConfig, - pub sentry: SentryConfig, -} - -pub fn get_config_path(matches: &ArgMatches) -> PathBuf { - to_absolute_path( - matches - .value_of("config") - .map_or_else(find_default_config_path, |v| { - require_path_exists(PathBuf::from(v)) - }) - .unwrap_or_else(|| { - 
eprintln!("No config file found!"); - ::std::process::exit(1); - }), - ) -} - -fn find_default_config_path() -> Option { - DEFAULT_CONFIG_PATHS - .iter() - .map(PathBuf::from) - .find(|p| p.exists()) -} - -impl Setup { - pub(crate) fn with_configs(mut configs: Configs) -> Result> { - let dirs = Directories::new(&configs.data_dir); - - if let Some(file) = configs.logger.file { - let path = dirs.join("logs"); - configs.logger.file = Some(path.join(file)); - } - - let chain_spec = ChainSpec::read_from_file(&configs.chain.spec).map_err(|e| { - Box::new(ConfigError::Message(format!( - "invalid chain spec {}, {}", - configs.chain.spec.display(), - e - ))) - })?; - - Ok(Setup { - configs, - chain_spec, - dirs, - }) - } - - pub fn setup>(config_path: T) -> Result> { - let mut config_tool = ConfigTool::new(); - - config_tool.merge(File::from(config_path.as_ref()))?; - - let mut configs: Configs = config_tool.try_into()?; - configs.resolve_paths(config_path.as_ref().parent().unwrap()); - - Self::with_configs(configs) - } -} - -impl Configs { - fn resolve_paths(&mut self, base: &Path) { - if self.data_dir.is_relative() { - self.data_dir = base.join(&self.data_dir); - } - self.chain.spec = self.chain.spec.expand_path(base); - if self.db.path.is_relative() { - self.db.path = base.join(&self.db.path); - } - if self.network.path.is_relative() { - self.network.path = base.join(&self.network.path); - } - } -} - -#[cfg(test)] -pub mod test { - use super::*; - use config_tool::File as ConfigFile; - use std::fs::File; - use std::io::Write; - use std::path::Path; - use tempfile; - - fn override_default_config_file>(config_path: &T) -> Result> { - let mut config_tool = ConfigTool::new(); - let default_config_path = - Path::new(env!("CARGO_MANIFEST_DIR")).join("nodes_template/default.toml"); - config_tool.merge(ConfigFile::from(default_config_path.as_path()))?; - config_tool.merge(ConfigFile::from(config_path.as_ref()))?; - - let mut configs: Configs = config_tool.try_into()?; - configs.resolve_paths(default_config_path.parent().unwrap()); - - Setup::with_configs(configs) - } - - fn write_file>(file: P, content: &str) { - let mut file = File::create(file).expect("test dir clean"); - file.write_all(content.as_bytes()) - .expect("write test content");; - } - - fn test_chain_spec() -> &'static str { - r#" - name = "ckb_test_custom" - - [genesis] - version = 0 - parent_hash = "0x0000000000000000000000000000000000000000000000000000000000000000" - timestamp = 0 - txs_commit = "0x0000000000000000000000000000000000000000000000000000000000000000" - txs_proposal = "0x0000000000000000000000000000000000000000000000000000000000000000" - difficulty = "0x233" - uncles_hash = "0x0000000000000000000000000000000000000000000000000000000000000000" - - [genesis.seal] - nonce = 233 - proof = [2, 3, 3] - - [params] - initial_block_reward = 233 - max_block_cycles = 100000000 - - [pow] - func = "Cuckoo" - - [pow.params] - edge_bits = 29 - cycle_length = 42 - - [[system_cells]] - path = "verify" - - [[system_cells]] - path = "always_success" - "# - } - - #[test] - fn test_load_config() { - let tmp_dir = tempfile::Builder::new() - .prefix("test_load_config") - .tempdir() - .unwrap(); - - let test_conifg = r#" - [network] - listen_addresses = ["/ip4/1.1.1.1/tcp/1"] - "#; - let config_path = tmp_dir.path().join("config.toml"); - write_file(&config_path, test_conifg); - let setup = override_default_config_file(&config_path); - assert!(setup.is_ok()); - assert_eq!( - setup.unwrap().configs.network.listen_addresses, - 
vec!["/ip4/1.1.1.1/tcp/1".parse().unwrap()] - ); - } - - #[test] - fn test_load_db_config() { - let tmp_dir = tempfile::Builder::new() - .prefix("test_load_db_config") - .tempdir() - .unwrap(); - - let test_conifg = r#" - [db.options] - disable_auto_compactions = "true" - paranoid_file_checks = "true" - "#; - let config_path = tmp_dir.path().join("config.toml"); - write_file(&config_path, test_conifg); - let setup = override_default_config_file(&config_path).unwrap(); - let options: Vec<(&str, &str)> = setup - .configs - .db - .options - .as_ref() - .unwrap() - .iter() - .map(|(k, v)| (k.as_str(), v.as_str())) - .collect(); - assert_eq!( - options.contains(&("disable_auto_compactions", "true")), - true - ); - assert_eq!(options.contains(&("paranoid_file_checks", "true")), true); - } - - #[test] - fn test_custom_chain_spec_with_config() { - let tmp_dir = tempfile::Builder::new() - .prefix("test_custom_chain_spec_with_config") - .tempdir() - .unwrap(); - - let chain_spec_path = tmp_dir.path().join("ckb_test_custom.toml"); - let test_config = format!( - r#" - [chain] - spec = {{ Local = "{}" }} - "#, - chain_spec_path.to_str().unwrap() - ); - - let config_path = tmp_dir.path().join("config.toml"); - write_file(&config_path, &test_config); - write_file(&chain_spec_path, test_chain_spec()); - - let setup = override_default_config_file(&config_path); - assert!(setup.is_ok()); - assert_eq!(setup.unwrap().chain_spec.name, "ckb_test_custom"); - } - - #[test] - fn test_testnet_chain_spec_with_config() { - let tmp_dir = tempfile::Builder::new() - .prefix("test_testnet_chain_spec_with_config") - .tempdir() - .unwrap(); - - let test_config = r#" - [chain] - spec = "Testnet" - "#; - - let config_path = tmp_dir.path().join("config.toml"); - write_file(&config_path, &test_config); - - let setup = override_default_config_file(&config_path); - assert!(setup.is_ok()); - let setup = setup.unwrap(); - assert_eq!(setup.configs.chain.spec, SpecPath::Testnet); - assert_eq!(setup.chain_spec.name, "ckb_testnet"); - } -} diff --git a/src/setup/app_config.rs b/src/setup/app_config.rs new file mode 100644 index 0000000000..bf7f34c8d7 --- /dev/null +++ b/src/setup/app_config.rs @@ -0,0 +1,190 @@ +use std::fs; +use std::path::{Path, PathBuf}; + +use serde_derive::Deserialize; + +use ckb_chain_spec::ChainSpec; +use ckb_db::DBConfig; +use ckb_miner::BlockAssemblerConfig; +use ckb_miner::MinerConfig; +use ckb_network::NetworkConfig; +use ckb_resource::{Resource, ResourceLocator}; +use ckb_rpc::Config as RpcConfig; +use ckb_shared::tx_pool::TxPoolConfig; +use ckb_sync::Config as SyncConfig; +use logger::Config as LogConfig; + +use super::sentry_config::SentryConfig; +use super::{cli, ExitCode}; + +pub struct AppConfig { + resource: Resource, + content: AppConfigContent, +} + +pub enum AppConfigContent { + CKB(Box), + Miner(Box), +} + +#[derive(Clone, Debug, Deserialize)] +pub struct CKBAppConfig { + pub chain: ChainConfig, + pub data_dir: PathBuf, + pub logger: LogConfig, + pub sentry: SentryConfig, + + pub block_assembler: BlockAssemblerConfig, + #[serde(default)] + pub db: DBConfig, + pub network: NetworkConfig, + pub rpc: RpcConfig, + pub sync: SyncConfig, + pub tx_pool: TxPoolConfig, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct MinerAppConfig { + pub chain: ChainConfig, + pub data_dir: PathBuf, + pub logger: LogConfig, + pub sentry: SentryConfig, + + pub miner: MinerConfig, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct ChainConfig { + pub spec: PathBuf, +} + +impl AppConfig { + pub fn 
load_for_subcommand( + locator: &ResourceLocator, + subcommand_name: &str, + ) -> Result { + match subcommand_name { + cli::CMD_MINER => { + let resource = locator.miner(); + let config: MinerAppConfig = toml::from_slice(&resource.get()?)?; + + Ok(AppConfig { + resource, + content: AppConfigContent::with_miner( + config.derive_options(locator.root_dir())?, + ), + }) + } + _ => { + let resource = locator.ckb(); + let config: CKBAppConfig = toml::from_slice(&resource.get()?)?; + Ok(AppConfig { + resource, + content: AppConfigContent::with_ckb( + config.derive_options(locator.root_dir(), subcommand_name)?, + ), + }) + } + } + } + + pub fn logger(&self) -> &LogConfig { + match &self.content { + AppConfigContent::CKB(config) => &config.logger, + AppConfigContent::Miner(config) => &config.logger, + } + } + + pub fn sentry(&self) -> &SentryConfig { + match &self.content { + AppConfigContent::CKB(config) => &config.sentry, + AppConfigContent::Miner(config) => &config.sentry, + } + } + + pub fn chain_spec(&self, locator: &ResourceLocator) -> Result { + let spec_path = PathBuf::from(match &self.content { + AppConfigContent::CKB(config) => &config.chain.spec, + AppConfigContent::Miner(config) => &config.chain.spec, + }); + ChainSpec::resolve_relative_to(locator, spec_path, &self.resource).map_err(|err| { + eprintln!("{:?}", err); + ExitCode::Config + }) + } + + pub fn into_ckb(self) -> Result, ExitCode> { + match self.content { + AppConfigContent::CKB(config) => Ok(config), + _ => { + eprintln!("unmatched config file"); + Err(ExitCode::Failure) + } + } + } + + pub fn into_miner(self) -> Result, ExitCode> { + match self.content { + AppConfigContent::Miner(config) => Ok(config), + _ => { + eprintln!("unmatched config file"); + Err(ExitCode::Failure) + } + } + } +} + +impl AppConfigContent { + fn with_ckb(config: CKBAppConfig) -> AppConfigContent { + AppConfigContent::CKB(Box::new(config)) + } + fn with_miner(config: MinerAppConfig) -> AppConfigContent { + AppConfigContent::Miner(Box::new(config)) + } +} + +impl CKBAppConfig { + fn derive_options(mut self, root_dir: &Path, subcommand_name: &str) -> Result { + self.data_dir = canonicalize_data_dir(self.data_dir, root_dir)?; + self.logger.file = Some(touch( + mkdir(self.data_dir.join("logs"))?.join(subcommand_name.to_string() + ".log"), + )?); + self.db.path = mkdir(self.data_dir.join("db"))?; + self.network.path = mkdir(self.data_dir.join("network"))?; + + Ok(self) + } +} + +impl MinerAppConfig { + fn derive_options(mut self, root_dir: &Path) -> Result { + self.data_dir = canonicalize_data_dir(self.data_dir, root_dir)?; + self.logger.file = Some(touch(mkdir(self.data_dir.join("logs"))?.join("miner.log"))?); + + Ok(self) + } +} + +fn canonicalize_data_dir(data_dir: PathBuf, root_dir: &Path) -> Result { + let path = if data_dir.is_absolute() { + data_dir + } else { + root_dir.join(data_dir) + }; + + mkdir(path) +} + +fn mkdir(dir: PathBuf) -> Result { + fs::create_dir_all(&dir)?; + dir.canonicalize().map_err(Into::into) +} + +fn touch(path: PathBuf) -> Result { + fs::OpenOptions::new() + .create(true) + .append(true) + .open(&path)?; + + Ok(path) +} diff --git a/src/setup/args.rs b/src/setup/args.rs new file mode 100644 index 0000000000..b64a2b704c --- /dev/null +++ b/src/setup/args.rs @@ -0,0 +1,41 @@ +use super::app_config::CKBAppConfig; +use ckb_chain_spec::consensus::Consensus; +use ckb_instrument::Format; +use ckb_miner::MinerConfig; +use ckb_pow::PowEngine; +use ckb_resource::ResourceLocator; +use std::path::PathBuf; +use std::sync::Arc; + +pub 
struct ExportArgs {
+    pub config: Box<CKBAppConfig>,
+    pub consensus: Consensus,
+    pub format: Format,
+    pub target: PathBuf,
+}
+
+pub struct ImportArgs {
+    pub config: Box<CKBAppConfig>,
+    pub consensus: Consensus,
+    pub format: Format,
+    pub source: PathBuf,
+}
+
+pub struct RunArgs {
+    pub config: Box<CKBAppConfig>,
+    pub consensus: Consensus,
+}
+
+pub struct MinerArgs {
+    pub config: MinerConfig,
+    pub pow_engine: Arc<dyn PowEngine>,
+}
+
+pub struct InitArgs {
+    pub locator: ResourceLocator,
+    pub spec: String,
+    pub rpc_port: String,
+    pub p2p_port: String,
+    pub export_specs: bool,
+    pub list_specs: bool,
+}
diff --git a/src/setup/cli.rs b/src/setup/cli.rs
new file mode 100644
index 0000000000..4a092e87bf
--- /dev/null
+++ b/src/setup/cli.rs
@@ -0,0 +1,139 @@
+use build_info::{get_version, Version};
+use ckb_resource::{DEFAULT_P2P_PORT, DEFAULT_RPC_PORT, DEFAULT_SPEC};
+use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
+
+pub const CMD_RUN: &str = "run";
+pub const CMD_MINER: &str = "miner";
+pub const CMD_EXPORT: &str = "export";
+pub const CMD_IMPORT: &str = "import";
+pub const CMD_INIT: &str = "init";
+pub const CMD_CLI: &str = "cli";
+pub const CMD_KEYGEN: &str = "keygen";
+
+pub const ARG_CONFIG_DIR: &str = "config-dir";
+pub const ARG_FORMAT: &str = "format";
+pub const ARG_TARGET: &str = "target";
+pub const ARG_SOURCE: &str = "source";
+pub const ARG_LIST_SPECS: &str = "list-specs";
+pub const ARG_SPEC: &str = "spec";
+pub const ARG_EXPORT_SPECS: &str = "export-specs";
+pub const ARG_P2P_PORT: &str = "p2p-port";
+pub const ARG_RPC_PORT: &str = "rpc-port";
+
+pub fn get_matches() -> ArgMatches<'static> {
+    let version = get_version!();
+
+    App::new("ckb")
+        .author("Nervos Core Dev ")
+        .about("Nervos CKB - The Common Knowledge Base")
+        .version(version.short().as_str())
+        .long_version(version.long().as_str())
+        .setting(AppSettings::SubcommandRequiredElseHelp)
+        .arg(
+            Arg::with_name(ARG_CONFIG_DIR)
+                .global(true)
+                .short("C")
+                .value_name("path")
+                .takes_value(true)
+                .help(
+                    "Run as if ckb was started in <path> instead of the current working directory.",
+                ),
+        )
+        .subcommand(run())
+        .subcommand(miner())
+        .subcommand(export())
+        .subcommand(import())
+        .subcommand(cli())
+        .subcommand(init())
+        .get_matches()
+}
+
+fn run() -> App<'static, 'static> {
+    SubCommand::with_name(CMD_RUN).about("Running ckb node")
+}
+
+fn miner() -> App<'static, 'static> {
+    SubCommand::with_name(CMD_MINER).about("Running ckb miner")
+}
+
+fn arg_format() -> Arg<'static, 'static> {
+    Arg::with_name(ARG_FORMAT)
+        .short("f")
+        .long(ARG_FORMAT)
+        .possible_values(&["bin", "json"])
+        .required(true)
+        .takes_value(true)
+        .help("Specify the format.")
+}
+
+fn export() -> App<'static, 'static> {
+    SubCommand::with_name(CMD_EXPORT)
+        .about("Export ckb data")
+        .arg(arg_format())
+        .arg(
+            Arg::with_name(ARG_TARGET)
+                .short("t")
+                .long(ARG_TARGET)
+                .value_name("path")
+                .required(true)
+                .index(1)
+                .help("Specify the export target path."),
+        )
+}
+
+fn import() -> App<'static, 'static> {
+    SubCommand::with_name(CMD_IMPORT)
+        .about("Import ckb data")
+        .arg(arg_format())
+        .arg(
+            Arg::with_name(ARG_SOURCE)
+                .short("s")
+                .long(ARG_SOURCE)
+                .value_name("path")
+                .required(true)
+                .index(1)
+                .help("Specify the exported data path."),
+        )
+}
+
+fn cli() -> App<'static, 'static> {
+    SubCommand::with_name(CMD_CLI)
+        .about("CLI tools")
+        .setting(AppSettings::SubcommandRequiredElseHelp)
+        .subcommand(SubCommand::with_name(CMD_KEYGEN).about("Generate new key"))
+}
+
+fn init() -> App<'static, 'static> {
+    SubCommand::with_name(CMD_INIT)
.about("Create a CKB direcotry or reinitialize an existing one") + .arg( + Arg::with_name(ARG_LIST_SPECS) + .short("l") + .long(ARG_LIST_SPECS) + .help("List available chain specs"), + ) + .arg( + Arg::with_name(ARG_SPEC) + .short("s") + .long(ARG_SPEC) + .default_value(DEFAULT_SPEC) + .help("Export config files for "), + ) + .arg( + Arg::with_name(ARG_RPC_PORT) + .long(ARG_RPC_PORT) + .default_value(DEFAULT_RPC_PORT) + .help("Replace CKB RPC port in the exported config file"), + ) + .arg( + Arg::with_name(ARG_P2P_PORT) + .long(ARG_P2P_PORT) + .default_value(DEFAULT_P2P_PORT) + .help("Replace CKB P2P port in the exported config file"), + ) + .arg( + Arg::with_name(ARG_EXPORT_SPECS) + .long(ARG_EXPORT_SPECS) + .help("Export spec files as well"), + ) +} diff --git a/src/setup/exit_code.rs b/src/setup/exit_code.rs new file mode 100644 index 0000000000..1ace6310fc --- /dev/null +++ b/src/setup/exit_code.rs @@ -0,0 +1,45 @@ +use std::io; + +/// Uses 0, 64 - 113 as exit code. +#[repr(i32)] +#[derive(Copy, Clone)] +pub enum ExitCode { + Cli = 64, + Config = 65, + IO = 66, + Failure = 113, +} + +impl ExitCode { + pub fn into(self) -> i32 { + self as i32 + } +} + +impl From for ExitCode { + fn from(err: io::Error) -> ExitCode { + eprintln!("IO Error: {:?}", err); + ExitCode::IO + } +} + +impl From for ExitCode { + fn from(err: toml::de::Error) -> ExitCode { + eprintln!("Config Error: {:?}", err); + ExitCode::Config + } +} + +impl From for ExitCode { + fn from(err: log::SetLoggerError) -> ExitCode { + eprintln!("Config Error: {:?}", err); + ExitCode::Config + } +} + +impl From for ExitCode { + fn from(err: clap::Error) -> ExitCode { + eprintln!("Args Error: {:?}", err); + ExitCode::Cli + } +} diff --git a/src/setup/mod.rs b/src/setup/mod.rs new file mode 100644 index 0000000000..466ea5c2c0 --- /dev/null +++ b/src/setup/mod.rs @@ -0,0 +1,159 @@ +mod app_config; +mod args; +pub mod cli; +mod exit_code; +mod sentry_config; + +pub use app_config::AppConfig; +pub use args::{ExportArgs, ImportArgs, InitArgs, MinerArgs, RunArgs}; +pub use exit_code::ExitCode; + +use ckb_chain_spec::{consensus::Consensus, ChainSpec}; +use ckb_instrument::Format; +use ckb_resource::ResourceLocator; +use clap::{value_t, ArgMatches}; +use logger::LoggerInitGuard; +use std::path::PathBuf; + +pub struct Setup { + subcommand_name: String, + resource_locator: ResourceLocator, + config: AppConfig, +} + +pub struct SetupGuard { + #[allow(dead_code)] + logger_guard: LoggerInitGuard, + #[allow(dead_code)] + sentry_guard: Option, +} + +impl Setup { + pub fn from_matches<'m>(matches: &ArgMatches<'m>) -> Result { + let subcommand_name = match matches.subcommand_name() { + Some(subcommand_name) => subcommand_name, + None => { + eprintln!("expect a subcommand"); + return Err(ExitCode::Cli); + } + }; + + let resource_locator = locator_from_matches(matches)?; + let config = AppConfig::load_for_subcommand(&resource_locator, subcommand_name)?; + + Ok(Setup { + subcommand_name: subcommand_name.to_string(), + resource_locator, + config, + }) + } + + pub fn setup_app(&self) -> Result { + let logger_guard = logger::init(self.config.logger().clone())?; + let sentry_guard = if is_daemon(&self.subcommand_name) { + Some(self.config.sentry().init()) + } else { + None + }; + + Ok(SetupGuard { + logger_guard, + sentry_guard, + }) + } + + pub fn run(self) -> Result { + let consensus = self.consensus()?; + let config = self.config.into_ckb()?; + + Ok(RunArgs { config, consensus }) + } + + pub fn miner(self) -> Result { + let spec = self.chain_spec()?; + 
let config = self.config.into_miner()?; + let pow_engine = spec.pow_engine(); + + Ok(MinerArgs { + pow_engine, + config: config.miner, + }) + } + + pub fn import<'m>(self, matches: &ArgMatches<'m>) -> Result { + let consensus = self.consensus()?; + let config = self.config.into_ckb()?; + let format = value_t!(matches.value_of(cli::ARG_FORMAT), Format)?; + let source = value_t!(matches.value_of(cli::ARG_SOURCE), PathBuf)?; + + Ok(ImportArgs { + config, + consensus, + format, + source, + }) + } + + pub fn export<'m>(self, matches: &ArgMatches<'m>) -> Result { + let consensus = self.consensus()?; + let config = self.config.into_ckb()?; + let format = value_t!(matches.value_of(cli::ARG_FORMAT), Format)?; + let target = value_t!(matches.value_of(cli::ARG_TARGET), PathBuf)?; + + Ok(ExportArgs { + config, + consensus, + format, + target, + }) + } + + pub fn init<'m>(matches: &ArgMatches<'m>) -> Result { + let locator = locator_from_matches(matches)?; + let export_specs = matches.is_present(cli::ARG_EXPORT_SPECS); + let list_specs = matches.is_present(cli::ARG_LIST_SPECS); + let spec = matches.value_of(cli::ARG_SPEC).unwrap().to_string(); + let rpc_port = matches.value_of(cli::ARG_RPC_PORT).unwrap().to_string(); + let p2p_port = matches.value_of(cli::ARG_P2P_PORT).unwrap().to_string(); + + Ok(InitArgs { + locator, + spec, + rpc_port, + p2p_port, + export_specs, + list_specs, + }) + } + + fn chain_spec(&self) -> Result { + self.config.chain_spec(&self.resource_locator) + } + + fn consensus(&self) -> Result { + consensus_from_spec(&self.chain_spec()?) + } +} + +fn is_daemon(subcommand_name: &str) -> bool { + match subcommand_name { + cli::CMD_RUN => true, + cli::CMD_MINER => true, + _ => false, + } +} + +fn consensus_from_spec(spec: &ChainSpec) -> Result { + spec.to_consensus().map_err(|err| { + eprintln!("{:?}", err); + ExitCode::Config + }) +} + +fn locator_from_matches<'m>(matches: &ArgMatches<'m>) -> Result { + let config_dir = match matches.value_of(cli::ARG_CONFIG_DIR) { + Some(arg_config_dir) => PathBuf::from(arg_config_dir), + None => ::std::env::current_dir()?, + }; + ResourceLocator::with_root_dir(config_dir).map_err(Into::into) +} diff --git a/src/cli/sentry_config.rs b/src/setup/sentry_config.rs similarity index 91% rename from src/cli/sentry_config.rs rename to src/setup/sentry_config.rs index 073e3207e5..4a59856670 100644 --- a/src/cli/sentry_config.rs +++ b/src/setup/sentry_config.rs @@ -8,8 +8,8 @@ pub struct SentryConfig { } impl SentryConfig { - pub fn init(self) -> sentry::internals::ClientInitGuard { - let guard = sentry::init(&self); + pub fn init(&self) -> sentry::internals::ClientInitGuard { + let guard = sentry::init(self); if guard.is_enabled() { sentry::integrations::panic::register_panic_handler(); info!(target: "sentry", "**Notice**: \ diff --git a/src/subcommand/cli.rs b/src/subcommand/cli.rs new file mode 100644 index 0000000000..bf1bf66e78 --- /dev/null +++ b/src/subcommand/cli.rs @@ -0,0 +1,9 @@ +use crate::setup::ExitCode; +use crypto::secp::Generator; +use numext_fixed_hash::H256; + +pub fn keygen() -> Result<(), ExitCode> { + let result: H256 = Generator::new().random_privkey().into(); + println!("{:#x}", result); + Ok(()) +} diff --git a/src/subcommand/export.rs b/src/subcommand/export.rs new file mode 100644 index 0000000000..48cf159c63 --- /dev/null +++ b/src/subcommand/export.rs @@ -0,0 +1,18 @@ +use crate::setup::{ExitCode, ExportArgs}; +use ckb_db::diskdb::RocksDB; +use ckb_instrument::Export; +use ckb_shared::cachedb::CacheDB; +use 
ckb_shared::shared::SharedBuilder; + +pub fn export(args: ExportArgs) -> Result<(), ExitCode> { + let shared = SharedBuilder::>::default() + .consensus(args.consensus) + .db(&args.config.db) + .build(); + Export::new(shared, args.format, args.target) + .execute() + .map_err(|err| { + eprintln!("Export error: {:?}", err); + ExitCode::Failure + }) +} diff --git a/src/subcommand/import.rs b/src/subcommand/import.rs new file mode 100644 index 0000000000..191d6d49b5 --- /dev/null +++ b/src/subcommand/import.rs @@ -0,0 +1,25 @@ +use crate::setup::{ExitCode, ImportArgs}; +use ckb_chain::chain::ChainBuilder; +use ckb_db::diskdb::RocksDB; +use ckb_instrument::Import; +use ckb_notify::NotifyService; +use ckb_shared::cachedb::CacheDB; +use ckb_shared::shared::SharedBuilder; + +pub fn import(args: ImportArgs) -> Result<(), ExitCode> { + let shared = SharedBuilder::>::default() + .consensus(args.consensus) + .db(&args.config.db) + .build(); + + let notify = NotifyService::default().start::<&str>(None); + let chain_service = ChainBuilder::new(shared.clone(), notify).build(); + let chain_controller = chain_service.start::<&str>(Some("ImportChainService")); + + Import::new(chain_controller, args.format, args.source) + .execute() + .map_err(|err| { + eprintln!("Import error: {:?}", err); + ExitCode::Failure + }) +} diff --git a/src/subcommand/init.rs b/src/subcommand/init.rs new file mode 100644 index 0000000000..d5e5fa55ad --- /dev/null +++ b/src/subcommand/init.rs @@ -0,0 +1,26 @@ +use crate::setup::{ExitCode, InitArgs}; +use ckb_resource::{TemplateContext, AVAILABLE_SPECS}; + +pub fn init(args: InitArgs) -> Result<(), ExitCode> { + if args.list_specs { + for spec in AVAILABLE_SPECS { + println!("{}", spec); + } + return Ok(()); + } + + let context = TemplateContext { + spec: &args.spec, + rpc_port: &args.rpc_port, + p2p_port: &args.p2p_port, + }; + + args.locator.export_ckb(&context)?; + args.locator.export_miner(&context)?; + + if args.export_specs { + args.locator.export_specs()?; + } + + Ok(()) +} diff --git a/src/subcommand/miner.rs b/src/subcommand/miner.rs new file mode 100644 index 0000000000..9e161f6fe0 --- /dev/null +++ b/src/subcommand/miner.rs @@ -0,0 +1,24 @@ +use crate::setup::{ExitCode, MinerArgs}; +use ckb_miner::{Client, Miner}; +use ckb_util::Mutex; +use crossbeam_channel::unbounded; +use std::sync::Arc; +use std::thread; + +pub fn miner(args: MinerArgs) -> Result<(), ExitCode> { + let (new_work_tx, new_work_rx) = unbounded(); + + let work = Arc::new(Mutex::new(None)); + + let client = Client::new(Arc::clone(&work), new_work_tx, args.config); + + let miner = Miner::new(work, args.pow_engine, new_work_rx, client.clone()); + + thread::Builder::new() + .name("client".to_string()) + .spawn(move || client.poll_block_template()) + .expect("Start client failed!"); + + miner.run(); + Ok(()) +} diff --git a/src/subcommand/mod.rs b/src/subcommand/mod.rs new file mode 100644 index 0000000000..fc49eb694f --- /dev/null +++ b/src/subcommand/mod.rs @@ -0,0 +1,12 @@ +pub mod cli; +mod export; +mod import; +mod init; +mod miner; +mod run; + +pub use self::export::export; +pub use self::import::import; +pub use self::init::init; +pub use self::miner::miner; +pub use self::run::run; diff --git a/src/cli/run_impl.rs b/src/subcommand/run.rs similarity index 76% rename from src/cli/run_impl.rs rename to src/subcommand/run.rs index b132591461..b78215ab4b 100644 --- a/src/cli/run_impl.rs +++ b/src/subcommand/run.rs @@ -1,5 +1,5 @@ -use crate::helper::wait_for_exit; -use crate::Setup; +use 
crate::setup::{ExitCode, RunArgs}; +use crate::system::wait_for_exit; use ckb_chain::chain::{ChainBuilder, ChainController}; use ckb_db::diskdb::RocksDB; use ckb_miner::BlockAssembler; @@ -11,22 +11,14 @@ use ckb_shared::index::ChainIndex; use ckb_shared::shared::{Shared, SharedBuilder}; use ckb_sync::{NetTimeProtocol, NetworkProtocol, Relayer, Synchronizer}; use ckb_traits::chain_provider::ChainProvider; -use crypto::secp::Generator; use log::info; -use numext_fixed_hash::H256; use std::sync::Arc; -pub fn run(setup: Setup) { - let consensus = setup - .chain_spec - .to_consensus(&setup.configs.chain.spec) - .unwrap(); - +pub fn run(args: RunArgs) -> Result<(), ExitCode> { let shared = SharedBuilder::>::default() - .consensus(consensus) - .db(&setup.configs.db) - .tx_pool_config(setup.configs.tx_pool.clone()) - .txs_verify_cache_size(setup.configs.tx_pool.txs_verify_cache_size) + .consensus(args.consensus) + .db(&args.config.db) + .tx_pool_config(args.config.tx_pool) .build(); let notify = NotifyService::default().start(Some("notify")); @@ -34,11 +26,11 @@ pub fn run(setup: Setup) { let chain_controller = setup_chain(shared.clone(), notify.clone()); info!(target: "main", "chain genesis hash: {:#x}", shared.genesis_hash()); - let block_assembler = BlockAssembler::new(shared.clone(), setup.configs.block_assembler); + let block_assembler = BlockAssembler::new(shared.clone(), args.config.block_assembler); let block_assembler_controller = block_assembler.start(Some("MinerAgent"), ¬ify); let synchronizer = - Synchronizer::new(chain_controller.clone(), shared.clone(), setup.configs.sync); + Synchronizer::new(chain_controller.clone(), shared.clone(), args.config.sync); let relayer = Relayer::new( chain_controller.clone(), @@ -49,7 +41,7 @@ pub fn run(setup: Setup) { let net_time_checker = NetTimeProtocol::default(); let network_state = Arc::new( - NetworkState::from_config(setup.configs.network).expect("Init network state failed"), + NetworkState::from_config(args.config.network).expect("Init network state failed"), ); let protocols = vec![ CKBProtocol::new( @@ -79,7 +71,7 @@ pub fn run(setup: Setup) { .expect("Start network service failed"); let rpc_server = RpcServer::new( - setup.configs.rpc, + args.config.rpc, network_controller, shared, chain_controller, @@ -92,6 +84,8 @@ pub fn run(setup: Setup) { rpc_server.close(); info!(target: "main", "Jsonrpc shutdown"); + + Ok(()) } fn setup_chain( @@ -101,8 +95,3 @@ fn setup_chain( let chain_service = ChainBuilder::new(shared, notify).build(); chain_service.start(Some("ChainService")) } - -pub fn keygen() { - let result: H256 = Generator::new().random_privkey().into(); - println!("{:#x}", result); -} diff --git a/src/system.rs b/src/system.rs new file mode 100644 index 0000000000..e02ee7d760 --- /dev/null +++ b/src/system.rs @@ -0,0 +1,16 @@ +use ckb_util::{Condvar, Mutex}; +use std::sync::Arc; + +pub fn wait_for_exit() { + let exit = Arc::new((Mutex::new(()), Condvar::new())); + + // Handle possible exits + let e = Arc::<(Mutex<()>, Condvar)>::clone(&exit); + let _ = ctrlc::set_handler(move || { + e.1.notify_all(); + }); + + // Wait for signal + let mut l = exit.0.lock(); + exit.1.wait(&mut l); +} diff --git a/test/fixtures/nodes_template/default.toml b/test/fixtures/nodes_template/default.toml deleted file mode 100644 index e37013e567..0000000000 --- a/test/fixtures/nodes_template/default.toml +++ /dev/null @@ -1,54 +0,0 @@ -data_dir = "default" - -[db] -path = "default/db" - -[chain] -spec = { Local = "spec/integration_test.toml" } - -[logger] -file 
= "ckb.log" -filter = "info,network=trace,rpc=debug,sync=debug,relay=debug" -color = true - -[network] -path = "default/network" -listen_addresses = ["/ip4/0.0.0.0/tcp/P2P_PORT"] -public_addresses = [] -bootnodes = [] -reserved_peers = [] -reserved_only = false -max_peers = 125 -max_outbound_peers = 30 -ping_interval_secs = 15 -ping_timeout_secs = 20 -connect_outbound_interval_secs = 1 - -[rpc] -listen_address = "0.0.0.0:RPC_PORT" - -max_request_body_size = 10485760 - -modules = ["Net", "Pool", "Miner", "Chain", "IntegrationTest", "Trace"] - -[sync] -verification_level = "Full" -orphan_block_limit = 1024 - -[tx_pool] -max_pool_size = 10000 -max_orphan_size = 10000 -max_proposal_size = 10000 -max_cache_size = 1000 -max_pending_size = 10000 -trace = 100 -txs_verify_cache_size = 100000 - -[block_assembler] -# value is set as always success binary hash -binary_hash = "0x0000000000000000000000000000000000000000000000000000000000000001" -args = [] - -[sentry] -# set to blank to disable sentry error collection -dsn = "" diff --git a/test/fixtures/nodes_template/spec/cells/always_success b/test/fixtures/nodes_template/spec/cells/always_success deleted file mode 100755 index 248e53c5a08dd0483754ad729e4c597c0c78b076..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 344 zcmb<-^>JfjWMqH=CWg-pAYKKSWMFWBO3Z-LtU%gUG Y3C4iZAid0JVjwQeAA)FdFki!j0c?N{-2eap diff --git a/test/src/main.rs b/test/src/main.rs index d059a4df04..67f9648afd 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -7,6 +7,7 @@ fn main() { filter: Some("info".to_owned()), color: true, file: None, + copy_to_stdout: true, }; let _logger_guard = logger::init(log_config).expect("init Logger"); diff --git a/test/src/node.rs b/test/src/node.rs index 4f6855b72c..bad4cb5141 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -4,19 +4,14 @@ use ckb_core::block::{Block, BlockBuilder}; use ckb_core::header::{HeaderBuilder, Seal}; use ckb_core::script::Script; use ckb_core::transaction::{CellInput, CellOutput, OutPoint, Transaction, TransactionBuilder}; -use fs_extra::dir::{copy, CopyOptions}; use jsonrpc_client_http::{HttpHandle, HttpTransport}; use jsonrpc_types::{BlockTemplate, CellbaseTemplate}; use log::info; use numext_fixed_hash::H256; use rand; -use std::fs::File; -use std::io::{Error, Read, Write}; -use std::path::PathBuf; +use std::io::Error; use std::process::{Child, Command, Stdio}; -const DEFAULT_CONFIG_FILE: &str = "default.toml"; - pub struct Node { pub binary: String, pub dir: String, @@ -52,11 +47,7 @@ impl Node { pub fn start(&mut self) { self.init_config_file().expect("failed to init config file"); let child_process = Command::new(self.binary.to_owned()) - .args(&[ - "run", - "-c", - &format!("{}/{}", self.dir, DEFAULT_CONFIG_FILE), - ]) + .args(&["-C", &self.dir, "run"]) .stdin(Stdio::null()) .stdout(Stdio::null()) .stderr(Stdio::inherit()) @@ -206,22 +197,21 @@ impl Node { } fn init_config_file(&self) -> Result<(), Error> { - let mut options = CopyOptions::new(); - options.copy_inside = true; - let source = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures/nodes_template"); - let dest = PathBuf::from(&self.dir); - copy(source, &dest, &options).expect("failed to copy template"); - - let mut data = String::new(); - { - let mut file = File::open(dest.join(DEFAULT_CONFIG_FILE))?; - file.read_to_string(&mut data)?; - } - let new_data = data - .replace("P2P_PORT", &self.p2p_port.to_string()) - .replace("RPC_PORT", &self.rpc_port.to_string()); - let mut file = 
File::create(dest.join(DEFAULT_CONFIG_FILE))?; - file.write_all(new_data.as_bytes())?; - Ok(()) + let rpc_port = format!("{}", self.rpc_port).to_string(); + let p2p_port = format!("{}", self.p2p_port).to_string(); + Command::new(self.binary.to_owned()) + .args(&[ + "-C", + &self.dir, + "init", + "--spec", + "integration", + "--rpc-port", + &rpc_port, + "--p2p-port", + &p2p_port, + ]) + .output() + .map(|_| ()) } } diff --git a/util/dir/Cargo.toml b/util/dir/Cargo.toml deleted file mode 100644 index 4aceece87e..0000000000 --- a/util/dir/Cargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "dir" -version = "0.9.0-pre" -license = "MIT" -authors = ["Nervos Core Dev "] -edition = "2018" diff --git a/util/dir/src/lib.rs b/util/dir/src/lib.rs deleted file mode 100644 index 6e8cf10b22..0000000000 --- a/util/dir/src/lib.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::fs; -use std::path::{Path, PathBuf}; - -#[derive(Debug, PartialEq, Clone)] -pub struct Directories { - pub base: PathBuf, -} - -impl Directories { - pub fn new>(base: P) -> Self { - let base = base.as_ref().to_path_buf(); - Directories { base } - } - - pub fn join>(&self, path: P) -> PathBuf { - let result = self.base.join(path.as_ref()); - fs::create_dir_all(&result).expect("Unable to create dir"); - result - } -} diff --git a/util/logger/src/lib.rs b/util/logger/src/lib.rs index 6469172ab4..d7569d8d1e 100644 --- a/util/logger/src/lib.rs +++ b/util/logger/src/lib.rs @@ -39,6 +39,7 @@ impl Logger { let (sender, receiver) = unbounded(); let file = config.file; let enable_color = config.color; + let copy_to_stdout = config.copy_to_stdout; let tb = thread::Builder::new() .name("LogWriter".to_owned()) @@ -66,7 +67,9 @@ impl Logger { let _ = file.write_all(removed_color.as_bytes()); let _ = file.write_all(b"\n"); }; - println!("{}", output); + if copy_to_stdout { + println!("{}", output); + } } Ok(Message::Terminate) | Err(_) => { break; @@ -93,6 +96,7 @@ pub struct Config { pub filter: Option, pub color: bool, pub file: Option, + pub copy_to_stdout: bool, } impl Default for Config { @@ -101,6 +105,7 @@ impl Default for Config { filter: None, color: !cfg!(windows), file: None, + copy_to_stdout: true, } } } From 6b1788c4ca57a89efcd5bb5a569cee9c7a6d2aff Mon Sep 17 00:00:00 2001 From: ian Date: Fri, 5 Apr 2019 20:03:35 +0800 Subject: [PATCH 06/29] chore(deps): remove fs_extra from ckb-test --- test/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/Cargo.toml b/test/Cargo.toml index 6d94d35a12..e509ac1c2a 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -14,7 +14,6 @@ ckb-shared = { path = "../shared" } ckb-sync = { path = "../sync" } ckb-protocol = { path = "../protocol"} numext-fixed-hash = { version = "0.1", features = ["support_rand", "support_heapsize", "support_serde"] } -fs_extra = "1.1" tempfile = "3.0" jsonrpc-client-core = "0.5.0" jsonrpc-client-http = "0.5.0" From 334fe76be5667a23aef253fa62a130185715bade Mon Sep 17 00:00:00 2001 From: ian Date: Mon, 8 Apr 2019 11:21:38 +0800 Subject: [PATCH 07/29] test: loading bundled config files --- resource/src/lib.rs | 2 +- src/setup/app_config.rs | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/resource/src/lib.rs b/resource/src/lib.rs index 37628eb263..ddf4f1060b 100644 --- a/resource/src/lib.rs +++ b/resource/src/lib.rs @@ -204,7 +204,7 @@ mod tests { fn mkdir() -> tempfile::TempDir { tempfile::Builder::new() - .prefix("ckb_resoruce_test") + .prefix("ckb_resource_test") .tempdir() .unwrap() } diff --git 
a/src/setup/app_config.rs b/src/setup/app_config.rs index bf7f34c8d7..a28af549ff 100644 --- a/src/setup/app_config.rs +++ b/src/setup/app_config.rs @@ -188,3 +188,35 @@ fn touch(path: PathBuf) -> Result { Ok(path) } + +#[cfg(test)] +mod tests { + use super::*; + + fn mkdir() -> tempfile::TempDir { + tempfile::Builder::new() + .prefix("app_config_test") + .tempdir() + .unwrap() + } + + #[test] + fn test_ckb_toml() { + let dir = mkdir(); + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap(); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN) + .unwrap_or_else(|err| panic!(err)); + let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err)); + assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/dev.toml")); + } + + #[test] + fn test_miner_toml() { + let dir = mkdir(); + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap(); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER) + .unwrap_or_else(|err| panic!(err)); + let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err)); + assert_eq!(miner_config.chain.spec, PathBuf::from("specs/dev.toml")); + } +} From f8b818914d75da7d213999355c98b1d2cf7f3360 Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 9 Apr 2019 10:28:19 +0800 Subject: [PATCH 08/29] test: loading exported config files --- src/setup/app_config.rs | 120 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/src/setup/app_config.rs b/src/setup/app_config.rs index a28af549ff..8801f2486e 100644 --- a/src/setup/app_config.rs +++ b/src/setup/app_config.rs @@ -192,6 +192,7 @@ fn touch(path: PathBuf) -> Result { #[cfg(test)] mod tests { use super::*; + use ckb_resource::TemplateContext; fn mkdir() -> tempfile::TempDir { tempfile::Builder::new() @@ -208,6 +209,15 @@ mod tests { .unwrap_or_else(|err| panic!(err)); let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err)); assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/dev.toml")); + assert_eq!( + ckb_config.logger.file, + Some(locator.root_dir().join("data/logs/run.log")) + ); + assert_eq!(ckb_config.db.path, locator.root_dir().join("data/db")); + assert_eq!( + ckb_config.network.path, + locator.root_dir().join("data/network") + ); } #[test] @@ -218,5 +228,115 @@ mod tests { .unwrap_or_else(|err| panic!(err)); let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err)); assert_eq!(miner_config.chain.spec, PathBuf::from("specs/dev.toml")); + assert_eq!( + miner_config.logger.file, + Some(locator.root_dir().join("data/logs/miner.log")) + ); + } + + #[test] + fn test_export_dev_config_files() { + let dir = mkdir(); + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap(); + let context = TemplateContext { + spec: "dev", + rpc_port: "7000", + p2p_port: "8000", + }; + { + locator.export_ckb(&context).expect("export config files"); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN) + .unwrap_or_else(|err| panic!(err)); + let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err)); + assert_eq!(ckb_config.logger.filter, Some("info".to_string())); + assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/dev.toml")); + assert_eq!( + ckb_config.network.listen_addresses, + vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()] + ); + assert_eq!(ckb_config.network.connect_outbound_interval_secs, 15); + assert_eq!(ckb_config.rpc.listen_address, "0.0.0.0:7000"); + } + { + 
locator.export_miner(&context).expect("export config files"); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER) + .unwrap_or_else(|err| panic!(err)); + let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err)); + assert_eq!(miner_config.logger.filter, Some("info".to_string())); + assert_eq!(miner_config.chain.spec, PathBuf::from("specs/dev.toml")); + assert_eq!(miner_config.miner.rpc_url, "http://127.0.0.1:7000/"); + } + } + + #[test] + fn test_export_testnet_config_files() { + let dir = mkdir(); + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap(); + let context = TemplateContext { + spec: "testnet", + rpc_port: "7000", + p2p_port: "8000", + }; + locator.export_ckb(&context).expect("export config files"); + { + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN) + .unwrap_or_else(|err| panic!(err)); + let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err)); + assert_eq!(ckb_config.logger.filter, Some("info".to_string())); + assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/testnet.toml")); + assert_eq!( + ckb_config.network.listen_addresses, + vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()] + ); + assert_eq!(ckb_config.network.connect_outbound_interval_secs, 15); + assert_eq!(ckb_config.rpc.listen_address, "0.0.0.0:7000"); + } + { + locator.export_miner(&context).expect("export config files"); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER) + .unwrap_or_else(|err| panic!(err)); + let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err)); + assert_eq!(miner_config.logger.filter, Some("info".to_string())); + assert_eq!(miner_config.chain.spec, PathBuf::from("specs/testnet.toml")); + assert_eq!(miner_config.miner.rpc_url, "http://127.0.0.1:7000/"); + } + } + + #[test] + fn test_export_integration_config_files() { + let dir = mkdir(); + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap(); + let context = TemplateContext { + spec: "integration", + rpc_port: "7000", + p2p_port: "8000", + }; + locator.export_ckb(&context).expect("export config files"); + { + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN) + .unwrap_or_else(|err| panic!(err)); + let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err)); + assert_eq!( + ckb_config.chain.spec, + PathBuf::from("specs/integration.toml") + ); + assert_eq!( + ckb_config.network.listen_addresses, + vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()] + ); + assert_eq!(ckb_config.network.connect_outbound_interval_secs, 1); + assert_eq!(ckb_config.rpc.listen_address, "0.0.0.0:7000"); + } + { + locator.export_miner(&context).expect("export config files"); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER) + .unwrap_or_else(|err| panic!(err)); + let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err)); + assert_eq!( + miner_config.chain.spec, + PathBuf::from("specs/integration.toml") + ); + assert_eq!(miner_config.miner.rpc_url, "http://127.0.0.1:7000/"); + } } } From b11ae831e57fdcf2c2f653bd820e26721d8df1e0 Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 9 Apr 2019 10:37:23 +0800 Subject: [PATCH 09/29] feat: export config files atomically. Write to temp file first, then move to the target location. 
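
The export now follows the usual write-to-temp-then-rename pattern: the rendered config is written to a `NamedTempFile` and only moved onto its final name once the write has succeeded, so an interrupted export cannot leave a truncated `ckb.toml` or `ckb-miner.toml` behind. As a rough standalone sketch of the same idea (the helper name, example path, and contents below are made up for illustration; only the `tempfile` crate APIs match what this patch actually uses):

```rust
use std::io::Write;
use std::path::Path;

use tempfile::NamedTempFile;

/// Write `contents` to `target` atomically: render into a temporary file
/// first, then rename it over the final path, so a crash or I/O error can
/// never leave a half-written config behind.
fn write_atomically(target: &Path, contents: &str) -> std::io::Result<()> {
    let mut tmp = NamedTempFile::new()?;
    tmp.write_all(contents.as_bytes())?;
    // `persist` renames the temp file onto `target`; readers see either the
    // old file or the complete new one, never a partial write.
    tmp.into_temp_path().persist(target).map_err(|e| e.error)?;
    Ok(())
}

fn main() -> std::io::Result<()> {
    write_atomically(Path::new("example-ckb.toml"), "# rendered config\n")
}
```

One caveat of this approach: `persist` is a `rename` under the hood and only works within a single filesystem, so it can fail if the system temp directory lives on a different mount than the target; when that matters, `NamedTempFile::new_in` can create the temporary file next to the target instead.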
--- resource/Cargo.toml | 2 -- resource/src/lib.rs | 18 +++++++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/resource/Cargo.toml b/resource/Cargo.toml index 6e25e8de36..a906a1918d 100644 --- a/resource/Cargo.toml +++ b/resource/Cargo.toml @@ -10,8 +10,6 @@ include = ["/specs", "/ckb.toml", "/ckb-miner.toml"] [dependencies] phf = "0.7.21" includedir = "0.5.0" - -[dev-dependencies] tempfile = "3.0" [build-dependencies] diff --git a/resource/src/lib.rs b/resource/src/lib.rs index ddf4f1060b..20465bed55 100644 --- a/resource/src/lib.rs +++ b/resource/src/lib.rs @@ -13,6 +13,7 @@ use std::borrow::Cow; use std::fs; use std::io::{self, BufReader, Read}; use std::path::{Path, PathBuf}; +use tempfile::NamedTempFile; include!(concat!(env!("OUT_DIR"), "/bundled.rs")); @@ -139,15 +140,21 @@ impl ResourceLocator { pub fn export_ckb<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { let ckb = Resource::Bundled(CKB_CONFIG_FILE_NAME.to_string()); let template = Template::new(from_utf8(ckb.get()?)?); - let mut out = fs::File::create(self.root_dir.join(CKB_CONFIG_FILE_NAME))?; - template.write_to(&mut out, context) + let mut out = NamedTempFile::new()?; + template.write_to(&mut out, context)?; + out.into_temp_path() + .persist(self.root_dir.join(CKB_CONFIG_FILE_NAME)) + .map_err(Into::into) } pub fn export_miner<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { let miner = Resource::Bundled(MINER_CONFIG_FILE_NAME.to_string()); let template = Template::new(from_utf8(miner.get()?)?); - let mut out = fs::File::create(self.root_dir.join(MINER_CONFIG_FILE_NAME))?; - template.write_to(&mut out, context) + let mut out = NamedTempFile::new()?; + template.write_to(&mut out, context)?; + out.into_temp_path() + .persist(self.root_dir.join(MINER_CONFIG_FILE_NAME)) + .map_err(Into::into) } pub fn export_specs(&self) -> Result<()> { @@ -155,8 +162,9 @@ impl ResourceLocator { if name.starts_with(SPECS_RESOURCE_DIR_NAME) { let path = self.root_dir.join(name); fs::create_dir_all(path.parent().unwrap())?; - let mut out = fs::File::create(path)?; + let mut out = NamedTempFile::new()?; io::copy(&mut BUNDLED.read(name)?, &mut out)?; + out.into_temp_path().persist(path)?; } } From 511a52ad9cebe50a8b0e06819db2b61420e5569d Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 9 Apr 2019 10:50:05 +0800 Subject: [PATCH 10/29] feat: warn when ckb init overwrites files --- resource/src/lib.rs | 6 ++++++ src/setup/args.rs | 1 + src/setup/cli.rs | 7 +++++++ src/setup/mod.rs | 2 ++ src/subcommand/init.rs | 5 +++++ 5 files changed, 21 insertions(+) diff --git a/resource/src/lib.rs b/resource/src/lib.rs index 20465bed55..30938f3ce2 100644 --- a/resource/src/lib.rs +++ b/resource/src/lib.rs @@ -137,6 +137,12 @@ impl ResourceLocator { } } + pub fn exported(&self) -> bool { + BUNDLED + .file_names() + .any(|name| self.root_dir.join(name).exists()) + } + pub fn export_ckb<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { let ckb = Resource::Bundled(CKB_CONFIG_FILE_NAME.to_string()); let template = Template::new(from_utf8(ckb.get()?)?); diff --git a/src/setup/args.rs b/src/setup/args.rs index b64a2b704c..1d5aaa14d7 100644 --- a/src/setup/args.rs +++ b/src/setup/args.rs @@ -38,4 +38,5 @@ pub struct InitArgs { pub p2p_port: String, pub export_specs: bool, pub list_specs: bool, + pub force: bool, } diff --git a/src/setup/cli.rs b/src/setup/cli.rs index 4a092e87bf..b75d46375f 100644 --- a/src/setup/cli.rs +++ b/src/setup/cli.rs @@ -19,6 +19,7 @@ pub const ARG_SPEC: &str = "spec"; pub const 
ARG_EXPORT_SPECS: &str = "export-specs"; pub const ARG_P2P_PORT: &str = "p2p-port"; pub const ARG_RPC_PORT: &str = "rpc-port"; +pub const ARG_FORCE: &str = "force"; pub fn get_matches() -> ArgMatches<'static> { let version = get_version!(); @@ -119,6 +120,12 @@ fn init() -> App<'static, 'static> { .default_value(DEFAULT_SPEC) .help("Export config files for "), ) + .arg( + Arg::with_name(ARG_FORCE) + .short("f") + .long(ARG_FORCE) + .help("Force overwriting existing files"), + ) .arg( Arg::with_name(ARG_RPC_PORT) .long(ARG_RPC_PORT) diff --git a/src/setup/mod.rs b/src/setup/mod.rs index 466ea5c2c0..b38db49369 100644 --- a/src/setup/mod.rs +++ b/src/setup/mod.rs @@ -112,6 +112,7 @@ impl Setup { let locator = locator_from_matches(matches)?; let export_specs = matches.is_present(cli::ARG_EXPORT_SPECS); let list_specs = matches.is_present(cli::ARG_LIST_SPECS); + let force = matches.is_present(cli::ARG_FORCE); let spec = matches.value_of(cli::ARG_SPEC).unwrap().to_string(); let rpc_port = matches.value_of(cli::ARG_RPC_PORT).unwrap().to_string(); let p2p_port = matches.value_of(cli::ARG_P2P_PORT).unwrap().to_string(); @@ -123,6 +124,7 @@ impl Setup { p2p_port, export_specs, list_specs, + force, }) } diff --git a/src/subcommand/init.rs b/src/subcommand/init.rs index d5e5fa55ad..1015934178 100644 --- a/src/subcommand/init.rs +++ b/src/subcommand/init.rs @@ -15,6 +15,11 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { p2p_port: &args.p2p_port, }; + if !args.force && args.locator.exported() { + eprintln!("Config files already exists, use --force to overwrite."); + return Err(ExitCode::Failure); + } + args.locator.export_ckb(&context)?; args.locator.export_miner(&context)?; From 95f4f8a8186f325fd4bd5b92de86c7a95be7c43f Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 9 Apr 2019 11:01:26 +0800 Subject: [PATCH 11/29] feat: ckb init verbose output --- resource/src/lib.rs | 2 +- src/subcommand/init.rs | 21 +++++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/resource/src/lib.rs b/resource/src/lib.rs index 30938f3ce2..494a078654 100644 --- a/resource/src/lib.rs +++ b/resource/src/lib.rs @@ -19,7 +19,7 @@ include!(concat!(env!("OUT_DIR"), "/bundled.rs")); pub const CKB_CONFIG_FILE_NAME: &str = "ckb.toml"; pub const MINER_CONFIG_FILE_NAME: &str = "ckb-miner.toml"; -const SPECS_RESOURCE_DIR_NAME: &str = "specs/"; +pub const SPECS_RESOURCE_DIR_NAME: &str = "specs/"; #[derive(Clone, Debug, Eq, PartialEq)] pub enum Resource { diff --git a/src/subcommand/init.rs b/src/subcommand/init.rs index 1015934178..ba8dacc4b4 100644 --- a/src/subcommand/init.rs +++ b/src/subcommand/init.rs @@ -1,5 +1,8 @@ use crate::setup::{ExitCode, InitArgs}; -use ckb_resource::{TemplateContext, AVAILABLE_SPECS}; +use ckb_resource::{ + TemplateContext, AVAILABLE_SPECS, CKB_CONFIG_FILE_NAME, MINER_CONFIG_FILE_NAME, + SPECS_RESOURCE_DIR_NAME, +}; pub fn init(args: InitArgs) -> Result<(), ExitCode> { if args.list_specs { @@ -15,15 +18,29 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { p2p_port: &args.p2p_port, }; - if !args.force && args.locator.exported() { + let exported = args.locator.exported(); + if !args.force && exported { eprintln!("Config files already exists, use --force to overwrite."); return Err(ExitCode::Failure); } + println!( + "{} CKB directory in {}", + if !exported { + "Initialized" + } else { + "Reinitialized" + }, + args.locator.root_dir().display() + ); + + println!("export {}", CKB_CONFIG_FILE_NAME); args.locator.export_ckb(&context)?; + println!("export {}", 
MINER_CONFIG_FILE_NAME); args.locator.export_miner(&context)?; if args.export_specs { + println!("export {}", SPECS_RESOURCE_DIR_NAME); args.locator.export_specs()?; } From 7a333e322aa18136c7b55994f764f35587ff8f08 Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 9 Apr 2019 11:01:55 +0800 Subject: [PATCH 12/29] chore: add `make clean` to remove runtime files --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5fcb8dcf8a..1deb47ca56 100644 --- a/Makefile +++ b/Makefile @@ -68,6 +68,9 @@ gen-clean: $(CFBC) -o $(shell dirname $@) $*.bfbs rm -f $*.bfbs $*_builder.rs -.PHONY: build prod prod-test docker gen gen-clean +clean: + rm -rf ckb.toml ckb-miner.toml specs/ + +.PHONY: build prod prod-test docker gen gen-clean clean .PHONY: fmt test clippy doc doc-deps check stats .PHONY: ci info security-audit From 349e8e88a860e04d2205f4db721bdbdb5ef4f5c7 Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 9 Apr 2019 15:51:09 +0800 Subject: [PATCH 13/29] chore: build release packages in travis --- .travis.yml | 16 +++++++++++++--- devtools/ci/script.sh | 21 +++++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index a312748972..073cc2780c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ git: depth: 2 submodules: false -if: 'branch IN (master, develop, staging, trying) OR type != push OR fork = true OR tag =~ ^v' +if: 'branch IN (master, develop, staging, trying) OR type != push OR fork = true OR tag IS present' env: global: @@ -18,10 +18,10 @@ matrix: include: - rust: 1.33.0 os: osx - env: FMT=true CHECK=true TEST=true + env: FMT=true CHECK=true TEST=true REL_PKG=darwin_amd64.zip - rust: 1.33.0 os: linux - env: TEST=true + env: TEST=true REL_PKG=linux_amd64.tar.gz addons: apt: @@ -37,6 +37,16 @@ before_install: if [ "$TRAVIS_OS_NAME" = "osx" ]; then ulimit -n 8192; fi install: ./devtools/ci/install.sh script: ./devtools/ci/script.sh +deploy: + provider: releases + api_key: "$GITHUB_TOKEN" + file: "releases/ckb_${TRAVIS_TAG}_${REL_PKG}" + skip_cleanup: true + draft: true + on: + tags: true + condition: '"$GITHUB_TOKEN" != "" && "$REL_PKG" != ""' + before_cache: - rm -rf ./target/debug/incremental/ - cargo sweep -f diff --git a/devtools/ci/script.sh b/devtools/ci/script.sh index ea731302d2..06d89931b8 100755 --- a/devtools/ci/script.sh +++ b/devtools/ci/script.sh @@ -26,3 +26,24 @@ if [ "$TRAVIS_BRANCH" = master -o "$TRAVIS_BRANCH" = staging -o "$TRAVIS_BRANCH" # cargo build --release # cargo run --release -p ckb-test target/release/ckb fi + +if [ -n "$TRAVIS_TAG" -a -n "$GITHUB_TOKEN" -a -n "$REL_PKG" ]; then + make build + rm -rf releases + mkdir releases + PKG_NAME="ckb_${TRAVIS_TAG}_${REL_PKG%%.*}" + mkdir "releases/$PKG_NAME" + mv target/release/ckb "releases/$PKG_NAME" + cp README.md CHANGELOG.md COPYING "releases/$PKG_NAME" + if [ -d docs ]; then + cp -R docs "releases/$PKG_NAME" + fi + + pushd releases + if [ "${REL_PKG#*.}" = "tar.gz" ]; then + tar -czf $PKG_NAME.tar.gz $PKG_NAME + else + zip -r $PKG_NAME.zip $PKG_NAME + fi + popd +fi From fa52571b1eb7f0f460f722c93dba270716c5a16f Mon Sep 17 00:00:00 2001 From: ian Date: Wed, 10 Apr 2019 10:40:10 +0800 Subject: [PATCH 14/29] chore: add init scripts for systemd --- devtools/ci/script.sh | 1 + devtools/init/README.md | 11 +++ devtools/init/linux-systemd/README.md | 82 +++++++++++++++++++ devtools/init/linux-systemd/ckb-miner.service | 21 +++++ devtools/init/linux-systemd/ckb.service | 21 +++++ 5 files changed, 136 insertions(+) create 
mode 100644 devtools/init/README.md create mode 100644 devtools/init/linux-systemd/README.md create mode 100644 devtools/init/linux-systemd/ckb-miner.service create mode 100644 devtools/init/linux-systemd/ckb.service diff --git a/devtools/ci/script.sh b/devtools/ci/script.sh index 06d89931b8..e23f8e9c5c 100755 --- a/devtools/ci/script.sh +++ b/devtools/ci/script.sh @@ -35,6 +35,7 @@ if [ -n "$TRAVIS_TAG" -a -n "$GITHUB_TOKEN" -a -n "$REL_PKG" ]; then mkdir "releases/$PKG_NAME" mv target/release/ckb "releases/$PKG_NAME" cp README.md CHANGELOG.md COPYING "releases/$PKG_NAME" + cp -R devtools/init/ "releases/$PKG_NAME" if [ -d docs ]; then cp -R docs "releases/$PKG_NAME" fi diff --git a/devtools/init/README.md b/devtools/init/README.md new file mode 100644 index 0000000000..3d9cb45285 --- /dev/null +++ b/devtools/init/README.md @@ -0,0 +1,11 @@ +# Init/Service Scripts + +This folder provides the init/service scripts to start CKB node and miner as +daemons on various Unix like distributions. + +See the README in each folder for the detailed instructions. + +## Disclaimer + +Users are expected to know how to administer their system, and these files +should be considered as only a guide or suggestion to setup CKB. diff --git a/devtools/init/linux-systemd/README.md b/devtools/init/linux-systemd/README.md new file mode 100644 index 0000000000..7b3698ff1d --- /dev/null +++ b/devtools/init/linux-systemd/README.md @@ -0,0 +1,82 @@ +# CKB systemd unit configuration + +The provided files should work with systemd version 219 or later. + +## Instructions + +The following instructions assume that: + +* you want to run ckb as user `ckb` and group `ckb`, and store data in `/var/lib/ckb`. +* you want to join testnet. +* you are logging in as a non-root user account that has `sudo` permissions to execute commands as root. + +First, get ckb and move the binary into the system binary directory, and setup the appropriate ownership and permissions: + +```bash +sudo cp /path/to/ckb /usr/local/bin +sudo chown root:root /usr/local/bin/ckb +sudo chmod 755 /usr/local/bin/ckb +``` + +Setup the directories and generate config files for testnet. + +```bash +sudo mkdir /var/lib/ckb +sudo /usr/local/bin/ckb init -C /var/lib/ckb --spec testnet --log stdout +``` + +Setup the user and group and the appropriate ownership and permissions. 
+ +```bash +sudo groupadd ckb +sudo useradd \ + -g ckb --no-user-group \ + --home-dir /var/lib/ckb --no-create-home \ + --shell /usr/sbin/nologin \ + --system ckb + +sudo chown -R ckb:ckb /var/lib/ckb +sudo chmod 755 /var/lib/ckb +sudo chmod 644 /var/lib/ckb/ckb.toml /var/lib/ckb/ckb-miner.toml +``` + +Install the systemd service unit configuration file, reload the systemd daemon, +and start the node: + +```bash +curl -L -O https://raw.githubusercontent.com/nervosnetwork/ckb/master/devtools/init/linux-systemd/ckb.service +sudo cp ckb.service /etc/systemd/system/ +sudo chown root:root /etc/systemd/system/ckb.service +sudo chmod 644 /etc/systemd/system/ckb.service +sudo systemctl daemon-reload +sudo systemctl start ckb.service +``` + +Start the node automatically on boot if you like: + +```bash +sudo systemctl enable ckb.service +``` + +If ckb doesn't seem to start properly you can view the logs to figure out the problem: + +```bash +journalctl --boot -u ckb.service +``` + +Following the similar instructions to start a miner: + +```bash +curl -L -O https://raw.githubusercontent.com/nervosnetwork/ckb/master/devtools/init/linux-systemd/ckb-miner.service +sudo cp ckb-miner.service /etc/systemd/system/ +sudo chown root:root /etc/systemd/system/ckb-miner.service +sudo chmod 644 /etc/systemd/system/ckb-miner.service +sudo systemctl daemon-reload +sudo systemctl start ckb-miner.service +``` + +Let the miner starts automatically on boot: + +```bash +sudo systemctl enable ckb-miner.service +``` diff --git a/devtools/init/linux-systemd/ckb-miner.service b/devtools/init/linux-systemd/ckb-miner.service new file mode 100644 index 0000000000..338cd1881e --- /dev/null +++ b/devtools/init/linux-systemd/ckb-miner.service @@ -0,0 +1,21 @@ +[Unit] +Description=Nervos CKB Miner +Documentation=https://github.com/nervosnetwork/ckb +After=network-online.target +Wants=network-online.target systemd-networkd-wait-online.service + +[Service] +User=ckb +Group=ckb +WorkingDirectory=/var/lib/ckb + +ExecStart=/usr/local/bin/ckb miner +Restart=on-abnormal +KillMode=mixed +TimeoutStopSec=5s + +LimitNOFILE=1048576 +LimitNPROC=512 + +[Install] +WantedBy=multi-user.target diff --git a/devtools/init/linux-systemd/ckb.service b/devtools/init/linux-systemd/ckb.service new file mode 100644 index 0000000000..7bc6894295 --- /dev/null +++ b/devtools/init/linux-systemd/ckb.service @@ -0,0 +1,21 @@ +[Unit] +Description=Nervos CKB Node +Documentation=https://github.com/nervosnetwork/ckb +After=network-online.target +Wants=network-online.target systemd-networkd-wait-online.service + +[Service] +User=ckb +Group=ckb +WorkingDirectory=/var/lib/ckb + +ExecStart=/usr/local/bin/ckb +Restart=on-abnormal +KillMode=mixed +TimeoutStopSec=5s + +LimitNOFILE=1048576 +LimitNPROC=512 + +[Install] +WantedBy=multi-user.target From 702cd8831695483d519dc4e5375e396bfccbe752 Mon Sep 17 00:00:00 2001 From: ian Date: Wed, 10 Apr 2019 11:15:07 +0800 Subject: [PATCH 15/29] feat: allow not log to file --- resource/ckb-miner.toml | 8 +++++-- resource/ckb.toml | 8 +++++-- resource/src/template.rs | 6 +++++ src/setup/app_config.rs | 49 ++++++++++++++++++++++++++++++++++++---- src/setup/args.rs | 2 ++ src/setup/cli.rs | 8 +++++++ src/setup/mod.rs | 8 +++++++ src/subcommand/init.rs | 2 ++ test/src/main.rs | 4 +--- util/logger/src/lib.rs | 25 +++++++++++--------- 10 files changed, 98 insertions(+), 22 deletions(-) diff --git a/resource/ckb-miner.toml b/resource/ckb-miner.toml index 0862e266f1..6af270c648 100644 --- a/resource/ckb-miner.toml +++ 
b/resource/ckb-miner.toml @@ -19,8 +19,12 @@ filter = "info" # {{ # integration => filter = "info,network=trace,rpc=debug,sync=debug,relay=debug" # }} color = true -# Print logs to stdout as well -copy_to_stdout = true +log_to_file = true # {{ +# _ => log_to_file = {log_to_file} +# }} +log_to_stdout = true # {{ +# _ => log_to_stdout = {log_to_stdout} +# }} [sentry] # set to blank to disable sentry error collection diff --git a/resource/ckb.toml b/resource/ckb.toml index 9341151275..6cb5d333f9 100644 --- a/resource/ckb.toml +++ b/resource/ckb.toml @@ -19,8 +19,12 @@ filter = "info" # {{ # integration => filter = "info,network=trace,rpc=debug,sync=debug,relay=debug" # }} color = true -# Print logs to stdout as well -copy_to_stdout = true +log_to_file = true # {{ +# _ => log_to_file = {log_to_file} +# }} +log_to_stdout = true # {{ +# _ => log_to_stdout = {log_to_stdout} +# }} [sentry] # set to blank to disable sentry error collection diff --git a/resource/src/template.rs b/resource/src/template.rs index cf036f470d..b9dce47a6d 100644 --- a/resource/src/template.rs +++ b/resource/src/template.rs @@ -15,6 +15,8 @@ pub struct TemplateContext<'a> { pub spec: &'a str, pub rpc_port: &'a str, pub p2p_port: &'a str, + pub log_to_file: bool, + pub log_to_stdout: bool, } impl<'a> Default for TemplateContext<'a> { @@ -23,6 +25,8 @@ impl<'a> Default for TemplateContext<'a> { spec: DEFAULT_SPEC, rpc_port: DEFAULT_RPC_PORT, p2p_port: DEFAULT_P2P_PORT, + log_to_file: true, + log_to_stdout: true, } } } @@ -39,6 +43,8 @@ fn writeln(w: &mut W, s: &str, context: &TemplateContext) -> io::R "{}", s.replace("{rpc_port}", context.rpc_port) .replace("{p2p_port}", context.p2p_port) + .replace("{log_to_file}", &format!("{}", context.log_to_file)) + .replace("{log_to_stdout}", &format!("{}", context.log_to_stdout)) ) } diff --git a/src/setup/app_config.rs b/src/setup/app_config.rs index 8801f2486e..231b32ede4 100644 --- a/src/setup/app_config.rs +++ b/src/setup/app_config.rs @@ -146,9 +146,11 @@ impl AppConfigContent { impl CKBAppConfig { fn derive_options(mut self, root_dir: &Path, subcommand_name: &str) -> Result { self.data_dir = canonicalize_data_dir(self.data_dir, root_dir)?; - self.logger.file = Some(touch( - mkdir(self.data_dir.join("logs"))?.join(subcommand_name.to_string() + ".log"), - )?); + if self.logger.log_to_file { + self.logger.file = Some(touch( + mkdir(self.data_dir.join("logs"))?.join(subcommand_name.to_string() + ".log"), + )?); + } self.db.path = mkdir(self.data_dir.join("db"))?; self.network.path = mkdir(self.data_dir.join("network"))?; @@ -159,7 +161,9 @@ impl CKBAppConfig { impl MinerAppConfig { fn derive_options(mut self, root_dir: &Path) -> Result { self.data_dir = canonicalize_data_dir(self.data_dir, root_dir)?; - self.logger.file = Some(touch(mkdir(self.data_dir.join("logs"))?.join("miner.log"))?); + if self.logger.log_to_file { + self.logger.file = Some(touch(mkdir(self.data_dir.join("logs"))?.join("miner.log"))?); + } Ok(self) } @@ -242,6 +246,8 @@ mod tests { spec: "dev", rpc_port: "7000", p2p_port: "8000", + log_to_file: true, + log_to_stdout: true, }; { locator.export_ckb(&context).expect("export config files"); @@ -268,6 +274,37 @@ mod tests { } } + #[test] + fn test_log_to_stdout_only() { + let dir = mkdir(); + let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap(); + let context = TemplateContext { + spec: "dev", + rpc_port: "7000", + p2p_port: "8000", + log_to_file: false, + log_to_stdout: true, + }; + { + locator.export_ckb(&context).expect("export config 
files"); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN) + .unwrap_or_else(|err| panic!(err)); + let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err)); + assert_eq!(ckb_config.logger.file, None); + assert_eq!(ckb_config.logger.log_to_file, false); + assert_eq!(ckb_config.logger.log_to_stdout, true); + } + { + locator.export_miner(&context).expect("export config files"); + let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER) + .unwrap_or_else(|err| panic!(err)); + let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err)); + assert_eq!(miner_config.logger.file, None); + assert_eq!(miner_config.logger.log_to_file, false); + assert_eq!(miner_config.logger.log_to_stdout, true); + } + } + #[test] fn test_export_testnet_config_files() { let dir = mkdir(); @@ -276,6 +313,8 @@ mod tests { spec: "testnet", rpc_port: "7000", p2p_port: "8000", + log_to_file: true, + log_to_stdout: true, }; locator.export_ckb(&context).expect("export config files"); { @@ -310,6 +349,8 @@ mod tests { spec: "integration", rpc_port: "7000", p2p_port: "8000", + log_to_file: true, + log_to_stdout: true, }; locator.export_ckb(&context).expect("export config files"); { diff --git a/src/setup/args.rs b/src/setup/args.rs index 1d5aaa14d7..934bc4be21 100644 --- a/src/setup/args.rs +++ b/src/setup/args.rs @@ -36,6 +36,8 @@ pub struct InitArgs { pub spec: String, pub rpc_port: String, pub p2p_port: String, + pub log_to_file: bool, + pub log_to_stdout: bool, pub export_specs: bool, pub list_specs: bool, pub force: bool, diff --git a/src/setup/cli.rs b/src/setup/cli.rs index b75d46375f..5b6df9da1f 100644 --- a/src/setup/cli.rs +++ b/src/setup/cli.rs @@ -20,6 +20,7 @@ pub const ARG_EXPORT_SPECS: &str = "export-specs"; pub const ARG_P2P_PORT: &str = "p2p-port"; pub const ARG_RPC_PORT: &str = "rpc-port"; pub const ARG_FORCE: &str = "force"; +pub const ARG_LOG_TO: &str = "log-to"; pub fn get_matches() -> ArgMatches<'static> { let version = get_version!(); @@ -120,6 +121,13 @@ fn init() -> App<'static, 'static> { .default_value(DEFAULT_SPEC) .help("Export config files for "), ) + .arg( + Arg::with_name(ARG_LOG_TO) + .long(ARG_LOG_TO) + .possible_values(&["file", "stdout", "both"]) + .default_value("both") + .help("Configures where the logs should print"), + ) .arg( Arg::with_name(ARG_FORCE) .short("f") diff --git a/src/setup/mod.rs b/src/setup/mod.rs index b38db49369..ea5c4a17ea 100644 --- a/src/setup/mod.rs +++ b/src/setup/mod.rs @@ -116,6 +116,12 @@ impl Setup { let spec = matches.value_of(cli::ARG_SPEC).unwrap().to_string(); let rpc_port = matches.value_of(cli::ARG_RPC_PORT).unwrap().to_string(); let p2p_port = matches.value_of(cli::ARG_P2P_PORT).unwrap().to_string(); + let (log_to_file, log_to_stdout) = match matches.value_of(cli::ARG_LOG_TO) { + Some("file") => (true, false), + Some("stdout") => (false, true), + Some("both") => (true, true), + _ => unreachable!(), + }; Ok(InitArgs { locator, @@ -125,6 +131,8 @@ impl Setup { export_specs, list_specs, force, + log_to_file, + log_to_stdout, }) } diff --git a/src/subcommand/init.rs b/src/subcommand/init.rs index ba8dacc4b4..cf214e8f4d 100644 --- a/src/subcommand/init.rs +++ b/src/subcommand/init.rs @@ -16,6 +16,8 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { spec: &args.spec, rpc_port: &args.rpc_port, p2p_port: &args.p2p_port, + log_to_file: args.log_to_file, + log_to_stdout: args.log_to_stdout, }; let exported = args.locator.exported(); diff --git a/test/src/main.rs b/test/src/main.rs index 
67f9648afd..b3425e941d 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -5,9 +5,7 @@ use std::env; fn main() { let log_config = Config { filter: Some("info".to_owned()), - color: true, - file: None, - copy_to_stdout: true, + ..Default::default() }; let _logger_guard = logger::init(log_config).expect("init Logger"); diff --git a/util/logger/src/lib.rs b/util/logger/src/lib.rs index d7569d8d1e..dc21e6616e 100644 --- a/util/logger/src/lib.rs +++ b/util/logger/src/lib.rs @@ -37,9 +37,14 @@ impl Logger { } let (sender, receiver) = unbounded(); - let file = config.file; - let enable_color = config.color; - let copy_to_stdout = config.copy_to_stdout; + let Config { + color, + file, + log_to_file, + log_to_stdout, + .. + } = config; + let file = if log_to_file { file } else { None }; let tb = thread::Builder::new() .name("LogWriter".to_owned()) @@ -58,16 +63,12 @@ impl Logger { match receiver.recv() { Ok(Message::Record(record)) => { let removed_color = sanitize_color(record.as_ref()); - let output = if enable_color { - record - } else { - removed_color.clone() - }; + let output = if color { record } else { removed_color.clone() }; if let Some(mut file) = file.as_ref() { let _ = file.write_all(removed_color.as_bytes()); let _ = file.write_all(b"\n"); }; - if copy_to_stdout { + if log_to_stdout { println!("{}", output); } } @@ -96,7 +97,8 @@ pub struct Config { pub filter: Option, pub color: bool, pub file: Option, - pub copy_to_stdout: bool, + pub log_to_file: bool, + pub log_to_stdout: bool, } impl Default for Config { @@ -105,7 +107,8 @@ impl Default for Config { filter: None, color: !cfg!(windows), file: None, - copy_to_stdout: true, + log_to_file: false, + log_to_stdout: true, } } } From f8aaec0eea3571aadca659f890502cf198918b59 Mon Sep 17 00:00:00 2001 From: ian Date: Wed, 10 Apr 2019 11:20:49 +0800 Subject: [PATCH 16/29] docs: ckb is available via github releases now --- README.md | 2 +- docs/{build.md => get-ckb.md} | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) rename docs/{build.md => get-ckb.md} (78%) diff --git a/README.md b/README.md index bc79558860..bbfdf22bd6 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ The contribution workflow is described in [CONTRIBUTING.md](CONTRIBUTING.md), an ## Documentations -- [Build CKB](docs/build.md) +- [Get CKB](docs/get-ckb.md) - [Quick Start](docs/quick-start.md) - [Configure CKB](docs/configure.md) - [CKB Core Development](docs/ckb-core-dev.md) diff --git a/docs/build.md b/docs/get-ckb.md similarity index 78% rename from docs/build.md rename to docs/get-ckb.md index 2049440fdf..0ff74ae7a5 100644 --- a/docs/build.md +++ b/docs/get-ckb.md @@ -1,6 +1,14 @@ -# Build CKB +# Get CKB -## Install Build Dependencies +## Download from Releases + +We will publish binaries for each release via [Github Releases](https://github.com/nervosnetwork/ckb/releases). If your system +is listed there, you can download the package directory. + + +## Build from Source + +### Install Build Dependencies CKB is currently tested mainly with `stable-1.33.0` on Linux and macOS. 
@@ -33,7 +41,7 @@ sudo pacman -Sy git gcc pkgconf clang brew install autoconf libtool ``` -## Build from Source +### Build ```bash # get ckb source code From b2fc3090047cc0a9fde190ed783161addfcaeb30 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Thu, 4 Apr 2019 20:33:33 +0800 Subject: [PATCH 17/29] refactor: avoid recursive lock --- network/src/benches/sqlite_peer_store.rs | 31 +-- network/src/network.rs | 136 ++++------ network/src/peer_store.rs | 12 +- network/src/peer_store/sqlite/peer_store.rs | 56 ++-- network/src/peers_registry.rs | 284 ++++++++------------ network/src/protocols/discovery.rs | 2 - network/src/protocols/identify.rs | 3 +- network/src/protocols/mod.rs | 4 +- network/src/protocols/outbound_peer.rs | 12 +- network/src/tests/peers_registry.rs | 173 +++++++++--- network/src/tests/sqlite_peer_store.rs | 22 +- 11 files changed, 369 insertions(+), 366 deletions(-) diff --git a/network/src/benches/sqlite_peer_store.rs b/network/src/benches/sqlite_peer_store.rs index f9fd11c049..c2917dc99f 100644 --- a/network/src/benches/sqlite_peer_store.rs +++ b/network/src/benches/sqlite_peer_store.rs @@ -8,16 +8,14 @@ use ckb_network::{ peer_store::{PeerStore, SqlitePeerStore}, PeerId, SessionType, }; -use ckb_util::Mutex; use criterion::Criterion; use std::rc::Rc; fn insert_peer_info_benchmark(c: &mut Criterion) { c.bench_function("insert 100 peer_info", |b| { b.iter({ - let mut peer_store = - SqlitePeerStore::memory("bench_db_insert_100_peer_info".to_string()) - .expect("memory"); + let peer_store = SqlitePeerStore::memory("bench_db_insert_100_peer_info".to_string()) + .expect("memory"); let peer_ids = (0..100).map(|_| PeerId::random()).collect::>(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); move || { @@ -29,9 +27,8 @@ fn insert_peer_info_benchmark(c: &mut Criterion) { }); c.bench_function("insert 1000 peer_info", |b| { b.iter({ - let mut peer_store = - SqlitePeerStore::memory("bench_db_insert_1000_peer_info".to_string()) - .expect("memory"); + let peer_store = SqlitePeerStore::memory("bench_db_insert_1000_peer_info".to_string()) + .expect("memory"); let peer_ids = (0..1000).map(|_| PeerId::random()).collect::>(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); move || { @@ -45,7 +42,7 @@ fn insert_peer_info_benchmark(c: &mut Criterion) { // filesystem benchmark c.bench_function("insert 100 peer_info on filesystem", move |b| { b.iter({ - let mut peer_store = SqlitePeerStore::temp().expect("temp"); + let peer_store = SqlitePeerStore::temp().expect("temp"); let peer_ids = (0..100).map(|_| PeerId::random()).collect::>(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); move || { @@ -59,12 +56,10 @@ fn insert_peer_info_benchmark(c: &mut Criterion) { fn random_order_benchmark(c: &mut Criterion) { { - let peer_store = Rc::new(Mutex::new( - SqlitePeerStore::memory("bench_db_random_order".to_string()).expect("memory"), - )); + let peer_store = + Rc::new(SqlitePeerStore::memory("bench_db_random_order".to_string()).expect("memory")); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); { - let mut peer_store = peer_store.lock(); for _ in 0..8000 { let peer_id = PeerId::random(); peer_store.add_connected_peer(&peer_id, addr.clone(), SessionType::Outbound); @@ -79,10 +74,7 @@ fn random_order_benchmark(c: &mut Criterion) { move || { let peer_store = Rc::clone(&peer_store); let count = 1000; - assert_eq!( - peer_store.lock().peers_to_attempt(count).len() as u32, - count - ); + assert_eq!(peer_store.peers_to_attempt(count).len() as u32, count); } }) } @@ -95,10 
+87,7 @@ fn random_order_benchmark(c: &mut Criterion) { move || { let peer_store = Rc::clone(&peer_store); let count = 2000; - assert_eq!( - peer_store.lock().peers_to_attempt(count).len() as u32, - count - ); + assert_eq!(peer_store.peers_to_attempt(count).len() as u32, count); } }) } @@ -110,7 +99,7 @@ fn random_order_benchmark(c: &mut Criterion) { "random order 1000 / 8000 peer_info on filesystem", move |b| { b.iter({ - let mut peer_store = SqlitePeerStore::temp().expect("temp"); + let peer_store = SqlitePeerStore::temp().expect("temp"); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); for _ in 0..8000 { let peer_id = PeerId::random(); diff --git a/network/src/network.rs b/network/src/network.rs index 26fa284888..74990c3223 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -1,4 +1,4 @@ -use crate::errors::{Error, ProtocolError}; +use crate::errors::Error; use crate::peer_store::{sqlite::SqlitePeerStore, PeerStore, Status}; use crate::peers_registry::{ConnectionStatus, PeersRegistry, RegisterResult}; use crate::protocols::{ @@ -60,8 +60,8 @@ pub struct SessionInfo { pub struct NetworkState { protocol_ids: RwLock>, - pub(crate) peers_registry: RwLock, - peer_store: Arc>, + pub(crate) peers_registry: PeersRegistry, + peer_store: Arc, listened_addresses: RwLock>, pub(crate) original_listened_addresses: RwLock>, // For avoid repeat failed dial @@ -82,14 +82,14 @@ impl NetworkState { .chain(config.public_addresses.iter()) .map(|addr| (addr.to_owned(), std::u8::MAX)) .collect(); - let peer_store: Arc> = { - let mut peer_store = + let peer_store: Arc = { + let peer_store = SqlitePeerStore::file(config.peer_store_path().to_string_lossy().to_string())?; let bootnodes = config.bootnodes()?; for (peer_id, addr) in bootnodes { peer_store.add_bootnode(peer_id, addr); } - Arc::new(RwLock::new(peer_store)) + Arc::new(peer_store) }; let reserved_peers = config @@ -108,8 +108,8 @@ impl NetworkState { Ok(NetworkState { peer_store, config, + peers_registry, failed_dials: RwLock::new(LruCache::new(FAILED_DIAL_CACHE_SIZE)), - peers_registry: RwLock::new(peers_registry), listened_addresses: RwLock::new(listened_addresses), original_listened_addresses: RwLock::new(Vec::new()), local_private_key: local_private_key.clone(), @@ -120,12 +120,12 @@ impl NetworkState { pub fn report(&self, peer_id: &PeerId, behaviour: Behaviour) { info!(target: "network", "report {:?} because {:?}", peer_id, behaviour); - self.peer_store.write().report(peer_id, behaviour); + self.peer_store.report(peer_id, behaviour); } pub fn drop_peer(&self, p2p_control: &mut ServiceControl, peer_id: &PeerId) { debug!(target: "network", "drop peer {:?}", peer_id); - if let Some(peer) = self.peers_registry.write().drop_peer(&peer_id) { + if let Some(peer) = self.peers_registry.drop_peer(&peer_id) { if let Err(err) = p2p_control.disconnect(peer.session_id) { error!(target: "network", "disconnect peer error {:?}", err); } @@ -135,16 +135,17 @@ impl NetworkState { pub fn drop_all(&self, p2p_control: &mut ServiceControl) { debug!(target: "network", "drop all connections..."); let mut peer_ids = Vec::new(); - let mut peers_registry = self.peers_registry.write(); - for (peer_id, peer) in peers_registry.peers_iter() { - peer_ids.push(peer_id.clone()); - if let Err(err) = p2p_control.disconnect(peer.session_id) { - error!(target: "network", "disconnect peer error {:?}", err); + { + for (peer_id, peer) in self.peers_registry.peers_guard().read().iter() { + peer_ids.push(peer_id.clone()); + if let Err(err) = 
p2p_control.disconnect(peer.session_id) { + error!(target: "network", "disconnect peer error {:?}", err); + } } } - peers_registry.drop_all(); + self.peers_registry.drop_all(); - let mut peer_store = self.peer_store().write(); + let peer_store = self.peer_store(); for peer_id in peer_ids { if peer_store.peer_status(&peer_id) != Status::Disconnected { peer_store.report(&peer_id, Behaviour::UnexpectedDisconnect); @@ -167,36 +168,30 @@ impl NetworkState { } pub(crate) fn get_peer_index(&self, peer_id: &PeerId) -> Option { - let peers_registry = self.peers_registry.read(); - peers_registry.get(&peer_id).map(|peer| peer.peer_index) + self.peers_registry + .peers_guard() + .read() + .get(&peer_id) + .map(|peer| peer.peer_index) } pub(crate) fn get_peer_id(&self, peer_index: PeerIndex) -> Option { - let peers_registry = self.peers_registry.read(); - peers_registry - .get_peer_id(peer_index) - .map(|peer_id| peer_id.to_owned()) + self.peers_registry.get_peer_id(peer_index) } pub(crate) fn connection_status(&self) -> ConnectionStatus { - let peers_registry = self.peers_registry.read(); - peers_registry.connection_status() + self.peers_registry.connection_status() } pub(crate) fn modify_peer(&self, peer_id: &PeerId, f: F) where F: FnOnce(&mut Peer) -> (), { - let mut peers_registry = self.peers_registry.write(); - if let Some(peer) = peers_registry.get_mut(peer_id) { - f(peer); - } + self.peers_registry.modify_peer(peer_id, f); } pub(crate) fn peers_indexes(&self) -> Vec { - let peers_registry = self.peers_registry.read(); - let iter = peers_registry.connected_peers_indexes(); - iter.collect::>() + self.peers_registry.connected_peers_indexes() } pub(crate) fn ban_peer( @@ -206,10 +201,10 @@ impl NetworkState { timeout: Duration, ) { self.drop_peer(p2p_control, peer_id); - self.peer_store.write().ban_peer(peer_id, timeout); + self.peer_store.ban_peer(peer_id, timeout); } - pub(crate) fn peer_store(&self) -> &RwLock { + pub(crate) fn peer_store(&self) -> &Arc { &self.peer_store } @@ -236,10 +231,7 @@ impl NetworkState { // A workaround method for `add_node` rpc call, need to re-write it after new p2p lib integration. 
pub fn add_node(&self, peer_id: &PeerId, address: Multiaddr) { - let _ = self - .peer_store() - .write() - .add_discovered_addr(peer_id, address); + let _ = self.peer_store().add_discovered_addr(peer_id, address); } fn to_external_url(&self, addr: &Multiaddr) -> String { @@ -255,33 +247,14 @@ impl NetworkState { protocol_id: ProtocolId, protocol_version: ProtocolVersion, ) -> Result { - let mut peers_registry = self.peers_registry.write(); - let register_result = if session_type.is_outbound() { - peers_registry.try_outbound_peer( - peer_id.clone(), - connected_addr, - session_id, - session_type, - ) - } else { - peers_registry.accept_inbound_peer( - peer_id.clone(), - connected_addr, - session_id, - session_type, - ) - }?; - // add session to peer - match peers_registry.get_mut(&peer_id) { - Some(peer) => match peer.protocol_version(protocol_id) { - Some(_) => return Err(ProtocolError::Duplicate(protocol_id).into()), - None => { - peer.protocols.insert(protocol_id, protocol_version); - } - }, - None => unreachable!("get peer after inserted"), - } - Ok(register_result) + self.peers_registry.accept_connection( + peer_id, + connected_addr, + session_id, + session_type, + protocol_id, + protocol_version, + ) } pub fn peer_protocol_version( @@ -289,20 +262,25 @@ impl NetworkState { peer_id: &PeerId, protocol_id: ProtocolId, ) -> Option { - let peers_registry = self.peers_registry.read(); - peers_registry + self.peers_registry + .peers_guard() + .read() .get(peer_id) .and_then(|peer| peer.protocol_version(protocol_id)) } + pub fn session_info(&self, peer_id: &PeerId, protocol_id: ProtocolId) -> Option { - let peers_registry = self.peers_registry.read(); - peers_registry.get(peer_id).map(|peer| { - let protocol_version = peer.protocol_version(protocol_id); - SessionInfo { - peer: peer.clone(), - protocol_version, - } - }) + self.peers_registry + .peers_guard() + .read() + .get(peer_id) + .map(|peer| { + let protocol_version = peer.protocol_version(protocol_id); + SessionInfo { + peer: peer.clone(), + protocol_version, + } + }) } pub fn get_protocol_ids bool>(&self, filter: F) -> Vec { @@ -398,7 +376,7 @@ impl ServiceHandle for EventHandler { .map(|pubkey| pubkey.peer_id()) .expect("Secio must enabled"); - let mut peer_store = self.network_state.peer_store().write(); + let peer_store = self.network_state.peer_store(); if peer_store.peer_status(&peer_id) == Status::Connected { peer_store.report(&peer_id, Behaviour::UnexpectedDisconnect); peer_store.update_status(&peer_id, Status::Disconnected); @@ -432,7 +410,7 @@ impl ServiceHandle for EventHandler { Ok(register_result) => { // update status in peer_store if let RegisterResult::New(_) = register_result { - let mut peer_store = self.network_state.peer_store().write(); + let peer_store = self.network_state.peer_store(); peer_store.update_status(&peer_id, Status::Connected); } } @@ -596,7 +574,6 @@ impl NetworkService { let bootnodes = self .network_state .peer_store() - .read() .bootnodes(max((config.max_outbound_peers / 2) as u32, 1)) .clone(); // dial half bootnodes @@ -681,12 +658,13 @@ impl NetworkController { } pub fn connected_peers(&self) -> Vec<(PeerId, Peer, MultiaddrList)> { - let peer_store = self.network_state.peer_store().read(); + let peer_store = self.network_state.peer_store(); self.network_state .peers_registry + .peers_guard() .read() - .peers_iter() + .iter() .map(|(peer_id, peer)| { ( peer_id.clone(), diff --git a/network/src/peer_store.rs b/network/src/peer_store.rs index 8860b404a5..78029c2955 100644 --- 
a/network/src/peer_store.rs +++ b/network/src/peer_store.rs @@ -30,18 +30,18 @@ impl Default for PeerScoreConfig { pub trait PeerStore: Send + Sync { /// Add a peer and address into peer_store /// this method will assume peer is connected, which implies address is "verified". - fn add_connected_peer(&mut self, peer_id: &PeerId, address: Multiaddr, endpoint: SessionType); + fn add_connected_peer(&self, peer_id: &PeerId, address: Multiaddr, endpoint: SessionType); /// Add discovered peer addresses /// this method will assume peer and addr is untrust since we have not connected to it. - fn add_discovered_addr(&mut self, peer_id: &PeerId, address: Multiaddr) -> bool; + fn add_discovered_addr(&self, peer_id: &PeerId, address: Multiaddr) -> bool; /// Report peer behaviours - fn report(&mut self, peer_id: &PeerId, behaviour: Behaviour) -> ReportResult; + fn report(&self, peer_id: &PeerId, behaviour: Behaviour) -> ReportResult; /// Update peer status - fn update_status(&mut self, peer_id: &PeerId, status: Status); + fn update_status(&self, peer_id: &PeerId, status: Status); fn peer_status(&self, peer_id: &PeerId) -> Status; fn peer_score(&self, peer_id: &PeerId) -> Option; /// Add bootnode - fn add_bootnode(&mut self, peer_id: PeerId, addr: Multiaddr); + fn add_bootnode(&self, peer_id: PeerId, addr: Multiaddr); /// This method randomly return peers, it return bootnodes if no other peers in PeerStore. fn bootnodes(&self, count: u32) -> Vec<(PeerId, Multiaddr)>; /// Get addrs of a peer, note a peer may have multiple addrs @@ -54,7 +54,7 @@ pub trait PeerStore: Send + Sync { /// Randomly get peers fn random_peers(&self, count: u32) -> Vec<(PeerId, Multiaddr)>; /// Ban a peer - fn ban_peer(&mut self, peer_id: &PeerId, timeout: Duration); + fn ban_peer(&self, peer_id: &PeerId, timeout: Duration); /// Check peer ban status fn is_banned(&self, peer_id: &PeerId) -> bool; /// peer score config diff --git a/network/src/peer_store/sqlite/peer_store.rs b/network/src/peer_store/sqlite/peer_store.rs index 10d1c20622..5ab99300c9 100644 --- a/network/src/peer_store/sqlite/peer_store.rs +++ b/network/src/peer_store/sqlite/peer_store.rs @@ -15,6 +15,7 @@ use crate::peer_store::{ Behaviour, Multiaddr, PeerId, PeerScoreConfig, PeerStore, ReportResult, Score, Status, }; use crate::SessionType; +use ckb_util::RwLock; use faketime::unix_time; use fnv::FnvHashMap; use std::time::Duration; @@ -30,17 +31,17 @@ const DEFAULT_POOL_SIZE: u32 = 32; const DEFAULT_ADDRS: u32 = 3; pub struct SqlitePeerStore { - bootnodes: Vec<(PeerId, Multiaddr)>, + bootnodes: RwLock>, peer_score_config: PeerScoreConfig, - ban_list: FnvHashMap, Duration>, + ban_list: RwLock, Duration>>, pub(crate) pool: ConnectionPool, } impl SqlitePeerStore { pub fn new(connection_pool: ConnectionPool, peer_score_config: PeerScoreConfig) -> Self { - let mut peer_store = SqlitePeerStore { - bootnodes: Vec::new(), - ban_list: Default::default(), + let peer_store = SqlitePeerStore { + bootnodes: RwLock::new(Vec::new()), + ban_list: RwLock::new(Default::default()), pool: connection_pool, peer_score_config, }; @@ -63,31 +64,32 @@ impl SqlitePeerStore { Self::file("".into()) } - fn prepare(&mut self) -> Result<(), DBError> { + fn prepare(&self) -> Result<(), DBError> { self.create_tables()?; self.reset_status()?; self.load_banlist() } - fn create_tables(&mut self) -> Result<(), DBError> { + fn create_tables(&self) -> Result<(), DBError> { self.pool.fetch(|conn| db::create_tables(conn)) } - fn reset_status(&mut self) -> Result { + fn reset_status(&self) -> Result { 
self.pool.fetch(|conn| db::PeerInfo::reset_status(conn)) } - fn load_banlist(&mut self) -> Result<(), DBError> { + fn load_banlist(&self) -> Result<(), DBError> { self.clear_expires_banned_ip()?; let now = unix_time(); let ban_records = self.pool.fetch(|conn| db::get_ban_records(conn, now))?; + let mut guard = self.ban_list.write(); for (ip, ban_time) in ban_records { - self.ban_list.insert(ip, ban_time); + guard.insert(ip, ban_time); } Ok(()) } - fn ban_ip(&mut self, addr: &Multiaddr, timeout: Duration) { + fn ban_ip(&self, addr: &Multiaddr, timeout: Duration) { let ip = { match addr.extract_ip_addr_binary() { Some(binary) => binary, @@ -100,8 +102,9 @@ impl SqlitePeerStore { .fetch(|conn| db::insert_ban_record(&conn, &ip, ban_time)) .expect("ban ip"); } - self.ban_list.insert(ip, ban_time); - if self.ban_list.len() > BAN_LIST_CLEAR_EXPIRES_SIZE { + let mut guard = self.ban_list.write(); + guard.insert(ip, ban_time); + if guard.len() > BAN_LIST_CLEAR_EXPIRES_SIZE { self.clear_expires_banned_ip().expect("clear ban list"); } } @@ -112,25 +115,26 @@ impl SqlitePeerStore { None => return false, }; let now = unix_time(); - match self.ban_list.get(&ip) { + match self.ban_list.read().get(&ip) { Some(ban_time) => *ban_time > now, None => false, } } - fn clear_expires_banned_ip(&mut self) -> Result<(), DBError> { + fn clear_expires_banned_ip(&self) -> Result<(), DBError> { let now = unix_time(); let ips = self .pool .fetch(|conn| db::clear_expires_banned_ip(conn, now))?; + let mut guard = self.ban_list.write(); for ip in ips { - self.ban_list.remove(&ip); + guard.remove(&ip); } Ok(()) } /// check and try delete peer_info if peer_infos reach limit - fn check_store_limit(&mut self) -> Result<(), ()> { + fn check_store_limit(&self) -> Result<(), ()> { let peer_info_count = self .pool .fetch(|conn| db::PeerInfo::count(conn)) @@ -167,7 +171,7 @@ impl SqlitePeerStore { Ok(()) } - fn fetch_peer_info(&mut self, peer_id: &PeerId) -> db::PeerInfo { + fn fetch_peer_info(&self, peer_id: &PeerId) -> db::PeerInfo { let blank_addr = &Multiaddr::from_bytes(Vec::new()).expect("null multiaddr"); self.pool .fetch(|conn| { @@ -207,7 +211,7 @@ impl SqlitePeerStore { } impl PeerStore for SqlitePeerStore { - fn add_connected_peer(&mut self, peer_id: &PeerId, addr: Multiaddr, endpoint: SessionType) { + fn add_connected_peer(&self, peer_id: &PeerId, addr: Multiaddr, endpoint: SessionType) { if self.check_store_limit().is_err() { return; } @@ -243,7 +247,7 @@ impl PeerStore for SqlitePeerStore { .expect("upsert peer info"); } - fn add_discovered_addr(&mut self, peer_id: &PeerId, addr: Multiaddr) -> bool { + fn add_discovered_addr(&self, peer_id: &PeerId, addr: Multiaddr) -> bool { // peer store is full if self.check_store_limit().is_err() { return false; @@ -256,7 +260,7 @@ impl PeerStore for SqlitePeerStore { inserted > 0 } - fn report(&mut self, peer_id: &PeerId, behaviour: Behaviour) -> ReportResult { + fn report(&self, peer_id: &PeerId, behaviour: Behaviour) -> ReportResult { if self.is_banned(peer_id) { return ReportResult::Banned; } @@ -272,7 +276,7 @@ impl PeerStore for SqlitePeerStore { ReportResult::Ok } - fn update_status(&mut self, peer_id: &PeerId, status: Status) { + fn update_status(&self, peer_id: &PeerId, status: Status) { if let Some(peer) = self.get_peer_info(peer_id) { self.pool .fetch(|conn| db::PeerInfo::update_status(&conn, peer.id, status)) @@ -290,14 +294,14 @@ impl PeerStore for SqlitePeerStore { self.get_peer_info(peer_id).map(|peer| peer.score) } - fn add_bootnode(&mut self, peer_id: PeerId, 
addr: Multiaddr) { - self.bootnodes.push((peer_id, addr)); + fn add_bootnode(&self, peer_id: PeerId, addr: Multiaddr) { + self.bootnodes.write().push((peer_id, addr)); } // should return high scored nodes if possible, otherwise, return boostrap nodes fn bootnodes(&self, count: u32) -> Vec<(PeerId, Multiaddr)> { let mut peers = self.peers_to_attempt(count); if peers.len() < count as usize { - for (peer_id, addr) in &self.bootnodes { + for (peer_id, addr) in self.bootnodes.read().iter() { let peer = (peer_id.to_owned(), addr.to_owned()); if !peers.contains(&peer) { peers.push(peer); @@ -348,7 +352,7 @@ impl PeerStore for SqlitePeerStore { .expect("get random peers") } - fn ban_peer(&mut self, peer_id: &PeerId, timeout: Duration) { + fn ban_peer(&self, peer_id: &PeerId, timeout: Duration) { if let Some(peer) = self.get_peer_info(peer_id) { self.ban_ip(&peer.connected_addr, timeout); } diff --git a/network/src/peers_registry.rs b/network/src/peers_registry.rs index 0ccb13ceab..a3794b0f2a 100644 --- a/network/src/peers_registry.rs +++ b/network/src/peers_registry.rs @@ -1,7 +1,7 @@ use crate::peer_store::PeerStore; use crate::{ errors::{Error, PeerError}, - Peer, PeerId, PeerIndex, SessionType, + Peer, PeerId, PeerIndex, ProtocolId, ProtocolVersion, SessionType, }; use ckb_util::RwLock; use fnv::{FnvHashMap, FnvHashSet}; @@ -9,7 +9,6 @@ use log::debug; use p2p::{multiaddr::Multiaddr, SessionId}; use rand::seq::SliceRandom; use rand::thread_rng; -use std::collections::hash_map::Entry; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -30,78 +29,6 @@ impl RegisterResult { } } -struct PeerManage { - id_allocator: AtomicUsize, - peers: FnvHashMap, - pub(crate) peer_id_by_index: FnvHashMap, -} - -impl PeerManage { - #[inline] - fn get(&self, peer_id: &PeerId) -> Option<&Peer> { - self.peers.get(peer_id) - } - - #[inline] - fn get_peer_id(&self, peer_index: PeerIndex) -> Option<&PeerId> { - self.peer_id_by_index.get(&peer_index) - } - - #[inline] - fn get_mut(&mut self, peer_id: &PeerId) -> Option<&mut Peer> { - self.peers.get_mut(peer_id) - } - - #[inline] - fn remove(&mut self, peer_id: &PeerId) -> Option { - if let Some(peer) = self.peers.remove(peer_id) { - self.peer_id_by_index.remove(&peer.peer_index); - return Some(peer); - } - None - } - - #[inline] - fn iter(&self) -> impl Iterator { - self.peers.iter() - } - #[inline] - fn add_peer( - &mut self, - peer_id: PeerId, - connected_addr: Multiaddr, - session_id: SessionId, - session_type: SessionType, - ) -> RegisterResult { - match self.peers.entry(peer_id.clone()) { - Entry::Occupied(entry) => RegisterResult::Exist(entry.get().peer_index), - Entry::Vacant(entry) => { - let peer_index = self.id_allocator.fetch_add(1, Ordering::Relaxed); - let peer = Peer::new(peer_index, connected_addr, session_id, session_type); - entry.insert(peer); - self.peer_id_by_index.insert(peer_index, peer_id); - RegisterResult::New(peer_index) - } - } - } - - fn clear(&mut self) { - self.peers.clear(); - self.peer_id_by_index.clear(); - self.id_allocator.store(0, Ordering::Relaxed) - } -} - -impl Default for PeerManage { - fn default() -> Self { - PeerManage { - id_allocator: AtomicUsize::new(0), - peers: FnvHashMap::with_capacity_and_hasher(20, Default::default()), - peer_id_by_index: FnvHashMap::with_capacity_and_hasher(20, Default::default()), - } - } -} - #[derive(Clone, Copy, Debug)] pub struct ConnectionStatus { pub total: u32, @@ -112,9 +39,10 @@ pub struct ConnectionStatus { } pub(crate) struct PeersRegistry { - // store all known peers - 
peer_store: Arc>, - peers: PeerManage, + id_allocator: AtomicUsize, + peers: RwLock>, + peer_id_by_index: RwLock>, + peer_store: Arc, // max inbound limitation max_inbound: u32, // max outbound limitation @@ -154,7 +82,7 @@ where impl PeersRegistry { pub fn new( - peer_store: Arc>, + peer_store: Arc, max_inbound: u32, max_outbound: u32, reserved_only: bool, @@ -166,8 +94,13 @@ impl PeersRegistry { reserved_peers_set.insert(reserved_peer); } PeersRegistry { + id_allocator: AtomicUsize::new(0), + peers: RwLock::new(FnvHashMap::with_capacity_and_hasher(20, Default::default())), + peer_id_by_index: RwLock::new(FnvHashMap::with_capacity_and_hasher( + 20, + Default::default(), + )), peer_store, - peers: Default::default(), reserved_peers: reserved_peers_set, max_inbound, max_outbound, @@ -175,87 +108,83 @@ impl PeersRegistry { } } - #[inline] - pub fn get_peer_id(&self, peer_index: PeerIndex) -> Option<&PeerId> { - self.peers.get_peer_id(peer_index) + pub fn get_peer_id(&self, peer_index: PeerIndex) -> Option { + self.peer_indexes_guard().read().get(&peer_index).cloned() } pub fn is_reserved(&self, peer_id: &PeerId) -> bool { self.reserved_peers.contains(&peer_id) } - pub fn accept_inbound_peer( - &mut self, + pub(crate) fn accept_connection( + &self, peer_id: PeerId, - addr: Multiaddr, + connected_addr: Multiaddr, session_id: SessionId, session_type: SessionType, + protocol_id: ProtocolId, + protocol_version: ProtocolVersion, ) -> Result { - if let Some(peer) = self.peers.get(&peer_id) { + let mut peers = self.peers.write(); + + if let Some(peer) = peers.get(&peer_id) { return Ok(RegisterResult::Exist(peer.peer_index)); } - if !self.is_reserved(&peer_id) { - if self.reserved_only { - return Err(Error::Peer(PeerError::NonReserved(peer_id))); - } - if self.peer_store.read().is_banned(&peer_id) { - return Err(Error::Peer(PeerError::Banned(peer_id))); - } - let connection_status = self.connection_status(); - // check peers connection limitation - if connection_status.unreserved_inbound >= self.max_inbound - && !self.try_evict_inbound_peer() - { - return Err(Error::Peer(PeerError::ReachMaxInboundLimit(peer_id))); - } - } - Ok(self.register_peer(peer_id, addr, session_id, session_type)) - } + let inbound = session_type.is_inbound(); + let mut peer_id_by_index = self.peer_id_by_index.write(); - pub fn try_outbound_peer( - &mut self, - peer_id: PeerId, - addr: Multiaddr, - session_id: SessionId, - session_type: SessionType, - ) -> Result { - if let Some(peer) = self.peers.get(&peer_id) { - return Ok(RegisterResult::Exist(peer.peer_index)); - } if !self.is_reserved(&peer_id) { if self.reserved_only { return Err(Error::Peer(PeerError::NonReserved(peer_id))); } - if self.peer_store.read().is_banned(&peer_id) { + // ban_list lock acquired + if self.peer_store.is_banned(&peer_id) { return Err(Error::Peer(PeerError::Banned(peer_id))); } - let connection_status = self.connection_status(); + + let connection_status = self._connection_status(peers.iter()); // check peers connection limitation - // TODO: implement extra outbound peer logic - if connection_status.unreserved_outbound >= self.max_outbound { + if inbound { + if connection_status.unreserved_inbound >= self.max_inbound + && !self._try_evict_inbound_peer(&mut peers, &mut peer_id_by_index) + { + return Err(Error::Peer(PeerError::ReachMaxInboundLimit(peer_id))); + } + } else if connection_status.unreserved_outbound >= self.max_outbound { return Err(Error::Peer(PeerError::ReachMaxOutboundLimit(peer_id))); } } - Ok(self.register_peer(peer_id, addr, 
session_id, session_type)) + self.peer_store + .add_connected_peer(&peer_id, connected_addr.clone(), session_type); + let peer_index = self.id_allocator.fetch_add(1, Ordering::Relaxed); + let mut peer = Peer::new(peer_index, connected_addr, session_id, session_type); + peer.protocols.insert(protocol_id, protocol_version); + peers.insert(peer_id.clone(), peer); + peer_id_by_index.insert(peer_index, peer_id); + Ok(RegisterResult::New(peer_index)) } - fn try_evict_inbound_peer(&mut self) -> bool { + fn _try_evict_inbound_peer( + &self, + peers: &mut FnvHashMap, + peer_id_by_index: &mut FnvHashMap, + ) -> bool { let peer_id: PeerId = { - let mut candidate_peers = self - .peers - .iter() - .filter(|(peer_id, peer)| peer.is_inbound() && !self.is_reserved(peer_id)) - .collect::>(); - let peer_store = self.peer_store.read(); + let mut candidate_peers = { + peers + .iter() + .filter(|(peer_id, peer)| peer.is_inbound() && !self.is_reserved(peer_id)) + .collect::>() + }; // Protect peers based on characteristics that an attacker hard to simulate or manipulate // Protect peers which has the highest score sort_then_drop_last_n_elements( &mut candidate_peers, EVICTION_PROTECT_PEERS, |(peer_id1, _), (peer_id2, _)| { - let peer1_score = peer_store.peer_score(peer_id1).unwrap_or_default(); - let peer2_score = peer_store.peer_score(peer_id2).unwrap_or_default(); + let peer1_score = self.peer_store.peer_score(peer_id1).unwrap_or_default(); + let peer2_score = self.peer_store.peer_score(peer_id2).unwrap_or_default(); peer1_score.cmp(&peer2_score) }, ); @@ -308,52 +237,33 @@ impl PeersRegistry { // randomly evict a lowest scored peer match evict_group .iter() - .min_by_key(|peer_id| peer_store.peer_score(peer_id).unwrap_or_default()) + .min_by_key(|peer_id| self.peer_store.peer_score(peer_id).unwrap_or_default()) { Some(peer_id) => peer_id.to_owned().to_owned(), None => return false, } }; debug!(target: "network", "evict inbound peer {:?}", peer_id); - self.drop_peer(&peer_id); + self._drop_peer(&peer_id, peers, peer_id_by_index); true } - // registry a new peer - fn register_peer( - &mut self, - peer_id: PeerId, - connected_addr: Multiaddr, - session_id: SessionId, - session_type: SessionType, - ) -> RegisterResult { - self.peer_store - .write() - .add_connected_peer(&peer_id, connected_addr.clone(), session_type); - self.peers - .add_peer(peer_id, connected_addr, session_id, session_type) - } - - #[inline] - pub(crate) fn peers_iter(&self) -> impl Iterator { - self.peers.iter() - } - - #[inline] - pub fn get(&self, peer_id: &PeerId) -> Option<&Peer> { - self.peers.get(peer_id) - } - - #[inline] - pub fn get_mut(&mut self, peer_id: &PeerId) -> Option<&mut Peer> { - self.peers.get_mut(peer_id) + pub fn modify_peer( + &self, + peer_id: &PeerId, + callback: impl FnOnce(&mut Peer) -> R, + ) -> Option { + self.peers.write().get_mut(peer_id).map(callback) } - pub fn connection_status(&self) -> ConnectionStatus { + pub fn _connection_status<'a>( + &self, + peers: impl Iterator, + ) -> ConnectionStatus { let mut total: u32 = 0; let mut unreserved_inbound: u32 = 0; let mut unreserved_outbound: u32 = 0; - for (peer_id, peer_connection) in self.peers.iter() { + for (peer_id, peer_connection) in peers { total += 1; if self.is_reserved(peer_id) { continue; @@ -373,19 +283,61 @@ impl PeersRegistry { } } - #[inline] - pub fn connected_peers_indexes(&self) -> impl Iterator + '_ { - self.peers.peer_id_by_index.iter().map(|(k, _v)| *k) + pub fn connection_status(&self) -> ConnectionStatus { + 
self._connection_status(self.peers.read().iter()) } #[inline] - pub fn drop_peer(&mut self, peer_id: &PeerId) -> Option { - self.peers.remove(peer_id) + pub fn connected_peers_indexes(&self) -> Vec { + self.peer_id_by_index + .read() + .iter() + .map(|(k, _v)| *k) + .collect::>() + } + + fn _drop_peer( + &self, + peer_id: &PeerId, + peers: &mut FnvHashMap, + peer_id_by_index: &mut FnvHashMap, + ) -> Option { + if let Some(peer) = peers.remove(peer_id) { + peer_id_by_index.remove(&peer.peer_index); + return Some(peer); + } + None } #[inline] - pub fn drop_all(&mut self) { + pub fn drop_peer(&self, peer_id: &PeerId) -> Option { + let mut peers = self.peers.write(); + let mut peer_id_by_index = self.peer_id_by_index.write(); + self._drop_peer(peer_id, &mut peers, &mut peer_id_by_index) + } + + pub fn peers_guard(&self) -> &RwLock> { + &self.peers + } + + fn peer_indexes_guard(&self) -> &RwLock> { + &self.peer_id_by_index + } + + fn _drop_all( + &self, + peers: &mut FnvHashMap, + peer_id_by_index: &mut FnvHashMap, + ) { + peers.clear(); + peer_id_by_index.clear(); + self.id_allocator.store(0, Ordering::Relaxed) + } + + pub fn drop_all(&self) { debug!(target: "network", "drop_all"); - self.peers.clear() + let mut peers = self.peers.write(); + let mut peer_id_by_index = self.peer_id_by_index.write(); + self._drop_all(&mut peers, &mut peer_id_by_index); } } diff --git a/network/src/protocols/discovery.rs b/network/src/protocols/discovery.rs index 360a7deedb..5ba78ea339 100644 --- a/network/src/protocols/discovery.rs +++ b/network/src/protocols/discovery.rs @@ -203,7 +203,6 @@ impl Stream for DiscoveryService { let _ = self .network_state .peer_store() - .write() .add_discovered_addr(&peer_id, addr); } } @@ -220,7 +219,6 @@ impl Stream for DiscoveryService { let addrs = self .network_state .peer_store() - .read() .random_peers(n as u32) .into_iter() .filter_map(|(peer_id, mut addr)| { diff --git a/network/src/protocols/identify.rs b/network/src/protocols/identify.rs index 5bd06340d1..2519b0731f 100644 --- a/network/src/protocols/identify.rs +++ b/network/src/protocols/identify.rs @@ -57,7 +57,7 @@ impl Callback for IdentifyCallback { ); self.remote_listen_addrs .insert(peer_id.clone(), addrs.clone()); - let mut peer_store = self.network_state.peer_store().write(); + let peer_store = self.network_state.peer_store(); for addr in addrs { let _ = peer_store.add_discovered_addr(&peer_id, addr); } @@ -104,7 +104,6 @@ impl Callback for IdentifyCallback { let _ = self .network_state .peer_store() - .write() .add_discovered_addr(local_peer_id, transformed_addr); } // NOTE: for future usage diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs index bce567e39e..47679622a7 100644 --- a/network/src/protocols/mod.rs +++ b/network/src/protocols/mod.rs @@ -161,7 +161,7 @@ impl ServiceProtocol for CKBHandler { Ok(register_result) => { // update status in peer_store if let RegisterResult::New(_) = register_result { - let mut peer_store = network.peer_store().write(); + let peer_store = network.peer_store(); peer_store.report(&peer_id, Behaviour::Connect); peer_store.update_status(&peer_id, Status::Connected); } @@ -340,6 +340,7 @@ impl CKBProtocolContext for DefaultCKBProtocolContext { let session_id = self .network_state .peers_registry + .peers_guard() .read() .get(&peer_id) .ok_or_else(|| PeerError::NotFound(peer_id.to_owned())) @@ -365,7 +366,6 @@ impl CKBProtocolContext for DefaultCKBProtocolContext { if self .network_state .peer_store() - .write() .report(&peer_id, behaviour) .is_banned() 
{ diff --git a/network/src/protocols/outbound_peer.rs b/network/src/protocols/outbound_peer.rs index 513543fe39..e54d60557b 100644 --- a/network/src/protocols/outbound_peer.rs +++ b/network/src/protocols/outbound_peer.rs @@ -30,11 +30,7 @@ impl OutboundPeerService { } fn attempt_dial_peers(&mut self, count: u32) { - let attempt_peers = self - .network_state - .peer_store() - .read() - .peers_to_attempt(count + 5); + let attempt_peers = self.network_state.peer_store().peers_to_attempt(count + 5); let mut p2p_control = self.p2p_control.clone(); trace!(target: "network", "count={}, attempt_peers: {:?}", count, attempt_peers); for (peer_id, addr) in attempt_peers @@ -61,11 +57,7 @@ impl OutboundPeerService { } fn feeler_peers(&mut self, count: u32) { - let peers = self - .network_state - .peer_store() - .read() - .peers_to_feeler(count); + let peers = self.network_state.peer_store().peers_to_feeler(count); let mut p2p_control = self.p2p_control.clone(); for (peer_id, addr) in peers .into_iter() diff --git a/network/src/tests/peers_registry.rs b/network/src/tests/peers_registry.rs index 7e1eb4fd3e..0e0f0de4e7 100644 --- a/network/src/tests/peers_registry.rs +++ b/network/src/tests/peers_registry.rs @@ -2,16 +2,18 @@ use crate::{ multiaddr::ToMultiaddr, peer_store::{PeerStore, SqlitePeerStore}, peers_registry::{PeersRegistry, EVICTION_PROTECT_PEERS}, - Behaviour, PeerId, SessionType, + Behaviour, PeerId, ProtocolId, ProtocolVersion, SessionType, }; -use ckb_util::RwLock; use std::sync::Arc; use std::time::{Duration, Instant}; -fn new_peer_store() -> Arc> { - Arc::new(RwLock::new(SqlitePeerStore::temp().expect("temp"))) +fn new_peer_store() -> Arc { + Arc::new(SqlitePeerStore::temp().expect("temp")) } +const TEST_PROTOCOL_ID: ProtocolId = 0; +const TEST_PROTOCOL_VERSION: ProtocolVersion = 0; + #[test] fn test_accept_inbound_peer_in_reserve_only_mode() { let peer_store = new_peer_store(); @@ -21,7 +23,7 @@ fn test_accept_inbound_peer_in_reserve_only_mode() { let session_type = SessionType::Inbound; // reserved_only mode: only accept reserved_peer - let mut peers = PeersRegistry::new( + let peers = PeersRegistry::new( Arc::clone(&peer_store), 3, 3, @@ -29,14 +31,24 @@ fn test_accept_inbound_peer_in_reserve_only_mode() { vec![reserved_peer.clone()], ); assert!(peers - .accept_inbound_peer(PeerId::random(), addr.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr.clone(), + session_id, + session_type, + 0, + 0 + ) .is_err()); + peers - .accept_inbound_peer( + .accept_connection( reserved_peer.clone(), addr.clone(), session_id, session_type, + 0, + 0, ) .expect("accept"); } @@ -49,7 +61,7 @@ fn test_accept_inbound_peer_until_full() { let session_id = 1; let session_type = SessionType::Inbound; // accept node until inbound connections is full - let mut peers = PeersRegistry::new( + let peers = PeersRegistry::new( Arc::clone(&peer_store), 3, 3, @@ -57,30 +69,67 @@ fn test_accept_inbound_peer_until_full() { vec![reserved_peer.clone()], ); peers - .accept_inbound_peer(PeerId::random(), addr.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, + ) .expect("accept"); peers - .accept_inbound_peer(PeerId::random(), addr.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, + ) .expect("accept"); peers - .accept_inbound_peer(PeerId::random(), 
addr.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, + ) .expect("accept"); println!("{:?}", peers.connection_status()); assert!(peers - .accept_inbound_peer(PeerId::random(), addr.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION + ) .is_err(),); // should still accept reserved peer peers - .accept_inbound_peer( + .accept_connection( reserved_peer.clone(), addr.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); // should refuse accept low score peer assert!(peers - .accept_inbound_peer(PeerId::random(), addr.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION + ) .is_err()); } @@ -101,7 +150,7 @@ fn test_accept_inbound_peer_eviction() { // prepare protected peers let longest_connection_time_peers_count = 5; let protected_peers_count = 3 * EVICTION_PROTECT_PEERS + longest_connection_time_peers_count; - let mut peers_registry = PeersRegistry::new( + let peers_registry = PeersRegistry::new( Arc::clone(&peer_store), (protected_peers_count + longest_connection_time_peers_count) as u32, 3, @@ -110,17 +159,29 @@ fn test_accept_inbound_peer_eviction() { ); for _ in 0..protected_peers_count { assert!(peers_registry - .accept_inbound_peer(PeerId::random(), addr2.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr2.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION + ) .is_ok()); } - let mut peers_iter = peers_registry - .peers_iter() - .map(|(peer_id, _)| peer_id.to_owned()) - .collect::>() - .into_iter(); + let peers: Vec<_> = { + peers_registry + .peers_guard() + .read() + .iter() + .map(|(peer_id, _)| peer_id) + .cloned() + .collect() + }; + + let mut peers_iter = peers.iter(); // higest scored peers { - let mut peer_store = peer_store.write(); for _ in 0..EVICTION_PROTECT_PEERS { let peer_id = peers_iter.next().unwrap(); peer_store.report(&peer_id, Behaviour::Ping); @@ -130,8 +191,9 @@ fn test_accept_inbound_peer_eviction() { // lowest ping peers for _ in 0..EVICTION_PROTECT_PEERS { let peer_id = peers_iter.next().unwrap(); - let mut peer = peers_registry.get_mut(&peer_id).unwrap(); - peer.ping = Some(Duration::from_secs(0)); + peers_registry.modify_peer(&peer_id, |peer| { + peer.ping = Some(Duration::from_secs(0)); + }); } // to prevent time error, we set now to 10ago. 
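// --- Editor's aside (illustrative sketch, not part of the patch; the test diff continues below) ---
// The hunks in this test mirror the `PeersRegistry` change earlier in this patch:
// `&mut self` accessors such as `get_mut` are replaced by a closure-based
// `modify_peer`, so the `RwLock` around the peer map stays private and callers
// only hold the write guard for the duration of the callback. A minimal
// standalone version of that pattern, assuming the `parking_lot` crate
// (re-exported as `ckb_util::RwLock` in this repo) and a simplified `Peer` type:

use parking_lot::RwLock;
use std::collections::HashMap;
use std::time::Duration;

#[derive(Default)]
struct Peer {
    ping: Option<Duration>,
}

#[derive(Default)]
struct Registry {
    peers: RwLock<HashMap<String, Peer>>,
}

impl Registry {
    // Take the write lock only while the callback runs, then release it.
    fn modify_peer<R>(&self, id: &str, f: impl FnOnce(&mut Peer) -> R) -> Option<R> {
        self.peers.write().get_mut(id).map(f)
    }
}

fn main() {
    let registry = Registry::default();
    registry.peers.write().insert("peer-1".to_string(), Peer::default());
    registry.modify_peer("peer-1", |peer| peer.ping = Some(Duration::from_millis(5)));
}
// --- end of editor's aside ---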
@@ -139,69 +201,82 @@ fn test_accept_inbound_peer_eviction() { // peers which most recently sent messages for _ in 0..EVICTION_PROTECT_PEERS { let peer_id = peers_iter.next().unwrap(); - let mut peer = peers_registry.get_mut(&peer_id).unwrap(); - peer.last_message_time = Some(now + Duration::from_secs(10)); + peers_registry.modify_peer(&peer_id, |peer| { + peer.last_message_time = Some(now + Duration::from_secs(10)); + }); } // protect 5 peers which have the longest connection time for _ in 0..longest_connection_time_peers_count { let peer_id = peers_iter.next().unwrap(); - let mut peer = peers_registry.get_mut(&peer_id).unwrap(); - peer.connected_time = now - Duration::from_secs(10); + peers_registry.modify_peer(&peer_id, |peer| { + peer.connected_time = now - Duration::from_secs(10); + }); } let mut new_peer_ids = (0..3).map(|_| PeerId::random()).collect::>(); // setup 3 node and 1 reserved node from addr1 peers_registry - .accept_inbound_peer( + .accept_connection( reserved_peer.clone(), addr1.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); peers_registry - .accept_inbound_peer( + .accept_connection( evict_target.clone(), addr1.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); peers_registry - .accept_inbound_peer( + .accept_connection( new_peer_ids[0].clone(), addr1.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); peers_registry - .accept_inbound_peer( + .accept_connection( new_peer_ids[1].clone(), addr1.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); // setup 2 node from addr2 peers_registry - .accept_inbound_peer( + .accept_connection( lowest_score_peer.clone(), addr2.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); peers_registry - .accept_inbound_peer( + .accept_connection( new_peer_ids[2].clone(), addr2.clone(), session_id, session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, ) .expect("accept"); // setup score { - let mut peer_store = peer_store.write(); peer_store.report(&lowest_score_peer, Behaviour::FailedToPing); peer_store.report(&lowest_score_peer, Behaviour::FailedToPing); peer_store.report(&lowest_score_peer, Behaviour::FailedToPing); @@ -216,14 +291,30 @@ fn test_accept_inbound_peer_eviction() { lowest_score_peer.clone(), ]); for peer_id in new_peer_ids { - let mut peer = peers_registry.get_mut(&peer_id).unwrap(); - // push the connected_time to make sure peer is unprotect - peer.connected_time = now + Duration::from_secs(10); + peers_registry.modify_peer(&peer_id, |peer| { + // push the connected_time to make sure peer is unprotect + peer.connected_time = now + Duration::from_secs(10); + }); } // should evict evict target - assert!(peers_registry.get(&evict_target).is_some()); + assert!(peers_registry + .peers_guard() + .read() + .get(&evict_target) + .is_some()); peers_registry - .accept_inbound_peer(PeerId::random(), addr1.clone(), session_id, session_type) + .accept_connection( + PeerId::random(), + addr1.clone(), + session_id, + session_type, + TEST_PROTOCOL_ID, + TEST_PROTOCOL_VERSION, + ) .expect("accept"); - assert!(peers_registry.get(&evict_target).is_none()); + assert!(peers_registry + .peers_guard() + .read() + .get(&evict_target) + .is_none()); } diff --git a/network/src/tests/sqlite_peer_store.rs b/network/src/tests/sqlite_peer_store.rs index 8c2f9e176c..5e76b43fc5 100644 --- 
a/network/src/tests/sqlite_peer_store.rs +++ b/network/src/tests/sqlite_peer_store.rs @@ -19,7 +19,7 @@ fn new_peer_store() -> SqlitePeerStore { #[test] fn test_add_connected_peer() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); let peer_id = PeerId::random(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); peer_store.add_connected_peer(&peer_id, addr, SessionType::Outbound); @@ -32,7 +32,7 @@ fn test_add_connected_peer() { #[test] fn test_add_discovered_addr() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); let peer_id = PeerId::random(); peer_store.add_discovered_addr(&peer_id, "/ip4/127.0.0.1".to_multiaddr().unwrap()); assert_eq!(peer_store.peer_addrs(&peer_id, 2).unwrap().len(), 1); @@ -40,7 +40,7 @@ fn test_add_discovered_addr() { #[test] fn test_report() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); let peer_id = PeerId::random(); assert!(peer_store.report(&peer_id, Behaviour::Ping).is_ok()); assert!( @@ -51,7 +51,7 @@ fn test_report() { #[test] fn test_update_status() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); let peer_id = PeerId::random(); peer_store.update_status(&peer_id, Status::Connected); assert_eq!(peer_store.peer_status(&peer_id), Status::Unknown); @@ -63,7 +63,7 @@ fn test_update_status() { #[test] fn test_ban_peer() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); let peer_id = PeerId::random(); peer_store.ban_peer(&peer_id, Duration::from_secs(10)); assert!(!peer_store.is_banned(&peer_id)); @@ -75,7 +75,7 @@ fn test_ban_peer() { #[test] fn test_attepmt_ban() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); let peer_id = PeerId::random(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); peer_store.add_connected_peer(&peer_id, addr.clone(), SessionType::Inbound); @@ -87,7 +87,7 @@ fn test_attepmt_ban() { #[test] fn test_bootnodes() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); assert!(peer_store.bootnodes(1).is_empty()); let peer_id = PeerId::random(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); @@ -103,7 +103,7 @@ fn test_bootnodes() { #[test] fn test_peers_to_attempt() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); assert!(peer_store.peers_to_attempt(1).is_empty()); let peer_id = PeerId::random(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); @@ -118,7 +118,7 @@ fn test_peers_to_attempt() { #[test] fn test_peers_to_feeler() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); assert!(peer_store.peers_to_feeler(1).is_empty()); let peer_id = PeerId::random(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); @@ -139,7 +139,7 @@ fn test_peers_to_feeler() { #[test] fn test_random_peers() { - let mut peer_store: Box = Box::new(new_peer_store()); + let peer_store: Box = Box::new(new_peer_store()); assert!(peer_store.random_peers(1).is_empty()); let peer_id = PeerId::random(); let addr = "/ip4/127.0.0.1".to_multiaddr().unwrap(); @@ -157,7 +157,7 @@ fn test_random_peers() { #[test] fn test_delete_peer_info() { - let mut peer_store = new_peer_store(); + let peer_store = new_peer_store(); let addr1 = 
"/ip4/127.0.0.1".to_multiaddr().unwrap(); let addr2 = "/ip4/192.163.1.1".to_multiaddr().unwrap(); let now = faketime::unix_time(); From 192bfcfaa011d8b692c94c0d13b18ffe16e1c94c Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Wed, 10 Apr 2019 17:51:17 +0800 Subject: [PATCH 18/29] fix: accept_connection protocol registry --- network/src/peers_registry.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/network/src/peers_registry.rs b/network/src/peers_registry.rs index a3794b0f2a..f24f5742c2 100644 --- a/network/src/peers_registry.rs +++ b/network/src/peers_registry.rs @@ -127,7 +127,8 @@ impl PeersRegistry { ) -> Result { let mut peers = self.peers.write(); - if let Some(peer) = peers.get(&peer_id) { + if let Some(peer) = peers.get_mut(&peer_id) { + peer.protocols.insert(protocol_id, protocol_version); return Ok(RegisterResult::Exist(peer.peer_index)); } From 2a972cb6efbc6336d6bbb2bef9325e8429a34442 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Wed, 10 Apr 2019 17:52:24 +0800 Subject: [PATCH 19/29] refactor: get rid of `let _ =` pattern --- core/src/script.rs | 2 +- network/src/network.rs | 4 ++- network/src/protocols/discovery.rs | 8 ++++-- network/src/protocols/identify.rs | 19 ++++++++----- rpc/src/module/miner.rs | 7 +++-- rpc/src/module/pool.rs | 7 +++-- rpc/src/module/trace.rs | 7 +++-- sync/src/net_time_checker.rs | 16 ++++++++--- sync/src/relayer/block_proposal_process.rs | 6 +++- sync/src/relayer/compact_block_process.rs | 7 ++++- .../src/relayer/get_block_proposal_process.rs | 6 +++- .../relayer/get_block_transactions_process.rs | 7 +++-- sync/src/relayer/mod.rs | 28 +++++++++++++++---- sync/src/relayer/transaction_process.rs | 8 ++++-- sync/src/synchronizer/get_blocks_process.rs | 12 ++++++-- sync/src/synchronizer/get_headers_process.rs | 12 ++++++-- sync/src/synchronizer/mod.rs | 21 +++++++++++--- 17 files changed, 135 insertions(+), 42 deletions(-) diff --git a/core/src/script.rs b/core/src/script.rs index f9449151d1..ce103d7289 100644 --- a/core/src/script.rs +++ b/core/src/script.rs @@ -27,7 +27,7 @@ fn prefix_hex(bytes: &[u8]) -> String { let mut dst = vec![0u8; bytes.len() * 2 + 2]; dst[0] = b'0'; dst[1] = b'x'; - let _ = hex_encode(bytes, &mut dst[2..]); + hex_encode(bytes, &mut dst[2..]).expect("hex encode buffer checked"); unsafe { String::from_utf8_unchecked(dst) } } diff --git a/network/src/network.rs b/network/src/network.rs index 74990c3223..1b1a6fed10 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -231,7 +231,9 @@ impl NetworkState { // A workaround method for `add_node` rpc call, need to re-write it after new p2p lib integration. 
pub fn add_node(&self, peer_id: &PeerId, address: Multiaddr) { - let _ = self.peer_store().add_discovered_addr(peer_id, address); + if !self.peer_store().add_discovered_addr(peer_id, address) { + warn!(target: "network", "add_node failed {:?}", peer_id); + } } fn to_external_url(&self, addr: &Multiaddr) -> String { diff --git a/network/src/protocols/discovery.rs b/network/src/protocols/discovery.rs index 5ba78ea339..87b24ec204 100644 --- a/network/src/protocols/discovery.rs +++ b/network/src/protocols/discovery.rs @@ -200,10 +200,14 @@ impl Stream for DiscoveryService { _ => true, }) .collect::(); - let _ = self + + if !self .network_state .peer_store() - .add_discovered_addr(&peer_id, addr); + .add_discovered_addr(&peer_id, addr) + { + warn!(target: "network", "add_discovered_addr failed {:?}", peer_id); + } } } } diff --git a/network/src/protocols/identify.rs b/network/src/protocols/identify.rs index 2519b0731f..0589050bd6 100644 --- a/network/src/protocols/identify.rs +++ b/network/src/protocols/identify.rs @@ -1,9 +1,6 @@ // use crate::peer_store::Behaviour; use crate::NetworkState; -use std::collections::HashMap; -use std::sync::Arc; - -use log::{debug, trace}; +use log::{debug, trace, warn}; use p2p::{ multiaddr::{Multiaddr, Protocol}, secio::PeerId, @@ -11,6 +8,8 @@ use p2p::{ utils::{is_reachable, multiaddr_to_socketaddr}, }; use p2p_identify::{Callback, MisbehaveResult, Misbehavior}; +use std::collections::HashMap; +use std::sync::Arc; const MAX_RETURN_LISTEN_ADDRS: usize = 10; @@ -59,7 +58,9 @@ impl Callback for IdentifyCallback { .insert(peer_id.clone(), addrs.clone()); let peer_store = self.network_state.peer_store(); for addr in addrs { - let _ = peer_store.add_discovered_addr(&peer_id, addr); + if !peer_store.add_discovered_addr(&peer_id, addr) { + warn!(target: "network", "add_discovered_addr failed {:?}", peer_id); + } } } @@ -101,10 +102,14 @@ impl Callback for IdentifyCallback { { debug!(target: "network", "identify add transformed addr: {:?}", transformed_addr); let local_peer_id = self.network_state.local_peer_id(); - let _ = self + + if !self .network_state .peer_store() - .add_discovered_addr(local_peer_id, transformed_addr); + .add_discovered_addr(local_peer_id, transformed_addr) + { + warn!(target: "network", "add_discovered_addr failed {:?}", local_peer_id); + } } // NOTE: for future usage MisbehaveResult::Continue diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index b023da5b39..d514773905 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -11,7 +11,7 @@ use flatbuffers::FlatBufferBuilder; use jsonrpc_core::{Error, Result}; use jsonrpc_derive::rpc; use jsonrpc_types::{Block, BlockTemplate}; -use log::debug; +use log::{debug, warn}; use numext_fixed_hash::H256; use std::collections::HashSet; use std::sync::Arc; @@ -72,7 +72,10 @@ impl MinerRpc for MinerRpcImpl { RelayMessage::build_compact_block(fbb, &block, &HashSet::new()); fbb.finish(message, None); for peer in nc.connected_peers() { - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "rpc", "relay block error {:?}", ret); + } } }, ); diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index 288837afca..e499ebc3cc 100644 --- a/rpc/src/module/pool.rs +++ b/rpc/src/module/pool.rs @@ -12,7 +12,7 @@ use flatbuffers::FlatBufferBuilder; use jsonrpc_core::Result; use jsonrpc_derive::rpc; use jsonrpc_types::Transaction; -use log::debug; +use log::{debug, warn}; use 
numext_fixed_hash::H256; #[rpc] @@ -61,7 +61,10 @@ impl PoolRpc for PoolRpcImpl { |mut nc| { for peer in nc.connected_peers() { debug!(target: "rpc", "relay transaction {} to peer#{}", tx_hash, peer); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "rpc", "relay transaction error {:?}", ret); + } } }, ); diff --git a/rpc/src/module/trace.rs b/rpc/src/module/trace.rs index b5f6ef6176..e37f753e27 100644 --- a/rpc/src/module/trace.rs +++ b/rpc/src/module/trace.rs @@ -13,7 +13,7 @@ use flatbuffers::FlatBufferBuilder; use jsonrpc_core::Result; use jsonrpc_derive::rpc; use jsonrpc_types::Transaction; -use log::debug; +use log::{debug, warn}; use numext_fixed_hash::H256; #[rpc] @@ -60,7 +60,10 @@ impl TraceRpc for TraceRpcImpl { |mut nc| { for peer in nc.connected_peers() { debug!(target: "rpc", "relay transaction {} to peer#{}", tx_hash, peer); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "rpc", "relay transaction error {:?}", ret); + } } }, ); diff --git a/sync/src/net_time_checker.rs b/sync/src/net_time_checker.rs index 3cf14965b6..6a909d0bf9 100644 --- a/sync/src/net_time_checker.rs +++ b/sync/src/net_time_checker.rs @@ -94,8 +94,10 @@ impl CKBProtocolHandler for NetTimeProtocol { fn received(&self, nc: Box, peer: PeerIndex, data: Bytes) { if nc.session_info(peer).map(|s| s.peer.is_outbound()) != Some(true) { info!(target: "network", "Peer {} is not outbound but sends us time message", peer); - let _ = nc.report_peer(peer, Behaviour::UnexpectedMessage); - return; + let ret = nc.report_peer(peer, Behaviour::UnexpectedMessage); + if ret.is_err() { + warn!(target: "network", "report_peer peer {:?} UnexpectedMessage error {:?}", peer, ret); + } } let timestamp = match get_root::(&data) @@ -106,7 +108,10 @@ impl CKBProtocolHandler for NetTimeProtocol { Some(timestamp) => timestamp, None => { info!(target: "network", "Peer {} sends us malformed message", peer); - let _ = nc.report_peer(peer, Behaviour::UnexpectedMessage); + let ret = nc.report_peer(peer, Behaviour::UnexpectedMessage); + if ret.is_err() { + warn!(target: "network", "report_peer peer {:?} UnexpectedMessage error {:?}", peer, ret); + } return; } }; @@ -128,7 +133,10 @@ impl CKBProtocolHandler for NetTimeProtocol { let fbb = &mut FlatBufferBuilder::new(); let message = TimeMessage::build_time(fbb, now); fbb.finish(message, None); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "network", "NetTimeProtocol connected init msg send error {:?}", ret); + } } } fn disconnected(&self, _nc: Box, _peer: PeerIndex) {} diff --git a/sync/src/relayer/block_proposal_process.rs b/sync/src/relayer/block_proposal_process.rs index 4f0c294c13..2837687208 100644 --- a/sync/src/relayer/block_proposal_process.rs +++ b/sync/src/relayer/block_proposal_process.rs @@ -4,6 +4,7 @@ use ckb_shared::index::ChainIndex; use ckb_traits::chain_provider::ChainProvider; use ckb_util::TryInto; use failure::Error as FailureError; +use log::warn; pub struct BlockProposalProcess<'a, CI: ChainIndex + 'a> { message: &'a BlockProposal<'a>, @@ -22,10 +23,13 @@ where let chain_state = self.relayer.shared.chain_state().lock(); let txs = FlatbuffersVectorIterator::new(cast!(self.message.transactions())?); for tx in txs { - let _ = chain_state.add_tx_to_pool( + let ret = 
chain_state.add_tx_to_pool( TryInto::try_into(tx)?, self.relayer.shared.consensus().max_block_cycles(), ); + if ret.is_err() { + warn!(target: "relay", "BlockProposal add_tx_to_pool error {:?}", ret) + } } Ok(()) } diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 673b1178f5..38283b8521 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -10,6 +10,7 @@ use ckb_verification::{HeaderResolverWrapper, HeaderVerifier, Verifier}; use failure::Error as FailureError; use flatbuffers::FlatBufferBuilder; use fnv::FnvHashMap; +use log::warn; use numext_fixed_hash::H256; use std::sync::Arc; @@ -94,7 +95,11 @@ where .collect::>(), ); fbb.finish(message, None); - let _ = self.nc.send(self.peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(self.peer, fbb.finished_data().to_vec()); + + if ret.is_err() { + warn!(target: "relay", "CompactBlockProcess relay error {:?}", ret); + } } Ok(()) } diff --git a/sync/src/relayer/get_block_proposal_process.rs b/sync/src/relayer/get_block_proposal_process.rs index 2903e86d7d..e5d02da5b4 100644 --- a/sync/src/relayer/get_block_proposal_process.rs +++ b/sync/src/relayer/get_block_proposal_process.rs @@ -5,6 +5,7 @@ use ckb_shared::index::ChainIndex; use ckb_util::TryInto; use failure::Error as FailureError; use flatbuffers::FlatBufferBuilder; +use log::warn; pub struct GetBlockProposalProcess<'a, CI: ChainIndex + 'a> { message: &'a GetBlockProposal<'a>, @@ -62,7 +63,10 @@ where let message = RelayMessage::build_block_proposal(fbb, &transactions); fbb.finish(message, None); - let _ = self.nc.send(self.peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(self.peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "relay", "GetBlockProposalProcess response error {:?}", ret); + } Ok(()) } } diff --git a/sync/src/relayer/get_block_transactions_process.rs b/sync/src/relayer/get_block_transactions_process.rs index 125d96ce35..f8e1482f83 100644 --- a/sync/src/relayer/get_block_transactions_process.rs +++ b/sync/src/relayer/get_block_transactions_process.rs @@ -5,7 +5,7 @@ use ckb_shared::index::ChainIndex; use ckb_util::TryInto; use failure::Error as FailureError; use flatbuffers::FlatBufferBuilder; -use log::debug; +use log::{debug, warn}; pub struct GetBlockTransactionsProcess<'a, CI: ChainIndex + 'a> { message: &'a GetBlockTransactions<'a>, @@ -49,7 +49,10 @@ where let message = RelayMessage::build_block_transactions(fbb, &hash, &transactions); fbb.finish(message, None); - let _ = self.nc.send(self.peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(self.peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "relay", "GetBlockTransactionsProcess response error {:?}", ret); + } } Ok(()) diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 7797c45231..88dae3673a 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -33,6 +33,7 @@ use ckb_util::Mutex; use failure::Error as FailureError; use flatbuffers::FlatBufferBuilder; use fnv::{FnvHashMap, FnvHashSet}; +use log::warn; use log::{debug, info}; use numext_fixed_hash::H256; use std::collections::HashSet; @@ -128,7 +129,11 @@ where fn process(&self, nc: &mut CKBProtocolContext, peer: PeerIndex, message: RelayMessage) { if self.try_process(nc, peer, message).is_err() { - let _ = nc.report_peer(peer, Behaviour::UnexpectedMessage); + let ret = nc.report_peer(peer, Behaviour::UnexpectedMessage); + + if ret.is_err() { + 
warn!(target: "network", "report_peer peer {:?} UnexpectedMessage error {:?}", peer, ret); + } } } @@ -158,7 +163,10 @@ where RelayMessage::build_get_block_proposal(fbb, block.header.number(), &unknown_ids); fbb.finish(message, None); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "relay", "relay get_block_proposal error {:?}", ret); + } } pub fn accept_block(&self, nc: &mut CKBProtocolContext, peer: PeerIndex, block: &Arc) { @@ -170,7 +178,10 @@ where for peer_id in nc.connected_peers() { if peer_id != peer { - let _ = nc.send(peer_id, fbb.finished_data().to_vec()); + let ret = nc.send(peer_id, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "relay", "relay compact_block error {:?}", ret); + } } } } else { @@ -279,7 +290,11 @@ where RelayMessage::build_block_proposal(fbb, &txs.into_iter().collect::>()); fbb.finish(message, None); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + + if ret.is_err() { + warn!(target: "relay", "send block_proposal error {:?}", ret); + } } } @@ -305,7 +320,10 @@ where Ok(msg) => msg, _ => { info!(target: "sync", "Peer {} sends us a malformed message", peer); - let _ = nc.report_peer(peer, Behaviour::UnexpectedMessage); + let ret = nc.report_peer(peer, Behaviour::UnexpectedMessage); + if ret.is_err() { + warn!(target: "network", "report_peer peer {:?} UnexpectedMessage error {:?}", peer, ret); + } return; } }; diff --git a/sync/src/relayer/transaction_process.rs b/sync/src/relayer/transaction_process.rs index cf9f6ab45d..1903d76fa8 100644 --- a/sync/src/relayer/transaction_process.rs +++ b/sync/src/relayer/transaction_process.rs @@ -9,7 +9,7 @@ use ckb_util::TryInto; use ckb_verification::TransactionError; use failure::Error as FailureError; use flatbuffers::FlatBufferBuilder; -use log::debug; +use log::{debug, warn}; use std::time::Duration; const DEFAULT_BAN_TIME: Duration = Duration::from_secs(3600 * 24 * 3); @@ -64,7 +64,11 @@ where .get(&peer) .map_or(true, |filter| filter.contains(&tx)) { - let _ = self.nc.send(peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(peer, fbb.finished_data().to_vec()); + + if ret.is_err() { + warn!(target: "relay", "relay Transaction error {:?}", ret); + } } } } diff --git a/sync/src/synchronizer/get_blocks_process.rs b/sync/src/synchronizer/get_blocks_process.rs index fc81ec9142..3ec64179cf 100644 --- a/sync/src/synchronizer/get_blocks_process.rs +++ b/sync/src/synchronizer/get_blocks_process.rs @@ -5,7 +5,7 @@ use ckb_shared::index::ChainIndex; use ckb_util::TryInto; use failure::Error as FailureError; use flatbuffers::FlatBufferBuilder; -use log::debug; +use log::{debug, warn}; pub struct GetBlocksProcess<'a, CI: ChainIndex + 'a> { message: &'a GetBlocks<'a>, @@ -59,12 +59,18 @@ where let message = SyncMessage::build_filtered_block(fbb, &block, &transactions_index); fbb.finish(message, None); - let _ = self.nc.send(self.peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(self.peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "relay", "response GetBlocks error {:?}", ret); + } } else { let fbb = &mut FlatBufferBuilder::new(); let message = SyncMessage::build_block(fbb, &block); fbb.finish(message, None); - let _ = self.nc.send(self.peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(self.peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "relay", "response GetBlocks 
error {:?}", ret); + } } } else { // TODO response not found diff --git a/sync/src/synchronizer/get_headers_process.rs b/sync/src/synchronizer/get_headers_process.rs index 4f9c1d869f..1ce91b5265 100644 --- a/sync/src/synchronizer/get_headers_process.rs +++ b/sync/src/synchronizer/get_headers_process.rs @@ -71,12 +71,20 @@ where let fbb = &mut FlatBufferBuilder::new(); let message = SyncMessage::build_headers(fbb, &headers); fbb.finish(message, None); - let _ = self.nc.send(self.peer, fbb.finished_data().to_vec()); + let ret = self.nc.send(self.peer, fbb.finished_data().to_vec()); + + if ret.is_err() { + warn!(target: "sync", "response GetHeaders error {:?}", ret); + } } else { warn!(target: "sync", "\n\nunknown block headers from peer {} {:#?}\n\n", self.peer, block_locator_hashes); // Got 'headers' message without known blocks // ban or close peers - let _ = self.nc.report_peer(self.peer, Behaviour::SyncUseless); + let report_ret = self.nc.report_peer(self.peer, Behaviour::SyncUseless); + + if report_ret.is_err() { + warn!(target: "sync", "report behaviour SyncUseless error {:?}", report_ret); + } // disconnect peer anyway self.nc.disconnect(self.peer); } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 9fdff61699..8f31860a28 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -182,7 +182,10 @@ impl Synchronizer { fn process(&self, nc: &mut CKBProtocolContext, peer: PeerIndex, message: SyncMessage) { if self.try_process(nc, peer, message).is_err() { - let _ = nc.report_peer(peer, Behaviour::UnexpectedMessage); + let ret = nc.report_peer(peer, Behaviour::UnexpectedMessage); + if ret.is_err() { + warn!(target: "network", "report_peer peer {:?} UnexpectedMessage error {:?}", peer, ret); + } } } @@ -537,7 +540,11 @@ impl Synchronizer { let fbb = &mut FlatBufferBuilder::new(); let message = SyncMessage::build_get_headers(fbb, &locator_hash); fbb.finish(message, None); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + + if ret.is_err() { + warn!(target: "sync", "send_getheaders_to_peer error {:?}", ret); + } } // - If at timeout their best known block now has more work than our tip @@ -699,7 +706,10 @@ impl Synchronizer { let fbb = &mut FlatBufferBuilder::new(); let message = SyncMessage::build_get_blocks(fbb, v_fetch); fbb.finish(message, None); - let _ = nc.send(peer, fbb.finished_data().to_vec()); + let ret = nc.send(peer, fbb.finished_data().to_vec()); + if ret.is_err() { + warn!(target: "sync", "send_getblocks error {:?}", ret); + } debug!(target: "sync", "send_getblocks len={:?} to peer={}", v_fetch.len() , peer); } } @@ -720,7 +730,10 @@ where Ok(msg) => msg, _ => { info!(target: "sync", "Peer {} sends us a malformed message", peer); - let _ = nc.report_peer(peer, Behaviour::UnexpectedMessage); + let ret = nc.report_peer(peer, Behaviour::UnexpectedMessage); + if ret.is_err() { + warn!(target: "sync", "report_peer peer {:?} UnexpectedMessage error {:?}", peer, ret) + } return; } }; From b34a0c0067a6171a022e42be82841c96dcb1c9dc Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Tue, 2 Apr 2019 19:18:08 +0800 Subject: [PATCH 20/29] fix: temp fix tx relay flood --- rpc/src/module/pool.rs | 5 ++++- shared/src/chain_state.rs | 13 +++++++++++-- shared/src/tx_pool/types.rs | 2 ++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index e499ebc3cc..cf9e7392fb 100644 --- a/rpc/src/module/pool.rs +++ 
b/rpc/src/module/pool.rs @@ -47,7 +47,10 @@ impl PoolRpc for PoolRpcImpl { Ok(cycles) => Some(cycles), }; let entry = PoolEntry::new(tx.clone(), 0, cycles); - chain_state.mut_tx_pool().enqueue_tx(entry); + if !chain_state.mut_tx_pool().enqueue_tx(entry) { + // Duplicate tx + return Ok(tx_hash); + } cycles }; match cycles { diff --git a/shared/src/chain_state.rs b/shared/src/chain_state.rs index 03f20a50c1..adff0b2052 100644 --- a/shared/src/chain_state.rs +++ b/shared/src/chain_state.rs @@ -104,7 +104,12 @@ impl ChainState { let short_id = tx.proposal_short_id(); let rtx = self.resolve_tx_from_pool(&tx, &tx_pool); let verify_result = self.verify_rtx(&rtx, max_cycles); + let tx_hash = tx.hash(); if self.contains_proposal_id(&short_id) { + if !self.filter.insert(tx_hash.clone()) { + trace!(target: "tx_pool", "discarding already known transaction {:#x}", tx_hash); + return Err(PoolError::Duplicate); + } let entry = PoolEntry::new(tx, 0, verify_result.map(Some).unwrap_or(None)); self.staging_tx(&mut tx_pool, entry, max_cycles)?; Ok(verify_result.map_err(PoolError::InvalidTx)?) @@ -113,12 +118,16 @@ impl ChainState { Ok(cycles) => { // enqueue tx with cycles let entry = PoolEntry::new(tx, 0, Some(cycles)); - tx_pool.enqueue_tx(entry); + if !tx_pool.enqueue_tx(entry) { + return Err(PoolError::Duplicate); + } Ok(cycles) } Err(TransactionError::UnknownInput) => { let entry = PoolEntry::new(tx, 0, None); - tx_pool.enqueue_tx(entry); + if !tx_pool.enqueue_tx(entry) { + return Err(PoolError::Duplicate); + } Err(PoolError::InvalidTx(TransactionError::UnknownInput)) } Err(err) => Err(PoolError::InvalidTx(err)), diff --git a/shared/src/tx_pool/types.rs b/shared/src/tx_pool/types.rs index d6ff0b313a..64cd81d0cd 100644 --- a/shared/src/tx_pool/types.rs +++ b/shared/src/tx_pool/types.rs @@ -79,6 +79,8 @@ pub enum PoolError { TimeOut, /// BlockNumber is not right InvalidBlockNumber, + /// Duplicate tx + Duplicate, } impl fmt::Display for PoolError { From e1ac356d33d309d4ed0dcf9be000150cec590478 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Wed, 10 Apr 2019 22:11:44 +0800 Subject: [PATCH 21/29] fix: create tempfile in root dir prevent cross-device link (#438) --- resource/src/lib.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/resource/src/lib.rs b/resource/src/lib.rs index 494a078654..789df621d0 100644 --- a/resource/src/lib.rs +++ b/resource/src/lib.rs @@ -146,21 +146,19 @@ impl ResourceLocator { pub fn export_ckb<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { let ckb = Resource::Bundled(CKB_CONFIG_FILE_NAME.to_string()); let template = Template::new(from_utf8(ckb.get()?)?); - let mut out = NamedTempFile::new()?; + let mut out = NamedTempFile::new_in(&self.root_dir)?; template.write_to(&mut out, context)?; - out.into_temp_path() - .persist(self.root_dir.join(CKB_CONFIG_FILE_NAME)) - .map_err(Into::into) + out.persist(self.root_dir.join(CKB_CONFIG_FILE_NAME))?; + Ok(()) } pub fn export_miner<'a>(&self, context: &TemplateContext<'a>) -> Result<()> { let miner = Resource::Bundled(MINER_CONFIG_FILE_NAME.to_string()); let template = Template::new(from_utf8(miner.get()?)?); - let mut out = NamedTempFile::new()?; + let mut out = NamedTempFile::new_in(&self.root_dir)?; template.write_to(&mut out, context)?; - out.into_temp_path() - .persist(self.root_dir.join(MINER_CONFIG_FILE_NAME)) - .map_err(Into::into) + out.persist(self.root_dir.join(MINER_CONFIG_FILE_NAME))?; + Ok(()) } pub fn export_specs(&self) -> Result<()> { @@ -168,7 +166,7 @@ impl 
ResourceLocator { if name.starts_with(SPECS_RESOURCE_DIR_NAME) { let path = self.root_dir.join(name); fs::create_dir_all(path.parent().unwrap())?; - let mut out = NamedTempFile::new()?; + let mut out = NamedTempFile::new_in(&self.root_dir)?; io::copy(&mut BUNDLED.read(name)?, &mut out)?; out.into_temp_path().persist(path)?; } From f55cec5eff1166bada042c67408c1d447bb368ce Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Wed, 3 Apr 2019 14:21:32 +0800 Subject: [PATCH 22/29] feat: experimental deadlock detection --- Cargo.lock | 36 ++++++++++++++++++++++++++++++++++++ src/helper.rs | 38 ++++++++++++++++++++++++++++++++++++++ src/main.rs | 2 +- src/subcommand/run.rs | 4 +++- src/system.rs | 16 ---------------- util/Cargo.toml | 2 +- util/src/lib.rs | 4 +++- 7 files changed, 82 insertions(+), 20 deletions(-) create mode 100644 src/helper.rs delete mode 100644 src/system.rs diff --git a/Cargo.lock b/Cargo.lock index aa2b3ee70a..8d4fef42d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1141,6 +1141,11 @@ name = "faster-hex" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "fixedbitset" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "flatbuffers" version = "0.5.0" @@ -1978,6 +1983,11 @@ dependencies = [ "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ordermap" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "owning_ref" version = "0.4.0" @@ -2027,10 +2037,13 @@ name = "parking_lot_core" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "backtrace 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)", + "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)", + "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2044,6 +2057,15 @@ name = "percent-encoding" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "petgraph" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "phf" version = "0.7.24" @@ -2915,6 +2937,16 @@ dependencies = [ "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "thread-id" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "thread_local" version = "0.3.6" @@ -3526,6 +3558,7 @@ dependencies = [ "checksum faketime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17087bd8c5a4a3e8bd40ecd9d3bda587459abcf67ca94211df09ec8451404cf8" "checksum 
faster-hex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2ea2e4ecc921ec2cbb8b10f0d400fe448554779d2ac5e0bfbb065836d9d8483a" "checksum faster-hex 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b8cccaafb5aae8c282692e5590f341925edea6c696e8715ff0d973320b2646" +"checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33" "checksum flatbuffers 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea0c34f669be9911826facafe996adfda978aeee67285a13556869e2d8b8331f" "checksum flatbuffers-verifier 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c0118b65128a67eb0eeb805f6032d37325659e42198b3d3f34f205eddda73bd1" "checksum flate2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f87e68aa82b2de08a6e037f1385455759df6e445a8df5e005b4297191dbf18aa" @@ -3612,6 +3645,7 @@ dependencies = [ "checksum openssl 0.10.20 (registry+https://github.com/rust-lang/crates.io-index)" = "5a0d6b781aac4ac1bd6cafe2a2f0ad8c16ae8e1dd5184822a16c50139f8838d9" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" "checksum openssl-sys 0.9.43 (registry+https://github.com/rust-lang/crates.io-index)" = "33c86834957dd5b915623e94f2f4ab2c70dd8f6b70679824155d5ae21dbd495d" +"checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" "checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" "checksum parity-multiaddr 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "61ae6944d4435d41f4d0f12108c5cbb9207cbb14bc8f2b4984c6e930dc9c8e41" "checksum parity-multihash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e8eab0287ccde7821e337a124dc5a4f1d6e4c25d10cc91e3f9361615dd95076" @@ -3619,6 +3653,7 @@ dependencies = [ "checksum parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f" "checksum phf 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "b3da44b85f8e8dfaec21adae67f95d93244b2ecf6ad2a692320598dcc8e6dd18" "checksum phf_codegen 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "b03e85129e324ad4166b06b2c7491ae27fe3ec353af72e72cd1654c7225d517e" "checksum phf_generator 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "09364cc93c159b8b06b1f4dd8a4398984503483891b0c26b867cf431fb132662" @@ -3711,6 +3746,7 @@ dependencies = [ "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" "checksum termios 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72b620c5ea021d75a735c943269bb07d30c9b77d6ac6b236bc8b5c496ef05625" "checksum textwrap 0.11.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" "checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" "checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" "checksum tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e9175261fbdb60781fcd388a4d6cc7e14764a2b629a7ad94abb439aed223a44f" diff --git a/src/helper.rs b/src/helper.rs new file mode 100644 index 0000000000..3f860a9298 --- /dev/null +++ b/src/helper.rs @@ -0,0 +1,38 @@ +use ckb_util::{parking_lot::deadlock, Condvar, Mutex}; +use log::warn; +use std::sync::Arc; +use std::thread; +use std::time::Duration; + +pub fn wait_for_exit() { + let exit = Arc::new((Mutex::new(()), Condvar::new())); + + // Handle possible exits + let e = Arc::<(Mutex<()>, Condvar)>::clone(&exit); + let _ = ctrlc::set_handler(move || { + e.1.notify_all(); + }); + + // Wait for signal + let mut l = exit.0.lock(); + exit.1.wait(&mut l); +} + +pub fn deadlock_detection() { + thread::spawn(move || loop { + thread::sleep(Duration::from_secs(10)); + let deadlocks = deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; + } + + warn!("{} deadlocks detected", deadlocks.len()); + for (i, threads) in deadlocks.iter().enumerate() { + warn!("Deadlock #{}", i); + for t in threads { + warn!("Thread Id {:#?}", t.thread_id()); + warn!("{:#?}", t.backtrace()); + } + } + }); +} diff --git a/src/main.rs b/src/main.rs index 4fe998233a..6ae1a79485 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,6 @@ +mod helper; mod setup; mod subcommand; -mod system; use setup::{cli, ExitCode, Setup}; diff --git a/src/subcommand/run.rs b/src/subcommand/run.rs index b78215ab4b..6d5b2d6415 100644 --- a/src/subcommand/run.rs +++ b/src/subcommand/run.rs @@ -1,5 +1,5 @@ +use crate::helper::{deadlock_detection, wait_for_exit}; use crate::setup::{ExitCode, RunArgs}; -use crate::system::wait_for_exit; use ckb_chain::chain::{ChainBuilder, ChainController}; use ckb_db::diskdb::RocksDB; use ckb_miner::BlockAssembler; @@ -15,6 +15,8 @@ use log::info; use std::sync::Arc; pub fn run(args: RunArgs) -> Result<(), ExitCode> { + deadlock_detection(); + let shared = SharedBuilder::>::default() .consensus(args.consensus) .db(&args.config.db) diff --git a/src/system.rs b/src/system.rs deleted file mode 100644 index e02ee7d760..0000000000 --- a/src/system.rs +++ /dev/null @@ -1,16 +0,0 @@ -use ckb_util::{Condvar, Mutex}; -use std::sync::Arc; - -pub fn wait_for_exit() { - let exit = Arc::new((Mutex::new(()), Condvar::new())); - - // Handle possible exits - let e = Arc::<(Mutex<()>, Condvar)>::clone(&exit); - let _ = ctrlc::set_handler(move || { - e.1.notify_all(); - }); - - // Wait for signal - let mut l = exit.0.lock(); - exit.1.wait(&mut l); -} diff --git a/util/Cargo.toml b/util/Cargo.toml index 504b766886..c127b8cfc0 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -6,4 +6,4 @@ authors = ["Nervos Core Dev "] edition = "2018" [dependencies] -parking_lot = "0.7" +parking_lot = {version = "0.7", features = ["deadlock_detection"]} diff --git a/util/src/lib.rs b/util/src/lib.rs index 7774644263..bf111fca80 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs 
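// --- Editor's aside (illustrative sketch, not part of the patch; the util/src/lib.rs
// hunk continues right below) ---
// The detector added in src/helper.rs only reports deadlocks between parking_lot
// locks, and only when the crate is built with the `deadlock_detection` feature
// enabled in util/Cargo.toml above. A toy program that the background checker
// would flag (classic AB/BA lock ordering), assuming the `deadlock_detection()`
// helper above has been spawned:

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let a = Arc::new(Mutex::new(0u32));
    let b = Arc::new(Mutex::new(0u32));

    let (a2, b2) = (Arc::clone(&a), Arc::clone(&b));
    thread::spawn(move || {
        let _ga = a2.lock(); // thread 1 takes A, then B
        thread::sleep(Duration::from_millis(50));
        let _gb = b2.lock();
    });

    let _gb = b.lock(); // main thread takes B, then A
    thread::sleep(Duration::from_millis(50));
    let _ga = a.lock(); // deadlock: reported on the next 10s check
}
// --- end of editor's aside ---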
@@ -1,7 +1,9 @@
 mod unstable;
 pub use crate::unstable::{TryFrom, TryInto};
 
-pub use parking_lot::{Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use parking_lot::{
+    self, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
+};
 
 /// Helper macro for reducing boilerplate code for matching `Option` together
 /// with early return.

From 9995c5c755411be8585e5b91264663fad0aa4da8 Mon Sep 17 00:00:00 2001
From: Linfeng Qian
Date: Wed, 3 Apr 2019 15:30:11 +0800
Subject: [PATCH 23/29] fix: Network error handling

---
 network/src/network.rs                  | 62 +++++++++++++++----------
 network/src/protocols/mod.rs            | 12 +++++
 shared/src/chain_state.rs               |  4 +-
 sync/src/relayer/transaction_process.rs |  3 +-
 4 files changed, 54 insertions(+), 27 deletions(-)

diff --git a/network/src/network.rs b/network/src/network.rs
index 1b1a6fed10..2a7117383c 100644
--- a/network/src/network.rs
+++ b/network/src/network.rs
@@ -338,33 +338,47 @@ pub struct EventHandler {
 }
 
 impl ServiceHandle for EventHandler {
-    fn handle_error(&mut self, _context: &mut ServiceContext, error: ServiceError) {
+    fn handle_error(&mut self, context: &mut ServiceContext, error: ServiceError) {
         warn!(target: "network", "p2p service error: {:?}", error);
-        if let ServiceError::DialerError {
-            ref address,
-            ref error,
-        } = error
-        {
-            debug!(target: "network", "add self address: {:?}", address);
-            if error == &P2pError::ConnectSelf {
-                let addr = address
-                    .iter()
-                    .filter(|proto| match proto {
-                        multiaddr::Protocol::P2p(_) => false,
-                        _ => true,
-                    })
-                    .collect();
-                self.network_state
-                    .listened_addresses
-                    .write()
-                    .insert(addr, std::u8::MAX);
+        match error {
+            ServiceError::DialerError {
+                ref address,
+                ref error,
+            } => {
+                debug!(target: "network", "add self address: {:?}", address);
+                if error == &P2pError::ConnectSelf {
+                    let addr = address
+                        .iter()
+                        .filter(|proto| match proto {
+                            multiaddr::Protocol::P2p(_) => false,
+                            _ => true,
+                        })
+                        .collect();
+                    self.network_state
+                        .listened_addresses
+                        .write()
+                        .insert(addr, std::u8::MAX);
+                }
+                if let Some(peer_id) = extract_peer_id(address) {
+                    self.network_state
+                        .failed_dials
+                        .write()
+                        .insert(peer_id, Instant::now());
+                }
             }
-            if let Some(peer_id) = extract_peer_id(address) {
-                self.network_state
-                    .failed_dials
-                    .write()
-                    .insert(peer_id, Instant::now());
+            ServiceError::ProtocolError { id, .. } => {
+                if let Err(err) = context.control().disconnect(id) {
+                    warn!(target: "network", "send disconnect task(session_id={}) failed, error={:?}", id, err);
+                }
+            }
+            ServiceError::MuxerError {
+                session_context, ..
+            } => {
+                if let Err(err) = context.control().disconnect(session_context.id) {
+                    warn!(target: "network", "send disconnect task(session_id={}) failed, error={:?}", session_context.id, err);
+                }
             }
+            _ => {}
         }
     }
diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs
index 47679622a7..00ff013628 100644
--- a/network/src/protocols/mod.rs
+++ b/network/src/protocols/mod.rs
@@ -20,6 +20,10 @@ use p2p::{
 };
 use std::sync::Arc;
 use std::time::{Duration, Instant};
+use tokio::codec::length_delimited;
+
+// Max message frame length: 20MB
+const MAX_FRAME_LENGTH: usize = 20 * 1024 * 1024;
 
 pub type ProtocolVersion = u32;
 
@@ -76,6 +80,13 @@ impl CKBProtocol {
         MetaBuilder::default()
             .id(self.id)
             .name(move |_| protocol_name.clone())
+            .codec(|| {
+                Box::new(
+                    length_delimited::Builder::new()
+                        .max_frame_length(MAX_FRAME_LENGTH)
+                        .new_codec(),
+                )
+            })
             .support_versions(supported_versions)
             .service_handle(move || {
                 let handler = CKBHandler::new(self.id, self.network_state, self.handler);
@@ -337,6 +348,7 @@ impl CKBProtocolContext for DefaultCKBProtocolContext {
             .network_state
             .get_peer_id(peer_index)
             .ok_or_else(|| PeerError::IndexNotFound(peer_index))?;
+
         let session_id = self
             .network_state
             .peers_registry
diff --git a/shared/src/chain_state.rs b/shared/src/chain_state.rs
index adff0b2052..807640a60e 100644
--- a/shared/src/chain_state.rs
+++ b/shared/src/chain_state.rs
@@ -12,7 +12,7 @@ use ckb_core::transaction::{OutPoint, ProposalShortId, Transaction};
 use ckb_core::Cycle;
 use ckb_verification::{TransactionError, TransactionVerifier};
 use fnv::FnvHashSet;
-use log::error;
+use log::{error, trace};
 use lru_cache::LruCache;
 use numext_fixed_hash::H256;
 use numext_fixed_uint::U256;
@@ -106,7 +106,7 @@ impl ChainState {
         let verify_result = self.verify_rtx(&rtx, max_cycles);
         let tx_hash = tx.hash();
         if self.contains_proposal_id(&short_id) {
-            if !self.filter.insert(tx_hash.clone()) {
+            if !tx_pool.filter.insert(tx_hash.clone()) {
                 trace!(target: "tx_pool", "discarding already known transaction {:#x}", tx_hash);
                 return Err(PoolError::Duplicate);
             }
diff --git a/sync/src/relayer/transaction_process.rs b/sync/src/relayer/transaction_process.rs
index 1903d76fa8..8101301527 100644
--- a/sync/src/relayer/transaction_process.rs
+++ b/sync/src/relayer/transaction_process.rs
@@ -73,7 +73,8 @@ where
                 }
             }
             Err(PoolError::InvalidTx(TransactionError::UnknownInput))
-            | Err(PoolError::InvalidTx(TransactionError::Conflict)) => {
+            | Err(PoolError::InvalidTx(TransactionError::Conflict))
+            | Err(PoolError::Duplicate) => {
                 // this error may occured when peer's tip is different with us,
                 // we can't proof peer is bad so just ignore this
                 debug!(target: "relay", "peer {} relay a conflict or missing input tx: {:?}", self.peer, tx);

From 52c529ecff80fd4051e26a1ab8344a749368c50d Mon Sep 17 00:00:00 2001
From: Linfeng Qian
Date: Wed, 3 Apr 2019 16:11:10 +0800
Subject: [PATCH 24/29] fix: Temporary remove peer eviction logic

---
 sync/src/synchronizer/mod.rs | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs
index 8f31860a28..71edba4e5a 100644
--- a/sync/src/synchronizer/mod.rs
+++ b/sync/src/synchronizer/mod.rs
@@ -629,9 +629,10 @@ impl Synchronizer {
             }
         }
         for peer in eviction {
             warn!(target: "sync", "timeout eviction peer={}", peer);
-            // Do not connect this peer in 3 minutes
-            nc.ban_peer(peer, Duration::from_secs(180));
-            nc.disconnect(peer);
+
+            // FIXME: learn how bitcoin handle this evicted peer, then fix this.
+            // nc.ban_peer(peer, Duration::from_secs(180));
+            // nc.disconnect(peer);
         }
     }

From 973b8aac3511b226edf7073780cf2cac38f29dd0 Mon Sep 17 00:00:00 2001
From: Linfeng Qian
Date: Thu, 4 Apr 2019 14:18:54 +0800
Subject: [PATCH 25/29] fix: Uncomment eviction disconnect code

---
 sync/src/synchronizer/mod.rs | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs
index 71edba4e5a..aa452947bc 100644
--- a/sync/src/synchronizer/mod.rs
+++ b/sync/src/synchronizer/mod.rs
@@ -628,11 +628,8 @@ impl Synchronizer {
             }
         }
         for peer in eviction {
-            warn!(target: "sync", "timeout eviction peer={}", peer);
-
-            // FIXME: learn how bitcoin handle this evicted peer, then fix this.
-            // nc.ban_peer(peer, Duration::from_secs(180));
-            // nc.disconnect(peer);
+            info!(target: "sync", "timeout eviction peer={}", peer);
+            nc.disconnect(peer);
         }
     }

From b00844bede5802875c53b02a2dcce81086686b64 Mon Sep 17 00:00:00 2001
From: Linfeng Qian
Date: Thu, 4 Apr 2019 17:38:21 +0800
Subject: [PATCH 26/29] fix: Remove sender when channel disconnected

---
 network/src/protocols/discovery.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/network/src/protocols/discovery.rs b/network/src/protocols/discovery.rs
index 87b24ec204..b6d97f96dc 100644
--- a/network/src/protocols/discovery.rs
+++ b/network/src/protocols/discovery.rs
@@ -126,6 +126,7 @@ impl ServiceProtocol for DiscoveryProtocol {
                 } else {
                     warn!(target: "network", "other channel error: {:?}", err);
                 }
+                self.discovery_senders.remove(&session.id);
             }
         }
     }

From 4648db29cc001320619d161fbfc02fac626dd2b4 Mon Sep 17 00:00:00 2001
From: Linfeng Qian
Date: Thu, 4 Apr 2019 18:00:05 +0800
Subject: [PATCH 27/29] chore: Downgrade some log level from debug to trace

---
 network/src/protocols/mod.rs           | 4 ++--
 sync/src/synchronizer/block_fetcher.rs | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs
index 00ff013628..f7b3c9083c 100644
--- a/network/src/protocols/mod.rs
+++ b/network/src/protocols/mod.rs
@@ -11,7 +11,7 @@ use crate::{
     NetworkState, PeerIndex, ProtocolContext, ProtocolContextMutRef, ServiceControl, SessionInfo,
 };
 use bytes::Bytes;
-use log::{debug, error, info, warn};
+use log::{trace, debug, error, info, warn};
 use p2p::{
     builder::MetaBuilder,
     service::{ProtocolHandle, ProtocolMeta},
@@ -242,7 +242,7 @@ impl ServiceProtocol for CKBHandler {
                 .map(|peer_index| (peer_id, peer_index))
         }) {
-            debug!(
+            trace!(
                 target: "network",
                 "ckb protocol received, addr: {}, protocol: {}, peer_id: {:?}",
                 session.address,
diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs
index 51b852e412..3477fb66f4 100644
--- a/sync/src/synchronizer/block_fetcher.rs
+++ b/sync/src/synchronizer/block_fetcher.rs
@@ -10,7 +10,7 @@ use ckb_shared::index::ChainIndex;
 use ckb_traits::ChainProvider;
 use ckb_util::try_option;
 use faketime::unix_time_as_millis;
-use log::debug;
+use log::{debug, trace};
 use numext_fixed_hash::H256;
 use numext_fixed_uint::U256;
 use std::cmp;
@@ -48,7 +48,7 @@ where
             .or_insert_with(Default::default);
 
         if inflight.timestamp < unix_time_as_millis().saturating_sub(BLOCK_DOWNLOAD_TIMEOUT) {
-            debug!(target: "sync", "[block downloader] inflight block download timeout");
+            trace!(target: "sync", "[block downloader] inflight block download timeout");
             inflight.clear();
         }
@@ -123,7 +123,7 @@ where
     }
 
     pub fn fetch(self) -> Option> {
-        debug!(target: "sync", "[block downloader] BlockFetcher process");
+
trace!(target: "sync", "[block downloader] BlockFetcher process"); if self.initial_and_check_inflight() { debug!(target: "sync", "[block downloader] inflight count reach limit"); @@ -133,7 +133,7 @@ where let best_known_header = match self.peer_best_known_header() { Some(best_known_header) => best_known_header, _ => { - debug!(target: "sync", "[block downloader] peer_best_known_header not found peer={}", self.peer); + trace!(target: "sync", "[block downloader] peer_best_known_header not found peer={}", self.peer); return None; } }; From 66ac6a1308b1740301340998e8c6c77824d40814 Mon Sep 17 00:00:00 2001 From: piaoliu <441594700@qq.com> Date: Fri, 5 Apr 2019 13:37:46 +0800 Subject: [PATCH 28/29] fix: upgrage p2p dependence, fix cpu abnormal occupation --- Cargo.lock | 44 ++++++++++++++++++++++---------------------- network/Cargo.toml | 10 +++++----- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d4fef42d3..6d4258e3a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -514,11 +514,11 @@ dependencies = [ "snap 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "stop-handler 0.9.0-pre", "tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", - "tentacle-discovery 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", - "tentacle-identify 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", - "tentacle-ping 0.2.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", - "tentacle-secio 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", + "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", + "tentacle-discovery 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", + "tentacle-identify 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", + "tentacle-ping 0.2.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", + "tentacle-secio 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", "tokio 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.2 (git+https://github.com/paritytech/unsigned-varint)", ] @@ -2823,23 +2823,23 @@ dependencies = [ [[package]] name = "tentacle" version = "0.2.0-alpha.1" -source = "git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec#ab661f065dc8667a04f12122250f5fb759872dec" +source = "git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95#53cb765b94041543a9c8582aa4d0d34fb2ac6d95" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "flatbuffers 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-multiaddr 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tentacle-secio 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", + "tentacle-secio 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", "tokio 0.1.18 
(registry+https://github.com/rust-lang/crates.io-index)", "tokio-threadpool 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-yamux 0.1.4 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", + "tokio-yamux 0.1.4 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", ] [[package]] name = "tentacle-discovery" version = "0.1.0" -source = "git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec#ab661f065dc8667a04f12122250f5fb759872dec" +source = "git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95#53cb765b94041543a9c8582aa4d0d34fb2ac6d95" dependencies = [ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2850,7 +2850,7 @@ dependencies = [ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)", - "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", + "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", "tokio 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", "trust-dns 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2858,32 +2858,32 @@ dependencies = [ [[package]] name = "tentacle-identify" version = "0.1.0" -source = "git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec#ab661f065dc8667a04f12122250f5fb759872dec" +source = "git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95#53cb765b94041543a9c8582aa4d0d34fb2ac6d95" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "flatbuffers 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", + "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", "tokio 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tentacle-ping" version = "0.2.0" -source = "git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec#ab661f065dc8667a04f12122250f5fb759872dec" +source = "git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95#53cb765b94041543a9c8582aa4d0d34fb2ac6d95" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "flatbuffers 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "generic-channel 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)", + "tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)", ] [[package]] name = "tentacle-secio" version = "0.1.0" -source = 
"git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec#ab661f065dc8667a04f12122250f5fb759872dec" +source = "git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95#53cb765b94041543a9c8582aa4d0d34fb2ac6d95" dependencies = [ "aes-ctr 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3163,7 +3163,7 @@ dependencies = [ [[package]] name = "tokio-yamux" version = "0.1.4" -source = "git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec#ab661f065dc8667a04f12122250f5fb759872dec" +source = "git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95#53cb765b94041543a9c8582aa4d0d34fb2ac6d95" dependencies = [ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3737,11 +3737,11 @@ dependencies = [ "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" "checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015" "checksum tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b86c784c88d98c801132806dadd3819ed29d8600836c4088e855cdf3e178ed8a" -"checksum tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" -"checksum tentacle-discovery 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" -"checksum tentacle-identify 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" -"checksum tentacle-ping 0.2.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" -"checksum tentacle-secio 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" +"checksum tentacle 0.2.0-alpha.1 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)" = "" +"checksum tentacle-discovery 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)" = "" +"checksum tentacle-identify 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)" = "" +"checksum tentacle-ping 0.2.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)" = "" +"checksum tentacle-secio 0.1.0 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)" = "" "checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" "checksum termios 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72b620c5ea021d75a735c943269bb07d30c9b77d6ac6b236bc8b5c496ef05625" @@ -3765,7 +3765,7 @@ dependencies = [ "checksum tokio-trace-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "350c9edade9830dc185ae48ba45667a445ab59f6167ef6d0254ec9d2430d9dd3" "checksum tokio-udp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "66268575b80f4a4a710ef83d087fdfeeabdce9b74c797535fbac18a2cb906e92" "checksum tokio-uds 
0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" -"checksum tokio-yamux 0.1.4 (git+https://github.com/nervosnetwork/p2p?rev=ab661f065dc8667a04f12122250f5fb759872dec)" = "" +"checksum tokio-yamux 0.1.4 (git+https://github.com/nervosnetwork/p2p?rev=53cb765b94041543a9c8582aa4d0d34fb2ac6d95)" = "" "checksum toml 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "87c5890a989fa47ecdc7bcb4c63a77a82c18f306714104b1decfd722db17b39e" "checksum trust-dns 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "65096825b064877da37eeeb9a83390bd23433eabfc503a6476dc5b1949034aa7" "checksum trust-dns-proto 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09144f0992b0870fa8d2972cc069cbf1e3c0fda64d1f3d45c4d68d0e0b52ad4e" diff --git a/network/Cargo.toml b/network/Cargo.toml index 5d7c770be8..75c5afa6fb 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -23,11 +23,11 @@ tokio = "0.1.18" futures = "0.1" snap = "0.2" crossbeam-channel = "0.3" -p2p = { git = "https://github.com/nervosnetwork/p2p", rev="ab661f065dc8667a04f12122250f5fb759872dec", package="tentacle" } -secio = { git = "https://github.com/nervosnetwork/p2p", rev="ab661f065dc8667a04f12122250f5fb759872dec", package="tentacle-secio" } -p2p-ping = { git = "https://github.com/nervosnetwork/p2p", rev="ab661f065dc8667a04f12122250f5fb759872dec", package="tentacle-ping" } -p2p-discovery = { git = "https://github.com/nervosnetwork/p2p", rev="ab661f065dc8667a04f12122250f5fb759872dec", package="tentacle-discovery" } -p2p-identify = { git = "https://github.com/nervosnetwork/p2p", rev="ab661f065dc8667a04f12122250f5fb759872dec", package="tentacle-identify" } +p2p = { git = "https://github.com/nervosnetwork/p2p", rev="53cb765b94041543a9c8582aa4d0d34fb2ac6d95", package="tentacle" } +secio = { git = "https://github.com/nervosnetwork/p2p", rev="53cb765b94041543a9c8582aa4d0d34fb2ac6d95", package="tentacle-secio" } +p2p-ping = { git = "https://github.com/nervosnetwork/p2p", rev="53cb765b94041543a9c8582aa4d0d34fb2ac6d95", package="tentacle-ping" } +p2p-discovery = { git = "https://github.com/nervosnetwork/p2p", rev="53cb765b94041543a9c8582aa4d0d34fb2ac6d95", package="tentacle-discovery" } +p2p-identify = { git = "https://github.com/nervosnetwork/p2p", rev="53cb765b94041543a9c8582aa4d0d34fb2ac6d95", package="tentacle-identify" } faketime = "0.2.0" rusqlite = {version = "0.16.0", features = ["bundled"]} lazy_static = "1.3.0" From 0aefcc9822d87645819d814a2a9ac7aacfd761e1 Mon Sep 17 00:00:00 2001 From: Linfeng Qian Date: Tue, 9 Apr 2019 11:31:04 +0800 Subject: [PATCH 29/29] fix: rustfmt --- network/src/protocols/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs index f7b3c9083c..2cc61791b3 100644 --- a/network/src/protocols/mod.rs +++ b/network/src/protocols/mod.rs @@ -11,7 +11,7 @@ use crate::{ NetworkState, PeerIndex, ProtocolContext, ProtocolContextMutRef, ServiceControl, SessionInfo, }; use bytes::Bytes; -use log::{trace, debug, error, info, warn}; +use log::{debug, error, info, trace, warn}; use p2p::{ builder::MetaBuilder, service::{ProtocolHandle, ProtocolMeta},
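
A brief aside on the deadlock-detection change introduced earlier in this series (the new src/helper.rs together with the `deadlock_detection` feature enabled in util/Cargo.toml): the sketch below is not part of any patch above. It is a minimal stand-alone program, assuming parking_lot 0.7 with the `deadlock_detection` feature, that forces a lock cycle so the same `deadlock::check_deadlock()` polling used by the new `deadlock_detection()` helper has something to report. The two deliberately conflicting threads and the 1-second poll interval are illustrative choices, not values taken from the patches.

```rust
// Cargo.toml (assumption, mirroring the util/Cargo.toml change above):
// parking_lot = { version = "0.7", features = ["deadlock_detection"] }
use parking_lot::{deadlock, Mutex};
use std::{sync::Arc, thread, time::Duration};

fn main() {
    let a = Arc::new(Mutex::new(()));
    let b = Arc::new(Mutex::new(()));

    // Thread 1 locks a then b; thread 2 locks b then a: a guaranteed lock cycle.
    {
        let (a, b) = (Arc::clone(&a), Arc::clone(&b));
        thread::spawn(move || {
            let _ga = a.lock();
            thread::sleep(Duration::from_millis(100));
            let _gb = b.lock();
        });
    }
    {
        let (a, b) = (Arc::clone(&a), Arc::clone(&b));
        thread::spawn(move || {
            let _gb = b.lock();
            thread::sleep(Duration::from_millis(100));
            let _ga = a.lock();
        });
    }

    // Same polling pattern as helper::deadlock_detection(), shortened for the demo.
    loop {
        thread::sleep(Duration::from_secs(1));
        let deadlocks = deadlock::check_deadlock();
        if !deadlocks.is_empty() {
            eprintln!("{} deadlocks detected", deadlocks.len());
            std::process::exit(1);
        }
    }
}
```

With the feature enabled this should report one deadlock after roughly a second; with the feature off, detection is compiled out, which is why the series flips it on in util/Cargo.toml.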