Showing with 7,103 additions and 2,698 deletions.
  1. +0 −1 .gitignore
  2. +12 −21 .hooks/pre-commit
  3. +1 −1 CONTRIBUTING.md
  4. +114 −57 Cargo.lock
  5. +12 −12 Cargo.toml
  6. +5 −5 README.md
  7. +76 −265 SECURITY.md
  8. +9 −8 api/Cargo.toml
  9. +55 −0 api/src/auth.rs
  10. +335 −0 api/src/foreign.rs
  11. +886 −0 api/src/foreign_rpc.rs
  12. +276 −33 api/src/handlers.rs
  13. +64 −2 api/src/handlers/blocks_api.rs
  14. +211 −2 api/src/handlers/chain_api.rs
  15. +57 −3 api/src/handlers/peers_api.rs
  16. +45 −1 api/src/handlers/pool_api.rs
  17. +12 −9 api/src/handlers/server_api.rs
  18. +44 −5 api/src/handlers/transactions_api.rs
  19. +66 −0 api/src/handlers/utils.rs
  20. +1 −1 api/src/handlers/version_api.rs
  21. +13 −2 api/src/lib.rs
  22. +179 −0 api/src/owner.rs
  23. +430 −0 api/src/owner_rpc.rs
  24. +13 −3 api/src/rest.rs
  25. +1 −1 api/src/router.rs
  26. +12 −9 api/src/types.rs
  27. +2 −0 api/src/web.rs
  28. +7 −6 chain/Cargo.toml
  29. +115 −127 chain/src/chain.rs
  30. +42 −87 chain/src/pipe.rs
  31. +0 −52 chain/src/store.rs
  32. +2 −0 chain/src/txhashset.rs
  33. +239 −0 chain/src/txhashset/bitmap_accumulator.rs
  34. +148 −40 chain/src/txhashset/txhashset.rs
  35. +15 −0 chain/src/txhashset/utxo_view.rs
  36. +99 −52 chain/src/types.rs
  37. +188 −0 chain/tests/bitmap_accumulator.rs
  38. +3 −3 chain/tests/mine_simple_chain.rs
  39. +4 −3 chain/tests/test_coinbase_maturity.rs
  40. +6 −6 config/Cargo.toml
  41. +11 −3 config/src/comments.rs
  42. +45 −9 config/src/config.rs
  43. +1 −1 config/src/types.rs
  44. +5 −5 core/Cargo.toml
  45. +8 −8 core/fuzz/Cargo.lock
  46. +2 −2 core/fuzz/fuzz_targets/block_read_v1.rs
  47. +2 −2 core/fuzz/fuzz_targets/block_read_v2.rs
  48. +3 −2 core/fuzz/fuzz_targets/compact_block_read_v1.rs
  49. +3 −2 core/fuzz/fuzz_targets/compact_block_read_v2.rs
  50. +40 −29 core/src/consensus.rs
  51. +1 −2 core/src/core.rs
  52. +143 −88 core/src/core/block.rs
  53. +2 −2 core/src/core/block_sums.rs
  54. +5 −6 core/src/core/committed.rs
  55. +30 −5 core/src/core/compact_block.rs
  56. +6 −8 core/src/core/hash.rs
  57. +4 −7 core/src/core/id.rs
  58. +1 −1 core/src/core/merkle_proof.rs
  59. +2 −0 core/src/core/pmmr.rs
  60. +7 −0 core/src/core/pmmr/backend.rs
  61. +20 −10 core/src/core/pmmr/pmmr.rs
  62. +19 −13 core/src/core/pmmr/readonly_pmmr.rs
  63. +153 −0 core/src/core/pmmr/vec_backend.rs
  64. +31 −41 core/src/core/transaction.rs
  65. +1 −2 core/src/core/verifier_cache.rs
  66. +7 −9 core/src/genesis.rs
  67. +12 −4 core/src/global.rs
  68. +2 −6 core/src/lib.rs
  69. +4 −13 core/src/libtx/aggsig.rs
  70. +67 −117 core/src/libtx/build.rs
  71. +6 −4 core/src/libtx/error.rs
  72. +9 −9 core/src/libtx/proof.rs
  73. +2 −3 core/src/libtx/reward.rs
  74. +15 −17 core/src/libtx/secp_ser.rs
  75. +7 −7 core/src/macros.rs
  76. +8 −7 core/src/pow.rs
  77. +2 −3 core/src/pow/common.rs
  78. +4 −1 core/src/pow/cuckaroo.rs
  79. +4 −1 core/src/pow/cuckarood.rs
  80. +192 −0 core/src/pow/cuckaroom.rs
  81. +7 −7 core/src/pow/cuckatoo.rs
  82. +27 −15 core/src/pow/siphash.rs
  83. +10 −14 core/src/pow/types.rs
  84. +13 −13 core/src/ser.rs
  85. +16 −24 core/tests/block.rs
  86. +55 −21 core/tests/common.rs
  87. +61 −43 core/tests/consensus.rs
  88. +27 −38 core/tests/core.rs
  89. +3 −3 core/tests/merkle_proof.rs
  90. +40 −38 core/tests/pmmr.rs
  91. +1 −2 core/tests/transaction.rs
  92. +45 −124 core/tests/vec_backend.rs
  93. +2 −4 core/tests/verifier_cache.rs
  94. +3 −3 doc/build.md
  95. +3 −1 doc/build_ES.md
  96. +3 −3 doc/build_JP.md
  97. +3 −3 doc/build_KR.md
  98. +126 −0 doc/build_ZH-CN.md
  99. +2 −2 doc/chain/chain_sync.md
  100. +2 −2 doc/chain/chain_sync_KR.md
  101. +2 −0 doc/code_structure.md
  102. +64 −0 doc/code_structure_ZH-CN.md
  103. +2 −2 doc/coinbase_maturity_KR.md
  104. +2 −2 doc/contract_ideas.md
  105. +1 −1 doc/fast-sync.md
  106. +2 −0 doc/fast-sync_ES.md
  107. +1 −1 doc/fast-sync_KR.md
  108. +15 −0 doc/fast-sync_ZH-CN.md
  109. +6 −6 doc/grin4bitcoiners.md
  110. +6 −6 doc/grin4bitcoiners_KR.md
  111. +60 −0 doc/grin4bitcoiners_ZH-CN.md
  112. +29 −29 doc/intro.md
  113. +27 −27 doc/intro_DE.md
  114. +29 −29 doc/intro_ES.md
  115. +33 −33 doc/intro_JP.md
  116. +23 −23 doc/intro_KR.md
  117. +29 −29 doc/intro_NL.md
  118. +27 −27 doc/intro_PT-BR.md
  119. +3 −3 doc/intro_RU.md
  120. +29 −29 doc/intro_SE.md
  121. +30 −30 doc/intro_ZH-CN.md
  122. +11 −9 doc/merkle.md
  123. +2 −2 doc/merkle_KR.md
  124. +128 −0 doc/merkle_ZH-CN.md
  125. +4 −4 doc/mmr.md
  126. +4 −2 doc/mmr_KR.md
  127. +133 −0 doc/mmr_ZH-CN.md
  128. +9 −9 doc/pruning.md
  129. +3 −3 doc/pruning_KR.md
  130. +61 −0 doc/pruning_ZH-CN.md
  131. +2 −2 doc/release_instruction.md
  132. +1 −1 doc/state.md
  133. +1 −1 doc/state_JP.md
  134. +5 −3 doc/state_KR.md
  135. +45 −0 doc/state_ZH-CN.md
  136. +8 −8 doc/stratum_KR.md
  137. +2 −2 doc/switch_commitment.md
  138. +1 −1 doc/toc.md
  139. +3 −3 keychain/Cargo.toml
  140. +3 −4 keychain/src/extkey_bip32.rs
  141. +0 −2 keychain/src/lib.rs
  142. +5 −2 keychain/src/mnemonic.rs
  143. +2 −2 keychain/src/types.rs
  144. +8 −7 p2p/Cargo.toml
  145. +83 −102 p2p/src/conn.rs
  146. +9 −3 p2p/src/handshake.rs
  147. +55 −26 p2p/src/msg.rs
  148. +29 −40 p2p/src/peer.rs
  149. +65 −58 p2p/src/peers.rs
  150. +43 −45 p2p/src/protocol.rs
  151. +14 −1 p2p/src/serv.rs
  152. +3 −8 p2p/src/types.rs
  153. +7 −7 pool/Cargo.toml
  154. +6 −0 pool/src/pool.rs
  155. +6 −6 pool/src/types.rs
  156. +15 −7 pool/tests/common.rs
  157. +10 −10 servers/Cargo.toml
  158. +1 −1 servers/src/common.rs
  159. +32 −37 servers/src/common/adapters.rs
  160. +24 −2 servers/src/common/stats.rs
  161. +5 −1 servers/src/common/types.rs
  162. +3 −3 servers/src/grin/seed.rs
  163. +40 −32 servers/src/grin/server.rs
  164. +1 −1 servers/src/grin/sync/body_sync.rs
  165. +6 −5 servers/src/grin/sync/header_sync.rs
  166. +14 −2 servers/src/grin/sync/syncer.rs
  167. +35 −27 src/bin/cmd/server.rs
  168. +25 −21 src/bin/grin.rs
  169. +1 −1 src/bin/grin.yml
  170. +3 −0 src/bin/tui/constants.rs
  171. +104 −0 src/bin/tui/logs.rs
  172. +3 −2 src/bin/tui/menu.rs
  173. +1 −0 src/bin/tui/mod.rs
  174. +1 −1 src/bin/tui/peers.rs
  175. +160 −158 src/bin/tui/status.rs
  176. +93 −11 src/bin/tui/table.rs
  177. +29 −11 src/bin/tui/ui.rs
  178. +5 −4 store/Cargo.toml
  179. +4 −8 store/src/leaf_set.rs
  180. +30 −0 store/src/pmmr.rs
  181. +37 −1 store/tests/pmmr.rs
  182. +2 −2 util/Cargo.toml
  183. +1 −1 util/src/lib.rs
  184. +102 −25 util/src/logger.rs
  185. +1 −58 util/src/types.rs
@@ -1,7 +1,6 @@
*.swp
.DS_Store
.grin*
node*
!node_clients
!node_clients.rs
target
@@ -22,38 +22,29 @@ if [ $? != 0 ]; then
exit 1
fi

result=0
problem_files=()

printf "[pre_commit] rustfmt "

# first collect all the files that need reformatting
for file in $(git diff --name-only --cached); do
if [ ${file: -3} == ".rs" ]; then
# first collect all the files that need reformatting
rustfmt --check $file &>/dev/null
if [ $? != 0 ]; then
problem_files+=($file)
result=1
fi
fi
done

# now reformat all the files that need reformatting
for file in ${problem_files[@]}; do
rustfmt $file
done

# and let the user know what just happened (and which files were affected)
printf "\033[0;32mok\033[0m \n"
if [ $result != 0 ]; then
# printf "\033[0;31mrustfmt\033[0m \n"
printf "[pre_commit] the following files were rustfmt'd (not yet committed): \n"

for file in ${problem_files[@]}; do
printf "\033[0;31m $file\033[0m \n"
done
if [ ${#problem_files[@]} == 0 ]; then
# nothing to do
printf "[pre_commit] rustfmt \033[0;32mok\033[0m \n"
else
# reformat the files that need it and re-stage them.
printf "[pre_commit] the following files were rustfmt'd before commit: \n"
for file in ${problem_files[@]}; do
rustfmt $file
git add $file
printf "\033[0;32m $file\033[0m \n"
done
fi

exit 0
# to actually fail the build on rustfmt failure -
# exit $result
@@ -34,7 +34,7 @@ In case of problems with trying out grin, before starting to contribute, there's
* And [see the developers chat](https://gitter.im/grin_community/dev) if you have questions about source code files.
If you explain what you're looking at and what you want to do, we'll try to help you along the way.
* Also see `docs/*.md` and the folder structure explanations, and [the wiki](https://github.com/mimblewimble/docs/wiki).
* Further information and discussions are in the [Forum](https://www.grin-forum.org/), the [website](https://grin-tech.org), the [mailing list](https://lists.launchpad.net/mimblewimble/) and news channels like the [@grincouncil](https://twitter.com/grincouncil) and a (mostly unfiltered!) Twitter bot that collects headlines, mailing list posts, and reddit posts related to MimbleWimble/Grin: [@grinmw](https://twitter.com/grinmw)
* Further information and discussions are in the [Forum](https://www.grin-forum.org/), the [website](https://grin-tech.org), the [mailing list](https://lists.launchpad.net/mimblewimble/) and news channels like the [@grincouncil](https://twitter.com/grincouncil) and a (mostly unfiltered!) Twitter bot that collects headlines, mailing list posts, and reddit posts related to Mimblewimble/Grin: [@grinmw](https://twitter.com/grinmw)

## Testing

Some generated files are not rendered by default. Learn more.

@@ -1,8 +1,8 @@
[package]
name = "grin"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -32,14 +32,14 @@ term = "0.5"
failure = "0.1"
failure_derive = "0.1"

grin_api = { path = "./api", version = "2.1.0-beta.3" }
grin_config = { path = "./config", version = "2.1.0-beta.3" }
grin_chain = { path = "./chain", version = "2.1.0-beta.3" }
grin_core = { path = "./core", version = "2.1.0-beta.3" }
grin_keychain = { path = "./keychain", version = "2.1.0-beta.3" }
grin_p2p = { path = "./p2p", version = "2.1.0-beta.3" }
grin_servers = { path = "./servers", version = "2.1.0-beta.3" }
grin_util = { path = "./util", version = "2.1.0-beta.3" }
grin_api = { path = "./api", version = "3.0.0" }
grin_config = { path = "./config", version = "3.0.0" }
grin_chain = { path = "./chain", version = "3.0.0" }
grin_core = { path = "./core", version = "3.0.0" }
grin_keychain = { path = "./keychain", version = "3.0.0" }
grin_p2p = { path = "./p2p", version = "3.0.0" }
grin_servers = { path = "./servers", version = "3.0.0" }
grin_util = { path = "./util", version = "3.0.0" }

[target.'cfg(windows)'.dependencies]
cursive = { version = "0.12", default-features = false, features = ["pancurses-backend"] }
@@ -53,5 +53,5 @@ cursive = "0.12"
built = "0.3"

[dev-dependencies]
grin_chain = { path = "./chain", version = "2.1.0-beta.3" }
grin_store = { path = "./store", version = "2.1.0-beta.3" }
grin_chain = { path = "./chain", version = "3.0.0" }
grin_store = { path = "./store", version = "3.0.0" }
@@ -8,17 +8,17 @@

# Grin

Grin is an in-progress implementation of the MimbleWimble protocol. Many characteristics are still undefined but the following constitutes a first set of choices:
Grin is an in-progress implementation of the Mimblewimble protocol. Many characteristics are still undefined but the following constitutes a first set of choices:

* Clean and minimal implementation, and aiming to stay as such.
* Follows the MimbleWimble protocol, which provides great anonymity and scaling characteristics.
* Follows the Mimblewimble protocol, which provides hidden amounts and scaling advantages.
* Cuckoo Cycle proof of work in two variants named Cuckaroo (ASIC-resistant) and Cuckatoo (ASIC-targeted).
* Relatively fast block time: one minute.
* Fixed block reward over time with a decreasing dilution.
* Transaction fees are based on the number of Outputs created/destroyed and total transaction size.
* Smooth curve for difficulty adjustments.

To learn more, read our [introduction to MimbleWimble and Grin](doc/intro.md).
To learn more, read our [introduction to Mimblewimble and Grin](doc/intro.md).

## Status

@@ -31,7 +31,7 @@ To get involved, read our [contributing docs](CONTRIBUTING.md).
Find us:

* Chat: [Gitter](https://gitter.im/grin_community/Lobby).
* Mailing list: join the [~MimbleWimble team](https://launchpad.net/~mimblewimble) and subscribe on Launchpad.
* Mailing list: join the [~Mimblewimble team](https://launchpad.net/~mimblewimble) and subscribe on Launchpad.
* Twitter for the Grin council: [@grincouncil](https://twitter.com/grincouncil)

## Getting Started
@@ -48,7 +48,7 @@ We believe in pull requests, data and scientific research. We do not believe in

## Credits

Tom Elvis Jedusor for the first formulation of MimbleWimble.
Tom Elvis Jedusor for the first formulation of Mimblewimble.

Andrew Poelstra for his related work and improvements.

@@ -2,293 +2,104 @@

Grin has a [code of conduct](CODE_OF_CONDUCT.md) and the handling of vulnerability disclosure is no exception. We are committed to conduct our security process in a professional and civil manner. Public shaming, under-reporting or misrepresentation of vulnerabilities will not be tolerated.

## Responsible Disclosure
## Responsible Disclosure Standard

For all security related issues, Grin has 4 main points of contact:
Grin follows a
[community standard for responsible disclosure](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#the-standard)
in cryptocurrency and related software. This document is a public commitment to
following the standard.

* Daniel Lehnberg, daniel.lehnberg at protonmail.com
* Ignotus Peverell, igno.peverell at protonmail.com
* hashmap, hashmap.dev at protonmail.com
* John Woeltz, joltz at protonmail.com
This standard provides detailed information for:
- [Initial Contact](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#initial-contact):
how the initial contact process works
- [Giving Details](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#giving-details):
what details to include with your disclosure after receiving a response to your
initial contact
- [Setting Dates](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#setting-dates):
details for when to release updates and publicize details of the issue

Send all communications to all parties and expect a reply within 48h. Public keys can be found at the end of this document.
Any expected deviations and necessary clarifications around the standard are
explained in the following sections.

## Vulnerability Handling
## Receiving Disclosures

Upon reception of a vulnerability disclosure, the Grin team will:
Grin is committed to working with researchers who submit security vulnerability
notifications to us to resolve those issues on an appropriate timeline and perform
a coordinated release, giving credit to the reporter if they would like.

* Reply within a 48h window.
* Within a week, a [CVSS v3](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) severity score should be attributed.
* Keep communicating regularly about the state of a fix, especially for High or Critical severity vulnerabilities.
* Once a fix has been identified, agree on a timeline for release and public disclosure.
Please submit issues to all of the following main points of contact for
security related issues according to the
[initial contact](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#initial-contact)
and [details](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#giving-details)
guidelines. More information is available about the
[expected timelines for the full disclosure cycle](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#standard-disclosure-timelines).

Releasing a fix should include the following steps:
For all security related issues, Grin has 3 main points of contact:

* Creation of a CVE number for all Medium and above severity vulnerabilities.
* Notify all package maintainers or distributors.
* Inclusion of a vulnerability explanation, the CVE and the security researcher or team who found the vulnerability in release notes and project vulnerability list (link TBD).
* Publicize the vulnerability commensurately with severity and encourage fast upgrades (possibly with additional documentation to explain who is affected, the risks and what to do about it).
* Daniel Lehnberg, daniel.lehnberg at protonmail.com [PGP key](https://github.com/mimblewimble/grin-security/blob/master/keys/lehnberg.asc)
* hashmap, hashmap.dev at protonmail.com [PGP key](https://github.com/mimblewimble/grin-security/blob/master/keys/hashmap.asc)
* John Woeltz, joltz at protonmail.com [PGP key](https://github.com/mimblewimble/grin-security/blob/master/keys/j01tz.asc)

_Note: Before Grin mainnet is released, we will be taking some liberty in applying the above steps, notably in issuing a CVE and upgrades._
Send all communications PGP encrypted to all parties.

## Recognition and Bug Bounties

As of this writing, Grin is a **traditional open source project** with limited to no direct funding. As such, we have little means with which to compensate security researchers for their contributions. We recognize this is a shame and intend to do our best to still make these worthwhile by:

* Advertising the vulnerability, the researchers, or their team on a public page linked from our website, with links of their choosing.
* Acting as reference whenever this is needed.
* Setting up retroactive bounties whenever possible.
## Sending Disclosures

It is our hope that after mainnet release, participants in the ecosystem will be willing to more widely donate to benefit the further development of Grin. When this is the case we will:
In the case where we become aware of security issues affecting other projects
that have never affected Grin, our intention is to inform those projects of
security issues on a best-effort basis.

* Setup a bounty program.
* Decide on the amounts rewarded based on available funds and CVSS score.
In the case where we fix a security issue in Grin that also affects the
following neighboring projects, our intention is to engage in responsible
disclosures with them as described in the adopted
[standard](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#a-standard-for-responsible-disclosure-in-cryptocurrency-and-related-software),
subject to the deviations described in the
[deviations section](#deviations-from-the-standard) of this document.

## Code Reviews and Audits
## Bilateral Responsible Disclosure Agreements

While we intend to undergo more formal audits before release, continued code reviews and audits are required for security. As such, we encourage interested security researchers to:
_Grin does not currently have any established bilateral disclosure agreements._

* Review our code, even if no contributions are planned.
* Publish their findings whichever way they choose, even if no particular bug or vulnerability was found. We can all learn from new sets of eyes and benefit from increased scrutiny.
* Audit the project publicly. While we may disagree with some small points of design or trade-offs, we will always do so respectfully.

## Chain Splits

The Grin Team runs a chain split monitoring tool at (TBD). It is encouraged to monitor it regularly and setup alerts. In case of an accidental chain split:
## Recognition and Bug Bounties

* Exchanges and merchants should either cease operation or extend considerably confirmation delays.
* Miners and mining pools should immediately consult with Grin's development team on regular channels (Grin's Gitter mainly) to diagnose the split and determine a course of events.
* In the likely event of an emergency software patch, all actors should upgrade as soon as possible.
Grin's responsible disclosure standard includes some general language about
[Bounty Payments](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#bounty-payments)
and [Acknowledgements](https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/82e08d2736ea9dbe43484a3317e4bce214163bd0#acknowledgements).

## Useful References
Grin is a **traditional open source project with limited to no direct funding**.
As such, we have little means with which to compensate security researchers for
their contributions. We recognize this is a shame and intend to do our best to
still make these worthwhile by:

* [Reducing the Risks of Catastrophic Cryptocurrency Bugs](https://medium.com/mit-media-lab-digital-currency-initiative/reducing-the-risk-of-catastrophic-cryptocurrency-bugs-dcdd493c7569)
* [Security Process for Open Source Projects](https://alexgaynor.net/2013/oct/19/security-process-open-source-projects/)
* [Choose-Your-Own-Security-Disclosure-Adventure](http://hackingdistributed.com/2018/05/30/choose-your-own-security-disclosure-adventure/)
* [CVE HOWTO](https://github.com/RedHatProductSecurity/CVE-HOWTO)
* [National Vulnerability Database](https://nvd.nist.gov/)
* Advertising the vulnerability, the researchers, or their team on a public
page linked from our website, with links of their choosing.
* Acting as reference whenever this is needed.
* Setting up retroactive bounties whenever possible.

## Public Keys
There is not currently a formal bug bounty program for Grin as it would require
a high level of resources and engagement to operate in good faith. More
[funding](https://grin-tech.org/funding) can help provide the necessary
resources to run one in the future for the Grin community.

### Daniel Lehnberg
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
## Deviations from the Standard

mQINBFuWAuMBEACqRebggT91uazP/jzmKOD/UyVwxaXBtEcWt1/hp9fi1azLxGBn
FVSihIM47oSLjb2K7spCL+ssLFw66QKq79xbXbdimn8cWvALIQJe7OQRs5YKibvD
wJ60WW8TR5oh0hAgcsKLfNjXjzBPmAu94CtEQXSXlsJQjsJzjRC8TdUBsRr4SmG9
MoHYIAiDRBH7zBFJemBIhwDHYmZVMkQnn8SsQnXfT3I+GGlXdaxifzZL1ZMZz/sg
N7BPdcm+BRkUVBUVNOZlwKA+bbEijUtzaBx40esAPlCWqnz7A7kGs0iwq9N5HLpC
T+S1eWKl28yv/BCQoIQI1FDF9GF4GtSjZ0ABsaQKKa61Wlj/s9/QMBjqpcZi5iIM
vn1kgvltMiU2qNEL6CZIMstA2KekgRkG3OEJc82StG41XD8w7mLBowXBKj4B6l9y
78bWqBZNuORPJtR3P9JTlyHbXob5XATO93246EeFc98gWy8KaGVghtGeEMvf9JGK
z0zOLAzs48VkOkhfia5abV8UqI9WIy0m92S5TYmsCGOOHzOjwWCJwIgNXHmO2Owo
7Vbb8UbGv8uWC+x678cDKYtaXmmoawBEd0nsb+qzb5FKKfy2CXyGczs0xRxspmTc
EevvJsRC+yT8UQw7lFj6TvnsPvtf6fATDcbNOtPkWTBJTKwn1q4yOX7WUwARAQAB
tDBEYW5pZWwgTGVobmJlcmcgPGRhbmllbC5sZWhuYmVyZ0Bwcm90b25tYWlsLmNv
bT6JAlQEEwEIAD4WIQT5WWYxqRL33k+ahAGiEGR+lkd4dAUCW5YC4wIbAwUJAeEz
gAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCiEGR+lkd4dCO1EACVp9U7LL+I
xfFGT4DosjnJjK4ddcNulmcWoePykMP1X2n6gMjc/5B5p+ICbYz/yutIxCvjKVmS
isULnA2i3G3DQnbFsDfWUbarZWrsPCPCpl2Zq5VYylnU/9EAK4Ng8EyuShRpw30C
weXGkyBjPHjhKPrt3mJqloBn0PJq4KHGB/HCgYCG3mVwgMADoJWW5iMNjAmtk5db
BWzP8YOclJXq22JD49080PAGsx1pfi9M3mtTMuSfmWxUFUzrC2BOZVGxPBQqNL+Q
gns9Ot66V95BG+OJ7IFCfq+J97hE11xt4R4C4VBmQRmbRI7zkG9GbvzT5p0R6XTt
PmXIhtU5bzuzbJCQwGPBIsiyL6ZFEDanmRyYH6kI2SRPNyjRpq3VZuR2tSWx1094
Cp/cpq8IHv3vvnTlpsVyKPnkVA3Z+82ktndhZQxJ4tgEei9WZQ5+8y6zMSN0S1DW
ujaXbW07kef0InLQkpEXtz7iRwqQOiQj5ybD4+WtaoO5wztnZPX3bEWsFy+6l4ed
6Jll/dBRfM/p8UNUemrpK7MgjlMo57baZKK41FtMjGSxN0q7LgPsYQeWuixMfKiu
aNCwEvhG5tJrElnw/a06uHbVRLc5eDKeya+TRnY70G1yuiBxFOXAjW+iIEVdBdvm
rPtharXy+S15QMKV1FTE7sEKLUIKkcJJArkCDQRblgLjARAAyYigRjtKGxyME+i0
5rQNC2uiqwExlk4/fe+EY1x5sKFuw/iYeT6oH6kYFsAi53m5pMmfN/nfZDODwZNU
5QMzR0Gg0UcFLLuB0A7oQUPQALjLWjc37azuvQ1d0hW2+kINJpUFFuC94OOTvhCK
Vsk8FmeIOezIo4kJ0MGc0yF1lprI4n61T6TYT9/TTUuQba1C41PnUnsP7I2mqDSG
KK3wfr1si5hAObn2ypr9GjVjuHJOiQJOekaEL0MrjyKFoYOKpAQ4Ixm8mv1+4fE/
Zj4EAc/BOPFqLAHi/8K5HzX5ybhMKYVjhDPhW1Hr7peoM8jlW38GQWFytxIJOQHl
2DW9vhFY4DCcJmDvdV/JVFhTgpdblW0ttiBLq3HT1+5fMGtuu0cj4cTvsk4qWfEP
cnbN4HnPM2UATpNl1/iNgI3OwQGKmMzLLcJbNgwGzVXyv4SY6KFrXQcXshxjDnKq
4nVMvZTYX1h5dG++WIzjRsO2Pb8NsUrGUmTzarRuFFcLKJw3sEfnpuemytB7QBoK
4PipVN7WnOPmc43Uckg23VioDOXyEAuW5NWmQf7YofLk52D1sThkTRdLVYkzAsPy
9EMJGISSnaJ4aYYOHfj223ihJGM5nRoXgK2RDN6HH21sEHOnTWgszSuy3fVfW9zF
7shAIfZl1D1StGL1Dmne8S7D4qcAEQEAAYkCPAQYAQgAJhYhBPlZZjGpEvfeT5qE
AaIQZH6WR3h0BQJblgLjAhsMBQkB4TOAAAoJEKIQZH6WR3h0OVsP/ihV4m2efHJS
lDF9oF62s2anCq7o2qNqEoXRLxrSrTCO03sKRNI9HQxcO1FFMtsqgWj/zvTsSfrL
5A1LPk7pQn5XNM5GQlgkbFuXcit2WmxlegkYf2HRnPLMNBVIf6jlGYwFyPOKWh9G
5M7xYidJ7x6Rncq9lfUFHXKw8VYBbGAAnqRIT9cTb9FO5RCd+OZxpjN5vWdpmLnB
gfHLRyKa5NI4pD3UBewsBQRlhJzc2lB0z0bxtuRYZMMeXnJd81SXVzEJfGFxjnUH
ltFFbEMp8lfeiM1Ura5uKhDN2p3wi65IRnhR1I8L5h0YdY3wSPKpAI1/RicLZgLy
6UoBMn9zNwCQ/6f4OVcTtVhwhn4OFU4NA+94Q02XrjxH9kLouBy/GUFhvFxkBKbY
wK+ELkpcuAbezhfoSSxRTf5nr2CGWx8KeS7Q6miYR4r/Az2vsQ611JZZl16OP124
Gbek/LMhk++RylExIkz3WX2skhjDlquvH9wN9bG1RtG7lUB/m6/edJhCTTtgB51/
zgLZf95GXYNUoYv6oQztxvH8Ynn6Srx7ZMb4ByhXwl0ENvD+/B/QMBGIRgUIyw1d
SXoznX4iwnjCE3u1xY8szQCUo8zEw8CbKQD+2f7ePfVadoAw3zuCde5unkItb/ux
Yt4GsNSSB0khmbq31wIGbll/ZGsSH60h
=pLZJ
-----END PGP PUBLIC KEY BLOCK-----
Grin is a technology that provides strong privacy with zero-knowledge
commitments and rangeproofs. Due to the nature of the cryptography used, if a
counterfeiting bug results, it could be exploited without a way to identify
which data was corrupted. This renders rollbacks or other fork-based attempted
fixes ineffective.

```
### Ignotus Peverell
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
The standard describes reporters of vulnerabilities including full details of
an issue, in order to reproduce it. This is necessary for instance in the case
of an external researcher both demonstrating and proving that there really is a
security issue, and that security issue really has the impact that they say it
has - allowing the development team to accurately prioritize and resolve the issue.

mQINBFgG9rsBEADijVjWEAYpzrUDQEgCvBJOehcwbBcHD8QgtoCbREGysIdNN64Y
Gh8Ni/69EDfWJvE0Te6IJfsvtoRPPdsZrRqYiJUIEBmGRlOroSjMDgJnXWyjzWnO
AK4zOGfhjaFUaZFIyrZ4fHWln2CWWnj5QzzJ5TeYf04bIJB3/NVdgGFKDtkMkOpj
A74oJEt2BQG1QfYUVCg42Uak0FKP7Vjju98iSZUIO/8cWsSfo5IasQPLq5vU/5Xw
hAxccH5uOX9DruEU9X1FuSfhEFs4z2yCq9lz6ID16BVsYtoVnHmrHxi2uGWreYA5
SE+drBSM3bM4mVx3SSWyLWoaUyyTGhjayipUQmMrgzYAYiAZ8kZB95gr2BusRmln
pdbzyEY4v3UQIkHdmBNHLm/SwHl7acuqQBQt2eLnAr9CKUv/14j3A4mhwPC0uKIi
7McCg/OsUeRo5MpKdoabgn/xJ/tsXFcHUwFjBS5j0z1esNlpe4uR9nDC011YrYFB
LOacOYk30nKaktSosmC3GjjTfRjd5lTW/iAo9834EB/FSrIJksgPBVyWwyTi1NZO
MnNwtrf2JUa3X6R0II0PEAra6iUS/o3KgZRVuywXhsXeMwuq6+KKCNnk3XmdAd0P
GOhJIIAzRvRX/RglV9nLN7BwhCQFshQtyDtE7Vg3mlW1vK8OYWZAGXo6KQARAQAB
tC9JZ25vdHVzIFBldmVyZWxsIDxpZ25vLnBldmVyZWxsQHByb3Rvbm1haWwuY29t
PokCOAQTAQIAIgUCWAb2uwIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ
mc0l85+PghH+ihAA1rtTmHt5o277or6/kTvMe4XAr2tTodfEKYO77+fdRVxBbt3H
yx7wodOcRfT7caZyaEf9COvZvNj53RgRAJMiGRQ0s4Pjxg0FjtB2C8JnWe79k181
FMXD+5I1G5xPE5HBQlP0P4kUviDDw2hDDAHuqsEv0VHWULBquneGXgLQ483zeU+R
seEYRK8jEYhYH4dFij3EsikidCq7BqO8wYdJ3+Vx/k6Lc3TUKVwfXlHKMg6D+FXO
L0IAv+OUTsqZ3is1YBhGtA9llMM4Lh5jQQPJfUor3yy8WTLFAtXKQqlEzZ2D/uSq
yY1T3YWDjSo6KBYtu20dM2wJq6IpZ/NbZQ6WMrZzXstSAbSVx/lruiRk2MgjzVLc
NmikdgfIPIurghkC3r6dRI1GpAK+c0bwjM6eJ1KMUPxrGeemLLmE3KiYGNrxek3F
SDMKg5guzEnXLvG+7FiBEYVNyaKe4O+aX45NYg5QN0FvCym7+d/Aekx1/E70c5hX
eiYAIEvmTyhfgPk8wh1Xk/BLhIGq+JVZPEU6hc5kGoJjmAkcrrC/WktfWJjHv3IY
pq/hc4ZLjEmsQhqyCfCMSjcCPeOJUUhjQEu+5Z+hhOfQZPIMJZF+WgK+mXf5SXnf
HI6avuOw2JrTufKMZKlZEm6W2FVGfyv8axgMBMLWnJNUCHOmYy+ZFfo43jC5Ag0E
WAb2uwEQALfj+YjVYJB+4xFyTe5cx8k1UZIcb+69rzlaEHlT+Z1JGcj/Tk12ou81
zpGY7tCHKMRtT5Kwg6PqXyUDeqVuEAzqaz5atHp03BkSCsMhIVWDE4YeQ4GT7GTq
ygT/RwSxRzjsghgbeUTUR43s5gFH0H7iOo89H2FKwJL3HUIN5ySE0X1ecPD2mVx1
7ejf1pblRmaG27fCwnJmzSQF2U6MLPjzM+f47ZVTvky0EIuckqNYNal/zaAQdHbP
XYtDawWYDKFs1M7w+uLz2rL80b1PZugvqqTwpx2zS7VMR+hPOnkPtu//1pADylx9
yw0MqOAymvDKms6EijivDnQqH9kAVXoWqKPjW/bK06JL3QEdhz/HL5Q4PWsLZICQ
pF5kdrhGHenQfu/8iAAdwpMeKrTedYoisePCVC8tjB/UsZtRaTAQKMhpCccQhXTZ
OUcOxB3o54B59rP3OkuI2RFpW2AS6vsOmZCmmIulT4cRk+g+dMi35Rwn33jo8qSl
U2med4kh78zeEvZo+M6dBQffCKSZV6icbUZPPnig4/5mLKUKFu3qIpIr32wx85D/
DwN+lNMiZ13fQdXgs2PMVxqUlhufY4lCt76HEmECuD/Fpy5bTGl5bp0/vIN4Tzk3
jbwBz7dybcbSQ3eg82vxT0cO69BSjJyn8SmmsRkJKe/5kJkGbzwRABEBAAGJAh8E
GAECAAkFAlgG9rsCGwwACgkQmc0l85+PghFB1hAAo/uI+aSAwXS1hi0KpcsS9rZy
1I9kZQglhFJkcqu1T5o//MimVjbZJAikGkqgwDYOyvRI/FwfIWPL267apq6Dgz/6
+AFzu3+tsDQE7h53HTE+JqYOckV8bu8NNWgpd3pVFhiFO8p9ZcBEDRzaMcMLmPT8
56w1lJYprwdUBl70x1axD3SWiQAhGNxJShAaPLUjE1c9sPAvoBLr9VIYlWEXdb5A
lJ3x7fqqPxN+c+Eg7CxsP6WSuC324Vvp1LLtJuCIGuAK8HRrXYmku+FHTMOqxBPW
Hc70tcWkN8KFfEoJ3UFWtdbhitaRpiFnCPCYQnLGCjrD0XLWMBZ0R/62yFPfCGON
G+0CoIkjXJWtnxIy1z5r25uhTQw0/KB+lhQSsXKIll1endsZphw65s/JV2uLZDeu
iMMA5SR7/iahqYO8zWbIacsfHe+QKlnpmbt8pWNafIWnuzS7meDGazez6NGCqTkq
QiNUAyTcySxSCzLOSDUXqoWdtjwFJK0Rcw35nai3INHcIXiAzEjFqfJEAwIkmHiG
nUhS7RlSYqdJ5KYL5NJkWPldSAm3EowObfqyVFgfsFYIIqDbbWC0Frh+wqBqgFqn
J9bS6yqTXy8jZEP7k14ztcpzVaXdFGzMj3yAk6CXUXrxtJwGbkQjyOO5DaghiPZv
VaXAUOUL1MeJOiXI96Q=
=xdp/
-----END PGP PUBLIC KEY BLOCK-----
```
### hashmap
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
In the case of a counterfeiting or privacy-breaking bug, however, we might decide
not to include those details with our reports to partners ahead of coordinated
release, so long as we are sure that they are vulnerable.

xsFNBF0JT20BEADBd71TiSmjdfAOaOiku4b7Qs5vo9wRthTIbufIiUcK/5mg
6Dkii31YjZxDXcTvt4Er9luZsJ4ynUBDfyCo8NeUar9o2DGv3CC0bWQ4uSWZ
so8ZhaFn3VPHfQBj82s5q7saQmq1wTW6qPCDuT8osm+PN0XJvLWdNrdBwWEj
5zDDse1vJ+m2gt+TKrN18LFKMevCEDDahjTqcHyh7Ps5m8pO70u0L/h0STpL
dKxurNqoKvgNDBNuUTgd7aWNyaqdZ/QQRM8lojE02RRwd4fqscKj+GGivhlL
3rDd3oNacFn0pUIGkrqcELmvEhK592U53zuQW0HJRgx7vOkAao/vwnVTDfOY
U2N7vzcpHVk68TCnBreW1o5UHkzlxNcxU8Luv9tXxufVaB1agHVWef6Oju6V
TJIcteKMiatTUQi/EfO2vy4E+6PbmNzCxOVeyxLXbcFVFthhZqk2+sW97Owc
r1WsuBcNA9fbUHRUs3Fe2vbatB2I/TW5naiZWACOkLwDcip8UZWz2YE98O32
HK0335ANRrFlM+8tMXjRhKWyWK5jvmTNxhlEE8eqjskJjk3yK00+UElzkz7D
ot8WQWcosbKzBinDiC4ZsxUVFTnqLl+oWZgetci2XDHWH9fWGv8KbX+hAUbP
jshNfIIY9bfO2jqdIkRL96R4oo1FVxV9uNjl3wARAQABzTdoYXNobWFwLmRl
dkBwcm90b25tYWlsLmNvbSA8aGFzaG1hcC5kZXZAcHJvdG9ubWFpbC5jb20+
wsF1BBABCAAfBQJdCU9tBgsJBwgDAgQVCAoCAxYCAQIZAQIbAwIeAQAKCRA3
h0ARV3ZFef1sD/0QeymTRUVp/k1HZzmRw+TeRH2DQt81DNrkdB7ylhJgjLzs
fftpSAX9E5n6+915MG0tMGtZgDRjUp4OBQTtXue093cJm4R3i4zn6kKCkIpn
hpnk9LdlUdFFZogQj9irUpG4vhbBJuxThxKjVHiFfjWIzgfnwrWz1rd5mdkD
HDg4Vyhvgu3wif+cMpyCZXCVD/0czNGVh8bQLA8POl/fKHOvrP7pnOE4KDHC
HOOUdzhmWqHoh4Yzlgyg07K+Ef7JunA+czGWKpVVOYG+K8ZHp/qA6Rfoy2g5
aCunwFvPWFi4qz2nk4HhMwuTHF493LCFZsKCQx96Yiy8fSC4n7nVqi2uhx3r
beBJ96/oKHqkILbpjbm+5uSTmQjsb6XBtYoS96ujXAhR1EJOM5PIz1ceajK8
MuoR/clqgHH10+DzvnsXEIaXp3cPVpKtnypCT1vipRI6r5XISibYNmHbHYcW
qBYWYvXvqMijr+ETFUADO6oUsFm5eWkqIBtnv3oxi9HcD43GtgeAG53B07Wi
YA1DnQVhhSE9FOce0AWXLs+eho8X3pITPlUHDxPNHdObc8VAYG7dZkKJo2AU
WxsJJnMhNGbHC3uNG6owCdaus8FDrc9vbFFkmadryLKqHyNVNgUOoufxSHie
zQ3GkO/bXdwG4ZwrzqriX5qopqwcB8DQyTQU0s7BTQRdCU9tARAArFncxKFn
IL7IYQPKWhOkhNpex5FRhbeuB9FWJ2diQJwLOSL/TIxTm0iX9AciU5Xz5o1b
q6+Cj7i1+af0ZO1Oyhjn40ha11faonyT6ebB6hpsHpU433ifRLFz4ksQGacM
xZSDJJbf+3LoLWLJ0SDDd82arQq1VLNeiNUaOfADOa/3pwAGYFn3q2gvAHJ2
XC1N2Om0utTANcQH1RRiUWe2gvpO2ZjzSB9IeZ1chk2TWvekdtwWCImWryxt
NK1ISODCbgNSxJEnOgKJp/A+B3rxzDk5naRORdsxQo8V6dewqQrnp84DveTH
RpOZvEN5M5P/69wv0WgKortkNYlknMubJ+If7NYd9rEIQqRI3vHtkMisDfDu
XP+TUhiIvMPRuH/sC5rzRhfuQ6kl/C/fm+PeOfv3sROfjGyvqvfgfhr4lnBV
2haMJTO0wpzTR3uj19gH0FdEe5zTAaSjIkI/Jzk5oFk8yJhaG0brzgAIJ9Nc
9Szm3iXWmNZ+ECPURZyZ0M8mnZ0FGTaMDYxNgJzpvSvZNJ3bHvk6riTt924r
jMqJt18EBlHlMqijE0KK7UCb0xnAiyWGHqg6AL0NVVv9zb7Fo2gQ2XeALgPV
TFX2m6ooUe+2+k+nOQiaWx3P+g3BJ8UsWmyPDlMNV3sVpdbK2SxcpVniBxxX
S55gFCiA/cAR09MAEQEAAcLBXwQYAQgACQUCXQlPbQIbDAAKCRA3h0ARV3ZF
eRb+D/9HqCmvci0Hb4W+kj0pjPKC9+UrNRTFehk9AjSo2apozsj6jEm/VxQ6
TSe791Pog2uHRIxBsdJMJGeQweJPlIppj8P7u3jSFoJzCqjcA4gw74fX/wrj
seic093LF6Kj54ZTcbamwDG2QzYoG4nmDo9vGeSnH4Laep+hnTmt0Z4DNAZL
597G56kz9z0cEpqUuKX8o4+KjyxMvY8s/Fyl3r3H6wQklBORIjtOFZGxMKrL
iG4u7S0kSKeb+EuJnMJ1TwconYoQbyw/6YpB4NDAXjI8omamDgXVq7K1Tq0d
B4yfT77/oEsynwYvtAJuOqTUnl9P5qxMxsaz37b0XZAH3LBP3kMAF854b1di
EcQ2qEt+WfC8aD1ggq0fV9OcQsB7bdgKEQjFvmu6B3X6zVTavKx+2BT4Yf1I
sP653T0MA18j96O4RRxlAEOW+1j3p6XsNRTDuAuWzmpdq/E2KcfdJ11q9EDn
JXtRgfeOoXe79uBZftbIKwNZRy9DAyCUTpQR7V9EGppz37b7sYswLXJGOlwE
5siUjvePbo0wA9isBEWu0SqQddgFKbUFeLl0YFLFiJU7EHuTSdw/mirToK59
mie8azMPT2b90c5pBBBz9FqUkMHPLdJKR0UuaZGbGC/D2TKv928KSrymjlaQ
cN4UNoeD4hpgWl16VHn1wtOl5AEGkg==
=/+Vo
-----END PGP PUBLIC KEY BLOCK-----
```
### John Woeltz
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
## More Information

mQINBF0cFe0BEACkDxjFLQmLI2v8BglkF4sbrSZtqO4jUvSMB2bCW84p+Hl7/XOK
4fgiqOoyLMIFqq8o3p7rQD2zqV43CvSZbtXz/GXXybHm8MzRRGBOj4iY5tIfwUEP
pVRCyZ7tPh8B0Y/fsY9Cn652tl3QnH+SX7yrNNfszwAmKT2qVRb5tGTknhWNpEeZ
gGh/lEUrru01iXt/vA2Vjsx215x1JVotZtpOYFgbe2VfNlrqzxBVQysV1IO9/TfB
ziOxQ1oCqvypKKL+M1HLmwj18fUJywwkukZJOxMhsIkHdc7tOZn5lxT8V/PYXK8w
Rs/YJ90pGN850bsxsAw2KgVdqkk2G6vSH8UkhL00/KORYxqchh0PoOSMq7P6Vnka
+uZA8xulOlbGoupyX/r9PYrqvV96xaXCxPdjztmDCgi4lXwa6d0PzJnWrqKFu4gk
IiieHnVsimR6daPePRXkjSWN5VQX8QU9xPiK6/FuoKm6JQhFQEMkM8zDyi9A+L1E
FryaoRsUocJdwVdPYTGogFiIBO+4ny3pEhIZJdnSWewoX7GhOldPgrT66zUvX7Uq
U+evfuGFQOhAUByN8XWtJ0ws1fwqiENaoD4FYMSwYIcrdxFelTnfQoaHTZINntRp
mAy1s/x2i84qv6c+5urjjOc7b/SxnlnMcHtlDI+gt7AcV3Ew6sXQVXO+bwARAQAB
tCJKb2huIFdvZWx0eiA8am9sdHpAcHJvdG9ubWFpbC5jb20+iQJUBBMBCgA+FiEE
pwlDvRCYWLUDTiOsmWn1cMLvYW8FAl0cFe0CGwMFCQWiy9MFCwkIBwMFFQoJCAsF
FgIDAQACHgECF4AACgkQmWn1cMLvYW8F4g/+OkqbxnPXAAwzY24YzBsfhMGlTsWA
l08AkKRGStbsUzOyGSkNBQq4TDFd2A8bHdw/9w8g0OVs7Dddj5S5EXoNF2MhsCzK
r6IBaU9vMDhochZCsX44TbemttD/XW5LSQ04YHuQgPP7ESDAllkKaKgiopoRCHnv
GwBsXVwOlY75uwHkZBlR5tqYmFTLrlvZZppf8YsLxRPf7RmpMa29A+/tZieurN0n
5k3DKsAP16QcxLdHDfuZovmKjUW0HEzUZ2qhxY4n0JyUuGrU58q02gy1vm2OZW/3
4h/WIZ94UUbQQBRESI8o+8VpsVN8dJRqI7TzJnChWVMnxl8XE0nZAgddrf/91xvS
U0NhP/MgW5/VQpWyu/45vsckTCgtHQA6mQ/pn5tBR+8nEhCa8SWRJIEvNKcAuA2o
ErNLbxhmUv6vH9PNbRLNtt1njQnihU8IBUIHcBK94t8O3T7jAxluwDrDao2t10fe
/ILO7gxWZyFwhlAEvMd7arNu/8bQ027gANEBYpI8o/cn8CkhKQIEG8Uq2vJJk4yw
S4rbojQgLksID5zat58MP9PBaf9yTe9zI2p0Xe4m+cR794vvKK0wGuAWqtdKnUO9
Fgh6qcg4cxOE5xiY208zE60ILBi4ayJ4Uo/1QHcTLtJy4tNknheQ9GYcSipb3rgu
DdyDBSuMwkwdx1a5Ag0EXRwV7QEQAOd20rfa3/yeh8m1BZjJ/2oxUlB9wd4ZOsVz
yyPEXir7JsJaw3LQXYcWeR9MNCZrmUERnkbkZmOFZvaHEYnt+GYepk/fY2kHiTJ/
/D8TwKCmbO5mddpSDPRvMtWbYHWKfZI+NvnWOx4Pd8FkjlQ9qYDQsZOEKTRRh/48
M+0HE2dum3jSFc5mIN0OnvT9BXtk3B+2DcCKe6tM8uvEPdYXxJIosu9kfLxDeXdA
Pk9cF1rgDDWvYmgJdDXlV++l4FlS13Me5mvZP/NuIdf9qeCHT3ikQqBCOjl/Zc0c
FlH5VZk3yqu5NuDKNKa4vc2qmr2haPUotgeyX7mqMIXQGJLt97bd7u+7IIhkVz2S
hbZk4TO02x3hVqxHQtH5BDFxWqoDSoMuVfSm0QVDNNqSFZuEPjjbdjXuv9f3AIwJ
Jn1GNXxh+JdKxnOMmlbFp6s0qCvt6oetye3mKtOrk50PBZv9EDaZr8Sj6IoYP2TE
GfjVxxzMcKPhmS8DkV8yH3TlVwEzR9pbgt7MwK6uz/QH0FEhjQnYKWcfX4Mcjx75
BffBbhVAlv7hIJd7ymXR7E8grfIxx6K8Qk9pW1WWBxkEmfrClla+tu8W7rZpy2Ts
bfRzLrpcr2pTSFgpH7qKFZAFY4VTCT0Ecn50ObNWvXExSyr3udoI3olMtPVw1O9v
XK3yfUDRABEBAAGJAjwEGAEKACYWIQSnCUO9EJhYtQNOI6yZafVwwu9hbwUCXRwV
7QIbDAUJBaLL0wAKCRCZafVwwu9hbxzVD/4jhYx62WIjO6cFKMHC7xpIUgfubJxe
2mx502Iyf/nnmqBAv9COGERxqFcMyK7TijtPIVHQqhVJwROYOOYLyA/DnJtyezAt
JivvSZmQJ2pi1aMMvqdQEkoDiUy753mnIRnwEBCALqGLLEb6k1JZAXmhiL2vy5ie
pJ/nWgFKuf4t+CFov17790uEpMTCqLYuUJ5PdteAwOjnXoX+VVeqX/LiYXQ0XggQ
LCMlWTZJaSFfUbaOi+qouuIsLUldeptZAh/Ll3Y0NXkWyeoMx2p85lARrOxuGKBe
LTV/uPljaRf2s3zu7fkNA2BWB0jS2jJnPglpywNqcTQbozACLQmhaBKcxJMKkNT3
LPX7vlrxIrJ6UGQDdmfCa/AOhbqhp/Cdd6p+W627PQux8v+QP5SvCbWQ8/8/HQoW
cL/iWHWv4X4QpWmekrmtsohTOkKR0sJXjYlZFq4IQ25lWLYCOfi2BRJdrmxNmZ+S
ELYyPsg/8R9g4QSYeSaNlIOoVVB5zt8fCRUb8P1gYR8lvA57TbwDMre5Ev38JK3a
6a0/6+BRHw6gfeHZdywYQdvmz+AdfsBBTr3E3lVEpfUDm0jPvXneD3a2HG3jA/ym
Rjpq9ALXjCK/h57vaZExeWItWV75kFSFucWfr/wCKkOS8MLPXUhhrtWuigAdOpXa
OXCaSbMWSCzC0g==
=BniA
-----END PGP PUBLIC KEY BLOCK-----
```
Additional security-related information about the Grin project including previous
audits, CVEs, canaries, signatures and PGP public keys can be found in the
[grin-security](https://github.com/mimblewimble/grin-security) repository.
@@ -1,15 +1,16 @@
[package]
name = "grin_api"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "APIs for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "APIs for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
workspace = ".."
edition = "2018"

[dependencies]
easy-jsonrpc-mw = "0.5.3"
failure = "0.1.1"
failure_derive = "0.1.1"
hyper = "0.12"
@@ -31,9 +32,9 @@ futures = "0.1.21"
rustls = "0.13"
url = "1.7.0"

grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_chain = { path = "../chain", version = "2.1.0-beta.3" }
grin_p2p = { path = "../p2p", version = "2.1.0-beta.3" }
grin_pool = { path = "../pool", version = "2.1.0-beta.3" }
grin_store = { path = "../store", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "3.0.0" }
grin_chain = { path = "../chain", version = "3.0.0" }
grin_p2p = { path = "../p2p", version = "3.0.0" }
grin_pool = { path = "../pool", version = "3.0.0" }
grin_store = { path = "../store", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }
@@ -22,6 +22,8 @@ use ring::constant_time::verify_slices_are_equal;
lazy_static! {
/// Realm value sent in the `WWW-Authenticate` header of 401 responses
/// for the main (owner) API.
pub static ref GRIN_BASIC_REALM: HeaderValue =
HeaderValue::from_str("Basic realm=GrinAPI").unwrap();
/// Realm value sent in the `WWW-Authenticate` header of 401 responses
/// for the foreign API.
pub static ref GRIN_FOREIGN_BASIC_REALM: HeaderValue =
HeaderValue::from_str("Basic realm=GrinForeignAPI").unwrap();
}

// Basic Authentication Middleware
@@ -78,6 +80,59 @@ impl Handler for BasicAuthMiddleware {
}
}

/// Basic Authentication middleware that guards a single URI.
///
/// Requests whose path equals `target_uri` must present an `Authorization`
/// header matching `api_basic_auth`; all other requests pass through
/// unauthenticated (see the `Handler` impl).
pub struct BasicAuthURIMiddleware {
	api_basic_auth: String,
	basic_realm: &'static HeaderValue,
	target_uri: String,
}

impl BasicAuthURIMiddleware {
	/// Builds the middleware from the expected `Authorization` header value,
	/// the realm used in 401 challenges, and the single path to protect.
	pub fn new(
		api_basic_auth: String,
		basic_realm: &'static HeaderValue,
		target_uri: String,
	) -> Self {
		Self {
			api_basic_auth,
			basic_realm,
			target_uri,
		}
	}
}

impl Handler for BasicAuthURIMiddleware {
	/// Enforces HTTP Basic auth on requests targeting `target_uri`.
	///
	/// * `OPTIONS` (CORS preflight) requests always pass through.
	/// * Requests for any other path pass through untouched.
	/// * Requests for `target_uri` must carry an `Authorization` header whose
	///   bytes equal the configured credentials; the comparison is
	///   constant-time (`verify_slices_are_equal`) so a mismatch does not leak
	///   how much of the secret matched. Otherwise a 401 with the configured
	///   realm is returned.
	fn call(
		&self,
		req: Request<Body>,
		mut handlers: Box<dyn Iterator<Item = HandlerObj>>,
	) -> ResponseFuture {
		let next_handler = match handlers.next() {
			Some(h) => h,
			None => return response(StatusCode::INTERNAL_SERVER_ERROR, "no handler found"),
		};
		// CORS preflight must succeed without credentials.
		if req.method().as_str() == "OPTIONS" {
			return next_handler.call(req, handlers);
		}
		// Only the configured URI is protected; everything else passes through.
		if req.uri().path() != self.target_uri {
			return next_handler.call(req, handlers);
		}
		// `as_bytes()` already yields `&[u8]`; the original code took an extra
		// needless reference on both arguments below.
		let authorized = req.headers().contains_key(AUTHORIZATION)
			&& verify_slices_are_equal(
				req.headers()[AUTHORIZATION].as_bytes(),
				self.api_basic_auth.as_bytes(),
			)
			.is_ok();
		if authorized {
			next_handler.call(req, handlers)
		} else {
			// Unauthorized 401 carrying the `WWW-Authenticate` challenge.
			unauthorized_response(self.basic_realm)
		}
	}
}

fn unauthorized_response(basic_realm: &HeaderValue) -> ResponseFuture {
let response = Response::builder()
.status(StatusCode::UNAUTHORIZED)
@@ -0,0 +1,335 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Foreign API External Definition

use crate::chain::{Chain, SyncState};
use crate::core::core::hash::Hash;
use crate::core::core::transaction::Transaction;
use crate::handlers::blocks_api::{BlockHandler, HeaderHandler};
use crate::handlers::chain_api::{ChainHandler, KernelHandler, OutputHandler};
use crate::handlers::pool_api::PoolHandler;
use crate::handlers::transactions_api::TxHashSetHandler;
use crate::handlers::version_api::VersionHandler;
use crate::pool::{self, PoolEntry};
use crate::rest::*;
use crate::types::{
BlockHeaderPrintable, BlockPrintable, LocatedTxKernel, OutputListing, OutputPrintable, Tip,
Version,
};
use crate::util::RwLock;
use std::sync::Weak;

/// Main interface into all node API functions.
/// Node APIs are split into two separate blocks of functionality
/// called the ['Owner'](struct.Owner.html) and ['Foreign'](struct.Foreign.html) APIs.
///
/// Methods in this API are intended to be 'single use'.
///

pub struct Foreign {
/// Non-owning reference to the chain.
pub chain: Weak<Chain>,
/// Non-owning reference to the shared transaction pool.
pub tx_pool: Weak<RwLock<pool::TransactionPool>>,
/// Non-owning reference to the node's sync state.
pub sync_state: Weak<SyncState>,
}

impl Foreign {
	/// Create a new API instance with the chain, transaction pool and `sync_state`.
	/// All subsequent API calls will operate on this instance of node API.
	///
	/// # Arguments
	/// * `chain` - A non-owning reference of the chain.
	/// * `tx_pool` - A non-owning reference of the transaction pool.
	/// * `sync_state` - A non-owning reference of the `sync_state`.
	///
	/// # Returns
	/// * An instance of the node API holding references to the current chain,
	///   transaction pool and sync state.
	pub fn new(
		chain: Weak<Chain>,
		tx_pool: Weak<RwLock<pool::TransactionPool>>,
		sync_state: Weak<SyncState>,
	) -> Self {
		Foreign {
			chain,
			tx_pool,
			sync_state,
		}
	}

	/// Gets block header given either a height, a hash or an unspent output commitment.
	/// Only one parameter is needed. If multiple parameters are provided only the
	/// first one in the list is used.
	///
	/// # Arguments
	/// * `height` - block height.
	/// * `hash` - block hash.
	/// * `commit` - output commitment.
	///
	/// # Returns
	/// * Result Containing:
	/// * A [`BlockHeaderPrintable`](types/struct.BlockHeaderPrintable.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_header(
		&self,
		height: Option<u64>,
		hash: Option<Hash>,
		commit: Option<String>,
	) -> Result<BlockHeaderPrintable, Error> {
		let header_handler = HeaderHandler {
			chain: self.chain.clone(),
		};
		let hash = header_handler.parse_inputs(height, hash, commit)?;
		header_handler.get_header_v2(&hash)
	}

	/// Gets block details given either a height, a hash or an unspent output commitment.
	/// Only one parameter is needed. If multiple parameters are provided only the
	/// first one in the list is used.
	///
	/// # Arguments
	/// * `height` - block height.
	/// * `hash` - block hash.
	/// * `commit` - output commitment.
	///
	/// # Returns
	/// * Result Containing:
	/// * A [`BlockPrintable`](types/struct.BlockPrintable.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_block(
		&self,
		height: Option<u64>,
		hash: Option<Hash>,
		commit: Option<String>,
	) -> Result<BlockPrintable, Error> {
		let block_handler = BlockHandler {
			chain: self.chain.clone(),
		};
		let hash = block_handler.parse_inputs(height, hash, commit)?;
		block_handler.get_block(&hash, true, true)
	}

	/// Returns the node version and block header version (used by grin-wallet).
	///
	/// # Returns
	/// * Result Containing:
	/// * A [`Version`](types/struct.Version.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_version(&self) -> Result<Version, Error> {
		let version_handler = VersionHandler {
			chain: self.chain.clone(),
		};
		version_handler.get_version()
	}

	/// Returns details about the state of the current fork tip.
	///
	/// # Returns
	/// * Result Containing:
	/// * A [`Tip`](types/struct.Tip.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_tip(&self) -> Result<Tip, Error> {
		let chain_handler = ChainHandler {
			chain: self.chain.clone(),
		};
		chain_handler.get_tip()
	}

	/// Returns a [`LocatedTxKernel`](types/struct.LocatedTxKernel.html) based on the kernel excess.
	/// The `min_height` and `max_height` parameters are both optional.
	/// If not supplied, `min_height` will be set to 0 and `max_height` will be set to the head of the chain.
	/// The method will start at the block height `max_height` and traverse the kernel MMR backwards,
	/// until either the kernel is found or `min_height` is reached.
	///
	/// # Arguments
	/// * `excess` - kernel excess to look for.
	/// * `min_height` - minimum height to stop the lookup.
	/// * `max_height` - maximum height to start the lookup.
	///
	/// # Returns
	/// * Result Containing:
	/// * A [`LocatedTxKernel`](types/struct.LocatedTxKernel.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_kernel(
		&self,
		excess: String,
		min_height: Option<u64>,
		max_height: Option<u64>,
	) -> Result<LocatedTxKernel, Error> {
		let kernel_handler = KernelHandler {
			chain: self.chain.clone(),
		};
		kernel_handler.get_kernel_v2(excess, min_height, max_height)
	}

	/// Retrieves details about specific outputs. Supports retrieval of multiple
	/// outputs in a single request, by both commitment string and block height.
	///
	/// # Arguments
	/// * `commits` - a vector of unspent output commitments.
	/// * `start_height` - start height to start the lookup.
	/// * `end_height` - end height to stop the lookup.
	/// * `include_proof` - whether or not to include the range proof in the response.
	/// * `include_merkle_proof` - whether or not to include the merkle proof in the response.
	///
	/// # Returns
	/// * Result Containing:
	/// * An [`OutputPrintable`](types/struct.OutputPrintable.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_outputs(
		&self,
		commits: Option<Vec<String>>,
		start_height: Option<u64>,
		end_height: Option<u64>,
		include_proof: Option<bool>,
		include_merkle_proof: Option<bool>,
	) -> Result<Vec<OutputPrintable>, Error> {
		let output_handler = OutputHandler {
			chain: self.chain.clone(),
		};
		output_handler.get_outputs_v2(
			commits,
			start_height,
			end_height,
			include_proof,
			include_merkle_proof,
		)
	}

	/// UTXO traversal. Retrieves last utxos since a `start_index` until a `max`.
	///
	/// # Arguments
	/// * `start_index` - start index in the MMR.
	/// * `end_index` - optional index at which to stop in the MMR.
	/// * `max` - max index in the MMR.
	/// * `include_proof` - whether or not to include the range proof in the response.
	///
	/// # Returns
	/// * Result Containing:
	/// * An [`OutputListing`](types/struct.OutputListing.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_unspent_outputs(
		&self,
		start_index: u64,
		end_index: Option<u64>,
		max: u64,
		include_proof: Option<bool>,
	) -> Result<OutputListing, Error> {
		let output_handler = OutputHandler {
			chain: self.chain.clone(),
		};
		output_handler.get_unspent_outputs(start_index, end_index, max, include_proof)
	}

	/// Retrieves the PMMR indices based on the provided block height(s).
	///
	/// # Arguments
	/// * `start_block_height` - first block height of the range.
	/// * `end_block_height` - optional last block height of the range.
	///
	/// # Returns
	/// * Result Containing:
	/// * An [`OutputListing`](types/struct.OutputListing.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_pmmr_indices(
		&self,
		start_block_height: u64,
		end_block_height: Option<u64>,
	) -> Result<OutputListing, Error> {
		let txhashset_handler = TxHashSetHandler {
			chain: self.chain.clone(),
		};
		txhashset_handler.block_height_range_to_pmmr_indices(start_block_height, end_block_height)
	}

	/// Returns the number of transactions in the transaction pool.
	///
	/// # Returns
	/// * Result Containing:
	/// * `usize`
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_pool_size(&self) -> Result<usize, Error> {
		let pool_handler = PoolHandler {
			tx_pool: self.tx_pool.clone(),
		};
		pool_handler.get_pool_size()
	}

	/// Returns the number of transactions in the stem transaction pool.
	///
	/// # Returns
	/// * Result Containing:
	/// * `usize`
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_stempool_size(&self) -> Result<usize, Error> {
		let pool_handler = PoolHandler {
			tx_pool: self.tx_pool.clone(),
		};
		pool_handler.get_stempool_size()
	}

	/// Returns the unconfirmed transactions in the transaction pool.
	/// Will not return transactions in the stempool.
	///
	/// # Returns
	/// * Result Containing:
	/// * A vector of [`PoolEntry`](types/struct.PoolEntry.html)
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn get_unconfirmed_transactions(&self) -> Result<Vec<PoolEntry>, Error> {
		let pool_handler = PoolHandler {
			tx_pool: self.tx_pool.clone(),
		};
		pool_handler.get_unconfirmed_transactions()
	}

	/// Push new transaction to our local transaction pool.
	///
	/// # Arguments
	/// * `tx` - the Grin transaction to push.
	/// * `fluff` - boolean to bypass Dandelion relay.
	///
	/// # Returns
	/// * Result Containing:
	/// * `Ok(())` if the transaction was pushed successfully
	/// * or [`Error`](struct.Error.html) if an error is encountered.
	pub fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), Error> {
		let pool_handler = PoolHandler {
			tx_pool: self.tx_pool.clone(),
		};
		pool_handler.push_transaction(tx, fluff)
	}
}
@@ -0,0 +1,886 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! JSON-RPC Stub generation for the Foreign API

use crate::core::core::hash::Hash;
use crate::core::core::transaction::Transaction;
use crate::foreign::Foreign;
use crate::pool::PoolEntry;
use crate::rest::ErrorKind;
use crate::types::{
BlockHeaderPrintable, BlockPrintable, LocatedTxKernel, OutputListing, OutputPrintable, Tip,
Version,
};
use crate::util;

/// Public definition used to generate Node jsonrpc api.
/// * When running `grin` with defaults, the V2 api is available at
/// `localhost:3413/v2/foreign`
/// * The endpoint only supports POST operations, with the json-rpc request as the body
#[easy_jsonrpc_mw::rpc]
pub trait ForeignRpc: Sync + Send {
/**
Networked version of [Foreign::get_header](struct.Node.html#method.get_header).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_header",
"params": [null, "00000100c54dcb7a9cbb03aaf55da511aca2c98b801ffd45046b3991e4f697f9", null],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"cuckoo_solution": [
9886309,
35936712,
43170402,
48069549,
70022151,
97464262,
107044653,
108342481,
118947913,
130828808,
144192311,
149269998,
179888206,
180736988,
207416734,
227431174,
238941623,
245603454,
261819503,
280895459,
284655965,
293675096,
297070583,
299129598,
302141405,
313482158,
321703003,
351704938,
376529742,
381955038,
383597880,
408364901,
423241240,
436882285,
442043438,
446377997,
470779425,
473427731,
477149621,
483204863,
496335498,
534567776
],
"edge_bits": 29,
"hash": "00000100c54dcb7a9cbb03aaf55da511aca2c98b801ffd45046b3991e4f697f9",
"height": 374336,
"kernel_root": "d294e6017b9905b288dc62f6f725c864665391c41da20a18a371e3492c448b88",
"nonce": 4715085839955132421,
"output_root": "12464313f7cd758a7761f65b2837e9b9af62ad4060c97180555bfc7e7e5808fa",
"prev_root": "e22090fefaece85df1441e62179af097458e2bdcf600f8629b977470db1b6db1",
"previous": "0000015957d92c9e04c6f3aec8c5b9976f3d25f52ff459c630a01a643af4a88c",
"range_proof_root": "4fd9a9189e0965aa9cdeb9cf7873ecd9e6586eac1dd9ca3915bc50824a253b02",
"secondary_scaling": 561,
"timestamp": "2019-10-03T16:08:11+00:00",
"total_difficulty": 1133587428693359,
"total_kernel_offset": "0320b6f8a4a4180ed79ecd67c8059c1d7bd74afe144d225395857386e5822314",
"version": 2
}
}
}
# "#
# );
```
*/
fn get_header(
&self,
height: Option<u64>,
hash: Option<String>,
commit: Option<String>,
) -> Result<BlockHeaderPrintable, ErrorKind>;

/**
Networked version of [Foreign::get_block](struct.Node.html#method.get_block).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_block",
"params": [374274, null, null],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"header": {
"cuckoo_solution": [
1263501,
14648727,
42430559,
58137254,
68666726,
72784903,
101936839,
104273571,
123886748,
131179768,
155443226,
162493783,
164784425,
167313215,
169806918,
183041591,
184403611,
210351649,
215159650,
239995384,
240935454,
257742462,
280820644,
300143903,
303146496,
311804841,
341039986,
354918290,
363508555,
377618528,
396693709,
397417856,
399875872,
413238540,
413767813,
432697194,
436903767,
447257325,
453337210,
459401597,
496068509,
511300624
],
"edge_bits": 29,
"hash": "000001e16cb374e38c979c353a0aaffbf5b939da7688f69ad99efda6c112ea9b",
"height": 374274,
"kernel_root": "e17920c0e456a6feebf19e24a46f510a85f21cb60e81012f843c00fe2c4cad6e",
"nonce": 4354431877761457166,
"output_root": "1e9daee31b80c6b83573eacfd3048a4af57c614bd36f9acd5fb50fbd236beb16",
"prev_root": "9827b8ffab942e264b6ac81f2b487e3de65e411145c514092ce783df9344fa8a",
"previous": "00001266a73ba6a8032ef8b4d4f5508407ffb1c270c105dac06f4669c17af020",
"range_proof_root": "3491b8c46a3919df637a636ca72824377f89c4967dcfe4857379a4a82b510069",
"secondary_scaling": 571,
"timestamp": "2019-10-03T15:15:35+00:00",
"total_difficulty": 1133438031814173,
"total_kernel_offset": "63315ca0be65c9f6ddf2d3306876caf9f458a01d1a0bf50cc4d3c9b699161958",
"version": 2
},
"inputs": [],
"kernels": [
{
"excess": "08761e9cb1eea5bfcf771d1218b5ec802798d6eecaf75faae50ba3a1997aaef009",
"excess_sig": "971317046c533d21dff3e449cc9380c2be10b0274f70e009aa2453f755239e3299883c09a1785b15a141d89d563cdd59395886c7d63aba9c2b6438575555e2c4",
"features": "Coinbase",
"fee": 0,
"lock_height": 0
}
],
"outputs": [
{
"block_height": 374274,
"commit": "09d33615563ba2d65acc2b295a024337166b9f520122d49730c73e8bfb43017610",
"merkle_proof": "00000000003e6f5e000000000000000f60fe09a7601a519d9be71135404580ad9de0964c70a7619b1731dca2cd8c1ae1dce9f544df671d63ff0e05b58f070cb48e163ca8f44fb4446c9fe1fc9cfef90e4b81e7119e8cf60acb9515363ecaea1ce20d2a8ea2f6f638f79a33a19d0d7b54cfff3daf8d21c243ba4ccd2c0fbda833edfa2506b1b326059d124e0c2e27cda90268e66f2dcc7576efac9ebbb831894d7776c191671c3294c2ca0af23201498a7f5ce98d5440ca24116b40ac98b1c5e38b28c8b560afc4f4684b81ab34f8cf162201040d4779195ba0e4967d1dd8184b579208e9ebebafa2f5004c51f5902a94bf268fd498f0247e8ba1a46efec8d88fa44d5ecb206fbe728ee56c24af36442eba416ea4d06e1ea267309bc2e6f961c57069e2525d17e78748254729d7fdec56720aa85fe6d89b2756a7eeed0a7aa5d13cfb874e3c65576ec8a15d6df17d7d4856653696b10fb9ec205f5e4d1c7a1f3e2dd2994b12eeed93e84776d8dcd8a5d78aecd4f96ae95c0b090d104adf2aa84f0a1fbd8d319fea5476d1a306b2800716e60b00115a5cca678617361c5a89660b4536c56254bc8dd7035d96f05de62b042d16acaeff57c111fdf243b859984063e3fcfdf40c4c4a52889706857a7c3e90e264f30f40cc87bd20e74689f14284bc5ea0a540950dfcc8d33c503477eb1c60",
"mmr_index": 4091742,
"output_type": "Coinbase",
"proof": "7adae7bcecf735c70eaa21e8fdce1d3c83d7b593f082fc29e16ff2c64ee5aaa15b682e5583257cf351de457dda8f877f4d8c1492af3aaf25cf5f496fce7ca54a0ef78cc61c4252c490386f3c69132960e9edc811add6415a6026d53d604414a5f4dd330a63fcbb005ba908a45b2fb1950a9529f793405832e57c89a36d3920715bc2d43db16a718ecd19aeb23428b5d3eeb89d73c28272a7f2b39b8923e777d8eb2c5ce9872353ba026dc79fdb093a6538868b4d184215afc29a9f90548f9c32aa663f9197fea1cadbb28d40d35ed79947b4b2b722e30e877a15aa2ecf95896faad173af2e2795b36ce342dfdacf13a2f4f273ab9927371f52913367d1d58246a0c35c8f0d2330fcddb9eec34c277b1cfdaf7639eec2095930b2adef17e0eb94f32e071bf1c607d2ef1757d66647477335188e5afc058c07fe0440a67804fbdd5d35d850391ead3e9c8a3136ae1c42a33d5b01fb2c6ec84a465df3f74358cbc28542036ae4ef3e63046fbd2bce6b12f829ed193fb51ea87790e88f1ea686d943c46714b076fb8c6be7c577bca5b2792e63d5f7b8f6018730b6f9ddaf5758a5fa6a3859d68b317ad4383719211e78f2ca832fd34c6a222a8488e40519179209ad1979f3095b7b7ba7f57e81c371989a4ace465149b0fe576d89473bc596c54cee663fbf78196e7eb31e4d56604c5226e9242a68bda95e1b45473c52f63fe865901839e82079a9935e25fe8d44e339484ba0a62d20857c6b3f15ab5c56b59c7523b63f86fa8977e3f4c35dc8b1c446c48a28947f9d9bd9992763404bcba95f94b45d643f07bb7c352bfad30809c741938b103a44218696206ca1e18f0b10b222d8685cc1ed89d5fdb0c7258b66486e35c0fd560a678864fd64c642b2b689a0c46d1be6b402265b7808cd61a95c2b4a4df280e3f0ec090197fb039d32538d05d3f0a082f5",
"proof_hash": "cfd97db403c274220bb0dbaf3ecc88e483c0b707d8e6f16dfda37cd4f2c3211c",
"spent": false
}
]
}
}
}
# "#
# );
```
*/
fn get_block(
&self,
height: Option<u64>,
hash: Option<String>,
commit: Option<String>,
) -> Result<BlockPrintable, ErrorKind>;

/**
Networked version of [Foreign::get_version](struct.Node.html#method.get_version).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_version",
"params": [],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"node_version": "2.1.0-beta.2",
"block_header_version": 2
}
}
}
# "#
# );
```
*/
fn get_version(&self) -> Result<Version, ErrorKind>;

/**
Networked version of [Foreign::get_tip](struct.Node.html#method.get_tip).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_tip",
"params": [],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"height": 374350,
"last_block_pushed": "000000543c69a0306b5463b92939643442a44a6d9be5bef72bea9fc1d718d310",
"prev_block_to_last": "000001237c6bac162f1add2b122fab6a254b9fcc2c4b4c8c632a8c39855521f1",
"total_difficulty": 1133621604919005
}
}
}
# "#
# );
```
*/
fn get_tip(&self) -> Result<Tip, ErrorKind>;

/**
Networked version of [Foreign::get_kernel](struct.Node.html#method.get_kernel).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_kernel",
"params": ["09c868a2fed619580f296e91d2819b6b3ae61ab734bf3d9c3eafa6d9700f00361b", null, null],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"height": 374557,
"mmr_index": 2211662,
"tx_kernel": {
"excess": "09c868a2fed619580f296e91d2819b6b3ae61ab734bf3d9c3eafa6d9700f00361b",
"excess_sig": "1720ec1b94aa5d6ba4d567f7446314f9a6d064eea69c5675cc5659f65f290d80b0e9e3a48d818cadba0a4e894bbc6eb6754b56f53813e2ee0b1447969894ca4a",
"features": "Coinbase"
}
}
}
}
# "#
# );
```
*/
fn get_kernel(
&self,
excess: String,
min_height: Option<u64>,
max_height: Option<u64>,
) -> Result<LocatedTxKernel, ErrorKind>;

/**
Networked version of [Foreign::get_outputs](struct.Node.html#method.get_outputs).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_outputs",
"params": [
[
"09bab2bdba2e6aed690b5eda11accc13c06723ca5965bb460c5f2383655989af3f",
"08ecd94ae293863286e99d37f4685f07369bc084ba74d5c59c7f15359a75c84c03"
],
376150,
376154,
true,
true
],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": [
{
"block_height": 374568,
"commit": "09bab2bdba2e6aed690b5eda11accc13c06723ca5965bb460c5f2383655989af3f",
"merkle_proof": null,
"mmr_index": 4093403,
"output_type": "Transaction",
"proof": "e30aa961d6f89361a9a3c60f73e3551f50a3887212e524b5874ac50c1759bb95bc8e588d82dd51d84c7cbaa9abe79e0b8fe902bcfda17276c24d269fbf636aa2016c65a760a02e18338a33e83dec8e51fbfd953ee5b765d97ce39ba0850790d2104812a1d15d5eaa174de548144d3a7d413906d85e22f89065ef727910ee4c573494520c43e36e83dacee8096666aa4033b5e8322e72930c3f8476bb7be9aef0838a2ad6c28f4f5212708bf3e5954fc3971d66b7835383b96406fa65415b64ecd53a747f41d785c3e3615c18dfdbe39a0920fefcf6bc55fe65b4b215b1ad98c80fdafbef6f21ab60596f2d9a3e7bc45d750e807d5eb883dadde1625d4f20af9f1315b8bea08c97fad922afe2000c84c9eb5f96b2a24da7a637f95c1102ecfc1257e19bc4120082f5ee76448c90abd55108256f8341e0f4009cfc3906a598de465467ee1ee072bfd3384e1a0b9039192d1edc33092d7b09d1164c4fc4c378227a391600a8a5d5ba5fe36a2a4eabe0dbae270aefa5a5f2df810cda79211805206ad93ae08689e2675aad025db3499d43f1effc110dfb2f540ccd6eb972c02f98e8151535c099381c8aeb1ea8aad2cfdf952e6ab9d26e74a5611d943d02315e212eb06ce2cd20b4675e6f245e5302cdb8b31d46bb2e718b50ecfad2d440323826570447c2498376c8bad6e4ee97bde41c47f6a20eea406d758c53fb9e8542f114c1a277a6335ad97fdc542c6bbec756dc4a9085c319fe6f0c9e1bb043f01a43c12aa6f4dff8b1220e7f16bc56dee9ccb59fb7c3b7aa6bb33b41c33d8e4b03b6b9cb89491504210dd691b46ffe2862387339d2b62a9dc4c20d629e23eb8b06490c4999433c1b4626fb4d21517072bd8e82511c115ee47bf9a5e40f0a74177f5b573db2e277459877a01b172e026cbb3f76aaf0c61f244584f3a76804dea62175a80d777238",
"proof_hash": "660d706330fc36f611c50d90cb965fddf750cc91f8891a58b5e39b83a5fc6b46",
"spent": false
},
{
"block_height": 376151,
"commit": "08ecd94ae293863286e99d37f4685f07369bc084ba74d5c59c7f15359a75c84c03",
"merkle_proof": "6b2abbd334c9d75409461fba9c1acd4a8d7bc2ab0bc43143f42388b2a3a87b881505ccf8ffc8737fa6fd4fe412a082d974911bd223eae612d0d1d7ddcc09b5e6079c40b011405b2ccb49ce32473c93aea6d843488d5765fea114d3368d34cd05fcb8c2de3903fbaf39b1f064c809f9f1c0d47959d81a508957040eda55c6dce6dd8c43a79c72faffacfabe1d73055790b6249de2f7c603f186cb109eee58fb1426ea48cb781f88df9acd8996d235fe6bfe60e02aae6e3bfe38ed2599baca1430b3b637072d9bdcdc7644f873728e3cd38eff7124ea848cfad67f8e114cf8595c89a3686a4271cfb2b5098597c315c01d04270ca8f70262af967a947f49adacfa4aad8b6fd196dd0ef4e5cefa132c38c7e5f43db12b3d74f0a8d83c3404e73c6b25a12bff70a8ef4526c89b1558810bb744ede53f8c4cc8cc2555e953637722adb41ea5752281cf1f75599f7e59b17f11f5f9ce4f6b2da4141a3398f51d8b834cdc8b00f61915a41d200572a10bb2102cbae7e94aa7ced3c388dcd58282932c99a8fa66f6fc511ff3e8c60d442bbdb49cca1166328ca8c9bbc97d024570b4cc1ca6c7dba3db223e9e27fd9345b94d3cf10e2b54915b87c57e32965bc2db1b1f956d1962812738ca9b2c93fd7825adf4dffddc97aa85ca0f3f412f02d30678a816d2efbfb6778305fd5e610b6e8af30030bc059880c337bfde326b392d5dcd7c36cb0076fbccc7099b94f1f03bdb525d6e3818b6d50b93ced802957a4b03892c71b6679052bd35e92ceea71a96b22b2ed2c129755f0c74fa172f43da2790f3132a7e57e408d2fc5f1126b088cd0398e6dedcb237242e6720e12e8d7a5a1e196eda6241cfee1cc85e9d20af67f3f9bdf91160516ebcd0b8da6bb7b12229e1112b22c9f1aaef1d75441465cfee2ac1c47b5255514316ed4637e192b00ff28491168f2f2b00",
"mmr_index": 4107711,
"output_type": "Coinbase",
"proof": "7083884b5f64e4e61fb910d2c3c603f7c94490716d95e7144b4c927d0ca6ccc0e069cc285e25f38ee90c402ef26005cad2b4073eeba17f0ae3ea2b87095106ef00634f321d8a49c2feaad485bc9ee552564a6a883c99886d0d3a85af3490d718f5a5cbc70f9dcc9bf5d987fb6072132a4c247d4bbd4af927532a887b1e4250b7277771f6b82f43f4fb5a48089ed58e7d3190a19197e07acfed650f8b2cd5f103e994fb3d3735c5727f06f302bd1f182586297dd57a7951ff296bdf6106704abedc39db77f1293effaa7496a77d19420a6208bc1c589b33dad9540cb6180cccf5e085006b01309419f931e54531d770e5fe00eca584072692a7e4883fd65ed4a7c460665608ab96bf0c7d564fe96a341f14066db413a6fddc359eb11f6f962aca70ca1414c35d7941ce06b77d0a0606081b78d5e64a4501f8e8eba9f0e0889042bc54b4cbfd71087a95af63e0306dba214084d4860b0ce66dc80af44224e5a6fef55800650b05cf1639f81bfdc30950f3634d1fd4375d50c22c7f13f3dfb690e5f155a535aff041b7f800bfe74c60f606e8ab47df60754a0e08221c2a50abe643bb086433afd040a7e6290d1d00b3fe657be3bb05c67f90eb183c2acb53c81e1ca15cd8d35fe9d7d52d8f455398e905bdc77ffb211697d477af25704cf9896e8ce797f4fed03e2ba1615e3ad5646eecaa698470f99437d01d5193f041201502763e8bde51e6dc830b5c676d05c8f7f87c4972c578b8d9d5922ba29f6e4a89a123311d02b5ac44a7d5307f7ed5e4e66aaf749afc76c6fc1114445d6fafeea816a0f985eeacdbe9e6d32a8514ca4aaf7faad4e9d43cde55327ac84bac4d70a9319840e136e713aa31d639e43302f3c71a79f08f4e5c9a19a48d4b46403734cd8f3cc9b67bc26ea8e2a01e63a6f5be6e044e8ed5db5f26d15d25de75f672a79315c5e2407e",
"proof_hash": "7cf77fdaecef6c6fc01edca744c1521581f854a9bac0153971edbb1618fc36ad",
"spent": false
},
{
"block_height": 376154,
"commit": "095c12db5e57e4a1ead0870219bda4ebfb1419f6ab1501386b9dd8dc9811a8c5ff",
"merkle_proof": "00000000003eadc6000000000000000e13c509a17cbb0d81634215cd2482ab6d9eb58b332fcbe6b2c4fa458a63d3cb0dfe3614ebe6e52657870df225d132179fa1ea0fdc2105f0e51d03bc3765a9cd059c60d434a7cae0a3d669b37588c25410f57405c841312cfa50cf514678877a3f4ce8bd3e57723ba75a2b7d61027b2088fbabebdb7336b97ea88b00a7e809a6245def980eba18d987601f4cbd6c3cc9f12a5684fe7a1bc2565a9f8ab63c2db1afa8304f5e23d4754cd97f29c8b06dcb3de4f6d3a83079676b6e9941afe5553a7195384b564ecd6d37522cb5e452cc930d2b549af22698a8fd9bf6cad05a06b09e3f6e672b94e82c0255394b5c187ab76fda653a2491378997ba3d49f9d9c34ca93bc627fe5d98b327c03d429b5473f62672e9d73c4eafd9cb8f62e5158a1ec7eb56653696b10fb9ec205f5e4d1c7a1f3e2dd2994b12eeed93e84776d8dcd8a5d78aecd4f96ae95c0b090d104adf2aa84f0a1fbd8d319fea5476d1a306b2800716e60b00115a5cca678617361c5a89660b4536c56254bc8dd7035d96f05de62b042d16acaeff57c111fdf243b859984063e3fcfdf40c4c4a52889706857a7c3e90e264f30f40cc87bd20e74689f14284bc5ea0a540950dfcc8d33c503477eb1c60",
"mmr_index": 4107717,
"output_type": "Coinbase",
"proof": "073593bc475478f1e4b648ab261df3b0a6e5a58a617176dd0c8f5e0e1d58b012b40eb9b341d16ee22baf3645ea37705895e731dee5c220b58b0f780d781806a10dfa33e870d0494fba18aaa8a7a709bfb3ddf9eb3e4e75a525b382df68dc6f710275cdffb623373c47c1310ae63479826f435ca4520fdc13bb0d995b7d9a10a7587d61bd4a51c9e32c87f3eb6b0f862cdff19a9ac6cb04d6f7fafb8e94508a851dcf5dc6acea4271bb40117a45319da5522b966091b089698f4f940842458b5b49e212d846be35e0c2b98a00ac3d0b7ceaf081272dbed8abd84fe8f26d57bac1340e8184602436ed8c4470ef9dc214df3405de0e71703abec4456b15e122a94706852bb476213ceadf00529d00d8d3b16dc57f4e4a9a86dacfa719e00366728de42f3f830e73f6113f1e391fab07eba1b40f6466203b0ce14701230e934f6138c575660a03dbb0e59d7295df3115a4fc0909a5520d74657b319fc83481079ad6c13400175e39fa2b86071ba563ce8836320713ef8f55d4e90bee3f57df96c7aef0f2e896f57192fae9675471cd9751bcaf2b15e5a65a9733a6f7f9b8147b8f6e8dac51d056018d411fd252225cf88e56f143143f49e8a0d2e43c10de0442dbc84966817532b1256b6769db987526790a389c371a1fe7a36eacffef82877b4db7a9b5e58722ffbd0fc4fdbd7624365ee326bb8b1e60b999f513715b30f37ef6116eabf53b3524b46c33a1fac49205b39e24aa388d823269c1fc43c3599a06b69433a0a47a03bd871321afb7846a6dbfd5891bd84f89c556231745c929d08445f66f332857bfda1c4f86ae58a01007b7303f870ac24e0ba72d84c0ef4903ac2ff777e2c2dcb4d8e303c74e0c8a559686b4d4c25024ee97601787d4e5a97224af41e5d35d91744292f5a41f64d4e1cae77bebebd77a473f3b54e86f7221aac230942f0468",
"proof_hash": "5dd69c083e2c0fd797a499bbafedee0728849afa3476034280ecadf6eb4bffc2",
"spent": false
},
{
"block_height": 376153,
"commit": "0948cb346b7affe004a6f84fa4b5b44995830f1c332b03537df4c258d51d1afb50",
"merkle_proof": "00000000003eadc4000000000000000dfe3614ebe6e52657870df225d132179fa1ea0fdc2105f0e51d03bc3765a9cd059c60d434a7cae0a3d669b37588c25410f57405c841312cfa50cf514678877a3f4ce8bd3e57723ba75a2b7d61027b2088fbabebdb7336b97ea88b00a7e809a6245def980eba18d987601f4cbd6c3cc9f12a5684fe7a1bc2565a9f8ab63c2db1afa8304f5e23d4754cd97f29c8b06dcb3de4f6d3a83079676b6e9941afe5553a7195384b564ecd6d37522cb5e452cc930d2b549af22698a8fd9bf6cad05a06b09e3f6e672b94e82c0255394b5c187ab76fda653a2491378997ba3d49f9d9c34ca93bc627fe5d98b327c03d429b5473f62672e9d73c4eafd9cb8f62e5158a1ec7eb56653696b10fb9ec205f5e4d1c7a1f3e2dd2994b12eeed93e84776d8dcd8a5d78aecd4f96ae95c0b090d104adf2aa84f0a1fbd8d319fea5476d1a306b2800716e60b00115a5cca678617361c5a89660b4536c56254bc8dd7035d96f05de62b042d16acaeff57c111fdf243b859984063e3fcfdf40c4c4a52889706857a7c3e90e264f30f40cc87bd20e74689f14284bc5ea0a540950dfcc8d33c503477eb1c60",
"mmr_index": 4107716,
"output_type": "Coinbase",
"proof": "72950da23ad7f0d0381e2f788bf0ac6b6bcb17aaccf0373534122a95714d2d0dbf6a24822b4aab0711a595c80bc36122957111c39292f2a36a973252fb88cbda0b1d61ea8ea84f5171a61f751cac97332637b7cf74cc73144b912ba700dedaa60895f06e947f1e42a8c79d70f924f45fdcb6df5d30289f36ff77d0ae368df5775a739b7a25cbfb63f0cdbdc167b046067c2a021fe0950c7b67515b185b9e4a00ce63b795d49ae184fe5cc726d72fc05d717c4fb55dd5f65967dc282d3c47cb6f8a92cb696e5a1d8cca21214bc766e3de6271791cebf646cda97ae77035da16606f3397f71e103137358c97b9943c3e15403184f61230bd0e3954c7681a0891aa7a0cc32e82d830fb7d8759a04d1da7058630a853508df095142f22158c28bd5e3f2477ad6c8990e63d0377a0fa3d588b6584453778eb38cbaec8a33c1d3772c97a826d4a2f6953c35342993b04567e9fea6fc64fb714653f934faa1a8f635d39eb2903de4bed960a3df07dce7c2e3ff517bbc15f467d0190a579bc07b0f1a910b23269d794835bbb34e8318dcc4fd4159f8f03faa77842d445cf61af9e33caf46aa5fae0812a6476a09c0757e929271a96a245701ab14c1fdd836b92b7e763afa623017f68f1bc4eb716ce735820a1311b743dd8d5c6bb275a2e4e7d2eff8f45417b60cc937086c3e7fd3b612ae064d7237eb6a7bd1a39d8575fac312068fa060bc1ceac4df0754601edaf04ecb1b89c0661ea01a593c3763e456bebbd8487edc0ff3bc6f203965cd92b1706070c59a3795f9dee23087cea0aaec015f1b7bfe4df81818d7a37af781ca7b757ace2fa489f85215ecb85976b1c74c7f1df6d834a8bc63e887407ef6e233c55ea040bc5f2471e99ebc92f2283ff592ff751d9226bd105e68e187c91ecb236c9fa4fb060ae4d706c571ac2123da1debd12737d98be118578",
"proof_hash": "0ce421970d13fe9b3981e308c5d0b549982cdda9f69918289cd95ffcd09e0fc2",
"spent": false
}
]
}
}
# "#
# );
```
*/
fn get_outputs(
&self,
commits: Option<Vec<String>>,
start_height: Option<u64>,
end_height: Option<u64>,
include_proof: Option<bool>,
include_merkle_proof: Option<bool>,
) -> Result<Vec<OutputPrintable>, ErrorKind>;

/**
Networked version of [Foreign::get_unspent_outputs](struct.Node.html#method.get_unspent_outputs).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_unspent_outputs",
"params": [1, 2, null, true],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"highest_index": 2078061,
"last_retrieved_index": 30,
"outputs": [
{
"block_height": 1,
"commit": "08b7e57c448db5ef25aa119dde2312c64d7ff1b890c416c6dda5ec73cbfed2edea",
"merkle_proof": null,
"mmr_index": 1,
"output_type": "Coinbase",
"proof": "9330ad8cde205f317c6537eca96b866293a0489615a9a277b4d3a597c873544c82474932b641e06ac8719604ee52e895e8cd4621b6bfb85780cd9becce14d0700b83a664db2f52a26c425fd777ad88944cdfff38043a2793ed4d9aa67e36cbfd5585579fc69dda930418af5eaf603654f6f751258d2dfc8c2113c171e130f31ec1e6cce2a718e435298fce5d64ffe1bd3464fd7c87cfa92093855be034bfe4439e928bd92ad77fd0a0e00355ee1d1a9ceb1ed0c408dcfdba8c583e7598dc700aaa9f91432097259a405f5b7315a2f7658861e3349bb0dc8bf883726a215f0149ded6613e5ac0670c0c5202247d7c27c8a7d03bdb03c9cf5455463f9b42cf87403e31f8383cc4f49a34c62ae459f5801a9eed4f0ee3dfd5f55b7011c0cae393c474abd6f8c7965b9b5fff3104dd4e39542077c0c8dd2f8ffceb6bb598512d90506d0a7184f20f1498cf458787f23284b54888c9be416d103f760406357a16b6d841a303d5c95b6b474d2d7f0fea0a2a76c897dd2110e9303f54684169421147684c6f1819c33cef3f38ec995a508450c02cd1872f8065fdee723109c18b1dd2ddde75825546ecf0df0793c353b20c946cd64122cea8c116f432336899a16ad24a2aafcb8f900e09a1147135fcf2a54cbf81db308a47a08a49c77c130e5dc5e661cd55a5cc69e607055a5b08111bf61a62ea5778f85119043633f1cab8c756d756c5a34851024ac311a596b1cd919bbca43226f0ba057f6b57de2f6955b0823c3826de7f6096c1c1b6b9b8e4063e1645c0bff32f80561aaa959d97120fbc2ecd9d2be28bd0c17811dc59a88049f6d8952ee9a0a0207693c89ca3ad1197e9bfdfc03be9d845aea8d663969217e3b494cee9e652bc9f8713e2fd5cb1843848f46c3a6ab024d0e3d57ca45454cdbda414adaa835fa147deb4ffb7129cf3a8d86726a0144794",
"proof_hash": "6c301688d9186c3a99444f827bdfe3b858fe87fc314737a4dc1155d9884491d2",
"spent": false
},
{
"block_height": 29,
"commit": "09bab1ddad0f6fec1aedcd3830c5c647515ad543929e722344e4a8d390b6fdd51b",
"merkle_proof": null,
"mmr_index": 55,
"output_type": "Coinbase",
"proof": "4a5f858d4311bdd902f4446682f27f64be376283b1171060fd2ad33d85350fee13c25a030874d6308d2b325995a3fe545eb1d85ba66e2ba002b794edfdeacb3f0fd2a690b9a78137771b3633aaef2a77f62fbe4d6b4b373c4bdb7e5f58cfae361a3b4c2e4420cc0d38465b2444e01b50e57c6ebfc2afd6dda9017e54585638bddef17d181d1fd7064d975d8bb1dcfd96c89486aed4680b4d39294a141581d1f51c1acfbb80e2ffc40f8499cdc43be04cacda1e34dd6592edfc500229aa70db1c2869f974cfe9aee0cab696c198624de8ecdaf5ae481a1e46fe79fe983209459b89492f2b24416c368394c43c60c33d0fdd1792f0a58d11763e7c8b89d27da25109db346e4d7b62935d182b45dfb659829c55922350e6f7e3452d9311e527ec5b561f4d043cef865f683fce1ce2d410d414f5bcee63c4bbc00964b0fa757bdfd68158e22c1068d871a45759fbd527883c0451db6f36b15139864b6177a78ad64d326e0152914e5313a97ed7b685e5089f2758bf072c804560306bd944831f067c3413ded09330fd788f353e4ee875d3c9303dd4ec0dda9d55b4a27d7748b3247fe85cf3d26b7004e6e3379041fad136fccdacd02b06456a50ad40a3259842c0794f2d59dbd8fa6b4af065b38c388d76b82136b633b06779e4eb05b5b62ec37cdc2986327639bafa8651318f4c00c066e6f45504ec9a96874d5510b519f434a1a88175d51f86e8ee36ae18d107cfaf83e60b2e62fff032c7539be66d776e3a52c5f9b0ee6fe08820d65cd75d35c793e5ab3914adf5a97b7dba75e90d4a4c9aa844e2f1e9464cd5fc4923b475defca4e3b03e1b33353ff91ac1084712cf4445e329ffdbe1e2da16ae71dee0e914b546fdc0db9b0fcde80822ee716e9f2eec90db7aa4417d53a1266e1e8383e20c9a9548bae35c2a8e1293a49e7afbd8011a9e66e79ed6be",
"proof_hash": "a64ed774d824dc55123c6c5ba46d84bac15b6ead8cb60200836c2a0e74506ab0",
"spent": false
}
]
}
}
}
# "#
# );
```
*/
fn get_unspent_outputs(
&self,
start_index: u64,
end_index: Option<u64>,
max: u64,
include_proof: Option<bool>,
) -> Result<OutputListing, ErrorKind>;

/**
Networked version of [Foreign::get_pmmr_indices](struct.Node.html#method.get_pmmr_indices).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_pmmr_indices",
"params": [0, 100],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"highest_index": 398,
"last_retrieved_index": 2,
"outputs": []
}
}
}
# "#
# );
```
*/
fn get_pmmr_indices(
&self,
start_block_height: u64,
end_block_height: Option<u64>,
) -> Result<OutputListing, ErrorKind>;

/**
Networked version of [Foreign::get_pool_size](struct.Node.html#method.get_pool_size).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_pool_size",
"params": [],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": 1
}
}
# "#
# );
```
*/
fn get_pool_size(&self) -> Result<usize, ErrorKind>;

/**
Networked version of [Foreign::get_stempool_size](struct.Node.html#method.get_stempool_size).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_stempool_size",
"params": [],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": 0
}
}
# "#
# );
```
*/
fn get_stempool_size(&self) -> Result<usize, ErrorKind>;

/**
Networked version of [Foreign::get_unconfirmed_transactions](struct.Node.html#method.get_unconfirmed_transactions).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "get_unconfirmed_transactions",
"params": [],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": [
{
"src": "Broadcast",
"tx": {
"body": {
"inputs": [
{
"commit": "0992ce1827ec349e9f339ce183ffd01db39bf43999799d8191bfc267a58f0a715c",
"features": "Coinbase"
},
{
"commit": "0943a3c4ee4a22a5b086c26f8e6dc534204dafde0cf4c07e0c468d224dd79127ec",
"features": "Plain"
}
],
"kernels": [
{
"excess": "083c49eaaf6380d44596f52cce4cf278cfac6dd34fbef73981002d8f1e8ee8abe4",
"excess_sig": "3f011e7e288231d67f42cb4f6416c4720e6170d5e3c805a52d33aa4521328f9be0303be654bc8ddcd3111aadc27c848b9cf07e0a70885ef79be70b7bb70f8c75",
"features": {
"Plain": {
"fee": 7000000
}
}
}
],
"outputs": [
{
"commit": "0873fafd4a0e4f365939e24c68eeb18aafc6674ca244a364dcdbfa8fa525e7bae1",
"features": "Plain",
"proof": "4b675be40672d5965c43d9f03880560a8ac784ee3de8768e28c236a4bc43b8c3d4bc83dee00d2b96530af9607c3b91d9a828f0234bf2aaf7e7c0e9cf936db69c04ca1b267668fbdb2f08ce05c8b119c9d886ceaafb4634b7fae7ea01966ad825dddc9ffab8093155d9c5d268160b86fcad95f4f5e66bf46ff642a51629dbdfd7bba7936846915b925d547337a1b95c33030fad4178468825936242e631797aa3a8f0a5ae0d23040938622648c8432fc247a902abad27e383affb4ec518e4f6f55f55e264bc0f99957be203cfb26d4b8e561fb36da55a50b6ef5861134c484556d701133e1dceda5ea53e731184e0a11f33d06e13ca37d03d39dd047170580534b049862fcd6c73decc7c0af45a267ed148fe6ef2cc375ffebfa8187d2fa0a134428a036d2ec1f65d3ce036b955730fc1ee43b23b574bae2b58b7adfa2a7a45cdec393d9b658857c911560aa3c44cf4435a99d68f3dbc81c82ea43e426ef0198148a90336ee72472aab5f7feea1df93ec830fe5ec642c93c1046dec955df361bfdc3ab74477f847a1b72e8735ef65a8a6d1680745c0152bfb5cbb2a4b4671491a253a1a09d5a07d55f4872c9f0a3d25e07b257926629d5bb96aed96f5debab02503eb0ac45033323cc5a46c8e5d4469ee9f3dd618a20d54d6f5740c010fe5a0fe853efeb253a6df196bd24469ac51c1be8ba84737cecdb5ab73d7c52570d2273621fb69bd7ed985bbc6999dbd2d6fd2687ae44a391d604ff232cc6b3fbedd5d1cd0cd8c658c5d56069b5a5099cc5c9f48bbf7d7e83b4f9a7bdef6eabd164c8395468f818e8cd8c1c800bc3adfd66dbcb247d1bda5a7af38c288c0beb8e0d9160bf67500094530a0f8be52e97b5c2114f5a4a333a11c7f37f4c47a437422455d8cbcfa770cdc85ec55accf48cf14550b07f1346a02fccdf280fcb24c1fb38751d889a17e"
},
{
"commit": "08de9e42d361cabd99e566c67f7f8599c7e6985cd285a841277f1aeb89ad6c8fe3",
"features": "Plain",
"proof": "5eb7afa00e9681e3b6425fb4256c96905303505787d6a065e88a50154410b9a371b0f879d3f97cfa00425e9c8266e180188656acdbb46cacfdfb159fb135c5eb03b08be3c231c4b21df777da2e2afe8d30db91e602dc4ceed71aeb1b45a0266cfeadc4acbf9fdf7a67f67408fbbea7bf14182bc407373d243c6875373b655695604deb575369a9b28274885601b338882219c7f508aa2a0ae1d02736af2249327145f1d3d00093f9587f0e0b408692700fac0f2a048c329e81cabaa4b997dd88923fe97420125f394e21b4835e36cce9de383d9e223df1b5a6ba6f48ffeac315991189dc2716cc7ec07f6ccc8062344d5ed4fcaddf9070f44f0c59ffe8160d1f6fdfe42b40066f51e687d38b6b5255771800ac060bd8034cd68d14eee1b2f43b6d7bf20d71549ea9a50006dd30b9a795e785385801546eb9a83721a09fc34d3b69d4ccdc0ff0fb74d224048aeb66ecff5515296cadd57f42e0717cbba7c70719a10c007db4520e868efe98a51001b67952d7bda3174195a3d76b93ee4dac60137a38b2e8309cad13ef1cfb6c467f1969385e5b334b52f4fd55da440e036d2a428e9f3be905d79f717c169060468acc6d469636fed098b1aba5cd055a120314bcab55d5b8b6889321edf373517e93ef67fbe74557ec6c0211265efefa25a34ac267cf1db891c47163bfed20d2b535abfe60390c2844dcef5f0aad5fa7f1db9f726d7f223c025861069603936a22377707cdd3915e762e7061132124c716212b0e91bb7fc5d7816366f5d169d93fe75669a6ba19057bb2450958aa6f5ada09042570f46215af5a41b623d140be574b7a8c9ab24ea48da416dbe6ec0fa3b889206fb804df8d69805ceb80f1e9d4e8b664b3939491cba946d87585c830e3dab0638fa279b5e911642f18452e2731764aa62f92bbcf194c97f344c90c1931fd2c3af4bcf6b0"
}
]
},
"offset": "0eb2c2669ce918675c72697891e5527bd13da5a499396381409219b8bbbd8129"
},
"tx_at": "2019-10-07T16:20:08.709114Z"
}
]
}
}
# "#
# );
```
*/
fn get_unconfirmed_transactions(&self) -> Result<Vec<PoolEntry>, ErrorKind>;

/**
Networked version of [Foreign::push_transaction](struct.Node.html#method.push_transaction).
# Json rpc example
```
# grin_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "push_transaction",
"params": [ {
"body": {
"inputs": [
{
"commit": "0904cbd34d0745eb00ffc3e95c9f4746738794d00268e243e9b57163a73b384102",
"features": "Coinbase"
}
],
"kernels": [
{
"excess": "08385257d22f1b8a758903f78ae12545245d620cffc50e7ee7bc852c5815513dc7",
"excess_sig": "e001a7349fd40d4a9dfc1df275d30906fb3b304f8c7892a20ed5c9b10923c871cbabedcf322511a9ce56f10113b48855441f681280133e121b25ea1ff7efad9e",
"features": {
"Plain": {
"fee": 8000000
}
}
}
],
"outputs": [
{
"commit": "087c3ca7419751e96cdae4908bb8a92fc2826f2ad36690420b905d51beb7409ca0",
"features": "Plain",
"proof": "379ae236937883c2e1e613fb30f1b18d2a44d4173360e94bcd07862aafaf81b3aaa1154d67287cc03efde0d3981c6da8a18e2e426f5c30afc0f2e3a75012448402d8d56df52b87f4815575a56d4da174f8187e4faae64bf883b249ceed694271f84ef62a3711d36c997dff7a11111419011e36e3a070b7552415a55faaa3999f99439edccdfe5313277147fdb42be1798442bb225c2b546f5347920584b365aa81a0365b4a706c97c89617b0e6218d2c9bc15805caab27c438ed06340cc4f8dc7bfca0e9d38864c88bb0c834372f6b662b9159134f3f8ec9b8a87878739a7e516b97419ac29e1d4a2b250321470a9a6b98d07065bb7e79afc25a5ab6fc47108f53223078a64502bd4af1a109641447dab82741ebe3fbdbd803ee7a42fe2554e78fa86bd1d1e6e3b913118e9419b0be6f976b2404447d943b5f1bac19a5809fd6834797945a62d21b1ecb6ddebbc5ef94ca9e704d033bd64afde67bd3e06e2cca3bb10190188afc0af80b48dd862b86753d8b4af314763324deb1c97cf020cb87285a47cd28874bb91c6cdf858965e8b9daafbcbc1b4817d334a97d7e25e01b2d072d8dcc6418e3dc7b8e7712632f939238e65ed0731c7af02d55a8884cd8f7f88dc0f63a21955a7364562532f5716c89e14f8f23ad78f6fe2f1649e13ea8f8185f3ee63cc174684d1ef8d8c33fb25bc802f8e05e53fe200b1ea5231f588a020942e6fd7eec67301700088dae8816c16a337120063c21e1604e009df932032812f88be6473af13f802b42d8ad6fc14230fbe13ede178319a7b6540656234ec1f2fcfa70f6faa9c4b6b8150b81fe0fdc273a9bb385d766a02041a5c3f58471d42059c17d84d13ad592aa0ccf337970e7eef06f306b13288795123c9c005b815d848f359b23450656b310f09cda9ad4b7b6931805d47dcd10a8745d834a984e2055168ac3"
},
{
"commit": "09a7b2c1d4b346c4ebe9c6c979e32e7740446624d5439d9d7abb82166c2545e5be",
"features": "Plain",
"proof": "5fb0ee4093a153e2ed173207dbfa02b4d185f1f313ea4cbf222558819074543f19e9bcdb595a23d4ee971aafcc614b6d2774e22cee6627bc4388297fe6ebf03e0d422f3eb8003cc8516417a6b32eb22f87e1745e0ae5bf1733f2ea253399719b1ef0067934dc548c58729604d24a44040165b32d05e82c9efc9a1f30151dd73ce893ae94709ec2fe5d0f409bb54a86604f0e92915b4f93e7adde823eccf87830ae91d71a7b99967dbcc8531fee44c20c24fb6fe2a34fe86ba5da3a9235cbcdcde033ead57d65c03903a9c9ed877bf0fab9f26d08552c64ea668d5408c84b74bc3ac8335aaaa04ebcf523d36d2207fb8770e976b6fde7d04e2148de5a4169c60b1958bb840b79a8c8f356e1f1fadc35a5a7e276fcd67c354cde546548c9bf788981f38edf5a406977826aa4524004e770b3d3cd6b26f0dc99729ffd9929fa4509b145ef0c3e4293e71b964da731a47cc9f082350acf32afb64b3b12f8383c8f2cc9880131a80ea957b2908c92f21d2db7aa5d67bafb11eb07674e52b920e67a86259dd9c5dcdd18bad182fd85ec4b659c47ea2e2e8a89c57e4d2cde87958fc2ab932e169f6805d2fb14549ac93807bc426eb4cf6d29ff6a4cf22e35dbb27f04211b06b65173501c17a3bb3ff0eecc9bb05dca23379abe457ca3010ebea69e1a2f7f3ed6531bf766007cdd1ac7d6c762785fb56f36194cc2ccaee76a499a7383288e84981b103d76cbe007f66c913eacb277746e78ae08627b279ac1f9a43ab284d8a3b32c6edcd2ea99e8ea836b31a1e2582be6c41f2282cf5fc7bdb95e4b412a5eeccad29670197873a888a100c4b2704ce75137fc997a5632d81001f9b57300a9bf99edd857065be83f835e4c49d852165ba18e1c96316c153459a913773d5d86ddc26c5cd1fff38a8fbb62506b0aef6076382674c0fa95a50a03b0c3df0a688a2cbf"
}
]
},
"offset": "0ec14d3875ad5a366418256fe65bad2a4d4ff1914e1b9488db72dd355138ca3a"
},
true
],
"id": 1
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": null
}
}
# "#
# );
```
*/
fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), ErrorKind>;
}

/// Decode an optional hex-encoded block hash into a `Hash`, mapping any
/// decode failure to an `Argument` error.
///
/// Extracted so `get_header` and `get_block` share one copy of the parsing
/// logic instead of duplicating it.
fn parse_opt_hash(hash: Option<String>) -> Result<Option<Hash>, ErrorKind> {
	match hash {
		Some(hash) => {
			let vec = util::from_hex(hash)
				.map_err(|e| ErrorKind::Argument(format!("invalid block hash: {}", e)))?;
			Ok(Some(Hash::from_vec(&vec)))
		}
		None => Ok(None),
	}
}

// Thin JSON-RPC wrappers: each method decodes its string arguments where
// needed, delegates to the corresponding `Foreign` method and flattens the
// error into its `ErrorKind` for serialization.
impl ForeignRpc for Foreign {
	fn get_header(
		&self,
		height: Option<u64>,
		hash: Option<String>,
		commit: Option<String>,
	) -> Result<BlockHeaderPrintable, ErrorKind> {
		let parsed_hash = parse_opt_hash(hash)?;
		Foreign::get_header(self, height, parsed_hash, commit).map_err(|e| e.kind().clone())
	}

	fn get_block(
		&self,
		height: Option<u64>,
		hash: Option<String>,
		commit: Option<String>,
	) -> Result<BlockPrintable, ErrorKind> {
		let parsed_hash = parse_opt_hash(hash)?;
		Foreign::get_block(self, height, parsed_hash, commit).map_err(|e| e.kind().clone())
	}

	fn get_version(&self) -> Result<Version, ErrorKind> {
		Foreign::get_version(self).map_err(|e| e.kind().clone())
	}

	fn get_tip(&self) -> Result<Tip, ErrorKind> {
		Foreign::get_tip(self).map_err(|e| e.kind().clone())
	}

	fn get_kernel(
		&self,
		excess: String,
		min_height: Option<u64>,
		max_height: Option<u64>,
	) -> Result<LocatedTxKernel, ErrorKind> {
		Foreign::get_kernel(self, excess, min_height, max_height).map_err(|e| e.kind().clone())
	}

	fn get_outputs(
		&self,
		commits: Option<Vec<String>>,
		start_height: Option<u64>,
		end_height: Option<u64>,
		include_proof: Option<bool>,
		include_merkle_proof: Option<bool>,
	) -> Result<Vec<OutputPrintable>, ErrorKind> {
		Foreign::get_outputs(
			self,
			commits,
			start_height,
			end_height,
			include_proof,
			include_merkle_proof,
		)
		.map_err(|e| e.kind().clone())
	}

	fn get_unspent_outputs(
		&self,
		start_index: u64,
		end_index: Option<u64>,
		max: u64,
		include_proof: Option<bool>,
	) -> Result<OutputListing, ErrorKind> {
		Foreign::get_unspent_outputs(self, start_index, end_index, max, include_proof)
			.map_err(|e| e.kind().clone())
	}

	fn get_pmmr_indices(
		&self,
		start_block_height: u64,
		end_block_height: Option<u64>,
	) -> Result<OutputListing, ErrorKind> {
		Foreign::get_pmmr_indices(self, start_block_height, end_block_height)
			.map_err(|e| e.kind().clone())
	}

	fn get_pool_size(&self) -> Result<usize, ErrorKind> {
		Foreign::get_pool_size(self).map_err(|e| e.kind().clone())
	}

	fn get_stempool_size(&self) -> Result<usize, ErrorKind> {
		Foreign::get_stempool_size(self).map_err(|e| e.kind().clone())
	}

	fn get_unconfirmed_transactions(&self) -> Result<Vec<PoolEntry>, ErrorKind> {
		Foreign::get_unconfirmed_transactions(self).map_err(|e| e.kind().clone())
	}

	fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), ErrorKind> {
		Foreign::push_transaction(self, tx, fluff).map_err(|e| e.kind().clone())
	}
}

#[doc(hidden)]
#[macro_export]
// Doctest helper invoked by the JSON-RPC examples in the foreign API docs.
// NOTE(review): the body currently expands to an empty block, so the doc
// examples are only checked for being parseable Rust/JSON — nothing is
// actually executed or asserted.
macro_rules! doctest_helper_json_rpc_foreign_assert_response {
($request:expr, $expected_response:expr) => {
// create temporary grin server, run jsonrpc request on node api, delete server, return
// json response.

{
// NOTE(review): the commented-out reference implementation below uses
// metavariables ($use_token, $blocks_to_mine, $perform_tx, $lock_tx,
// $finalize_tx) that are not declared in this macro's matcher — it will
// need updating before it can be re-enabled.
/*use grin_servers::test_framework::framework::run_doctest;
use grin_util as util;
use serde_json;
use serde_json::Value;
use tempfile::tempdir;
let dir = tempdir().map_err(|e| format!("{:#?}", e)).unwrap();
let dir = dir
.path()
.to_str()
.ok_or("Failed to convert tmpdir path to string.".to_owned())
.unwrap();
let request_val: Value = serde_json::from_str($request).unwrap();
let expected_response: Value = serde_json::from_str($expected_response).unwrap();
let response = run_doctest(
request_val,
dir,
$use_token,
$blocks_to_mine,
$perform_tx,
$lock_tx,
$finalize_tx,
)
.unwrap()
.unwrap();
if response != expected_response {
panic!(
"(left != right) \nleft: {}\nright: {}",
serde_json::to_string_pretty(&response).unwrap(),
serde_json::to_string_pretty(&expected_response).unwrap()
);
}*/
}
};
}
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod blocks_api;
mod chain_api;
mod peers_api;
mod pool_api;
mod server_api;
mod transactions_api;
mod utils;
mod version_api;
pub mod blocks_api;
pub mod chain_api;
pub mod peers_api;
pub mod pool_api;
pub mod server_api;
pub mod transactions_api;
pub mod utils;
pub mod version_api;

use self::blocks_api::BlockHandler;
use self::blocks_api::HeaderHandler;
@@ -38,59 +38,302 @@ use self::server_api::KernelDownloadHandler;
use self::server_api::StatusHandler;
use self::transactions_api::TxHashSetHandler;
use self::version_api::VersionHandler;
use crate::auth::{BasicAuthMiddleware, GRIN_BASIC_REALM};
use crate::auth::{
BasicAuthMiddleware, BasicAuthURIMiddleware, GRIN_BASIC_REALM, GRIN_FOREIGN_BASIC_REALM,
};
use crate::chain;
use crate::chain::{Chain, SyncState};
use crate::foreign::Foreign;
use crate::foreign_rpc::ForeignRpc;
use crate::owner::Owner;
use crate::owner_rpc::OwnerRpc;
use crate::p2p;
use crate::pool;
use crate::rest::*;
use crate::rest::{ApiServer, Error, TLSConfig};
use crate::router::ResponseFuture;
use crate::router::{Router, RouterError};
use crate::util;
use crate::util::to_base64;
use crate::util::RwLock;
use crate::web::*;
use easy_jsonrpc_mw::{Handler, MaybeReply};
use futures::future::ok;
use futures::Future;
use hyper::{Body, Request, Response, StatusCode};
use serde::Serialize;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::{Arc, Weak};

/// Start all server HTTP handlers. Register all of them with Router
/// and runs the corresponding HTTP server.
///
/// Hyper currently has a bug that prevents clean shutdown. In order
/// to avoid having references kept forever by handlers, we only pass
/// weak references. Note that this likely means a crash if the handlers are
/// used after a server shutdown (which should normally never happen,
/// except during tests).
pub fn start_rest_apis(
addr: String,
/// Listener version, providing same API but listening for requests on a
/// port and wrapping the calls
pub fn node_apis(
addr: &str,
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool>>,
peers: Arc<p2p::Peers>,
sync_state: Arc<chain::SyncState>,
api_secret: Option<String>,
foreign_api_secret: Option<String>,
tls_config: Option<TLSConfig>,
) -> bool {
let mut apis = ApiServer::new();
let mut router =
build_router(chain, tx_pool, peers, sync_state).expect("unable to build API router");
) -> Result<(), Error> {
// Manually build router when getting rid of v1
//let mut router = Router::new();
let mut router = build_router(
chain.clone(),
tx_pool.clone(),
peers.clone(),
sync_state.clone(),
)
.expect("unable to build API router");

// Add basic auth to v1 API and owner v2 API
if let Some(api_secret) = api_secret {
let api_basic_auth = format!("Basic {}", util::to_base64(&format!("grin:{}", api_secret)));
let api_basic_auth =
"Basic ".to_string() + &to_base64(&("grin:".to_string() + &api_secret));
let basic_auth_middleware = Arc::new(BasicAuthMiddleware::new(
api_basic_auth,
&GRIN_BASIC_REALM,
None,
Some("/v2/foreign".into()),
));
router.add_middleware(basic_auth_middleware);
}

info!("Starting HTTP API server at {}.", addr);
let api_handler_v2 = OwnerAPIHandlerV2::new(
Arc::downgrade(&chain),
Arc::downgrade(&peers),
Arc::downgrade(&sync_state),
);
router.add_route("/v2/owner", Arc::new(api_handler_v2))?;

// Add basic auth to v2 foreign API only
if let Some(api_secret) = foreign_api_secret {
let api_basic_auth =
"Basic ".to_string() + &to_base64(&("grin:".to_string() + &api_secret));
let basic_auth_middleware = Arc::new(BasicAuthURIMiddleware::new(
api_basic_auth,
&GRIN_FOREIGN_BASIC_REALM,
"/v2/foreign".into(),
));
router.add_middleware(basic_auth_middleware);
}

let api_handler_v2 = ForeignAPIHandlerV2::new(
Arc::downgrade(&chain),
Arc::downgrade(&tx_pool),
Arc::downgrade(&sync_state),
);
router.add_route("/v2/foreign", Arc::new(api_handler_v2))?;

let mut apis = ApiServer::new();
warn!("Starting HTTP Node APIs server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let res = apis.start(socket_addr, router, tls_config);
match res {
Ok(_) => true,
let api_thread = apis.start(socket_addr, router, tls_config);

warn!("HTTP Node listener started.");

match api_thread {
Ok(_) => Ok(()),
Err(e) => {
error!("HTTP API server failed to start. Err: {}", e);
false
Err(e)
}
}
}

type NodeResponseFuture = Box<dyn Future<Item = Response<Body>, Error = Error> + Send>;

/// V2 API Handler/Wrapper for owner functions
pub struct OwnerAPIHandlerV2 {
// Weak so the handler cannot keep the node alive past server shutdown.
pub chain: Weak<Chain>,
// Weak reference to the p2p peer set.
pub peers: Weak<p2p::Peers>,
// Weak reference to the chain sync state.
pub sync_state: Weak<SyncState>,
}

impl OwnerAPIHandlerV2 {
	/// Build a new owner API handler from weak references to the node
	/// internals (weak so handlers never keep the server alive).
	pub fn new(chain: Weak<Chain>, peers: Weak<p2p::Peers>, sync_state: Weak<SyncState>) -> Self {
		OwnerAPIHandlerV2 {
			chain,
			peers,
			sync_state,
		}
	}

	/// Parse the request body as JSON-RPC and dispatch it to the owner API.
	fn call_api(
		&self,
		req: Request<Body>,
		api: Owner,
	) -> Box<dyn Future<Item = serde_json::Value, Error = Error> + Send> {
		let fut = parse_body(req).and_then(move |val: serde_json::Value| {
			let rpc = &api as &dyn OwnerRpc;
			match rpc.handle_request(val) {
				MaybeReply::Reply(reply) => ok(reply),
				// HTTP always needs a body; an empty JSON array is parsed by
				// jsonrpc clients as an empty batch response.
				MaybeReply::DontReply => ok(serde_json::json!([])),
			}
		});
		Box::new(fut)
	}

	/// Instantiate an `Owner` API facade and answer the POST request with
	/// the pretty-printed JSON result.
	fn handle_post_request(&self, req: Request<Body>) -> NodeResponseFuture {
		let api = Owner::new(
			self.chain.clone(),
			self.peers.clone(),
			self.sync_state.clone(),
		);
		let fut = self
			.call_api(req, api)
			.and_then(|resp| ok(json_response_pretty(&resp)));
		Box::new(fut)
	}
}

impl crate::router::Handler for OwnerAPIHandlerV2 {
	/// Handle a JSON-RPC POST; log and surface any failure as an HTTP 500
	/// rather than failing the hyper service future.
	// Note: the redundant identity combinator `.and_then(|r| ok(r))` has been
	// removed — it was a no-op on the success path.
	fn post(&self, req: Request<Body>) -> ResponseFuture {
		Box::new(self.handle_post_request(req).or_else(|e| {
			error!("Request Error: {:?}", e);
			ok(create_error_response(e))
		}))
	}

	/// Answer CORS preflight requests with an empty JSON object.
	fn options(&self, _req: Request<Body>) -> ResponseFuture {
		Box::new(ok(create_ok_response("{}")))
	}
}

/// V2 API Handler/Wrapper for foreign functions
pub struct ForeignAPIHandlerV2 {
// Weak so the handler cannot keep the node alive past server shutdown.
pub chain: Weak<Chain>,
// Weak reference to the transaction pool (behind the project RwLock).
pub tx_pool: Weak<RwLock<pool::TransactionPool>>,
// Weak reference to the chain sync state.
pub sync_state: Weak<SyncState>,
}

impl ForeignAPIHandlerV2 {
	/// Build a new foreign API handler from weak references to the node
	/// internals (weak so handlers never keep the server alive).
	pub fn new(
		chain: Weak<Chain>,
		tx_pool: Weak<RwLock<pool::TransactionPool>>,
		sync_state: Weak<SyncState>,
	) -> Self {
		ForeignAPIHandlerV2 {
			chain,
			tx_pool,
			sync_state,
		}
	}

	/// Parse the request body as JSON-RPC and dispatch it to the foreign API.
	fn call_api(
		&self,
		req: Request<Body>,
		api: Foreign,
	) -> Box<dyn Future<Item = serde_json::Value, Error = Error> + Send> {
		let fut = parse_body(req).and_then(move |val: serde_json::Value| {
			let rpc = &api as &dyn ForeignRpc;
			match rpc.handle_request(val) {
				MaybeReply::Reply(reply) => ok(reply),
				// HTTP always needs a body; an empty JSON array is parsed by
				// jsonrpc clients as an empty batch response.
				MaybeReply::DontReply => ok(serde_json::json!([])),
			}
		});
		Box::new(fut)
	}

	/// Instantiate a `Foreign` API facade and answer the POST request with
	/// the pretty-printed JSON result.
	fn handle_post_request(&self, req: Request<Body>) -> NodeResponseFuture {
		let api = Foreign::new(
			self.chain.clone(),
			self.tx_pool.clone(),
			self.sync_state.clone(),
		);
		let fut = self
			.call_api(req, api)
			.and_then(|resp| ok(json_response_pretty(&resp)));
		Box::new(fut)
	}
}

impl crate::router::Handler for ForeignAPIHandlerV2 {
	/// Handle a JSON-RPC POST; log and surface any failure as an HTTP 500
	/// rather than failing the hyper service future.
	// Note: the redundant identity combinator `.and_then(|r| ok(r))` has been
	// removed — it was a no-op on the success path.
	fn post(&self, req: Request<Body>) -> ResponseFuture {
		Box::new(self.handle_post_request(req).or_else(|e| {
			error!("Request Error: {:?}", e);
			ok(create_error_response(e))
		}))
	}

	/// Answer CORS preflight requests with an empty JSON object.
	fn options(&self, _req: Request<Body>) -> ResponseFuture {
		Box::new(ok(create_ok_response("{}")))
	}
}

// Serialize `s` as pretty-printed JSON into an HTTP 200 response; any
// serialization failure becomes an empty HTTP 500.
fn json_response_pretty<T>(s: &T) -> Response<Body>
where
	T: Serialize,
{
	serde_json::to_string_pretty(s)
		.map(|json| response(StatusCode::OK, json))
		.unwrap_or_else(|_| response(StatusCode::INTERNAL_SERVER_ERROR, ""))
}

// Turn an API error into an HTTP 500 whose body is the error's display text,
// with permissive CORS headers.
fn create_error_response(e: Error) -> Response<Body> {
	let body: Body = e.to_string().into();
	Response::builder()
		.status(StatusCode::INTERNAL_SERVER_ERROR)
		.header("access-control-allow-origin", "*")
		.header(
			"access-control-allow-headers",
			"Content-Type, Authorization",
		)
		.body(body)
		.unwrap()
}

// Wrap a pre-rendered JSON string in an HTTP 200 response with permissive
// CORS headers and a JSON content type.
fn create_ok_response(json: &str) -> Response<Body> {
	let body: Body = json.to_string().into();
	Response::builder()
		.status(StatusCode::OK)
		.header("access-control-allow-origin", "*")
		.header(
			"access-control-allow-headers",
			"Content-Type, Authorization",
		)
		.header(hyper::header::CONTENT_TYPE, "application/json")
		.body(body)
		.unwrap()
}

/// Build a new hyper Response with the status code and body provided.
///
/// Whenever the status code is `StatusCode::OK` the text parameter should be
/// valid JSON as the content type header will be set to `application/json'
fn response<T: Into<Body>>(status: StatusCode, text: T) -> Response<Body> {
// NOTE(review): this follows the older (pre-0.13) hyper builder API where
// header/status take `&mut self` and return `&mut Builder`, hence the
// reborrow here — confirm against the hyper version pinned in Cargo.toml.
let mut builder = &mut Response::builder();

builder = builder
.status(status)
.header("access-control-allow-origin", "*")
.header(
"access-control-allow-headers",
"Content-Type, Authorization",
);

// Only OK responses get an explicit JSON content type; other statuses
// carry their body with no content-type header set here.
if status == StatusCode::OK {
builder = builder.header(hyper::header::CONTENT_TYPE, "application/json");
}

builder.body(text.into()).unwrap()
}

// Legacy V1 router
pub fn build_router(
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool>>,
@@ -11,7 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::utils::{get_output, w};
use super::utils::{get_output, get_output_v2, w};
use crate::chain;
use crate::core::core::hash::Hash;
use crate::core::core::hash::Hashed;
@@ -63,6 +63,40 @@ impl HeaderHandler {
Err(_) => Err(ErrorKind::NotFound)?,
}
}

/// Fetch the printable header for the block identified by hash `h`,
/// returning `NotFound` when the header is not in the chain store.
// Fixed: dropped the non-idiomatic `return ...;` tail statement in favor of
// a tail expression.
pub fn get_header_v2(&self, h: &Hash) -> Result<BlockHeaderPrintable, Error> {
	let chain = w(&self.chain)?;
	let header = chain.get_block_header(h).context(ErrorKind::NotFound)?;
	Ok(BlockHeaderPrintable::from_header(&header))
}

// Try to get hash from height, hash or output commit
/// Resolve a block hash from exactly one of the given selectors, checked in
/// priority order: `height`, then `hash`, then output `commit`.
///
/// Returns `ErrorKind::NotFound` when a selector does not match a block and
/// `ErrorKind::Argument` when no selector was supplied at all.
// Fixed: replaced the convoluted `return Err(ErrorKind::...)?` pattern and
// the match-to-rewrap blocks with direct `map`/`map_err` conversions.
pub fn parse_inputs(
	&self,
	height: Option<u64>,
	hash: Option<Hash>,
	commit: Option<String>,
) -> Result<Hash, Error> {
	if let Some(height) = height {
		return w(&self.chain)?
			.get_header_by_height(height)
			.map(|header| header.hash())
			.map_err(|_| ErrorKind::NotFound.into());
	}
	if let Some(hash) = hash {
		return Ok(hash);
	}
	if let Some(commit) = commit {
		// An output commitment identifies the block that contains the output.
		let oid = get_output_v2(&self.chain, &commit, false, false)?.1;
		return w(&self.chain)?
			.get_header_for_output(&oid)
			.map(|header| header.hash())
			.map_err(|_| ErrorKind::NotFound.into());
	}
	Err(ErrorKind::Argument("not a valid hash, height or output commit".to_owned()).into())
}
}

impl Handler for HeaderHandler {
@@ -87,7 +121,7 @@ pub struct BlockHandler {
}

impl BlockHandler {
fn get_block(
pub fn get_block(
&self,
h: &Hash,
include_proof: bool,
@@ -119,6 +153,34 @@ impl BlockHandler {
.map_err(|e| ErrorKind::Argument(format!("invalid input: {}", e)))?;
Ok(Hash::from_vec(&vec))
}

// Try to get hash from height, hash or output commit
/// Resolve a block hash from exactly one of the given selectors, checked in
/// priority order: `height`, then `hash`, then output `commit`.
///
/// Returns `ErrorKind::NotFound` when a selector does not match a block and
/// `ErrorKind::Argument` when no selector was supplied at all.
// Fixed: replaced the convoluted `return Err(ErrorKind::...)?` pattern and
// the match-to-rewrap blocks with direct `map`/`map_err` conversions.
pub fn parse_inputs(
	&self,
	height: Option<u64>,
	hash: Option<Hash>,
	commit: Option<String>,
) -> Result<Hash, Error> {
	if let Some(height) = height {
		return w(&self.chain)?
			.get_header_by_height(height)
			.map(|header| header.hash())
			.map_err(|_| ErrorKind::NotFound.into());
	}
	if let Some(hash) = hash {
		return Ok(hash);
	}
	if let Some(commit) = commit {
		// An output commitment identifies the block that contains the output.
		let oid = get_output_v2(&self.chain, &commit, false, false)?.1;
		return w(&self.chain)?
			.get_header_for_output(&oid)
			.map(|header| header.hash())
			.map_err(|_| ErrorKind::NotFound.into());
	}
	Err(ErrorKind::Argument("not a valid hash, height or output commit".to_owned()).to_owned().into())
}
}

fn check_block_param(input: &String) -> Result<(), Error> {
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::utils::{get_output, w};
use super::utils::{get_output, get_output_v2, w};
use crate::chain;
use crate::core::core::hash::Hashed;
use crate::rest::*;
@@ -32,7 +32,7 @@ pub struct ChainHandler {
}

impl ChainHandler {
fn get_tip(&self) -> Result<Tip, Error> {
pub fn get_tip(&self) -> Result<Tip, Error> {
let head = w(&self.chain)?
.head()
.map_err(|e| ErrorKind::Internal(format!("can't get head: {}", e)))?;
@@ -52,6 +52,14 @@ pub struct ChainValidationHandler {
pub chain: Weak<chain::Chain>,
}

impl ChainValidationHandler {
	/// Run a full validation pass over the current chain state, mapping any
	/// chain failure to an internal API error.
	pub fn validate_chain(&self) -> Result<(), Error> {
		let chain = w(&self.chain)?;
		chain
			.validate(true)
			.map_err(|_| ErrorKind::Internal("chain error".to_owned()).into())
	}
}

impl Handler for ChainValidationHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
match w_fut!(&self.chain).validate(true) {
@@ -71,6 +79,14 @@ pub struct ChainCompactHandler {
pub chain: Weak<chain::Chain>,
}

impl ChainCompactHandler {
	/// Trigger a chain compaction, mapping any chain failure to an internal
	/// API error.
	pub fn compact_chain(&self) -> Result<(), Error> {
		let chain = w(&self.chain)?;
		chain
			.compact()
			.map_err(|_| ErrorKind::Internal("chain error".to_owned()).into())
	}
}

impl Handler for ChainCompactHandler {
fn post(&self, _req: Request<Body>) -> ResponseFuture {
match w_fut!(&self.chain).compact() {
@@ -97,6 +113,103 @@ impl OutputHandler {
Ok(res.0)
}

/// Look up a single output by commitment, delegating to the shared helper
/// and keeping only the printable output (the OutputIdentifier is dropped).
fn get_output_v2(
	&self,
	id: &str,
	include_proof: bool,
	include_merkle_proof: bool,
) -> Result<OutputPrintable, Error> {
	get_output_v2(&self.chain, id, include_proof, include_merkle_proof).map(|res| res.0)
}

/// Collect printable outputs selected either by explicit commitments, by a
/// block-height range, or both (results are concatenated in that order).
///
/// Individual commitment lookups that fail are logged and skipped rather
/// than failing the whole request; an invalid commitment length is rejected
/// up front.
// Fixed: slice re-concat replaced with `extend` (avoids an O(n) copy per
// batch), nested `if let`s replaced with a tuple match, and the needless
// tail `return` dropped.
pub fn get_outputs_v2(
	&self,
	commits: Option<Vec<String>>,
	start_height: Option<u64>,
	end_height: Option<u64>,
	include_proof: Option<bool>,
	include_merkle_proof: Option<bool>,
) -> Result<Vec<OutputPrintable>, Error> {
	let mut outputs: Vec<OutputPrintable> = vec![];
	if let Some(commits) = commits {
		// First check the commits length
		for commit in &commits {
			if commit.len() != 66 {
				return Err(ErrorKind::RequestError(format!(
					"invalid commit length for {}",
					commit
				))
				.into());
			}
		}
		for commit in commits {
			match self.get_output_v2(
				&commit,
				include_proof.unwrap_or(false),
				include_merkle_proof.unwrap_or(false),
			) {
				Ok(output) => outputs.push(output),
				// do not crash here simply do not retrieve this output
				Err(e) => error!(
					"Failure to get output for commitment {} with error {}",
					commit, e
				),
			};
		}
	}
	// Tuple match instead of nested `if let`s — let-chains are unstable,
	// see https://github.com/rust-lang/rust/issues/53667.
	if let (Some(start_height), Some(end_height)) = (start_height, end_height) {
		let block_output_batch = self.outputs_block_batch_v2(
			start_height,
			end_height,
			include_proof.unwrap_or(false),
			include_merkle_proof.unwrap_or(false),
		)?;
		outputs.extend(block_output_batch);
	}
	Ok(outputs)
}

// allows traversal of utxo set
pub fn get_unspent_outputs(
&self,
start_index: u64,
end_index: Option<u64>,
mut max: u64,
include_proof: Option<bool>,
) -> Result<OutputListing, Error> {
//set a limit here
if max > 10_000 {
max = 10_000;
}
let chain = w(&self.chain)?;
let outputs = chain
.unspent_outputs_by_pmmr_index(start_index, max, end_index)
.context(ErrorKind::NotFound)?;
let out = OutputListing {
last_retrieved_index: outputs.0,
highest_index: outputs.1,
outputs: outputs
.2
.iter()
.map(|x| {
OutputPrintable::from_output(
x,
chain.clone(),
None,
include_proof.unwrap_or(false),
false,
)
})
.collect::<Result<Vec<_>, _>>()
.context(ErrorKind::Internal("chain error".to_owned()))?,
};
Ok(out)
}

fn outputs_by_ids(&self, req: &Request<Body>) -> Result<Vec<Output>, Error> {
let mut commitments: Vec<String> = vec![];

@@ -155,6 +268,42 @@ impl OutputHandler {
})
}

fn outputs_at_height_v2(
&self,
block_height: u64,
commitments: Vec<Commitment>,
include_rproof: bool,
include_merkle_proof: bool,
) -> Result<Vec<OutputPrintable>, Error> {
let header = w(&self.chain)?
.get_header_by_height(block_height)
.map_err(|_| ErrorKind::NotFound)?;

// TODO - possible to compact away blocks we care about
// in the period between accepting the block and refreshing the wallet
let chain = w(&self.chain)?;
let block = chain
.get_block(&header.hash())
.map_err(|_| ErrorKind::NotFound)?;
let outputs = block
.outputs()
.iter()
.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
.map(|output| {
OutputPrintable::from_output(
output,
chain.clone(),
Some(&header),
include_rproof,
include_merkle_proof,
)
})
.collect::<Result<Vec<_>, _>>()
.context(ErrorKind::Internal("cain error".to_owned()))?;

Ok(outputs)
}

// returns outputs for a specified range of blocks
fn outputs_block_batch(&self, req: &Request<Body>) -> Result<Vec<BlockOutputs>, Error> {
let mut commitments: Vec<Commitment> = vec![];
@@ -186,6 +335,38 @@ impl OutputHandler {

Ok(return_vec)
}

// returns outputs for a specified range of blocks
/// Collects printable outputs for every block in `start_height..=end_height`,
/// walking from the newest block down to the oldest. Heights whose blocks
/// cannot be read (e.g. compacted away) are skipped rather than failing.
fn outputs_block_batch_v2(
	&self,
	start_height: u64,
	end_height: u64,
	include_rproof: bool,
	include_merkle_proof: bool,
) -> Result<Vec<OutputPrintable>, Error> {
	// Log under this function's own name (was mislabelled "outputs_block_batch").
	debug!(
		"outputs_block_batch_v2: {}-{}, {}, {}",
		start_height, end_height, include_rproof, include_merkle_proof,
	);

	let mut return_vec: Vec<OutputPrintable> = vec![];
	for height in (start_height..=end_height).rev() {
		// An empty commitment filter selects every output in the block.
		if let Ok(res) =
			self.outputs_at_height_v2(height, vec![], include_rproof, include_merkle_proof)
		{
			// extend() appends in place; appending an empty vec is a no-op,
			// so no explicit emptiness check is needed.
			return_vec.extend(res);
		}
	}

	Ok(return_vec)
}
}

impl Handler for OutputHandler {
@@ -259,6 +440,34 @@ impl KernelHandler {
});
Ok(kernel)
}

/// Locates a kernel by its hex-encoded `excess` commitment, optionally
/// constraining the search to `[min_height, max_height]`.
///
/// Returns `RequestError` for malformed hex or a length other than 33 bytes,
/// and `NotFound` when no matching kernel exists in the range.
pub fn get_kernel_v2(
	&self,
	excess: String,
	min_height: Option<u64>,
	max_height: Option<u64>,
) -> Result<LocatedTxKernel, Error> {
	// `excess` is already owned; pass it through without the redundant clone.
	let excess = util::from_hex(excess)
		.map_err(|_| ErrorKind::RequestError("invalid excess hex".into()))?;
	if excess.len() != 33 {
		return Err(ErrorKind::RequestError("invalid excess length".into()).into());
	}
	let excess = Commitment::from_vec(excess);

	let chain = w(&self.chain)?;
	chain
		.get_kernel_height(&excess, min_height, max_height)
		.map_err(|e| ErrorKind::Internal(format!("{}", e)))?
		.map(|(tx_kernel, height, mmr_index)| LocatedTxKernel {
			tx_kernel,
			height,
			mmr_index,
		})
		// `ok_or_else` replaces the match-on-Option; NotFound when absent.
		.ok_or_else(|| ErrorKind::NotFound.into())
}
}

impl Handler for KernelHandler {
@@ -13,11 +13,13 @@
// limitations under the License.

use super::utils::w;
use crate::p2p;
use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::web::*;
use hyper::{Body, Request, StatusCode};
use std::net::SocketAddr;
use std::sync::Weak;

pub struct PeersAllHandler {
@@ -35,6 +37,17 @@ pub struct PeersConnectedHandler {
pub peers: Weak<p2p::Peers>,
}

impl PeersConnectedHandler {
	/// Returns display info for every peer the node is currently connected to.
	pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
		let peers = w(&self.peers)?;
		let mut infos: Vec<PeerInfoDisplay> = Vec::new();
		for peer in peers.connected_peers().iter() {
			infos.push(peer.info.clone().into());
		}
		Ok(infos)
	}
}

impl Handler for PeersConnectedHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers: Vec<PeerInfoDisplay> = w_fut!(&self.peers)
@@ -54,6 +67,35 @@ pub struct PeerHandler {
pub peers: Weak<p2p::Peers>,
}

impl PeerHandler {
	/// Returns stored data for the peer at `addr`, or for every known peer
	/// when no address is given.
	pub fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error> {
		let peers = w(&self.peers)?;
		match addr {
			Some(addr) => {
				let peer_data = peers.get_peer(PeerAddr(addr)).map_err(|e| {
					let e: Error = ErrorKind::Internal(format!("get peer error: {:?}", e)).into();
					e
				})?;
				Ok(vec![peer_data])
			}
			None => Ok(peers.all_peers()),
		}
	}

	/// Manually bans the peer at `addr`.
	pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
		w(&self.peers)?
			.ban_peer(PeerAddr(addr), ReasonForBan::ManualBan)
			.map_err(|e| ErrorKind::Internal(format!("ban peer error: {:?}", e)).into())
	}

	/// Lifts any ban on the peer at `addr`.
	pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
		w(&self.peers)?
			.unban_peer(PeerAddr(addr))
			.map_err(|e| ErrorKind::Internal(format!("unban peer error: {:?}", e)).into())
	}
}

impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let command = right_path_element!(req);
@@ -101,8 +143,20 @@ impl Handler for PeerHandler {
};

match command {
"ban" => w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan),
"unban" => w_fut!(&self.peers).unban_peer(addr),
"ban" => match w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("ban failed: {:?}", e),
),
},
"unban" => match w_fut!(&self.peers).unban_peer(addr) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("unban failed: {:?}", e),
),
},
_ => return response(StatusCode::BAD_REQUEST, "invalid command"),
};

@@ -16,7 +16,7 @@ use super::utils::w;
use crate::core::core::hash::Hashed;
use crate::core::core::Transaction;
use crate::core::ser::{self, ProtocolVersion};
use crate::pool;
use crate::pool::{self, PoolEntry};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::types::*;
@@ -46,6 +46,50 @@ impl Handler for PoolInfoHandler {
}
}

/// Handler exposing read and write access to the shared transaction pool.
pub struct PoolHandler {
	// Non-owning reference to the pool; upgraded on each call via `w()`.
	pub tx_pool: Weak<RwLock<pool::TransactionPool>>,
}

impl PoolHandler {
	/// Number of transactions currently held in the txpool.
	pub fn get_pool_size(&self) -> Result<usize, Error> {
		let pool = w(&self.tx_pool)?;
		let size = pool.read().total_size();
		Ok(size)
	}

	/// Number of transactions currently held in the stempool.
	pub fn get_stempool_size(&self) -> Result<usize, Error> {
		let pool = w(&self.tx_pool)?;
		let size = pool.read().stempool.size();
		Ok(size)
	}

	/// Snapshot of all unconfirmed txpool entries.
	pub fn get_unconfirmed_transactions(&self) -> Result<Vec<PoolEntry>, Error> {
		// will only read from txpool
		let pool = w(&self.tx_pool)?;
		let entries = pool.read().txpool.entries.clone();
		Ok(entries)
	}

	/// Validates `tx` against the current chain head and adds it to the pool.
	/// The stem flag passed to the pool is the negation of `fluff`
	/// (`fluff` defaults to `false` when `None`).
	pub fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), Error> {
		let pool = w(&self.tx_pool)?;
		let source = pool::TxSource::PushApi;
		info!(
			"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
			tx.hash(),
			tx.inputs().len(),
			tx.outputs().len(),
			tx.kernels().len(),
		);

		// Push to tx pool.
		let mut tx_pool = pool.write();
		let header = tx_pool
			.blockchain
			.chain_head()
			.context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
		tx_pool
			.add_to_pool(source, tx, !fluff.unwrap_or(false), &header)
			.context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
		Ok(())
	}
}
/// Dummy wrapper for the hex-encoded serialized transaction.
#[derive(Serialize, Deserialize)]
struct TxWrapper {
@@ -69,7 +69,7 @@ pub struct StatusHandler {
}

impl StatusHandler {
fn get_status(&self) -> Result<Status, Error> {
pub fn get_status(&self) -> Result<Status, Error> {
let head = w(&self.chain)?
.head()
.map_err(|e| ErrorKind::Internal(format!("can't get head: {}", e)))?;
@@ -113,16 +113,19 @@ fn sync_status_to_api(sync_status: SyncStatus) -> (String, Option<serde_json::Va
"txhashset_download".to_string(),
Some(json!({ "downloaded_size": downloaded_size, "total_size": total_size })),
),
SyncStatus::TxHashsetValidation {
kernels,
kernel_total,
SyncStatus::TxHashsetRangeProofsValidation {
rproofs,
rproof_total,
rproofs_total,
} => (
"txhashset_rangeproofs_validation".to_string(),
Some(json!({ "rproofs": rproofs, "rproofs_total": rproofs_total })),
),
SyncStatus::TxHashsetKernelsValidation {
kernels,
kernels_total,
} => (
"txhashset_validation".to_string(),
Some(
json!({ "kernels": kernels, "kernel_total": kernel_total ,"rproofs": rproofs, "rproof_total": rproof_total }),
),
"txhashset_kernels_validation".to_string(),
Some(json!({ "kernels": kernels, "kernels_total": kernels_total })),
),
SyncStatus::BodySync {
current_height,
@@ -35,6 +35,7 @@ use std::sync::Weak;

// UTXO traversal::
// GET /v1/txhashset/outputs?start_index=1&max=100
// GET /v1/txhashset/heightstopmmr?start_height=1&end_height=1000
//
// Build a merkle proof for a given pos
// GET /v1/txhashset/merkleproof?n=1
@@ -46,7 +47,10 @@ pub struct TxHashSetHandler {
impl TxHashSetHandler {
// gets roots
fn get_roots(&self) -> Result<TxHashSet, Error> {
Ok(TxHashSet::from_head(w(&self.chain)?))
let res = TxHashSet::from_head(w(&self.chain)?).context(ErrorKind::Internal(
"failed to read roots from txhashset".to_owned(),
))?;
Ok(res)
}

// gets last n outputs inserted in to the tree
@@ -68,14 +72,19 @@ impl TxHashSetHandler {
}

// allows traversal of utxo set
fn outputs(&self, start_index: u64, mut max: u64) -> Result<OutputListing, Error> {
fn outputs(
&self,
start_index: u64,
end_index: Option<u64>,
mut max: u64,
) -> Result<OutputListing, Error> {
//set a limit here
if max > 10_000 {
max = 10_000;
}
let chain = w(&self.chain)?;
let outputs = chain
.unspent_outputs_by_insertion_index(start_index, max)
.unspent_outputs_by_pmmr_index(start_index, max, end_index)
.context(ErrorKind::NotFound)?;
let out = OutputListing {
last_retrieved_index: outputs.0,
@@ -85,7 +94,25 @@ impl TxHashSetHandler {
.iter()
.map(|x| OutputPrintable::from_output(x, chain.clone(), None, true, true))
.collect::<Result<Vec<_>, _>>()
.context(ErrorKind::Internal("cain error".to_owned()))?,
.context(ErrorKind::Internal("chain error".to_owned()))?,
};
Ok(out)
}

// allows traversal of utxo set bounded within a block range
/// Maps a block height range to the corresponding PMMR index range.
/// The result carries only the two indices; the `outputs` list is left empty.
pub fn block_height_range_to_pmmr_indices(
	&self,
	start_block_height: u64,
	end_block_height: Option<u64>,
) -> Result<OutputListing, Error> {
	let chain = w(&self.chain)?;
	let (first_index, last_index) = chain
		.block_height_range_to_pmmr_indices(start_block_height, end_block_height)
		.context(ErrorKind::NotFound)?;
	Ok(OutputListing {
		last_retrieved_index: first_index,
		highest_index: last_index,
		outputs: vec![],
	})
}
@@ -121,15 +148,27 @@ impl Handler for TxHashSetHandler {
let params = QueryParams::from(req.uri().query());
let last_n = parse_param_no_err!(params, "n", 10);
let start_index = parse_param_no_err!(params, "start_index", 1);
let end_index = match parse_param_no_err!(params, "end_index", 0) {
0 => None,
i => Some(i),
};
let max = parse_param_no_err!(params, "max", 100);
let id = parse_param_no_err!(params, "id", "".to_owned());
let start_height = parse_param_no_err!(params, "start_height", 1);
let end_height = match parse_param_no_err!(params, "end_height", 0) {
0 => None,
h => Some(h),
};

match right_path_element!(req) {
"roots" => result_to_response(self.get_roots()),
"lastoutputs" => result_to_response(self.get_last_n_output(last_n)),
"lastrangeproofs" => result_to_response(self.get_last_n_rangeproof(last_n)),
"lastkernels" => result_to_response(self.get_last_n_kernel(last_n)),
"outputs" => result_to_response(self.outputs(start_index, max)),
"outputs" => result_to_response(self.outputs(start_index, end_index, max)),
"heightstopmmr" => result_to_response(
self.block_height_range_to_pmmr_indices(start_height, end_height),
),
"merkleproof" => result_to_response(self.get_merkle_proof_for_output(&id)),
_ => response(StatusCode::BAD_REQUEST, ""),
}
@@ -72,3 +72,69 @@ pub fn get_output(
}
Err(ErrorKind::NotFound)?
}

/// Retrieves an output from the chain given a commit id (a tiny bit iteratively)
///
/// Tries the commitment as a Plain output first, then as a Coinbase output,
/// returning the first unspent match as a printable output together with the
/// identifier (feature + commitment) that matched.
///
/// Returns `ErrorKind::Argument` for invalid hex and `ErrorKind::NotFound`
/// when the commitment does not resolve to an unspent output.
pub fn get_output_v2(
	chain: &Weak<chain::Chain>,
	id: &str,
	include_proof: bool,
	include_merkle_proof: bool,
) -> Result<(OutputPrintable, OutputIdentifier), Error> {
	let c = util::from_hex(String::from(id)).context(ErrorKind::Argument(format!(
		"Not a valid commitment: {}",
		id
	)))?;
	let commit = Commitment::from_vec(c);

	// We need the features here to be able to generate the necessary hash
	// to compare against the hash in the output MMR.
	// For now we can just try both (but this probably needs to be part of the api
	// params)
	let outputs = [
		OutputIdentifier::new(OutputFeatures::Plain, &commit),
		OutputIdentifier::new(OutputFeatures::Coinbase, &commit),
	];

	let chain = w(chain)?;

	for x in outputs.iter() {
		let res = chain.is_unspent(x);
		match res {
			// The output is unspent at `output_pos`; fetch the full output.
			Ok(output_pos) => match chain.get_unspent_output_at(output_pos.position) {
				Ok(output) => {
					let mut header = None;
					// The merkle proof requires the block header; only
					// coinbase outputs get one here.
					if include_merkle_proof && output.is_coinbase() {
						header = chain.get_header_by_height(output_pos.height).ok();
					}
					match OutputPrintable::from_output(
						&output,
						chain.clone(),
						header.as_ref(),
						include_proof,
						include_merkle_proof,
					) {
						Ok(output_printable) => return Ok((output_printable, x.clone())),
						// Conversion failure: log and fall through to try the
						// next feature variant.
						Err(e) => {
							trace!(
								"get_output: err: {} for commit: {:?} with feature: {:?}",
								e.to_string(),
								x.commit,
								x.features
							);
						}
					}
				}
				// Unspent per the MMR but unreadable at that position: give up
				// immediately rather than trying the other feature.
				Err(_) => return Err(ErrorKind::NotFound)?,
			},
			// Not unspent under this feature; log and try the next one.
			Err(e) => {
				trace!(
					"get_output: err: {} for commit: {:?} with feature: {:?}",
					e.to_string(),
					x.commit,
					x.features
				);
			}
		}
	}
	// Neither feature variant resolved to an unspent output.
	Err(ErrorKind::NotFound)?
}
@@ -30,7 +30,7 @@ pub struct VersionHandler {
}

impl VersionHandler {
fn get_version(&self) -> Result<Version, Error> {
pub fn get_version(&self) -> Result<Version, Error> {
let head = w(&self.chain)?
.head_header()
.map_err(|e| ErrorKind::Internal(format!("can't get head: {}", e)))?;
@@ -27,20 +27,31 @@ extern crate lazy_static;

#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate log;

#[macro_use]
mod web;
pub mod auth;
pub mod client;
mod foreign;
mod foreign_rpc;
mod handlers;
mod owner;
mod owner_rpc;
mod rest;
mod router;
mod types;

pub use crate::auth::{BasicAuthMiddleware, GRIN_BASIC_REALM};
pub use crate::handlers::start_rest_apis;
pub use crate::auth::{
BasicAuthMiddleware, BasicAuthURIMiddleware, GRIN_BASIC_REALM, GRIN_FOREIGN_BASIC_REALM,
};
pub use crate::foreign::Foreign;
pub use crate::foreign_rpc::ForeignRpc;
pub use crate::handlers::node_apis;
pub use crate::owner::Owner;
pub use crate::owner_rpc::OwnerRpc;
pub use crate::rest::*;
pub use crate::router::*;
pub use crate::types::*;
@@ -0,0 +1,179 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Owner API External Definition

use crate::chain::{Chain, SyncState};
use crate::handlers::chain_api::{ChainCompactHandler, ChainValidationHandler};
use crate::handlers::peers_api::{PeerHandler, PeersConnectedHandler};
use crate::handlers::server_api::StatusHandler;
use crate::p2p::types::PeerInfoDisplay;
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::types::Status;
use std::net::SocketAddr;
use std::sync::Weak;

/// Main interface into all node API functions.
/// Node APIs are split into two seperate blocks of functionality
/// called the ['Owner'](struct.Owner.html) and ['Foreign'](struct.Foreign.html) APIs
///
/// Methods in this API are intended to be 'single use'.
///

pub struct Owner {
	/// Non-owning reference to the blockchain.
	pub chain: Weak<Chain>,
	/// Non-owning reference to the peer set.
	pub peers: Weak<p2p::Peers>,
	/// Non-owning reference to the shared sync state.
	pub sync_state: Weak<SyncState>,
}

impl Owner {
	/// Creates a new owner API instance from non-owning references to the
	/// chain, the peer set and the sync state. All subsequent API calls
	/// operate on this instance.
	pub fn new(chain: Weak<Chain>, peers: Weak<p2p::Peers>, sync_state: Weak<SyncState>) -> Self {
		Owner {
			chain,
			peers,
			sync_state,
		}
	}

	/// Returns various information about the node, the network and the
	/// current sync status as a [`Status`](types/struct.Status.html),
	/// or an [`Error`](struct.Error.html) on failure.
	pub fn get_status(&self) -> Result<Status, Error> {
		StatusHandler {
			chain: self.chain.clone(),
			peers: self.peers.clone(),
			sync_state: self.sync_state.clone(),
		}
		.get_status()
	}

	/// Triggers a validation of the chain state. Returns `Ok(())` when the
	/// validation completes, or an [`Error`](struct.Error.html) otherwise.
	pub fn validate_chain(&self) -> Result<(), Error> {
		ChainValidationHandler {
			chain: self.chain.clone(),
		}
		.validate_chain()
	}

	/// Triggers a compaction of the chain state to regain storage space.
	/// Returns `Ok(())` on success, or an [`Error`](struct.Error.html).
	pub fn compact_chain(&self) -> Result<(), Error> {
		ChainCompactHandler {
			chain: self.chain.clone(),
		}
		.compact_chain()
	}

	/// Retrieves stored peer information. With `Some(addr)` returns only the
	/// peer at that ip:port; with `None` lists all stored peers. Returns a
	/// vector of [`PeerData`](types/struct.PeerData.html) or an
	/// [`Error`](struct.Error.html).
	pub fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error> {
		PeerHandler {
			peers: self.peers.clone(),
		}
		.get_peers(addr)
	}

	/// Lists all currently connected peers as
	/// [`PeerInfoDisplay`](types/struct.PeerInfoDisplay.html) entries,
	/// or returns an [`Error`](struct.Error.html).
	pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
		PeersConnectedHandler {
			peers: self.peers.clone(),
		}
		.get_connected_peers()
	}

	/// Bans the peer at the given ip:port. Returns `Ok(())` on success or an
	/// [`Error`](struct.Error.html).
	pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
		PeerHandler {
			peers: self.peers.clone(),
		}
		.ban_peer(addr)
	}

	/// Unbans the peer at the given ip:port. Returns `Ok(())` on success or
	/// an [`Error`](struct.Error.html).
	pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
		PeerHandler {
			peers: self.peers.clone(),
		}
		.unban_peer(addr)
	}
}
@@ -0,0 +1,430 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! JSON-RPC Stub generation for the Owner API

use crate::owner::Owner;
use crate::p2p::types::PeerInfoDisplay;
use crate::p2p::PeerData;
use crate::rest::ErrorKind;
use crate::types::Status;
use std::net::SocketAddr;

/// Public definition used to generate Node jsonrpc api.
/// * When running `grin` with defaults, the V2 api is available at
/// `localhost:3413/v2/owner`
/// * The endpoint only supports POST operations, with the json-rpc request as the body
/// * Each method returns a `Result` whose `Ok`/`Err` variant is serialized
/// into the json-rpc `result` field, as the examples below show.
#[easy_jsonrpc_mw::rpc]
pub trait OwnerRpc: Sync + Send {
	/**
	Networked version of [Owner::get_status](struct.Node.html#method.get_status).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "get_status",
		"params": [],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": {
				"protocol_version": "2",
				"user_agent": "MW/Grin 2.x.x",
				"connections": "8",
				"tip": {
					"height": 371553,
					"last_block_pushed": "00001d1623db988d7ed10c5b6319360a52f20c89b4710474145806ba0e8455ec",
					"prev_block_to_last": "0000029f51bacee81c49a27b4bc9c6c446e03183867c922890f90bb17108d89f",
					"total_difficulty": 1127628411943045
				},
				"sync_status": "header_sync",
				"sync_info": {
					"current_height": 371553,
					"highest_height": 0
				}
			}
		}
	}
	# "#
	# );
	```
	*/
	fn get_status(&self) -> Result<Status, ErrorKind>;

	/**
	Networked version of [Owner::validate_chain](struct.Node.html#method.validate_chain).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "validate_chain",
		"params": [],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": null
		}
	}
	# "#
	# );
	```
	*/
	fn validate_chain(&self) -> Result<(), ErrorKind>;

	/**
	Networked version of [Owner::compact_chain](struct.Node.html#method.compact_chain).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "compact_chain",
		"params": [],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": null
		}
	}
	# "#
	# );
	```
	*/
	fn compact_chain(&self) -> Result<(), ErrorKind>;

	/**
	Networked version of [Owner::get_peers](struct.Node.html#method.get_peers).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "get_peers",
		"params": ["70.50.33.130:3414"],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": [
				{
					"addr": "70.50.33.130:3414",
					"ban_reason": "None",
					"capabilities": {
						"bits": 15
					},
					"flags": "Defunct",
					"last_banned": 0,
					"last_connected": 1570129317,
					"user_agent": "MW/Grin 2.0.0"
				}
			]
		}
	}
	# "#
	# );
	```
	*/
	fn get_peers(&self, peer_addr: Option<SocketAddr>) -> Result<Vec<PeerData>, ErrorKind>;

	/**
	Networked version of [Owner::get_connected_peers](struct.Node.html#method.get_connected_peers).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "get_connected_peers",
		"params": [],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": [
				{
					"addr": "35.176.195.242:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "47.97.198.21:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "148.251.16.13:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "68.195.18.155:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "52.53.221.15:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 0,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "109.74.202.16:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "121.43.183.180:3414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				},
				{
					"addr": "35.157.247.209:23414",
					"capabilities": {
						"bits": 15
					},
					"direction": "Outbound",
					"height": 374510,
					"total_difficulty": 1133954621205750,
					"user_agent": "MW/Grin 2.0.0",
					"version": 1
				}
			]
		}
	}
	# "#
	# );
	```
	*/
	fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, ErrorKind>;

	/**
	Networked version of [Owner::ban_peer](struct.Node.html#method.ban_peer).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "ban_peer",
		"params": ["70.50.33.130:3414"],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": null
		}
	}
	# "#
	# );
	```
	*/
	fn ban_peer(&self, peer_addr: SocketAddr) -> Result<(), ErrorKind>;

	/**
	Networked version of [Owner::unban_peer](struct.Node.html#method.unban_peer).
	# Json rpc example
	```
	# grin_api::doctest_helper_json_rpc_owner_assert_response!(
	# r#"
	{
		"jsonrpc": "2.0",
		"method": "unban_peer",
		"params": ["70.50.33.130:3414"],
		"id": 1
	}
	# "#
	# ,
	# r#"
	{
		"id": 1,
		"jsonrpc": "2.0",
		"result": {
			"Ok": null
		}
	}
	# "#
	# );
	```
	*/
	fn unban_peer(&self, peer_addr: SocketAddr) -> Result<(), ErrorKind>;
}

impl OwnerRpc for Owner {
	// Each RPC method is a thin adapter over the inherent `Owner` method of
	// the same name. Fully-qualified `Owner::method(self, ...)` syntax is used
	// so the call resolves to the inherent method rather than recursing into
	// this trait method. Errors are flattened to their serializable
	// `ErrorKind` for transport over json-rpc.
	fn get_status(&self) -> Result<Status, ErrorKind> {
		Owner::get_status(self).map_err(|e| e.kind().clone())
	}

	fn validate_chain(&self) -> Result<(), ErrorKind> {
		Owner::validate_chain(self).map_err(|e| e.kind().clone())
	}

	fn compact_chain(&self) -> Result<(), ErrorKind> {
		Owner::compact_chain(self).map_err(|e| e.kind().clone())
	}

	fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, ErrorKind> {
		Owner::get_peers(self, addr).map_err(|e| e.kind().clone())
	}

	fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, ErrorKind> {
		Owner::get_connected_peers(self).map_err(|e| e.kind().clone())
	}

	fn ban_peer(&self, addr: SocketAddr) -> Result<(), ErrorKind> {
		Owner::ban_peer(self, addr).map_err(|e| e.kind().clone())
	}

	fn unban_peer(&self, addr: SocketAddr) -> Result<(), ErrorKind> {
		Owner::unban_peer(self, addr).map_err(|e| e.kind().clone())
	}
}

// Doctest helper invoked by the `OwnerRpc` documentation examples. The body
// is currently a no-op: the real implementation (commented out below) needs a
// node-side test framework equivalent to the wallet's `run_doctest`.
// NOTE(review): the disabled code references `$use_token`, `$blocks_to_mine`,
// `$perform_tx`, `$lock_tx` and `$finalize_tx`, which are not parameters of
// this macro — it was evidently copied from the wallet helper and would need
// adapting before it could be re-enabled.
#[doc(hidden)]
#[macro_export]
macro_rules! doctest_helper_json_rpc_owner_assert_response {
	($request:expr, $expected_response:expr) => {
		// create temporary grin server, run jsonrpc request on node api, delete server, return
		// json response.

		{
			/*use grin_servers::test_framework::framework::run_doctest;
			use grin_util as util;
			use serde_json;
			use serde_json::Value;
			use tempfile::tempdir;
			let dir = tempdir().map_err(|e| format!("{:#?}", e)).unwrap();
			let dir = dir
				.path()
				.to_str()
				.ok_or("Failed to convert tmpdir path to string.".to_owned())
				.unwrap();
			let request_val: Value = serde_json::from_str($request).unwrap();
			let expected_response: Value = serde_json::from_str($expected_response).unwrap();
			let response = run_doctest(
				request_val,
				dir,
				$use_token,
				$blocks_to_mine,
				$perform_tx,
				$lock_tx,
				$finalize_tx,
			)
			.unwrap()
			.unwrap();
			if response != expected_response {
				panic!(
					"(left != right) \nleft: {}\nright: {}",
					serde_json::to_string_pretty(&response).unwrap(),
					serde_json::to_string_pretty(&expected_response).unwrap()
				);
			}*/
		}
	};
}
@@ -18,7 +18,7 @@
//! To use it, just have your service(s) implement the ApiEndpoint trait and
//! register them on a ApiServer.

use crate::router::{Handler, HandlerObj, ResponseFuture, Router};
use crate::router::{Handler, HandlerObj, ResponseFuture, Router, RouterError};
use crate::web::response;
use failure::{Backtrace, Context, Fail, ResultExt};
use futures::sync::oneshot;
@@ -41,7 +41,7 @@ pub struct Error {
inner: Context<ErrorKind>,
}

#[derive(Clone, Eq, PartialEq, Debug, Fail)]
#[derive(Clone, Eq, PartialEq, Debug, Fail, Serialize, Deserialize)]
pub enum ErrorKind {
#[fail(display = "Internal error: {}", _0)]
Internal(String),
@@ -53,6 +53,8 @@ pub enum ErrorKind {
RequestError(String),
#[fail(display = "ResponseError error: {}", _0)]
ResponseError(String),
#[fail(display = "Router error: {}", _0)]
Router(RouterError),
}

impl Fail for Error {
@@ -91,6 +93,14 @@ impl From<Context<ErrorKind>> for Error {
}
}

impl From<RouterError> for Error {
fn from(error: RouterError) -> Error {
Error {
inner: Context::new(ErrorKind::Router(error)),
}
}
}

/// TLS config
#[derive(Clone)]
pub struct TLSConfig {
@@ -247,7 +257,7 @@ impl ApiServer {
// TODO re-enable stop after investigation
//let tx = mem::replace(&mut self.shutdown_sender, None).unwrap();
//tx.send(()).expect("Failed to stop API server");
info!("API server has been stoped");
info!("API server has been stopped");
true
} else {
error!("Can't stop API server, it's not running or doesn't spport stop operation");
@@ -85,7 +85,7 @@ pub trait Handler {
}
}

#[derive(Fail, Debug)]
#[derive(Clone, Fail, Eq, Debug, PartialEq, Serialize, Deserialize)]
pub enum RouterError {
#[fail(display = "Route already exists")]
RouteAlreadyExists,
@@ -116,13 +116,16 @@ pub struct TxHashSet {
}

impl TxHashSet {
pub fn from_head(head: Arc<chain::Chain>) -> TxHashSet {
let roots = head.get_txhashset_roots();
TxHashSet {
output_root_hash: roots.output_root.to_hex(),
range_proof_root_hash: roots.rproof_root.to_hex(),
kernel_root_hash: roots.kernel_root.to_hex(),
}
/// A TxHashSet in the context of the api is simply the collection of PMMR roots.
/// We can obtain these in a lightweight way by reading them from the head of the chain.
/// We will have validated the roots on this header against the roots of the txhashset.
pub fn from_head(chain: Arc<chain::Chain>) -> Result<TxHashSet, chain::Error> {
let header = chain.head_header()?;
Ok(TxHashSet {
output_root_hash: header.output_root.to_hex(),
range_proof_root_hash: header.range_proof_root.to_hex(),
kernel_root_hash: header.kernel_root.to_hex(),
})
}
}

@@ -337,7 +340,7 @@ impl OutputPrintable {
};

let p_vec = util::from_hex(proof_str)
.map_err(|_| ser::Error::HexError(format!("invalud output range_proof")))?;
.map_err(|_| ser::Error::HexError(format!("invalid output range_proof")))?;
let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE];
for i in 0..p_bytes.len() {
p_bytes[i] = p_vec[i];
@@ -471,7 +474,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
spent: spent.unwrap(),
proof: proof,
proof_hash: proof_hash.unwrap(),
block_height: block_height,
block_height: block_height.unwrap(),
merkle_proof: merkle_proof,
mmr_index: mmr_index.unwrap(),
})
@@ -42,6 +42,8 @@ where
ErrorKind::ResponseError(msg) => {
response(StatusCode::INTERNAL_SERVER_ERROR, msg.clone())
}
// place holder
ErrorKind::Router(_) => response(StatusCode::INTERNAL_SERVER_ERROR, ""),
},
}
}
@@ -1,15 +1,16 @@
[package]
name = "grin_chain"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
workspace = ".."
edition = "2018"

[dependencies]
bit-vec = "0.6"
bitflags = "1"
byteorder = "1"
failure = "0.1"
@@ -23,10 +24,10 @@ lru-cache = "0.1"
lazy_static = "1"
regex = "1"

grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_keychain = { path = "../keychain", version = "2.1.0-beta.3" }
grin_store = { path = "../store", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "3.0.0" }
grin_keychain = { path = "../keychain", version = "3.0.0" }
grin_store = { path = "../store", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }

[dev-dependencies]
env_logger = "0.5"
@@ -30,8 +30,7 @@ use crate::store;
use crate::txhashset;
use crate::txhashset::{PMMRHandle, TxHashSet};
use crate::types::{
BlockStatus, ChainAdapter, NoStatus, Options, OutputMMRPosition, Tip, TxHashSetRoots,
TxHashsetWriteStatus,
BlockStatus, ChainAdapter, NoStatus, Options, OutputMMRPosition, Tip, TxHashsetWriteStatus,
};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::RwLock;
@@ -201,7 +200,6 @@ impl Chain {
&mut sync_pmmr,
&mut txhashset,
)?;
Chain::log_heads(&store)?;

let chain = Chain {
db_root,
@@ -226,6 +224,8 @@ impl Chain {
chain.rebuild_height_for_pos()?;
}

chain.log_heads()?;

Ok(chain)
}

@@ -244,46 +244,22 @@ impl Chain {
self.store.clone()
}

fn log_heads(store: &store::ChainStore) -> Result<(), Error> {
let head = store.head()?;
debug!(
"init: head: {} @ {} [{}]",
head.total_difficulty.to_num(),
head.height,
head.last_block_h,
);

let header_head = store.header_head()?;
debug!(
"init: header_head: {} @ {} [{}]",
header_head.total_difficulty.to_num(),
header_head.height,
header_head.last_block_h,
);

let sync_head = store.get_sync_head()?;
debug!(
"init: sync_head: {} @ {} [{}]",
sync_head.total_difficulty.to_num(),
sync_head.height,
sync_head.last_block_h,
);

/// Log the three chain heads (body head, header head, sync head) at debug level.
fn log_heads(&self) -> Result<(), Error> {
	// Collect the heads up front, then log them in a fixed order.
	let heads = [
		("head", self.head()?),
		("header_head", self.header_head()?),
		("sync_head", self.get_sync_head()?),
	];
	for (name, head) in &heads {
		debug!(
			"{}: {} @ {} [{}]",
			name,
			head.total_difficulty.to_num(),
			head.height,
			head.hash(),
		);
	}
	Ok(())
}

/// Reset sync_head to current header_head.
/// We do this when we first transition to header_sync to ensure we extend
/// the "sync" header MMR from a known consistent state and to ensure we track
/// the header chain correctly at the fork point.
pub fn reset_sync_head(&self) -> Result<Tip, Error> {
	// Perform the reset and read back the resulting head within a single batch.
	let batch = self.store.batch()?;
	batch.reset_sync_head()?;
	let new_head = batch.get_sync_head()?;
	batch.commit()?;
	Ok(new_head)
}

/// Processes a single block, then checks for orphans, processing
/// those as well if they're found
pub fn process_block(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
@@ -464,7 +440,7 @@ impl Chain {
}

/// Check for orphans, once a block is successfully added
pub fn check_orphans(&self, mut height: u64) {
fn check_orphans(&self, mut height: u64) {
let initial_height = height;

// Is there an orphan in our orphans that we can now process?
@@ -529,6 +505,15 @@ impl Chain {
txhashset.is_unspent(output_ref)
}

/// Retrieve an unspent output by its position in the output PMMR.
pub fn get_unspent_output_at(&self, pos: u64) -> Result<Output, Error> {
	// Take read locks on the header PMMR and the txhashset so the UTXO view
	// is built from a consistent snapshot of both.
	let handle = self.header_pmmr.read();
	let ths = self.txhashset.read();
	txhashset::utxo_view(&handle, &ths, |utxo| utxo.get_unspent_output_at(pos))
}

/// Validate the tx against the current UTXO set.
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
let header_pmmr = self.header_pmmr.read();
@@ -613,22 +598,24 @@ impl Chain {
Ok((prev_root, extension.roots()?, extension.sizes()))
})?;

// Set the prev_root on the header.
b.header.prev_root = prev_root;

// Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root;
b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root;

// Set the output and kernel MMR sizes.
// Note: We need to do this *before* calculating the roots as the output_root
// depends on the output_mmr_size
{
// Carefully destructure these correctly...
let (output_mmr_size, _, kernel_mmr_size) = sizes;
b.header.output_mmr_size = output_mmr_size;
b.header.kernel_mmr_size = kernel_mmr_size;
}

// Set the prev_root on the header.
b.header.prev_root = prev_root;

// Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root(&b.header);
b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root;

Ok(())
}

@@ -657,11 +644,6 @@ impl Chain {
txhashset.merkle_proof(commit)
}

/// Returns current txhashset roots.
pub fn get_txhashset_roots(&self) -> TxHashSetRoots {
self.txhashset.read().roots()
}

/// Provides a reading view into the current kernel state.
pub fn kernel_data_read(&self) -> Result<File, Error> {
let txhashset = self.txhashset.read();
@@ -694,24 +676,17 @@ impl Chain {
// The fast sync client does *not* have the necessary data
// to rewind after receiving the txhashset zip.
let header = self.get_block_header(&h)?;
{
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| {
pipe::rewind_and_apply_fork(&header, ext)?;
let ref mut extension = ext.extension;
extension.snapshot()?;
Ok(())
})?;
}

// prepares the zip and return the corresponding Read
let txhashset_reader = txhashset::zip_read(self.db_root.clone(), &header)?;
Ok((
header.output_mmr_size,
header.kernel_mmr_size,
txhashset_reader,
))
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| {
pipe::rewind_and_apply_fork(&header, ext)?;
ext.extension.snapshot()?;

// prepare the zip
txhashset::zip_read(self.db_root.clone(), &header)
.map(|file| (header.output_mmr_size, header.kernel_mmr_size, file))
})
}

/// To support the ability to download the txhashset from multiple peers in parallel,
@@ -773,9 +748,8 @@ impl Chain {
pub fn rebuild_sync_mmr(&self, head: &Tip) -> Result<(), Error> {
let mut sync_pmmr = self.sync_pmmr.write();
let mut batch = self.store.batch()?;
let sync_head = batch.get_sync_head()?;
let header = batch.get_block_header(&head.hash())?;
txhashset::header_extending(&mut sync_pmmr, &sync_head, &mut batch, |extension| {
txhashset::header_extending(&mut sync_pmmr, &mut batch, |extension| {
pipe::rewind_and_apply_header_fork(&header, extension)?;
Ok(())
})?;
@@ -1030,9 +1004,6 @@ impl Chain {

debug!("txhashset_write: replaced our txhashset with the new one");

// Check for any orphan blocks and process them based on the new chain state.
self.check_orphans(header.height + 1);

status.on_done();

Ok(false)
@@ -1170,15 +1141,20 @@ impl Chain {
}

/// outputs by insertion index
pub fn unspent_outputs_by_insertion_index(
pub fn unspent_outputs_by_pmmr_index(
&self,
start_index: u64,
max: u64,
max_count: u64,
max_pmmr_index: Option<u64>,
) -> Result<(u64, u64, Vec<Output>), Error> {
let txhashset = self.txhashset.read();
let max_index = txhashset.highest_output_insertion_index();
let outputs = txhashset.outputs_by_insertion_index(start_index, max);
let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
let last_index = match max_pmmr_index {
Some(i) => i,
None => txhashset.highest_output_insertion_index(),
};
let outputs = txhashset.outputs_by_pmmr_index(start_index, max_count, max_pmmr_index);
let rangeproofs =
txhashset.rangeproofs_by_pmmr_index(start_index, max_count, max_pmmr_index);
if outputs.0 != rangeproofs.0 || outputs.1.len() != rangeproofs.1.len() {
return Err(ErrorKind::TxHashSetErr(String::from(
"Output and rangeproof sets don't match",
@@ -1193,7 +1169,27 @@ impl Chain {
proof: y,
});
}
Ok((outputs.0, max_index, output_vec))
Ok((outputs.0, last_index, output_vec))
}

/// Convert a block height range into the corresponding range of output PMMR
/// indices as (start_pmmr_index, end_pmmr_index).
/// `end_block_height` defaults to the current chain head height when `None`.
/// Intended to bound `unspent_outputs_by_pmmr_index` to a range of blocks.
pub fn block_height_range_to_pmmr_indices(
&self,
start_block_height: u64,
end_block_height: Option<u64>,
) -> Result<(u64, u64), Error> {
let end_block_height = match end_block_height {
Some(h) => h,
None => self.head_header()?.height,
};
// The outputs in the range begin one past the output MMR size recorded by the
// header *before* the start height; they end at the end header's output MMR size.
// saturating_sub handles start_block_height == 0 without underflow.
let prev_to_start_header =
self.get_header_by_height(start_block_height.saturating_sub(1))?;
let end_header = self.get_header_by_height(end_block_height)?;
Ok((
prev_to_start_header.output_mmr_size + 1,
end_header.output_mmr_size,
))
}

/// Orphans pool size
@@ -1215,11 +1211,25 @@ impl Chain {
.map_err(|e| ErrorKind::StoreErr(e, "chain tail".to_owned()).into())
}

/// Tip (head) of the header chain if read lock can be acquired reasonably quickly.
/// Used by the TUI when updating stats to avoid locking the TUI up.
pub fn try_header_head(&self, timeout: Duration) -> Result<Option<Tip>, Error> {
	// If the lock cannot be acquired within the timeout we simply report "no head"
	// rather than blocking the caller.
	match self.header_pmmr.try_read_for(timeout) {
		Some(pmmr) => self.read_header_head(&pmmr).map(Some),
		None => Ok(None),
	}
}

/// Tip (head) of the header chain.
pub fn header_head(&self) -> Result<Tip, Error> {
self.store
.header_head()
.map_err(|e| ErrorKind::StoreErr(e, "chain header head".to_owned()).into())
self.read_header_head(&self.header_pmmr.read())
}

/// Read the header chain head from the provided header PMMR handle.
fn read_header_head(&self, pmmr: &txhashset::PMMRHandle<BlockHeader>) -> Result<Tip, Error> {
	// The MMR itself identifies the most recently applied header.
	let head_hash = pmmr.head_hash()?;
	let head_header = self.store.get_block_header(&head_hash)?;
	Ok(Tip::from_header(&head_header))
}

/// Block header for the chain head
@@ -1376,7 +1386,6 @@ impl Chain {

Ok(Some((kernel, header.height, mmr_index)))
}

/// Gets the block header in which a given kernel mmr index appears in the txhashset.
pub fn get_header_for_kernel_index(
&self,
@@ -1429,9 +1438,9 @@ impl Chain {
/// Get the tip of the current "sync" header chain.
/// This may be significantly different to current header chain.
pub fn get_sync_head(&self) -> Result<Tip, Error> {
self.store
.get_sync_head()
.map_err(|e| ErrorKind::StoreErr(e, "chain get sync head".to_owned()).into())
let hash = self.sync_pmmr.read().head_hash()?;
let header = self.store.get_block_header(&hash)?;
Ok(Tip::from_header(&header))
}

/// Builds an iterator on blocks starting from the current chain head and
@@ -1460,6 +1469,22 @@ fn setup_head(
) -> Result<(), Error> {
let mut batch = store.batch()?;

// Apply the genesis header to header and sync MMRs to ensure they are non-empty.
// We read header_head and sync_head directly from the MMR and assume they are non-empty.
{
if header_pmmr.last_pos == 0 {
txhashset::header_extending(header_pmmr, &mut batch, |extension| {
extension.apply_header(&genesis.header)
})?;
}

if sync_pmmr.last_pos == 0 {
txhashset::header_extending(sync_pmmr, &mut batch, |extension| {
extension.apply_header(&genesis.header)
})?;
}
}

// check if we have a head in store, otherwise the genesis block is it
let head_res = batch.head();
let mut head: Tip;
@@ -1534,13 +1559,7 @@ fn setup_head(
// We will update this later once we have the correct header_root.
batch.save_block_header(&genesis.header)?;
batch.save_block(&genesis)?;

let tip = Tip::from_header(&genesis.header);

// Save these ahead of time as we need head and header_head to be initialized
// with *something* when creating a txhashset extension below.
batch.save_body_head(&tip)?;
batch.save_header_head(&tip)?;
batch.save_body_head(&Tip::from_header(&genesis.header))?;

if genesis.kernels().len() > 0 {
let (utxo_sum, kernel_sum) = (sums, genesis as &dyn Committed).verify_kernel_sums(
@@ -1552,14 +1571,6 @@ fn setup_head(
kernel_sum,
};
}
txhashset::header_extending(header_pmmr, &tip, &mut batch, |extension| {
extension.apply_header(&genesis.header)?;
Ok(())
})?;
txhashset::header_extending(sync_pmmr, &tip, &mut batch, |extension| {
extension.apply_header(&genesis.header)?;
Ok(())
})?;
txhashset::extending(header_pmmr, txhashset, &mut batch, |ext| {
let ref mut extension = ext.extension;
extension.apply_block(&genesis)?;
@@ -1575,29 +1586,6 @@ fn setup_head(
}
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
};

// Check we have the header corresponding to the header_head.
// If not then something is corrupted and we should reset our header_head.
// Either way we want to reset sync_head to match header_head.
let head = batch.head()?;
let header_head = batch.header_head()?;
if batch.get_block_header(&header_head.last_block_h).is_ok() {
// Reset sync_head to be consistent with current header_head.
batch.reset_sync_head()?;
} else {
// Reset both header_head and sync_head to be consistent with current head.
warn!(
"setup_head: header missing for {}, {}, resetting header_head and sync_head to head: {}, {}",
header_head.last_block_h,
header_head.height,
head.last_block_h,
head.height,
);
batch.reset_header_head()?;
batch.reset_sync_head()?;
}

batch.commit()?;

Ok(())
}
@@ -25,8 +25,6 @@ use crate::store;
use crate::txhashset;
use crate::types::{Options, Tip};
use crate::util::RwLock;
use chrono::prelude::Utc;
use chrono::Duration;
use grin_store;
use std::sync::Arc;

@@ -56,16 +54,19 @@ fn check_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), E
}

// Validate only the proof of work in a block header.
// Used to cheaply validate orphans in process_block before adding them to OrphanBlockPool.
// Used to cheaply validate pow before checking if orphan or continuing block validation.
fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
if ctx.opts.contains(Options::SKIP_POW) {
// Some of our tests require this check to be skipped (we should revisit this).
return Ok(());
}
if !header.pow.is_primary() && !header.pow.is_secondary() {
return Err(ErrorKind::LowEdgebits.into());
}
let edge_bits = header.pow.edge_bits();
if !(ctx.pow_verifier)(header).is_ok() {
error!(
"pipe: error validating header with cuckoo edge_bits {}",
edge_bits
header.pow.edge_bits(),
);
return Err(ErrorKind::InvalidPow.into());
}
@@ -88,20 +89,22 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
// Check if we have already processed this block previously.
check_known(&b.header, ctx)?;

let head = ctx.batch.head()?;
// Quick pow validation. No point proceeding if this is invalid.
// We want to do this before we add the block to the orphan pool so we
// want to do this now and not later during header validation.
validate_pow_only(&b.header, ctx)?;

let is_next = b.header.prev_hash == head.last_block_h;
let head = ctx.batch.head()?;
let prev = prev_header_store(&b.header, &mut ctx.batch)?;

// Block is an orphan if we do not know about the previous full block.
// Skip this check if we have just processed the previous block
// or the full txhashset state (fast sync) at the previous block height.
let prev = prev_header_store(&b.header, &mut ctx.batch)?;
if !is_next && !ctx.batch.block_exists(&prev.hash())? {
// Validate the proof of work of the orphan block to prevent adding
// invalid blocks to OrphanBlockPool.
validate_pow_only(&b.header, ctx)?;

return Err(ErrorKind::Orphan.into());
{
let is_next = b.header.prev_hash == head.last_block_h;
if !is_next && !ctx.batch.block_exists(&prev.hash())? {
return Err(ErrorKind::Orphan.into());
}
}

// Process the header for the block.
@@ -185,7 +188,12 @@ pub fn sync_block_headers(
// Check if we know about all these headers. If so we can accept them quickly.
// If they *do not* increase total work on the sync chain we are done.
// If they *do* increase total work then we should process them to update sync_head.
let sync_head = ctx.batch.get_sync_head()?;
let sync_head = {
let hash = ctx.header_pmmr.head_hash()?;
let header = ctx.batch.get_block_header(&hash)?;
Tip::from_header(&header)
};

if let Ok(existing) = ctx.batch.get_block_header(&last_header.hash()) {
if !has_more_work(&existing, &sync_head) {
return Ok(());
@@ -199,16 +207,12 @@ pub fn sync_block_headers(
add_block_header(header, &ctx.batch)?;
}

// Now apply this entire chunk of headers to the sync MMR.
txhashset::header_extending(&mut ctx.header_pmmr, &sync_head, &mut ctx.batch, |ext| {
// Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific).
txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext| {
rewind_and_apply_header_fork(&last_header, ext)?;
Ok(())
})?;

if has_more_work(&last_header, &sync_head) {
update_sync_head(&Tip::from_header(&last_header), &mut ctx.batch)?;
}

Ok(())
}

@@ -229,14 +233,19 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) ->
// If it does not increase total_difficulty beyond our current header_head
// then we can (re)accept this header and process the full block (or request it).
// This header is on a fork and we should still accept it as the fork may eventually win.
let header_head = ctx.batch.header_head()?;
let header_head = {
let hash = ctx.header_pmmr.head_hash()?;
let header = ctx.batch.get_block_header(&hash)?;
Tip::from_header(&header)
};

if let Ok(existing) = ctx.batch.get_block_header(&header.hash()) {
if !has_more_work(&existing, &header_head) {
return Ok(());
}
}

txhashset::header_extending(&mut ctx.header_pmmr, &header_head, &mut ctx.batch, |ext| {
txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext| {
rewind_and_apply_header_fork(&prev_header, ext)?;
ext.validate_root(header)?;
ext.apply_header(header)?;
@@ -249,15 +258,6 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) ->
validate_header(header, ctx)?;
add_block_header(header, &ctx.batch)?;

// Update header_head independently of chain head (full blocks).
// If/when we process the corresponding full block we will update the
// chain head to match. This allows our header chain to extend safely beyond
// the full chain in a fork scenario without needing excessive rewinds to handle
// the temporarily divergent chains.
if has_more_work(&header, &header_head) {
update_header_head(&Tip::from_header(&header), &mut ctx.batch)?;
}

Ok(())
}

@@ -314,44 +314,19 @@ fn prev_header_store(
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
// check version, enforces scheduled hard fork
if !consensus::valid_header_version(header.height, header.version) {
error!(
"Invalid block header version received ({:?}), maybe update Grin?",
header.version
);
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
}

if header.timestamp > Utc::now() + Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64)) {
// refuse blocks more than 12 blocks intervals in future (as in bitcoin)
// TODO add warning in p2p code if local time is too different from peers
return Err(ErrorKind::InvalidBlockTime.into());
}

if !ctx.opts.contains(Options::SKIP_POW) {
if !header.pow.is_primary() && !header.pow.is_secondary() {
return Err(ErrorKind::LowEdgebits.into());
}
let edge_bits = header.pow.edge_bits();
if !(ctx.pow_verifier)(header).is_ok() {
error!(
"pipe: error validating header with cuckoo edge_bits {}",
edge_bits
);
return Err(ErrorKind::InvalidPow.into());
}
}

// First I/O cost, delayed as late as possible.
let prev = prev_header_store(header, &mut ctx.batch)?;

// make sure this header has a height exactly one higher than the previous
// header
// This header height must increase the height from the previous header by exactly 1.
if header.height != prev.height + 1 {
return Err(ErrorKind::InvalidBlockHeight.into());
}

// This header must have a valid header version for its height.
if !consensus::valid_header_version(header.height, header.version) {
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
}

if header.timestamp <= prev.timestamp {
// prevent time warp attacks and some timestamp manipulations by forcing strict
// time progression
@@ -365,6 +340,10 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(
// check the pow hash shows a difficulty at least as large
// as the target difficulty
if !ctx.opts.contains(Options::SKIP_POW) {
// Quick check of this header in isolation. No point proceeding if this fails.
// We can do this without needing to iterate over previous headers.
validate_pow_only(header, ctx)?;

if header.total_difficulty() <= prev.total_difficulty() {
return Err(ErrorKind::DifficultyTooLow.into());
}
@@ -501,30 +480,6 @@ fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
header.total_difficulty() > head.total_difficulty
}

/// Persist the new sync head so we can keep syncing from where we left off.
fn update_sync_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
	if let Err(e) = batch.save_sync_head(head) {
		return Err(ErrorKind::StoreErr(e, "pipe save sync head".to_owned()).into());
	}
	debug!(
		"sync_head updated to {} at {}",
		head.last_block_h, head.height
	);
	Ok(())
}

/// Persist the new header_head after the header chain has been extended.
fn update_header_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
	if let Err(e) = batch.save_header_head(head) {
		return Err(ErrorKind::StoreErr(e, "pipe save header head".to_owned()).into());
	}
	debug!(
		"header_head updated to {} at {}",
		head.last_block_h, head.height
	);
	Ok(())
}

/// Rewind the header chain and reapply headers on a fork.
pub fn rewind_and_apply_header_fork(
header: &BlockHeader,
@@ -32,8 +32,6 @@ const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
const BLOCK_PREFIX: u8 = 'b' as u8;
const HEAD_PREFIX: u8 = 'H' as u8;
const TAIL_PREFIX: u8 = 'T' as u8;
const HEADER_HEAD_PREFIX: u8 = 'I' as u8;
const SYNC_HEAD_PREFIX: u8 = 's' as u8;
const COMMIT_POS_PREFIX: u8 = 'c' as u8;
const COMMIT_POS_HGT_PREFIX: u8 = 'p' as u8;
const BLOCK_INPUT_BITMAP_PREFIX: u8 = 'B' as u8;
@@ -79,20 +77,6 @@ impl ChainStore {
self.get_block_header(&self.head()?.last_block_h)
}

/// Head of the header chain (not the same thing as head_header).
/// Loads the `Tip` stored under `HEADER_HEAD_PREFIX`; a missing entry is mapped
/// to a not-found error (see `option_to_not_found`).
pub fn header_head(&self) -> Result<Tip, Error> {
option_to_not_found(self.db.get_ser(&vec![HEADER_HEAD_PREFIX]), || {
"HEADER_HEAD".to_owned()
})
}

/// The "sync" head.
/// Loads the `Tip` stored under `SYNC_HEAD_PREFIX`; a missing entry is mapped
/// to a not-found error (see `option_to_not_found`).
pub fn get_sync_head(&self) -> Result<Tip, Error> {
option_to_not_found(self.db.get_ser(&vec![SYNC_HEAD_PREFIX]), || {
"SYNC_HEAD".to_owned()
})
}

/// Get full block.
pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
option_to_not_found(
@@ -198,20 +182,6 @@ impl<'a> Batch<'a> {
self.get_block_header(&self.head()?.last_block_h)
}

/// Head of the header chain (not the same thing as head_header).
/// Batch-scoped variant: reads the `Tip` under `HEADER_HEAD_PREFIX` via this
/// batch's db handle; missing entry maps to a not-found error.
pub fn header_head(&self) -> Result<Tip, Error> {
option_to_not_found(self.db.get_ser(&vec![HEADER_HEAD_PREFIX]), || {
"HEADER_HEAD".to_owned()
})
}

/// Get "sync" head.
/// Batch-scoped variant: reads the `Tip` under `SYNC_HEAD_PREFIX` via this
/// batch's db handle; missing entry maps to a not-found error.
pub fn get_sync_head(&self) -> Result<Tip, Error> {
option_to_not_found(self.db.get_ser(&vec![SYNC_HEAD_PREFIX]), || {
"SYNC_HEAD".to_owned()
})
}

/// Save body head to db.
pub fn save_body_head(&self, t: &Tip) -> Result<(), Error> {
self.db.put_ser(&vec![HEAD_PREFIX], t)
@@ -222,28 +192,6 @@ impl<'a> Batch<'a> {
self.db.put_ser(&vec![TAIL_PREFIX], t)
}

/// Save header_head to db.
/// Serializes the `Tip` under the single-byte `HEADER_HEAD_PREFIX` key.
pub fn save_header_head(&self, t: &Tip) -> Result<(), Error> {
self.db.put_ser(&vec![HEADER_HEAD_PREFIX], t)
}

/// Save "sync" head to db.
/// Serializes the `Tip` under the single-byte `SYNC_HEAD_PREFIX` key.
pub fn save_sync_head(&self, t: &Tip) -> Result<(), Error> {
self.db.put_ser(&vec![SYNC_HEAD_PREFIX], t)
}

/// Reset sync_head to the current head of the header chain.
/// Reads the current header_head and overwrites sync_head with it.
pub fn reset_sync_head(&self) -> Result<(), Error> {
let head = self.header_head()?;
self.save_sync_head(&head)
}

/// Reset header_head to the current head of the body chain.
/// Reads the current (full block) head and overwrites header_head with it.
pub fn reset_header_head(&self) -> Result<(), Error> {
let tip = self.head()?;
self.save_header_head(&tip)
}

/// get block
pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
option_to_not_found(
@@ -15,10 +15,12 @@
//! Utility structs to handle the 3 hashtrees (output, range proof,
//! kernel) more conveniently and transactionally.

mod bitmap_accumulator;
mod rewindable_kernel_view;
mod txhashset;
mod utxo_view;

pub use self::bitmap_accumulator::*;
pub use self::rewindable_kernel_view::*;
pub use self::txhashset::*;
pub use self::utxo_view::*;
@@ -0,0 +1,239 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::convert::TryFrom;
use std::time::Instant;

use bit_vec::BitVec;
use croaring::Bitmap;

use crate::core::core::hash::{DefaultHashable, Hash};
use crate::core::core::pmmr::{self, ReadonlyPMMR, VecBackend, PMMR};
use crate::core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use crate::error::{Error, ErrorKind};

/// The "bitmap accumulator" allows us to commit to a specific bitmap by splitting it into
/// fragments and inserting these fragments into an MMR to produce an overall root hash.
/// Leaves in the MMR are fragments of the bitmap consisting of 1024 contiguous bits
/// from the overall bitmap. The first (leftmost) leaf in the MMR represents the first 1024 bits
/// of the bitmap, the next leaf is the next 1024 bits of the bitmap etc.
///
/// Flipping a single bit does not require the full bitmap to be rehashed, only the path from the
/// relevant leaf up to its associated peak.
///
/// Flipping multiple bits *within* a single chunk is no more expensive than flipping a single bit
/// as a leaf node in the MMR represents a sequence of 1024 bits. Flipping multiple bits located
/// close together is a relatively cheap operation with minimal rehashing required to update the
/// relevant peaks and the overall MMR root.
///
/// It is also possible to generate Merkle proofs for these 1024 bit fragments, proving
/// both inclusion and location in the overall "accumulator" MMR. We plan to take advantage of
/// this during fast sync, allowing for validation of partial data.
///
#[derive(Clone)]
pub struct BitmapAccumulator {
/// Backing MMR over `BitmapChunk` leaves. Constructed hash-only (see `new`),
/// so only hashes are retained, not the chunk data itself.
backend: VecBackend<BitmapChunk>,
}

impl BitmapAccumulator {
/// Create a new empty bitmap accumulator.
pub fn new() -> BitmapAccumulator {
BitmapAccumulator {
backend: VecBackend::new_hash_only(),
}
}

/// Initialize a bitmap accumulator given the provided idx iterator.
/// Indices at or beyond `size` (in bits) are ignored.
pub fn init<T: IntoIterator<Item = u64>>(&mut self, idx: T, size: u64) -> Result<(), Error> {
self.apply_from(idx, 0, size)
}

/// Find the start of the first "chunk" of 1024 bits from the provided idx.
/// Zero the last 10 bits to round down to multiple of 1024.
pub fn chunk_start_idx(idx: u64) -> u64 {
idx & !0x3ff
}

/// The first 1024 belong to chunk 0, the next 1024 to chunk 1 etc.
fn chunk_idx(idx: u64) -> u64 {
idx / 1024
}

/// Apply the provided idx iterator to our bitmap accumulator.
/// We start at the chunk containing from_idx and rebuild chunks as necessary
/// for the bitmap, limiting it to size (in bits).
/// If from_idx is 1023 and size is 1024 then we rebuild a single chunk.
/// NOTE(review): assumes `idx` yields indices in ascending order — out-of-order
/// indices would land in already-appended chunks; confirm with callers.
fn apply_from<T>(&mut self, idx: T, from_idx: u64, size: u64) -> Result<(), Error>
where
T: IntoIterator<Item = u64>,
{
let now = Instant::now();

// Find the (1024 bit chunk) chunk_idx for the (individual bit) from_idx.
let from_chunk_idx = BitmapAccumulator::chunk_idx(from_idx);
let mut chunk_idx = from_chunk_idx;

let mut chunk = BitmapChunk::new();

// Drop any indices beyond `size` before chunking.
let mut idx_iter = idx.into_iter().filter(|&x| x < size).peekable();
while let Some(x) = idx_iter.peek() {
if *x < chunk_idx * 1024 {
// skip until we reach our first chunk
idx_iter.next();
} else if *x < (chunk_idx + 1) * 1024 {
let idx = idx_iter.next().expect("next after peek");
chunk.set(idx % 1024, true);
} else {
// Next index falls beyond the current chunk: flush the chunk (possibly
// all zeroes) and advance one chunk at a time until we reach it.
self.append_chunk(chunk)?;
chunk_idx += 1;
chunk = BitmapChunk::new();
}
}
// Only append the final (partial) chunk if it has at least one bit set.
if chunk.any() {
self.append_chunk(chunk)?;
}
debug!(
"applied {} chunks from idx {} to idx {} ({}ms)",
1 + chunk_idx - from_chunk_idx,
from_chunk_idx,
chunk_idx,
now.elapsed().as_millis(),
);
Ok(())
}

/// Apply updates to the bitmap accumulator given an iterator of invalidated idx and
/// an iterator of idx to be set to true.
/// We determine the existing chunks to be rebuilt given the invalidated idx.
/// We then rebuild given idx, extending the accumulator with new chunk(s) as necessary.
/// Resulting bitmap accumulator will contain sufficient bitmap chunks to cover size.
/// If size is 1 then we will have a single chunk.
/// If size is 1023 then we will have a single chunk (bits 0 to 1023 inclusive).
/// If the size is 1024 then we will have two chunks.
/// If `invalidated_idx` is empty this is a no-op.
pub fn apply<T, U>(&mut self, invalidated_idx: T, idx: U, size: u64) -> Result<(), Error>
where
T: IntoIterator<Item = u64>,
U: IntoIterator<Item = u64>,
{
// Determine the earliest chunk by looking at the min invalidated idx (assume sorted).
// Rewind prior to this and reapply new_idx.
// Note: We rebuild everything after rewind point but much of the bitmap may be
// unchanged. This can be further optimized by only rebuilding necessary chunks and
// rehashing.
if let Some(from_idx) = invalidated_idx.into_iter().next() {
self.rewind_prior(from_idx)?;
self.pad_left(from_idx)?;
self.apply_from(idx, from_idx, size)?;
}

Ok(())
}

/// Given the provided (bit) idx rewind the bitmap accumulator to the end of the
/// previous chunk ready for the updated chunk to be appended.
fn rewind_prior(&mut self, from_idx: u64) -> Result<(), Error> {
let chunk_idx = BitmapAccumulator::chunk_idx(from_idx);
let last_pos = self.backend.size();
let mut pmmr = PMMR::at(&mut self.backend, last_pos);
// Rewind to just before the MMR position of the leaf for this chunk
// (leaf numbering is 1-based via insertion_to_pmmr_index).
let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1);
let rewind_pos = chunk_pos.saturating_sub(1);
pmmr.rewind(rewind_pos, &Bitmap::create())
.map_err(|e| ErrorKind::Other(e))?;
Ok(())
}

/// Make sure we append empty chunks to fill in any gap before we append the chunk
/// we actually care about. This effectively pads the bitmap with 1024 chunks of 0s
/// as necessary to put the new chunk at the correct place.
fn pad_left(&mut self, from_idx: u64) -> Result<(), Error> {
let chunk_idx = BitmapAccumulator::chunk_idx(from_idx);
// Current number of chunks == number of leaves in the backing MMR.
let current_chunk_idx = pmmr::n_leaves(self.backend.size());
for _ in current_chunk_idx..chunk_idx {
self.append_chunk(BitmapChunk::new())?;
}
Ok(())
}

/// Append a new chunk to the BitmapAccumulator.
/// Append parent hashes (if any) as necessary to build associated peak.
/// Returns the new MMR position (see `PMMR::push`).
pub fn append_chunk(&mut self, chunk: BitmapChunk) -> Result<u64, Error> {
let last_pos = self.backend.size();
PMMR::at(&mut self.backend, last_pos)
.push(&chunk)
.map_err(|e| ErrorKind::Other(e).into())
}

/// The root hash of the bitmap accumulator MMR.
pub fn root(&self) -> Hash {
ReadonlyPMMR::at(&self.backend, self.backend.size()).root()
}
}

/// A bitmap "chunk" representing 1024 contiguous bits of the overall bitmap.
/// The first 1024 bits belong in one chunk. The next 1024 bits in the next chunk, etc.
/// Newtype over a `BitVec`; created with 1024 bits, all false (see `BitmapChunk::new`).
#[derive(Clone, Debug)]
pub struct BitmapChunk(BitVec);

impl BitmapChunk {
	// A chunk always covers 1024 bits of the overall bitmap.
	const LEN_BITS: usize = 1024;
	const LEN_BYTES: usize = Self::LEN_BITS / 8;

	/// Construct a chunk with every one of its 1024 bits initialized to false.
	pub fn new() -> BitmapChunk {
		BitmapChunk(BitVec::from_elem(Self::LEN_BITS, false))
	}

	/// Set a single bit in this chunk.
	/// 0-indexed from start of chunk.
	/// Panics if idx is outside the valid range of bits in a chunk.
	pub fn set(&mut self, idx: u64, value: bool) {
		let bit = usize::try_from(idx).expect("usize from u64");
		assert!(bit < Self::LEN_BITS);
		self.0.set(bit, value)
	}

	/// Does this bitmap chunk have any bits set to 1?
	pub fn any(&self) -> bool {
		self.0.iter().any(|bit| bit)
	}
}

impl PMMRable for BitmapChunk {
// A chunk is its own MMR element type.
type E = Self;

fn as_elmt(&self) -> Self::E {
// Clone to produce the owned element stored/hashed by the MMR.
self.clone()
}
}

impl FixedLength for BitmapChunk {
// Serialized length: 1024 bits = 128 bytes.
const LEN: usize = Self::LEN_BYTES;
}

// Marker impl: opt BitmapChunk into the crate's default hashing behavior.
impl DefaultHashable for BitmapChunk {}

impl Writeable for BitmapChunk {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
// Serialize the chunk as its raw bytes (1024 bits -> 128 bytes).
self.0.to_bytes().write(writer)
}
}

impl Readable for BitmapChunk {
/// Reading is not currently supported, just return an empty one for now.
/// We store the underlying roaring bitmap externally for the bitmap accumulator
/// and the "hash only" backend means we never actually read these chunks.
fn read(_reader: &mut dyn Reader) -> Result<BitmapChunk, ser::Error> {
Ok(BitmapChunk::new())
}
}
@@ -23,8 +23,9 @@ use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxK
use crate::core::ser::{PMMRIndexHashable, PMMRable, ProtocolVersion};
use crate::error::{Error, ErrorKind};
use crate::store::{Batch, ChainStore};
use crate::txhashset::bitmap_accumulator::BitmapAccumulator;
use crate::txhashset::{RewindableKernelView, UTXOView};
use crate::types::{OutputMMRPosition, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use crate::types::{OutputMMRPosition, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::{file, secp_static, zip};
use croaring::Bitmap;
@@ -86,6 +87,21 @@ impl PMMRHandle<BlockHeader> {
Err(ErrorKind::Other(format!("get header hash by height")).into())
}
}

/// Get the header hash for the head of the header chain based on current MMR state.
/// Find the last leaf pos based on MMR size and return its header hash.
/// Returns an error if the MMR is empty or the rightmost leaf cannot be read.
pub fn head_hash(&self) -> Result<Hash, Error> {
	if self.last_pos == 0 {
		// `format!` on a static string is needless allocation-by-formatting;
		// a plain to_string() is the idiomatic form (clippy::useless_format).
		return Err(ErrorKind::Other("MMR empty, no head".to_string()).into());
	}
	let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos);
	// The head is the rightmost leaf beneath the last MMR position.
	let leaf_pos = pmmr::bintree_rightmost(self.last_pos);
	header_pmmr
		.get_data(leaf_pos)
		.map(|entry| entry.hash())
		.ok_or_else(|| ErrorKind::Other("failed to find head hash".to_string()).into())
}
}

/// An easy to manipulate structure holding the 3 sum trees necessary to
@@ -102,6 +118,8 @@ pub struct TxHashSet {
rproof_pmmr_h: PMMRHandle<RangeProof>,
kernel_pmmr_h: PMMRHandle<TxKernel>,

bitmap_accumulator: BitmapAccumulator,

// chain store used as index of commitments to MMR positions
commit_index: Arc<ChainStore>,
}
@@ -133,6 +151,9 @@ impl TxHashSet {
header,
)?;

// Initialize the bitmap accumulator from the current output PMMR.
let bitmap_accumulator = TxHashSet::bitmap_accumulator(&output_pmmr_h)?;

let mut maybe_kernel_handle: Option<PMMRHandle<TxKernel>> = None;
let versions = vec![ProtocolVersion(2), ProtocolVersion(1)];
for version in versions {
@@ -180,13 +201,23 @@ impl TxHashSet {
output_pmmr_h,
rproof_pmmr_h,
kernel_pmmr_h,
bitmap_accumulator,
commit_index,
})
} else {
Err(ErrorKind::TxHashSetErr(format!("failed to open kernel PMMR")).into())
}
}

// Build a new bitmap accumulator for the provided output PMMR.
fn bitmap_accumulator(pmmr_h: &PMMRHandle<Output>) -> Result<BitmapAccumulator, Error> {
	let output_pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.last_pos);
	let num_leaves = pmmr::n_leaves(pmmr_h.last_pos);
	let mut accumulator = BitmapAccumulator::new();
	accumulator.init(&mut output_pmmr.leaf_idx_iter(0), num_leaves)?;
	Ok(accumulator)
}

/// Close all backend file handles
pub fn release_backend_files(&mut self) {
self.output_pmmr_h.backend.release_files();
@@ -252,15 +283,17 @@ impl TxHashSet {
Ok(self.commit_index.get_all_output_pos()?)
}

/// returns outputs from the given insertion (leaf) index up to the
/// returns outputs from the given pmmr index up to the
/// specified limit. Also returns the last index actually populated
pub fn outputs_by_insertion_index(
/// max index is the last PMMR index to consider, not leaf index
pub fn outputs_by_pmmr_index(
&self,
start_index: u64,
max_count: u64,
max_index: Option<u64>,
) -> (u64, Vec<OutputIdentifier>) {
ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos)
.elements_from_insertion_index(start_index, max_count)
.elements_from_pmmr_index(start_index, max_count, max_index)
}

/// highest output insertion index available
@@ -269,13 +302,14 @@ impl TxHashSet {
}

/// As above, for rangeproofs
pub fn rangeproofs_by_insertion_index(
pub fn rangeproofs_by_pmmr_index(
&self,
start_index: u64,
max_count: u64,
max_index: Option<u64>,
) -> (u64, Vec<RangeProof>) {
ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos)
.elements_from_insertion_index(start_index, max_count)
.elements_from_pmmr_index(start_index, max_count, max_index)
}

/// Find a kernel with a given excess. Work backwards from `max_index` to `min_index`
@@ -311,7 +345,10 @@ impl TxHashSet {
ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);

TxHashSetRoots {
output_root: output_pmmr.root(),
output_roots: OutputRoots {
pmmr_root: output_pmmr.root(),
bitmap_root: self.bitmap_accumulator.root(),
},
rproof_root: rproof_pmmr.root(),
kernel_root: kernel_pmmr.root(),
}
@@ -436,7 +473,13 @@ where
trace!("Starting new txhashset (readonly) extension.");

let head = batch.head()?;
let header_head = batch.header_head()?;

// Find header head based on current header MMR (the rightmost leaf node in the MMR).
let header_head = {
let hash = handle.head_hash()?;
let header = batch.get_block_header(&hash)?;
Tip::from_header(&header)
};

let res = {
let header_pmmr = PMMR::at(&mut handle.backend, handle.last_pos);
@@ -477,11 +520,13 @@ where
let output_pmmr =
ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos);
let header_pmmr = ReadonlyPMMR::at(&handle.backend, handle.last_pos);
let rproof_pmmr =
ReadonlyPMMR::at(&trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos);

// Create a new batch here to pass into the utxo_view.
// Discard it (rollback) after we finish with the utxo_view.
let batch = trees.commit_index.batch()?;
let utxo = UTXOView::new(output_pmmr, header_pmmr, &batch);
let utxo = UTXOView::new(output_pmmr, header_pmmr, rproof_pmmr, &batch);
res = inner(&utxo);
}
res
@@ -530,9 +575,16 @@ where
let sizes: (u64, u64, u64);
let res: Result<T, Error>;
let rollback: bool;
let bitmap_accumulator: BitmapAccumulator;

let head = batch.head()?;
let header_head = batch.header_head()?;

// Find header head based on current header MMR (the rightmost leaf node in the MMR).
let header_head = {
let hash = header_pmmr.head_hash()?;
let header = batch.get_block_header(&hash)?;
Tip::from_header(&header)
};

// create a child transaction so if the state is rolled back by itself, all
// index saving can be undone
@@ -551,6 +603,7 @@ where

rollback = extension_pair.extension.rollback;
sizes = extension_pair.extension.sizes();
bitmap_accumulator = extension_pair.extension.bitmap_accumulator.clone();
}

// During an extension we do not want to modify the header_extension (and only read from it).
@@ -580,6 +633,9 @@ where
trees.output_pmmr_h.last_pos = sizes.0;
trees.rproof_pmmr_h.last_pos = sizes.1;
trees.kernel_pmmr_h.last_pos = sizes.2;

// Update our bitmap_accumulator based on our extension
trees.bitmap_accumulator = bitmap_accumulator;
}

trace!("TxHashSet extension done.");
@@ -588,12 +644,11 @@ where
}
}

/// Start a new header MMR unit of work. This MMR tracks the header_head.
/// Start a new header MMR unit of work.
/// This MMR can be extended individually beyond the other (output, rangeproof and kernel) MMRs
/// to allow headers to be validated before we receive the full block data.
pub fn header_extending<'a, F, T>(
handle: &'a mut PMMRHandle<BlockHeader>,
head: &Tip,
batch: &'a mut Batch<'_>,
inner: F,
) -> Result<T, Error>
@@ -607,9 +662,19 @@ where
// create a child transaction so if the state is rolled back by itself, all
// index saving can be undone
let child_batch = batch.child()?;

// Find chain head based on current MMR (the rightmost leaf node in the MMR).
let head = match handle.head_hash() {
Ok(hash) => {
let header = child_batch.get_block_header(&hash)?;
Tip::from_header(&header)
}
Err(_) => Tip::default(),
};

{
let pmmr = PMMR::at(&mut handle.backend, handle.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, head.clone());
let mut extension = HeaderExtension::new(pmmr, &child_batch, head);
res = inner(&mut extension);

rollback = extension.rollback;
@@ -787,6 +852,8 @@ pub struct Extension<'a> {
rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,

bitmap_accumulator: BitmapAccumulator,

/// Rollback flag.
rollback: bool,

@@ -840,6 +907,7 @@ impl<'a> Extension<'a> {
&mut trees.kernel_pmmr_h.backend,
trees.kernel_pmmr_h.last_pos,
),
bitmap_accumulator: trees.bitmap_accumulator.clone(),
rollback: false,
batch,
}
@@ -856,34 +924,60 @@ impl<'a> Extension<'a> {
UTXOView::new(
self.output_pmmr.readonly_pmmr(),
header_ext.pmmr.readonly_pmmr(),
self.rproof_pmmr.readonly_pmmr(),
self.batch,
)
}

/// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs).
/// Tracks every affected output MMR position (both created and spent outputs) so we
/// can update the bitmap accumulator in a single pass afterwards.
pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
	let mut affected_pos = vec![];

	for out in b.outputs() {
		let pos = self.apply_output(out)?;
		affected_pos.push(pos);
		// Update the (output_pos, height) index for the new output.
		self.batch
			.save_output_pos_height(&out.commitment(), pos, b.header.height)?;
	}

	// Note: each input must be applied exactly once - a duplicate apply_input
	// call would attempt to double-spend the output being consumed.
	for input in b.inputs() {
		let pos = self.apply_input(input)?;
		affected_pos.push(pos);
	}

	for kernel in b.kernels() {
		self.apply_kernel(kernel)?;
	}

	// Update our BitmapAccumulator based on affected outputs (both spent and created).
	self.apply_to_bitmap_accumulator(&affected_pos)?;

	// Update the head of the extension to reflect the block we just applied.
	self.head = Tip::from_header(&b.header);

	Ok(())
}

fn apply_input(&mut self, input: &Input) -> Result<(), Error> {
fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> {
// if self.output_pmmr.is_empty() || output_pos.is_empty() {
// return Ok(());
// }
let mut output_idx: Vec<_> = output_pos
.iter()
.map(|x| pmmr::n_leaves(*x).saturating_sub(1))
.collect();
output_idx.sort_unstable();
let min_idx = output_idx.first().cloned().unwrap_or(0);
let size = pmmr::n_leaves(self.output_pmmr.last_pos);
self.bitmap_accumulator.apply(
output_idx,
self.output_pmmr
.leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)),
size,
)
}

fn apply_input(&mut self, input: &Input) -> Result<u64, Error> {
let commit = input.commitment();
let pos_res = self.batch.get_output_pos(&commit);
if let Ok(pos) = pos_res {
@@ -904,14 +998,14 @@ impl<'a> Extension<'a> {
self.rproof_pmmr
.prune(pos)
.map_err(|e| ErrorKind::TxHashSetErr(e))?;
Ok(pos)
}
Ok(false) => return Err(ErrorKind::AlreadySpent(commit).into()),
Err(e) => return Err(ErrorKind::TxHashSetErr(e).into()),
Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()),
Err(e) => Err(ErrorKind::TxHashSetErr(e).into()),
}
} else {
return Err(ErrorKind::AlreadySpent(commit).into());
Err(ErrorKind::AlreadySpent(commit).into())
}
Ok(())
}

fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> {
@@ -1044,17 +1138,27 @@ impl<'a> Extension<'a> {
self.kernel_pmmr
.rewind(kernel_pos, &Bitmap::create())
.map_err(&ErrorKind::TxHashSetErr)?;

// Update our BitmapAccumulator based on affected outputs.
// We want to "unspend" every rewound spent output.
// Treat output_pos as an affected output to ensure we rebuild far enough back.
let mut affected_pos: Vec<_> = rewind_rm_pos.iter().map(|x| x as u64).collect();
affected_pos.push(output_pos);
self.apply_to_bitmap_accumulator(&affected_pos)?;
Ok(())
}

/// Current root hashes and sums (if applicable) for the Output, range proof
/// and kernel sum trees.
pub fn roots(&self) -> Result<TxHashSetRoots, Error> {
Ok(TxHashSetRoots {
output_root: self
.output_pmmr
.root()
.map_err(|_| ErrorKind::InvalidRoot)?,
output_roots: OutputRoots {
pmmr_root: self
.output_pmmr
.root()
.map_err(|_| ErrorKind::InvalidRoot)?,
bitmap_root: self.bitmap_accumulator.root(),
},
rproof_root: self
.rproof_pmmr
.root()
@@ -1072,16 +1176,7 @@ impl<'a> Extension<'a> {
return Ok(());
}
let head_header = self.batch.get_block_header(&self.head.hash())?;
let header_roots = TxHashSetRoots {
output_root: head_header.output_root,
rproof_root: head_header.range_proof_root,
kernel_root: head_header.kernel_root,
};
if header_roots != self.roots()? {
Err(ErrorKind::InvalidRoot.into())
} else {
Ok(())
}
self.roots()?.validate(&head_header)
}

/// Validate the header, output and kernel MMR sizes against the block header.
@@ -1241,7 +1336,7 @@ impl<'a> Extension<'a> {
TxKernel::batch_sig_verify(&tx_kernels)?;
kern_count += tx_kernels.len() as u64;
tx_kernels.clear();
status.on_validation(kern_count, total_kernels, 0, 0);
status.on_validation_kernels(kern_count, total_kernels);
debug!(
"txhashset: verify_kernel_signatures: verified {} signatures",
kern_count,
@@ -1266,7 +1361,8 @@ impl<'a> Extension<'a> {
let mut proofs: Vec<RangeProof> = Vec::with_capacity(1_000);

let mut proof_count = 0;
let total_rproofs = pmmr::n_leaves(self.output_pmmr.unpruned_size());
let total_rproofs = self.output_pmmr.n_unpruned_leaves();

for pos in self.output_pmmr.leaf_pos_iter() {
let output = self.output_pmmr.get_data(pos);
let proof = self.rproof_pmmr.get_data(pos);
@@ -1292,10 +1388,9 @@ impl<'a> Extension<'a> {
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}

if proof_count % 1_000 == 0 {
status.on_validation(0, 0, proof_count, total_rproofs);
if proof_count % 1_000 == 0 {
status.on_validation_rproofs(proof_count, total_rproofs);
}
}
}

@@ -1331,6 +1426,12 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
// if file exist, just re-use it
let zip_file = File::open(zip_path.clone());
if let Ok(zip) = zip_file {
debug!(
"zip_read: {} at {}: reusing existing zip file: {:?}",
header.hash(),
header.height,
zip_path
);
return Ok(zip);
} else {
// clean up old zips.
@@ -1371,6 +1472,13 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
temp_txhashset_path
};

debug!(
"zip_read: {} at {}: created zip file: {:?}",
header.hash(),
header.height,
zip_path
);

// open it again to read it back
let zip_file = File::open(zip_path.clone())?;

@@ -21,12 +21,14 @@ use crate::core::global;
use crate::core::ser::PMMRIndexHashable;
use crate::error::{Error, ErrorKind};
use crate::store::Batch;
use crate::util::secp::pedersen::RangeProof;
use grin_store::pmmr::PMMRBackend;

/// Readonly view of the UTXO set (based on output MMR).
pub struct UTXOView<'a> {
output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>,
header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
batch: &'a Batch<'a>,
}

@@ -35,11 +37,13 @@ impl<'a> UTXOView<'a> {
/// Build a new UTXO view over the given (readonly) output, header and
/// rangeproof PMMRs. The batch is used for any index lookups the view needs.
pub fn new(
	output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>,
	header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
	rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
	batch: &'a Batch<'_>,
) -> UTXOView<'a> {
	UTXOView {
		output_pmmr,
		header_pmmr,
		rproof_pmmr,
		batch,
	}
}
@@ -98,6 +102,17 @@ impl<'a> UTXOView<'a> {
Ok(())
}

/// Retrieves an unspent output using its PMMR position
pub fn get_unspent_output_at(&self, pos: u64) -> Result<Output, Error> {
match self.output_pmmr.get_data(pos) {
Some(output_id) => match self.rproof_pmmr.get_data(pos) {
Some(rproof) => Ok(output_id.into_output(rproof)),
None => Err(ErrorKind::RangeproofNotFound.into()),
},
None => Err(ErrorKind::OutputNotFound.into()),
}
}

/// Verify we are not attempting to spend any coinbase outputs
/// that have not sufficiently matured.
pub fn verify_coinbase_maturity(&self, inputs: &Vec<Input>, height: u64) -> Result<(), Error> {
@@ -18,10 +18,10 @@ use chrono::prelude::{DateTime, Utc};
use std::sync::Arc;

use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
use crate::core::core::{Block, BlockHeader};
use crate::core::core::{Block, BlockHeader, HeaderVersion};
use crate::core::pow::Difficulty;
use crate::core::ser;
use crate::error::Error;
use crate::core::ser::{self, PMMRIndexHashable};
use crate::error::{Error, ErrorKind};
use crate::util::RwLock;

bitflags! {
@@ -65,12 +65,15 @@ pub enum SyncStatus {
},
/// Setting up before validation
TxHashsetSetup,
/// Validating the full state
TxHashsetValidation {
/// Validating the kernels
TxHashsetKernelsValidation {
kernels: u64,
kernel_total: u64,
kernels_total: u64,
},
/// Validating the range proofs
TxHashsetRangeProofsValidation {
rproofs: u64,
rproof_total: u64,
rproofs_total: u64,
},
/// Finalizing the new state
TxHashsetSave,
@@ -155,43 +158,18 @@ impl TxHashsetWriteStatus for SyncState {
self.update(SyncStatus::TxHashsetSetup);
}

fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
let mut status = self.current.write();
match *status {
SyncStatus::TxHashsetValidation {
kernels,
kernel_total,
rproofs,
rproof_total,
} => {
let ks = if vkernels > 0 { vkernels } else { kernels };
let kt = if vkernel_total > 0 {
vkernel_total
} else {
kernel_total
};
let rps = if vrproofs > 0 { vrproofs } else { rproofs };
let rpt = if vrproof_total > 0 {
vrproof_total
} else {
rproof_total
};
*status = SyncStatus::TxHashsetValidation {
kernels: ks,
kernel_total: kt,
rproofs: rps,
rproof_total: rpt,
};
}
_ => {
*status = SyncStatus::TxHashsetValidation {
kernels: 0,
kernel_total: 0,
rproofs: 0,
rproof_total: 0,
}
}
}
fn on_validation_kernels(&self, kernels: u64, kernels_total: u64) {
self.update(SyncStatus::TxHashsetKernelsValidation {
kernels,
kernels_total,
});
}

fn on_validation_rproofs(&self, rproofs: u64, rproofs_total: u64) {
self.update(SyncStatus::TxHashsetRangeProofsValidation {
rproofs,
rproofs_total,
});
}

fn on_save(&self) {
@@ -203,18 +181,84 @@ impl TxHashsetWriteStatus for SyncState {
}
}

/// A helper to hold the roots of the txhashset in order to keep them
/// readable.
#[derive(Debug, PartialEq)]
/// A helper for the various txhashset MMR roots.
#[derive(Debug)]
pub struct TxHashSetRoots {
/// Output root
pub output_root: Hash,
/// Output roots
pub output_roots: OutputRoots,
/// Range Proof root
pub rproof_root: Hash,
/// Kernel root
pub kernel_root: Hash,
}

impl TxHashSetRoots {
	/// Accessor for the output PMMR root (rules here are block height dependent).
	/// We assume the header version is consistent with the block height, validated
	/// as part of pipe::validate_header().
	pub fn output_root(&self, header: &BlockHeader) -> Hash {
		self.output_roots.root(header)
	}

	/// Validate roots against the provided block header.
	/// All three roots (output, rangeproof, kernel) must match for validation to pass.
	pub fn validate(&self, header: &BlockHeader) -> Result<(), Error> {
		debug!(
			"validate roots: {} at {}, {} vs. {} (original: {}, merged: {})",
			header.hash(),
			header.height,
			header.output_root,
			self.output_root(header),
			self.output_roots.pmmr_root,
			self.output_roots.merged_root(header),
		);

		let roots_match = header.output_root == self.output_root(header)
			&& header.range_proof_root == self.rproof_root
			&& header.kernel_root == self.kernel_root;

		if roots_match {
			Ok(())
		} else {
			Err(ErrorKind::InvalidRoot.into())
		}
	}
}

/// A helper for the various output roots.
/// Holds both the raw output PMMR root and the bitmap accumulator root;
/// which one (or combination) a header commits to is version dependent.
#[derive(Debug)]
pub struct OutputRoots {
	/// The output PMMR root
	pub pmmr_root: Hash,
	/// The bitmap accumulator root
	pub bitmap_root: Hash,
}

impl OutputRoots {
	/// The root of our output PMMR. The rules here are block height specific.
	/// We use the merged root here for header version 3 and later.
	/// We assume the header version is consistent with the block height, validated
	/// as part of pipe::validate_header().
	pub fn root(&self, header: &BlockHeader) -> Hash {
		if header.version >= HeaderVersion(3) {
			self.merged_root(header)
		} else {
			self.output_root()
		}
	}

	/// The root of the underlying output PMMR on its own.
	fn output_root(&self) -> Hash {
		self.pmmr_root
	}

	/// Hash the root of the output PMMR and the root of the bitmap accumulator
	/// together with the size of the output PMMR (for consistency with existing PMMR impl).
	/// H(pmmr_size | pmmr_root | bitmap_root)
	fn merged_root(&self, header: &BlockHeader) -> Hash {
		(self.pmmr_root, self.bitmap_root).hash_with_index(header.output_mmr_size)
	}
}

/// A helper to hold the output pmmr position of the txhashset in order to keep them
/// readable.
#[derive(Debug)]
@@ -315,8 +359,10 @@ pub trait ChainAdapter {
pub trait TxHashsetWriteStatus {
/// First setup of the txhashset
fn on_setup(&self);
/// Starting kernel validation
fn on_validation_kernels(&self, kernels: u64, kernel_total: u64);
/// Starting rproof validation
fn on_validation_rproofs(&self, rproofs: u64, rproof_total: u64);
/// Starting to save the txhashset and related data
fn on_save(&self);
/// Done writing a new txhashset
@@ -328,7 +374,8 @@ pub struct NoStatus;

// No-op implementation of TxHashsetWriteStatus for callers that do not
// care about progress reporting.
impl TxHashsetWriteStatus for NoStatus {
	fn on_setup(&self) {}
	fn on_validation_kernels(&self, _ks: u64, _kts: u64) {}
	fn on_validation_rproofs(&self, _rs: u64, _rt: u64) {}
	fn on_save(&self) {}
	fn on_done(&self) {}
}
@@ -0,0 +1,188 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use self::chain::txhashset::BitmapAccumulator;
use self::core::core::hash::Hash;
use self::core::ser::PMMRIndexHashable;
use bit_vec::BitVec;
use grin_chain as chain;
use grin_core as core;
use grin_util as util;

#[test]
fn test_bitmap_accumulator() {
	util::init_test_logger();

	// Expected hash of a single 1024-bit chunk with the given bits set,
	// hashed at the given MMR insertion index.
	fn chunk_hash(bits: &[usize], idx: u64) -> Hash {
		let mut bit_vec = BitVec::from_elem(1024, false);
		for bit in bits {
			bit_vec.set(*bit, true);
		}
		bit_vec.to_bytes().hash_with_index(idx)
	}

	// Expected parent hash of two sibling nodes at the given MMR index.
	fn parent_hash(left: Hash, right: Hash, idx: u64) -> Hash {
		(left, right).hash_with_index(idx)
	}

	let mut accumulator = BitmapAccumulator::new();
	assert_eq!(accumulator.root(), Hash::default());

	// 1000... (rebuild from 0, setting [0] true)
	accumulator.apply(vec![0], vec![0], 1).unwrap();
	assert_eq!(accumulator.root(), chunk_hash(&[0], 0));

	// 1100... (rebuild from 0, setting [0, 1] true)
	accumulator.apply(vec![0], vec![0, 1], 2).unwrap();
	assert_eq!(accumulator.root(), chunk_hash(&[0, 1], 0));

	// 0100... (rebuild from 0, setting [1] true, which will reset [0] false)
	accumulator.apply(vec![0], vec![1], 2).unwrap();
	assert_eq!(accumulator.root(), chunk_hash(&[1], 0));

	// 0100... (rebuild from 1, setting [1] true)
	accumulator.apply(vec![1], vec![1], 2).unwrap();
	assert_eq!(accumulator.root(), chunk_hash(&[1], 0));

	// 0100...0001 (rebuild from 0, setting [1, 1023] true)
	accumulator.apply(vec![0], vec![1, 1023], 1024).unwrap();
	assert_eq!(accumulator.root(), chunk_hash(&[1, 1023], 0));

	// Now set bits such that we extend the bitmap accumulator across multiple 1024 bit chunks.
	// 0100...0001, 1000...0000 (rebuild from 0, setting [1, 1023, 1024] true)
	accumulator
		.apply(vec![0], vec![1, 1023, 1024], 1025)
		.unwrap();
	let expected = parent_hash(chunk_hash(&[1, 1023], 0), chunk_hash(&[0], 1), 2);
	assert_eq!(accumulator.root(), expected);

	// Just rebuild the second bitmap chunk.
	// 0100...0001, 0100...0000 (rebuild from 1025, setting [1025] true)
	accumulator.apply(vec![1025], vec![1025], 1026).unwrap();
	let expected = parent_hash(chunk_hash(&[1, 1023], 0), chunk_hash(&[1], 1), 2);
	assert_eq!(accumulator.root(), expected);

	// Rebuild the first bitmap chunk and all chunks after it.
	// 0100...0000, 0100...0000 (rebuild from 1, setting [1, 1025] true)
	accumulator.apply(vec![1], vec![1, 1025], 1026).unwrap();
	let expected = parent_hash(chunk_hash(&[1], 0), chunk_hash(&[1], 1), 2);
	assert_eq!(accumulator.root(), expected);

	// Make sure we handle the case where the first chunk is all 0s
	// 0000...0000, 0100...0000 (rebuild from 1, setting [1025] true)
	accumulator.apply(vec![1], vec![1025], 1026).unwrap();
	let expected = parent_hash(chunk_hash(&[], 0), chunk_hash(&[1], 1), 2);
	assert_eq!(accumulator.root(), expected);

	// Check that removing the last bit in a chunk removes the now empty chunk
	// if it is the rightmost chunk.
	// 0000...0001 (rebuild from 1023, setting [1023] true)
	accumulator.apply(vec![1023], vec![1023], 1024).unwrap();
	assert_eq!(accumulator.root(), chunk_hash(&[1023], 0));

	// Make sure we pad appropriately with 0s if we set a distant bit to 1.
	// Start with an empty accumulator.
	// 0000...0000, 0000...0000, 0000...0000, 0000...0001 (rebuild from 4095, setting [4095] true)
	let mut accumulator = BitmapAccumulator::new();
	accumulator.apply(vec![4095], vec![4095], 4096).unwrap();
	let left = parent_hash(chunk_hash(&[], 0), chunk_hash(&[], 1), 2);
	let right = parent_hash(chunk_hash(&[], 3), chunk_hash(&[1023], 4), 5);
	assert_eq!(accumulator.root(), parent_hash(left, right, 6));
}
@@ -16,7 +16,7 @@ use self::chain::types::{NoopAdapter, Tip};
use self::chain::Chain;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, OutputIdentifier, Transaction};
use self::core::core::{Block, BlockHeader, KernelFeatures, OutputIdentifier, Transaction};
use self::core::global::ChainTypes;
use self::core::libtx::{self, build, ProofBuilder};
use self::core::pow::Difficulty;
@@ -562,10 +562,10 @@ fn spend_in_fork_and_compact() {
let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier();

let tx1 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
vec![
build::coinbase_input(consensus::REWARD, key_id2.clone()),
build::output(consensus::REWARD - 20000, key_id30.clone()),
build::with_fee(20000),
],
&kc,
&pb,
@@ -580,10 +580,10 @@ fn spend_in_fork_and_compact() {
chain.validate(false).unwrap();

let tx2 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
vec![
build::input(consensus::REWARD - 20000, key_id30.clone()),
build::output(consensus::REWARD - 40000, key_id31.clone()),
build::with_fee(20000),
],
&kc,
&pb,
@@ -15,6 +15,7 @@
use self::chain::types::NoopAdapter;
use self::chain::ErrorKind;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::KernelFeatures;
use self::core::global::{self, ChainTypes};
use self::core::libtx::{self, build, ProofBuilder};
use self::core::pow::Difficulty;
@@ -47,7 +48,7 @@ fn test_coinbase_maturity() {

{
let chain = chain::Chain::init(
".grin".to_string(),
chain_dir.to_string(),
Arc::new(NoopAdapter {}),
genesis_block,
pow::verify_size,
@@ -99,10 +100,10 @@ fn test_coinbase_maturity() {
// here we build a tx that attempts to spend the earlier coinbase output
// this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![
build::coinbase_input(amount, key_id1.clone()),
build::output(amount - 2, key_id2.clone()),
build::with_fee(2),
],
&keychain,
&builder,
@@ -182,10 +183,10 @@ fn test_coinbase_maturity() {
// here we build a tx that attempts to spend the earlier coinbase output
// this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![
build::coinbase_input(amount, key_id1.clone()),
build::output(amount - 2, key_id2.clone()),
build::with_fee(2),
],
&keychain,
&builder,
@@ -1,8 +1,8 @@
[package]
name = "grin_config"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Configuration for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Configuration for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -16,10 +16,10 @@ serde_derive = "1"
toml = "0.4"
dirs = "1.0.3"

grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_servers = { path = "../servers", version = "2.1.0-beta.3" }
grin_p2p = { path = "../p2p", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "3.0.0" }
grin_servers = { path = "../servers", version = "3.0.0" }
grin_p2p = { path = "../p2p", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }

[dev-dependencies]
pretty_assertions = "0.5.1"
@@ -56,7 +56,16 @@ fn comments() -> HashMap<String, String> {
retval.insert(
"api_secret_path".to_string(),
"
#path of the secret token used by the API to authenticate the calls
#path of the secret token used by the Rest API and v2 Owner API to authenticate the calls
#comment the it to disable basic auth
"
.to_string(),
);

retval.insert(
"foreign_api_secret_path".to_string(),
"
#path of the secret token used by the Foreign API to authenticate the calls
#comment the it to disable basic auth
"
.to_string(),
@@ -114,8 +123,7 @@ fn comments() -> HashMap<String, String> {
retval.insert(
"run_tui".to_string(),
"
#whether to run the ncurses TUI. Ncurses must be installed and this
#will also disable logging to stdout
#whether to run the ncurses TUI (Ncurses must be installed)
"
.to_string(),
);
@@ -30,16 +30,18 @@ use crate::core::global;
use crate::p2p;
use crate::servers::ServerConfig;
use crate::types::{ConfigError, ConfigMembers, GlobalConfig};
use crate::util::LoggingConfig;
use crate::util::logger::LoggingConfig;

/// The default file name to use when trying to derive
/// the node config file location
pub const SERVER_CONFIG_FILE_NAME: &'static str = "grin-server.toml";
const SERVER_LOG_FILE_NAME: &'static str = "grin-server.log";
const GRIN_HOME: &'static str = ".grin";
const GRIN_CHAIN_DIR: &'static str = "chain_data";
/// Node API secret
/// Node Rest API and V2 Owner API secret
pub const API_SECRET_FILE_NAME: &'static str = ".api_secret";
/// Foreign API secret
pub const FOREIGN_API_SECRET_FILE_NAME: &'static str = ".foreign_api_secret";

fn get_grin_path(chain_type: &global::ChainTypes) -> Result<PathBuf, ConfigError> {
// Check if grin dir exists
@@ -95,11 +97,14 @@ pub fn check_api_secret(api_secret_path: &PathBuf) -> Result<(), ConfigError> {
Ok(())
}

/// Check that the api secret file exists and is valid
fn check_api_secret_file(chain_type: &global::ChainTypes) -> Result<(), ConfigError> {
/// Check that the api secret files exist and are valid
fn check_api_secret_files(
chain_type: &global::ChainTypes,
secret_file_name: &str,
) -> Result<(), ConfigError> {
let grin_path = get_grin_path(chain_type)?;
let mut api_secret_path = grin_path.clone();
api_secret_path.push(API_SECRET_FILE_NAME);
api_secret_path.push(secret_file_name);
if !api_secret_path.exists() {
init_api_secret(&api_secret_path)
} else {
@@ -109,7 +114,8 @@ fn check_api_secret_file(chain_type: &global::ChainTypes) -> Result<(), ConfigEr

/// Handles setup and detection of paths for node
pub fn initial_setup_server(chain_type: &global::ChainTypes) -> Result<GlobalConfig, ConfigError> {
check_api_secret_file(chain_type)?;
check_api_secret_files(chain_type, API_SECRET_FILE_NAME)?;
check_api_secret_files(chain_type, FOREIGN_API_SECRET_FILE_NAME)?;
// Use config file if current directory if it exists, .grin home otherwise
if let Some(p) = check_config_current_dir(SERVER_CONFIG_FILE_NAME) {
GlobalConfig::new(p.to_str().unwrap())
@@ -221,7 +227,8 @@ impl GlobalConfig {
let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let decoded: Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents);
let fixed = GlobalConfig::fix_warning_level(contents);
let decoded: Result<ConfigMembers, toml::de::Error> = toml::from_str(&fixed);
match decoded {
Ok(gc) => {
self.members = Some(gc);
@@ -296,9 +303,38 @@ impl GlobalConfig {
/// Write configuration to a file
pub fn write_to_file(&mut self, name: &str) -> Result<(), ConfigError> {
let conf_out = self.ser_config()?;
let conf_out = insert_comments(conf_out);
let fixed_config = GlobalConfig::fix_log_level(conf_out);
let commented_config = insert_comments(fixed_config);
let mut file = File::create(name)?;
file.write_all(conf_out.as_bytes())?;
file.write_all(commented_config.as_bytes())?;
Ok(())
}

// Forwards compatibility: older configs carry the legacy `Warning` log
// level, which must be mapped to the standard log::Level name `WARN`
// before the TOML is deserialized.
fn fix_warning_level(conf: String) -> String {
	str::replace(&conf, "Warning", "WARN")
}

// Backwards compatibility: the config file written to disk should carry
// log levels with only the first letter capitalised (e.g. `Info`,
// `Warning`) rather than the all-caps log::Level names.
fn fix_log_level(conf: String) -> String {
	let renames = [
		("TRACE", "Trace"),
		("DEBUG", "Debug"),
		("INFO", "Info"),
		("WARN", "Warning"),
		("ERROR", "Error"),
	];
	// Apply each rename in order, threading the config text through.
	renames
		.iter()
		.copied()
		.fold(conf, |acc, (from, to)| acc.replace(from, to))
}
}

#[test]
fn test_fix_log_level() {
	// Every all-caps level should come back with only the first letter
	// capitalised, and WARN maps to the legacy `Warning` spelling.
	let input = "TRACE DEBUG INFO WARN ERROR".to_string();
	assert_eq!(
		GlobalConfig::fix_log_level(input),
		"Trace Debug Info Warning Error"
	);
}

#[test]
fn test_fix_warning_level() {
	// Legacy `Warning` in an old config must become the standard `WARN`.
	let input = "Warning".to_string();
	assert_eq!(GlobalConfig::fix_warning_level(input), "WARN");
}
@@ -19,7 +19,7 @@ use std::io;
use std::path::PathBuf;

use crate::servers::ServerConfig;
use crate::util::LoggingConfig;
use crate::util::logger::LoggingConfig;

/// Error type wrapping config errors.
#[derive(Debug)]
@@ -1,16 +1,16 @@
[package]
name = "grin_core"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
workspace = ".."
edition = "2018"

[dependencies]
blake2-rfc = "0.2"
blake2 = { package = "blake2-rfc", version = "0.2"}
byteorder = "1"
croaring = "0.3.9"
enum_primitive = "0.1"
@@ -29,8 +29,8 @@ log = "0.4"
chrono = { version = "0.4.4", features = ["serde"] }
zeroize = "0.9"

grin_keychain = { path = "../keychain", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
keychain = { package = "grin_keychain", path = "../keychain", version = "3.0.0" }
util = { package = "grin_util", path = "../util", version = "3.0.0" }

[dev-dependencies]
serde_json = "1"

Some generated files are not rendered by default. Learn more.

@@ -3,10 +3,10 @@ extern crate grin_core;
#[macro_use]
extern crate libfuzzer_sys;

use grin_core::core::Block;
use grin_core::core::UntrustedBlock;
use grin_core::ser;

fuzz_target!(|data: &[u8]| {
let mut d = data.clone();
let _t: Result<Block, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(1));
let _t: Result<UntrustedBlock, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(1));
});
@@ -3,10 +3,10 @@ extern crate grin_core;
#[macro_use]
extern crate libfuzzer_sys;

use grin_core::core::Block;
use grin_core::core::UntrustedBlock;
use grin_core::ser;

fuzz_target!(|data: &[u8]| {
let mut d = data.clone();
let _t: Result<Block, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(2));
let _t: Result<UntrustedBlock, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(2));
});
@@ -3,10 +3,11 @@ extern crate grin_core;
#[macro_use]
extern crate libfuzzer_sys;

use grin_core::core::CompactBlock;
use grin_core::core::UntrustedCompactBlock;
use grin_core::ser;

fuzz_target!(|data: &[u8]| {
let mut d = data.clone();
let _t: Result<CompactBlock, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(1));
let _t: Result<UntrustedCompactBlock, ser::Error> =
ser::deserialize(&mut d, ser::ProtocolVersion(1));
});
@@ -3,10 +3,11 @@ extern crate grin_core;
#[macro_use]
extern crate libfuzzer_sys;

use grin_core::core::CompactBlock;
use grin_core::core::UntrustedCompactBlock;
use grin_core::ser;

fuzz_target!(|data: &[u8]| {
let mut d = data.clone();
let _t: Result<CompactBlock, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(2));
let _t: Result<UntrustedCompactBlock, ser::Error> =
ser::deserialize(&mut d, ser::ProtocolVersion(2));
});
@@ -18,12 +18,11 @@
//! enough, consensus-relevant constants and short functions should be kept
//! here.

use std::cmp::{max, min};

use crate::core::block::HeaderVersion;
use crate::core::hash::{Hash, ZERO_HASH};
use crate::global;
use crate::pow::Difficulty;
use std::cmp::{max, min};

/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
@@ -79,7 +78,7 @@ pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;

/// Cuckaroo proof-of-work edge_bits, meant to be ASIC resistant.
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;

/// Original reference edge_bits to compute difficulty factors for higher
@@ -131,42 +130,53 @@ pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Floonet first hard fork height, set to happen around 2019-06-20
pub const FLOONET_FIRST_HARD_FORK: u64 = 185_040;

/// Check whether the block version is valid at a given height, implements
/// Floonet second hard fork height, set to happen around 2019-12-19
pub const FLOONET_SECOND_HARD_FORK: u64 = 298_080;

/// AutomatedTesting and UserTesting first hard fork height.
pub const TESTING_FIRST_HARD_FORK: u64 = 3;

/// AutomatedTesting and UserTesting second hard fork height.
pub const TESTING_SECOND_HARD_FORK: u64 = 6;

/// Compute possible block version at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
pub fn header_version(height: u64) -> HeaderVersion {
let chain_type = global::CHAIN_TYPE.read().clone();
let hf_interval = (1 + height / HARD_FORK_INTERVAL) as u16;
match chain_type {
global::ChainTypes::Mainnet => HeaderVersion(hf_interval),
global::ChainTypes::Floonet => {
if height < FLOONET_FIRST_HARD_FORK {
version == HeaderVersion::default()
// add branches one by one as we go from hard fork to hard fork
// } else if height < FLOONET_SECOND_HARD_FORK {
} else if height < 2 * HARD_FORK_INTERVAL {
version == HeaderVersion::new(2)
(HeaderVersion(1))
} else if height < FLOONET_SECOND_HARD_FORK {
(HeaderVersion(2))
} else if height < 3 * HARD_FORK_INTERVAL {
(HeaderVersion(3))
} else {
false
HeaderVersion(hf_interval)
}
}
// everything else just like mainnet
_ => {
if height < HARD_FORK_INTERVAL {
version == HeaderVersion::default()
} else if height < 2 * HARD_FORK_INTERVAL {
version == HeaderVersion::new(2)
// uncomment branches one by one as we go from hard fork to hard fork
/*} else if height < 3 * HARD_FORK_INTERVAL {
version == HeaderVersion::new(3)
} else if height < 4 * HARD_FORK_INTERVAL {
version == HeaderVersion::new(4)
} else {
version > HeaderVersion::new(4) */
global::ChainTypes::AutomatedTesting | global::ChainTypes::UserTesting => {
if height < TESTING_FIRST_HARD_FORK {
(HeaderVersion(1))
} else if height < TESTING_SECOND_HARD_FORK {
(HeaderVersion(2))
} else if height < 3 * HARD_FORK_INTERVAL {
(HeaderVersion(3))
} else {
false
HeaderVersion(hf_interval)
}
}
}
}

/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
///
/// Returns true only while the scheduled-fork era lasts (heights below
/// `3 * HARD_FORK_INTERVAL`) and the claimed version matches the version
/// mandated for that height by `header_version`.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
	// Expression form instead of a trailing `return ...;` (clippy: needless_return).
	height < 3 * HARD_FORK_INTERVAL && version == header_version(height)
}

/// Number of blocks used to calculate difficulty adjustments
pub const DIFFICULTY_ADJUST_WINDOW: u64 = HOUR_HEIGHT;

@@ -189,13 +199,14 @@ pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;

let bits_over_min = edge_bits.saturating_sub(global::min_edge_bits());
let expiry_height = (1 << bits_over_min) * YEAR_HEIGHT;
if edge_bits < 32 && height >= expiry_height {
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020

(2 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}

/// Minimum difficulty, enforced in diff retargetting
@@ -26,8 +26,7 @@ pub mod transaction;
pub mod verifier_cache;

use crate::consensus::GRIN_BASE;

use crate::util::secp::pedersen::Commitment;
use util::secp::pedersen::Commitment;

pub use self::block::*;
pub use self::block_sums::*;
@@ -14,14 +14,6 @@

//! Blocks and blockheaders

use crate::util::RwLock;
use chrono::naive::{MAX_DATE, MIN_DATE};
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use std::collections::HashSet;
use std::fmt;
use std::iter::FromIterator;
use std::sync::Arc;

use crate::consensus::{self, reward, REWARD};
use crate::core::committed::{self, Committed};
use crate::core::compact_block::{CompactBlock, CompactBlockBody};
@@ -31,12 +23,19 @@ use crate::core::{
transaction, Commitment, Input, KernelFeatures, Output, Transaction, TransactionBody, TxKernel,
Weighting,
};

use crate::global;
use crate::keychain::{self, BlindingFactor};
use crate::pow::{Difficulty, Proof, ProofOfWork};
use crate::pow::{verify_size, Difficulty, Proof, ProofOfWork};
use crate::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use crate::util::{secp, static_secp_instance};
use chrono::naive::{MAX_DATE, MIN_DATE};
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use chrono::Duration;
use keychain::{self, BlindingFactor};
use std::collections::HashSet;
use std::fmt;
use std::iter::FromIterator;
use std::sync::Arc;
use util::RwLock;
use util::{secp, static_secp_instance};

/// Errors thrown by Block validation
#[derive(Debug, Clone, Eq, PartialEq, Fail)]
@@ -52,6 +51,12 @@ pub enum Error {
TooHeavy,
/// Block weight (based on inputs|outputs|kernels) exceeded.
WeightExceeded,
/// Block version is invalid for a given block height
InvalidBlockVersion(HeaderVersion),
/// Block time is invalid
InvalidBlockTime,
/// Invalid POW
InvalidPow,
/// Kernel not valid due to lock_height exceeding block header height
KernelLockHeight(u64),
/// Underlying tx related error
@@ -171,29 +176,9 @@ impl Hashed for HeaderEntry {
}

/// Some type safety around header versioning.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Serialize)]
pub struct HeaderVersion(pub u16);

impl Default for HeaderVersion {
fn default() -> HeaderVersion {
HeaderVersion(1)
}
}

// self-conscious increment function courtesy of Jasper
impl HeaderVersion {
fn next(&self) -> Self {
Self(self.0 + 1)
}
}

impl HeaderVersion {
/// Constructor taking the provided version.
pub fn new(version: u16) -> HeaderVersion {
HeaderVersion(version)
}
}

impl From<HeaderVersion> for u16 {
fn from(v: HeaderVersion) -> u16 {
v.0
@@ -248,7 +233,7 @@ impl DefaultHashable for BlockHeader {}
impl Default for BlockHeader {
fn default() -> BlockHeader {
BlockHeader {
version: HeaderVersion::default(),
version: HeaderVersion(1),
height: 0,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc),
prev_hash: ZERO_HASH,
@@ -289,48 +274,44 @@ impl Writeable for BlockHeader {
}
}

/// Read all fields of a block header from the stream, in canonical wire order.
/// Shared by both the trusted (`BlockHeader`) and untrusted
/// (`UntrustedBlockHeader`) deserialization paths. Only sanity-checks the
/// timestamp range here; consensus-level validation happens elsewhere.
fn read_block_header(reader: &mut dyn Reader) -> Result<BlockHeader, ser::Error> {
let version = HeaderVersion::read(reader)?;
// height then raw unix timestamp (seconds), packed back to back
let (height, timestamp) = ser_multiread!(reader, read_u64, read_i64);
let prev_hash = Hash::read(reader)?;
let prev_root = Hash::read(reader)?;
let output_root = Hash::read(reader)?;
let range_proof_root = Hash::read(reader)?;
let kernel_root = Hash::read(reader)?;
let total_kernel_offset = BlindingFactor::read(reader)?;
let (output_mmr_size, kernel_mmr_size) = ser_multiread!(reader, read_u64, read_u64);
let pow = ProofOfWork::read(reader)?;

// Reject timestamps chrono cannot represent (outside MIN_DATE..=MAX_DATE);
// treat them as corrupted data rather than panicking on construction below.
if timestamp > MAX_DATE.and_hms(0, 0, 0).timestamp()
|| timestamp < MIN_DATE.and_hms(0, 0, 0).timestamp()
{
return Err(ser::Error::CorruptedData);
}

Ok(BlockHeader {
version,
height,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc),
prev_hash,
prev_root,
output_root,
range_proof_root,
kernel_root,
total_kernel_offset,
output_mmr_size,
kernel_mmr_size,
pow,
})
}

/// Deserialization of a block header
impl Readable for BlockHeader {
fn read(reader: &mut dyn Reader) -> Result<BlockHeader, ser::Error> {
let version = HeaderVersion::read(reader)?;
let (height, timestamp) = ser_multiread!(reader, read_u64, read_i64);
let prev_hash = Hash::read(reader)?;
let prev_root = Hash::read(reader)?;
let output_root = Hash::read(reader)?;
let range_proof_root = Hash::read(reader)?;
let kernel_root = Hash::read(reader)?;
let total_kernel_offset = BlindingFactor::read(reader)?;
let (output_mmr_size, kernel_mmr_size) = ser_multiread!(reader, read_u64, read_u64);
let pow = ProofOfWork::read(reader)?;

if timestamp > MAX_DATE.and_hms(0, 0, 0).timestamp()
|| timestamp < MIN_DATE.and_hms(0, 0, 0).timestamp()
{
return Err(ser::Error::CorruptedData);
}

// Check the block version before proceeding any further.
// We want to do this here because blocks can be pretty large
// and we want to halt processing as early as possible.
// If we receive an invalid block version then the peer is not on our hard-fork.
if !consensus::valid_header_version(height, version) {
return Err(ser::Error::InvalidBlockVersion);
}

Ok(BlockHeader {
version,
height,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc),
prev_hash,
prev_root,
output_root,
range_proof_root,
kernel_root,
total_kernel_offset,
output_mmr_size,
kernel_mmr_size,
pow,
})
read_block_header(reader)
}
}

@@ -397,7 +378,60 @@ impl BlockHeader {
}
}

/// A block as expressed in the MimbleWimble protocol. The reward is
impl From<UntrustedBlockHeader> for BlockHeader {
fn from(header: UntrustedBlockHeader) -> Self {
header.0
}
}

/// Block header which does lightweight validation as part of deserialization;
/// it is supposed to be used when we can't trust the channel (e.g. network).
pub struct UntrustedBlockHeader(BlockHeader);

/// Deserialization of an untrusted block header. On top of the plain wire
/// decode this rejects: timestamps too far in the future, versions not
/// matching the hard-fork schedule, invalid edge bits, and an invalid POW.
impl Readable for UntrustedBlockHeader {
fn read(reader: &mut dyn Reader) -> Result<UntrustedBlockHeader, ser::Error> {
// Decode the raw fields first; cheap checks follow in increasing cost order.
let header = read_block_header(reader)?;
if header.timestamp
> Utc::now() + Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
{
// refuse blocks more than 12 blocks intervals in future (as in bitcoin)
// TODO add warning in p2p code if local time is too different from peers
error!(
"block header {} validation error: block time is more than 12 blocks in future",
header.hash()
);
return Err(ser::Error::CorruptedData);
}

// Check the block version before proceeding any further.
// We want to do this here because blocks can be pretty large
// and we want to halt processing as early as possible.
// If we receive an invalid block version then the peer is not on our hard-fork.
if !consensus::valid_header_version(header.height, header.version) {
return Err(ser::Error::InvalidBlockVersion);
}

// The proof must be either primary or secondary POW; anything else is junk.
if !header.pow.is_primary() && !header.pow.is_secondary() {
error!(
"block header {} validation error: invalid edge bits",
header.hash()
);
return Err(ser::Error::CorruptedData);
}
// Most expensive check last: verify the proof-of-work itself.
if let Err(e) = verify_size(&header) {
error!(
"block header {} validation error: invalid POW: {}",
header.hash(),
e
);
return Err(ser::Error::CorruptedData);
}
Ok(UntrustedBlockHeader(header))
}
}

/// A block as expressed in the Mimblewimble protocol. The reward is
/// non-explicit, assumed to be deducible from block height (similar to
/// bitcoin's schedule) and expressed as a global transaction fee (added v.H),
/// additive to the total of fees ever collected.
@@ -435,16 +469,7 @@ impl Writeable for Block {
impl Readable for Block {
fn read(reader: &mut dyn Reader) -> Result<Block, ser::Error> {
let header = BlockHeader::read(reader)?;

let body = TransactionBody::read(reader)?;

// Now "lightweight" validation of the block.
// Treat any validation issues as data corruption.
// An example of this would be reading a block
// that exceeded the allowed number of inputs.
body.validate_read(Weighting::AsBlock)
.map_err(|_| ser::Error::CorruptedData)?;

Ok(Block { header, body })
}
}
@@ -574,12 +599,9 @@ impl Block {
vec![],
)?;

// Determine the height and associated version for the new header.
let height = prev.height + 1;

let mut version = prev.version;
if !consensus::valid_header_version(height, version) {
version = version.next();
}
let version = consensus::header_version(height);

let now = Utc::now().timestamp();
let timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now, 0), Utc);
@@ -770,3 +792,36 @@ impl Block {
Ok(())
}
}

impl From<UntrustedBlock> for Block {
fn from(block: UntrustedBlock) -> Self {
block.0
}
}

/// Block which does lightweight validation as part of deserialization;
/// it is supposed to be used when we can't trust the channel (e.g. network).
pub struct UntrustedBlock(Block);

/// Deserialization of an untrusted block: the header is validated first
/// (via `UntrustedBlockHeader`), then the body gets lightweight validation.
impl Readable for UntrustedBlock {
fn read(reader: &mut dyn Reader) -> Result<UntrustedBlock, ser::Error> {
// we validate header here before parsing the body
let header = UntrustedBlockHeader::read(reader)?;
let body = TransactionBody::read(reader)?;

// Now "lightweight" validation of the block.
// Treat any validation issues as data corruption.
// An example of this would be reading a block
// that exceeded the allowed number of inputs.
body.validate_read(Weighting::AsBlock).map_err(|e| {
error!("read validation error: {}", e);
ser::Error::CorruptedData
})?;
let block = Block {
header: header.into(),
body,
};
Ok(UntrustedBlock(block))
}
}
@@ -17,8 +17,8 @@

use crate::core::committed::Committed;
use crate::ser::{self, Readable, Reader, Writeable, Writer};
use crate::util::secp::pedersen::Commitment;
use crate::util::secp_static;
use util::secp::pedersen::Commitment;
use util::secp_static;

/// The output_sum and kernel_sum for a given block.
/// This is used to validate the next block being processed by applying
@@ -14,13 +14,12 @@

//! The Committed trait and associated errors.

use crate::keychain;
use crate::keychain::BlindingFactor;

use crate::util::secp::key::SecretKey;
use crate::util::secp::pedersen::Commitment;
use crate::util::{secp, secp_static, static_secp_instance};
use failure::Fail;
use keychain;
use keychain::BlindingFactor;
use util::secp::key::SecretKey;
use util::secp::pedersen::Commitment;
use util::{secp, secp_static, static_secp_instance};

/// Errors from summing and verifying kernel excesses via committed trait.
#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)]
@@ -14,13 +14,12 @@

//! Compact Blocks.

use rand::{thread_rng, Rng};

use crate::core::block::{Block, BlockHeader, Error};
use crate::core::block::{Block, BlockHeader, Error, UntrustedBlockHeader};
use crate::core::hash::{DefaultHashable, Hashed};
use crate::core::id::ShortIdentifiable;
use crate::core::{Output, ShortId, TxKernel};
use crate::ser::{self, read_multi, Readable, Reader, VerifySortedAndUnique, Writeable, Writer};
use rand::{thread_rng, Rng};

/// Container for full (full) outputs and kernels and kern_ids for a compact block.
#[derive(Debug, Clone)]
@@ -221,15 +220,41 @@ impl Readable for CompactBlock {
let nonce = reader.read_u64()?;
let body = CompactBlockBody::read(reader)?;

let cb = CompactBlock {
Ok(CompactBlock {
header,
nonce,
body,
})
}
}

impl From<UntrustedCompactBlock> for CompactBlock {
fn from(ucb: UntrustedCompactBlock) -> Self {
ucb.0
}
}

/// Compact block which does lightweight validation as part of deserialization;
/// it is supposed to be used when we can't trust the channel (e.g. network).
pub struct UntrustedCompactBlock(CompactBlock);

/// Implementation of Readable for an untrusted compact block, defines how to read a
/// compact block from a binary stream. Validates the header while reading it,
/// then runs `validate_read` over the assembled compact block.
impl Readable for UntrustedCompactBlock {
fn read(reader: &mut dyn Reader) -> Result<UntrustedCompactBlock, ser::Error> {
// Header is validated as part of its own untrusted read.
let header = UntrustedBlockHeader::read(reader)?;
let nonce = reader.read_u64()?;
let body = CompactBlockBody::read(reader)?;

let cb = CompactBlock {
header: header.into(),
nonce,
body,
};

// Now validate the compact block and treat any validation error as corrupted data.
cb.validate_read().map_err(|_| ser::Error::CorruptedData)?;

Ok(UntrustedCompactBlock(cb))
}
}
@@ -17,18 +17,16 @@
//! Primary hash function used in the protocol
//!

use crate::ser::{
self, AsFixedBytes, Error, FixedLength, ProtocolVersion, Readable, Reader, Writeable, Writer,
};
use blake2::blake2b::Blake2b;
use byteorder::{BigEndian, ByteOrder};
use std::cmp::min;
use std::convert::AsRef;
use std::ops::Add;
use std::{fmt, ops};

use crate::blake2::blake2b::Blake2b;

use crate::ser::{
self, AsFixedBytes, Error, FixedLength, ProtocolVersion, Readable, Reader, Writeable, Writer,
};
use crate::util;
use util;

/// A hash consisting of all zeroes, used as a sentinel. No known preimage.
pub const ZERO_HASH: Hash = Hash([0; 32]);
@@ -251,7 +249,7 @@ impl<D: DefaultHashable, E: DefaultHashable> DefaultHashable for (D, E) {}
impl<D: DefaultHashable, E: DefaultHashable, F: DefaultHashable> DefaultHashable for (D, E, F) {}

/// Implement Hashed trait for external types here
impl DefaultHashable for crate::util::secp::pedersen::RangeProof {}
impl DefaultHashable for util::secp::pedersen::RangeProof {}
impl DefaultHashable for Vec<u8> {}
impl DefaultHashable for u8 {}
impl DefaultHashable for u64 {}
@@ -14,15 +14,12 @@

//! short ids for compact blocks

use std::cmp::min;
use std::cmp::Ordering;

use byteorder::{ByteOrder, LittleEndian};
use siphasher::sip::SipHasher24;

use crate::core::hash::{DefaultHashable, Hash, Hashed};
use crate::ser::{self, Readable, Reader, Writeable, Writer};
use crate::util;
use byteorder::{ByteOrder, LittleEndian};
use siphasher::sip::SipHasher24;
use std::cmp::{min, Ordering};
use util;

/// The size of a short id used to identify inputs|outputs|kernels (6 bytes)
pub const SHORT_ID_SIZE: usize = 6;
@@ -18,7 +18,7 @@ use crate::core::hash::Hash;
use crate::core::pmmr;
use crate::ser;
use crate::ser::{PMMRIndexHashable, Readable, Reader, Writeable, Writer};
use crate::util;
use util;

/// Merkle proof errors.
#[derive(Clone, Debug, PartialEq)]
@@ -40,8 +40,10 @@ mod backend;
mod pmmr;
mod readonly_pmmr;
mod rewindable_pmmr;
mod vec_backend;

pub use self::backend::*;
pub use self::pmmr::*;
pub use self::readonly_pmmr::*;
pub use self::rewindable_pmmr::*;
pub use self::vec_backend::*;
@@ -55,6 +55,13 @@ pub trait Backend<T: PMMRable> {
/// Iterator over current (unpruned, unremoved) leaf positions.
fn leaf_pos_iter(&self) -> Box<dyn Iterator<Item = u64> + '_>;

/// Number of leaves
fn n_unpruned_leaves(&self) -> u64;

/// Iterator over current (unpruned, unremoved) leaf insertion index.
/// Note: This differs from underlying MMR pos - [0, 1, 2, 3, 4] vs. [1, 2, 4, 5, 8].
fn leaf_idx_iter(&self, from_idx: u64) -> Box<dyn Iterator<Item = u64> + '_>;

/// Remove Hash by insertion position. An index is also provided so the
/// underlying backend can implement some rollback of positions up to a
/// given index (practically the index is the height of a block that
@@ -79,6 +79,16 @@ where
self.backend.leaf_pos_iter()
}

/// Number of leafs in the MMR
pub fn n_unpruned_leaves(&self) -> u64 {
self.backend.n_unpruned_leaves()
}

/// Iterator over current (unpruned, unremoved) leaf insertion indices.
pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator<Item = u64> + '_ {
self.backend.leaf_idx_iter(from_idx)
}

/// Returns a vec of the peaks of this MMR.
pub fn peaks(&self) -> Vec<Hash> {
let peaks_pos = peaks(self.last_pos);
@@ -99,10 +109,11 @@ where
.filter(|x| *x < peak_pos)
.filter_map(|x| self.backend.get_from_file(x))
.collect::<Vec<_>>();
res.reverse();
if let Some(rhs) = rhs {
res.insert(0, rhs);
res.push(rhs);
}
res.reverse();

res
}

@@ -118,10 +129,10 @@ where
.collect::<Vec<_>>();

let mut res = None;
for peak in rhs.iter().rev() {
for peak in rhs.into_iter().rev() {
res = match res {
None => Some(*peak),
Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())),
None => Some(peak),
Some(rhash) => Some((peak, rhash).hash_with_index(self.unpruned_size())),
}
}
res
@@ -134,10 +145,10 @@ where
return Ok(ZERO_HASH);
}
let mut res = None;
for peak in self.peaks().iter().rev() {
for peak in self.peaks().into_iter().rev() {
res = match res {
None => Some(*peak),
Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())),
None => Some(peak),
Some(rhash) => Some((peak, rhash).hash_with_index(self.unpruned_size())),
}
}
res.ok_or_else(|| "no root, invalid tree".to_owned())
@@ -418,7 +429,7 @@ pub fn peaks(num: u64) -> Vec<u64> {
/// The number of leaves in a MMR of the provided size.
pub fn n_leaves(size: u64) -> u64 {
let (sizes, height) = peak_sizes_height(size);
let nleaves = sizes.iter().map(|n| (n + 1) / 2 as u64).sum();
let nleaves = sizes.into_iter().map(|n| (n + 1) / 2 as u64).sum();
if height == 0 {
nleaves
} else {
@@ -489,7 +500,6 @@ pub fn peak_map_height(mut pos: u64) -> (u64, u64) {
/// The height of a node in a full binary tree from its postorder traversal
/// index. This function is the base on which all others, as well as the MMR,
/// are built.

pub fn bintree_postorder_height(num: u64) -> u64 {
if num == 0 {
return 0;
@@ -17,7 +17,7 @@
use std::marker;

use crate::core::hash::{Hash, ZERO_HASH};
use crate::core::pmmr::pmmr::{bintree_rightmost, insertion_to_pmmr_index, peaks};
use crate::core::pmmr::pmmr::{bintree_rightmost, peaks};
use crate::core::pmmr::{is_leaf, Backend};
use crate::ser::{PMMRIndexHashable, PMMRable};

@@ -91,6 +91,11 @@ where
self.backend.leaf_pos_iter()
}

/// Iterator over current (unpruned, unremoved) leaf insertion indices.
pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator<Item = u64> + '_ {
self.backend.leaf_idx_iter(from_idx)
}

/// Is the MMR empty?
pub fn is_empty(&self) -> bool {
self.last_pos == 0
@@ -133,27 +138,28 @@ where

/// Helper function which returns un-pruned nodes from the insertion index
/// forward
/// returns last insertion index returned along with data
pub fn elements_from_insertion_index(
/// returns last pmmr index returned along with data
pub fn elements_from_pmmr_index(
&self,
mut index: u64,
mut pmmr_index: u64,
max_count: u64,
max_pmmr_pos: Option<u64>,
) -> (u64, Vec<T::E>) {
let mut return_vec = vec![];
if index == 0 {
index = 1;
let last_pos = match max_pmmr_pos {
Some(p) => p,
None => self.last_pos,
};
if pmmr_index == 0 {
pmmr_index = 1;
}
let mut return_index = index;
let mut pmmr_index = insertion_to_pmmr_index(index);
while return_vec.len() < max_count as usize && pmmr_index <= self.last_pos {
while return_vec.len() < max_count as usize && pmmr_index <= last_pos {
if let Some(t) = self.get_data(pmmr_index) {
return_vec.push(t);
return_index = index;
}
index += 1;
pmmr_index = insertion_to_pmmr_index(index);
pmmr_index += 1;
}
(return_index, return_vec)
(pmmr_index.saturating_sub(1), return_vec)
}

/// Helper function to get the last N nodes inserted, i.e. the last
@@ -0,0 +1,153 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::convert::TryFrom;
use std::fs::File;

use croaring::Bitmap;

use crate::core::hash::Hash;
use crate::core::pmmr::{self, Backend};
use crate::core::BlockHeader;
use crate::ser::PMMRable;

/// Simple/minimal/naive MMR backend implementation backed by Vec<T> and Vec<Hash>.
/// Removed pos are maintained in a HashSet<u64>.
/// Positions are 1-based (matching MMR convention); vec indices are 0-based.
#[derive(Clone, Debug)]
pub struct VecBackend<T: PMMRable> {
/// Backend elements (optional, possible to just store hashes).
pub data: Option<Vec<T>>,
/// Vec of hashes for the PMMR (both leaves and parents).
pub hashes: Vec<Hash>,
/// Positions of removed elements (is this applicable if we do not store data?)
pub removed: HashSet<u64>,
}

impl<T: PMMRable> Backend<T> for VecBackend<T> {
	/// Append a new element and its accompanying hashes (leaf hash plus any
	/// newly formed parent hashes) to the backend.
	fn append(&mut self, elmt: &T, hashes: Vec<Hash>) -> Result<(), String> {
		if let Some(data) = &mut self.data {
			data.push(elmt.clone());
		}
		// `hashes` is owned by this fn, so move its elements in directly.
		// (Previously this cloned the whole vec and drained the temporary.)
		self.hashes.extend(hashes);
		Ok(())
	}

	/// Hash at the given position, or None if the position was removed.
	fn get_hash(&self, position: u64) -> Option<Hash> {
		if self.removed.contains(&position) {
			None
		} else {
			self.get_from_file(position)
		}
	}

	/// Data element at the given position, or None if the position was removed.
	fn get_data(&self, position: u64) -> Option<T::E> {
		if self.removed.contains(&position) {
			None
		} else {
			self.get_data_from_file(position)
		}
	}

	/// Hash at the given position, ignoring the removed set.
	/// Positions are 1-based; the index into `hashes` is position - 1.
	fn get_from_file(&self, position: u64) -> Option<Hash> {
		let idx = usize::try_from(position.saturating_sub(1)).expect("usize from u64");
		self.hashes.get(idx).cloned()
	}

	/// Data element at the given position, ignoring the removed set.
	/// Converts the MMR position to a 0-based insertion index via n_leaves().
	/// Returns None for a "hash only" backend.
	fn get_data_from_file(&self, position: u64) -> Option<T::E> {
		if let Some(data) = &self.data {
			let idx = usize::try_from(pmmr::n_leaves(position).saturating_sub(1))
				.expect("usize from u64");
			data.get(idx).map(|x| x.as_elmt())
		} else {
			None
		}
	}

	fn data_as_temp_file(&self) -> Result<File, String> {
		unimplemented!()
	}

	/// Number of leaves in the MMR
	fn n_unpruned_leaves(&self) -> u64 {
		unimplemented!()
	}

	/// Iterator over all non-removed leaf positions (1-based, ascending).
	fn leaf_pos_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
		Box::new(
			self.hashes
				.iter()
				.enumerate()
				.map(|(x, _)| (x + 1) as u64)
				.filter(move |x| pmmr::is_leaf(*x) && !self.removed.contains(x)),
		)
	}

	/// Iterator over non-removed leaf insertion indices (0-based),
	/// starting at `from_idx`.
	fn leaf_idx_iter(&self, from_idx: u64) -> Box<dyn Iterator<Item = u64> + '_> {
		let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1);
		Box::new(
			self.leaf_pos_iter()
				.skip_while(move |x| *x < from_pos)
				.map(|x| pmmr::n_leaves(x).saturating_sub(1)),
		)
	}

	/// Mark the given position as removed.
	fn remove(&mut self, position: u64) -> Result<(), String> {
		self.removed.insert(position);
		Ok(())
	}

	/// Rewind the backend to the given position, truncating both the data vec
	/// (to the corresponding leaf count) and the hashes vec.
	/// `rewind_rm_pos` is unused by this simple in-memory backend.
	fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
		if let Some(data) = &mut self.data {
			let idx = pmmr::n_leaves(position);
			data.truncate(usize::try_from(idx).expect("usize from u64"));
		}
		self.hashes
			.truncate(usize::try_from(position).expect("usize from u64"));
		Ok(())
	}

	/// No-op: nothing to snapshot for an in-memory backend.
	fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> {
		Ok(())
	}

	fn release_files(&mut self) {}

	fn dump_stats(&self) {}
}

impl<T: PMMRable> VecBackend<T> {
	/// Creates an empty backend that stores both element data and hashes.
	pub fn new() -> VecBackend<T> {
		VecBackend {
			data: Some(Vec::new()),
			hashes: Vec::new(),
			removed: HashSet::new(),
		}
	}

	/// Creates an empty backend that keeps hashes only (no element data).
	pub fn new_hash_only() -> VecBackend<T> {
		VecBackend {
			data: None,
			hashes: Vec::new(),
			removed: HashSet::new(),
		}
	}

	/// Size of this vec backend in hashes.
	pub fn size(&self) -> u64 {
		self.hashes.len() as u64
	}
}
@@ -17,23 +17,23 @@
use crate::core::hash::{DefaultHashable, Hashed};
use crate::core::verifier_cache::VerifierCache;
use crate::core::{committed, Committed};
use crate::keychain::{self, BlindingFactor};
use crate::libtx::secp_ser;
use crate::ser::{
self, read_multi, FixedLength, PMMRable, ProtocolVersion, Readable, Reader,
VerifySortedAndUnique, Writeable, Writer,
};
use crate::util;
use crate::util::secp;
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::static_secp_instance;
use crate::util::RwLock;
use crate::{consensus, global};
use enum_primitive::FromPrimitive;
use keychain::{self, BlindingFactor};
use std::cmp::Ordering;
use std::cmp::{max, min};
use std::sync::Arc;
use std::{error, fmt};
use util;
use util::secp;
use util::secp::pedersen::{Commitment, RangeProof};
use util::static_secp_instance;
use util::RwLock;

/// Various tx kernel variants.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
@@ -463,42 +463,17 @@ impl TxKernel {

/// Build an empty tx kernel with zero values.
pub fn empty() -> TxKernel {
TxKernel::with_features(KernelFeatures::Plain { fee: 0 })
}

/// Build an empty tx kernel with the provided kernel features.
pub fn with_features(features: KernelFeatures) -> TxKernel {
TxKernel {
features: KernelFeatures::Plain { fee: 0 },
features,
excess: Commitment::from_vec(vec![0; 33]),
excess_sig: secp::Signature::from_raw_data(&[0; 64]).unwrap(),
}
}

/// Builds a new tx kernel with the provided fee.
/// Will panic if we cannot safely do this on the existing kernel.
/// i.e. Do not try and set a fee on a coinbase kernel.
pub fn with_fee(self, fee: u64) -> TxKernel {
match self.features {
KernelFeatures::Plain { .. } => {
let features = KernelFeatures::Plain { fee };
TxKernel { features, ..self }
}
KernelFeatures::HeightLocked { lock_height, .. } => {
let features = KernelFeatures::HeightLocked { fee, lock_height };
TxKernel { features, ..self }
}
KernelFeatures::Coinbase => panic!("fee not supported on coinbase kernel"),
}
}

/// Builds a new tx kernel with the provided lock_height.
/// Will panic if we cannot safely do this on the existing kernel.
/// i.e. Do not try and set a lock_height on a coinbase kernel.
pub fn with_lock_height(self, lock_height: u64) -> TxKernel {
match self.features {
KernelFeatures::Plain { fee } | KernelFeatures::HeightLocked { fee, .. } => {
let features = KernelFeatures::HeightLocked { fee, lock_height };
TxKernel { features, ..self }
}
KernelFeatures::Coinbase => panic!("lock_height not supported on coinbase kernel"),
}
}
}

/// Enum of possible tx weight verification options -
@@ -684,6 +659,13 @@ impl TransactionBody {
self
}

/// Builds a new TransactionBody replacing any existing kernels with the provided kernel.
pub fn replace_kernel(mut self, kernel: TxKernel) -> TransactionBody {
self.kernels.clear();
self.kernels.push(kernel);
self
}

/// Total fee for a TransactionBody is the sum of fees of all fee carrying kernels.
pub fn fee(&self) -> u64 {
self.kernels
@@ -1012,8 +994,8 @@ impl Transaction {
}
}

/// Builds a new transaction with the provided output added. Existing
/// outputs, if any, are kept intact.
/// Builds a new transaction with the provided kernel added. Existing
/// kernels, if any, are kept intact.
/// Sort order is maintained.
pub fn with_kernel(self, kernel: TxKernel) -> Transaction {
Transaction {
@@ -1022,6 +1004,14 @@ impl Transaction {
}
}

/// Builds a new transaction replacing any existing kernels with the provided kernel.
pub fn replace_kernel(self, kernel: TxKernel) -> Transaction {
Transaction {
body: self.body.replace_kernel(kernel),
..self
}
}

/// Get inputs
pub fn inputs(&self) -> &Vec<Input> {
&self.body.inputs
@@ -1594,8 +1584,8 @@ mod test {
use super::*;
use crate::core::hash::Hash;
use crate::core::id::{ShortId, ShortIdentifiable};
use crate::keychain::{ExtKeychain, Keychain, SwitchCommitmentType};
use crate::util::secp;
use keychain::{ExtKeychain, Keychain, SwitchCommitmentType};
use util::secp;

#[test]
fn test_kernel_ser_deser() {
@@ -15,10 +15,9 @@
//! VerifierCache trait for batch verifying outputs and kernels.
//! We pass a "caching verifier" into the block validation processing with this.

use lru_cache::LruCache;

use crate::core::hash::{Hash, Hashed};
use crate::core::{Output, TxKernel};
use lru_cache::LruCache;

/// Verifier cache for caching expensive verification results.
/// Specifically the following -
@@ -19,17 +19,15 @@

#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))]

use chrono::prelude::{TimeZone, Utc};

use crate::core;
use crate::pow::{Difficulty, Proof, ProofOfWork};
use crate::util;
use crate::util::secp::constants::SINGLE_BULLET_PROOF_SIZE;
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::secp::Signature;

use crate::core::hash::Hash;
use crate::keychain::BlindingFactor;
use crate::pow::{Difficulty, Proof, ProofOfWork};
use chrono::prelude::{TimeZone, Utc};
use keychain::BlindingFactor;
use util;
use util::secp::constants::SINGLE_BULLET_PROOF_SIZE;
use util::secp::pedersen::{Commitment, RangeProof};
use util::secp::Signature;

/// Genesis block definition for development networks. The proof of work size
/// is small enough to mine it on the fly, so it does not contain its own
@@ -24,13 +24,15 @@ use crate::consensus::{
};
use crate::core::block::HeaderVersion;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckatoo_ctx, EdgeType, PoWContext,
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckatoo_ctx, EdgeType,
PoWContext,
};
use util::RwLock;

/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
use crate::util::RwLock;
/// Define these here, as they should be developer-set, not really tweakable
/// by users

@@ -174,14 +176,20 @@ where
match chain_type {
// Mainnet has Cuckaroo(d)29 for AR and Cuckatoo31+ for AF
ChainTypes::Mainnet if edge_bits > 29 => new_cuckatoo_ctx(edge_bits, proof_size, max_sols),
ChainTypes::Mainnet if valid_header_version(height, HeaderVersion::new(2)) => {
ChainTypes::Mainnet if valid_header_version(height, HeaderVersion(3)) => {
new_cuckaroom_ctx(edge_bits, proof_size)
}
ChainTypes::Mainnet if valid_header_version(height, HeaderVersion(2)) => {
new_cuckarood_ctx(edge_bits, proof_size)
}
ChainTypes::Mainnet => new_cuckaroo_ctx(edge_bits, proof_size),

// Same for Floonet
ChainTypes::Floonet if edge_bits > 29 => new_cuckatoo_ctx(edge_bits, proof_size, max_sols),
ChainTypes::Floonet if valid_header_version(height, HeaderVersion::new(2)) => {
ChainTypes::Floonet if valid_header_version(height, HeaderVersion(3)) => {
new_cuckaroom_ctx(edge_bits, proof_size)
}
ChainTypes::Floonet if valid_header_version(height, HeaderVersion(2)) => {
new_cuckarood_ctx(edge_bits, proof_size)
}
ChainTypes::Floonet => new_cuckaroo_ctx(edge_bits, proof_size),
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the MimbleWimble paper.
//! Implementation of the Mimblewimble paper.
//! https://download.wpsoftware.net/bitcoin/wizardry/mimblewimble.txt

#![deny(non_upper_case_globals)]
@@ -21,22 +21,18 @@
#![deny(unused_mut)]
#![warn(missing_docs)]

use blake2_rfc as blake2;
#[macro_use]
extern crate enum_primitive;
use grin_keychain as keychain;
use grin_util as util;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
extern crate serde;
use serde;
#[macro_use]
extern crate log;
use failure;
#[macro_use]
extern crate failure_derive;
extern crate zeroize;
#[macro_use]
pub mod macros;

@@ -16,12 +16,11 @@
//! This module interfaces into the underlying
//! [Rust Aggsig library](https://github.com/mimblewimble/rust-secp256k1-zkp/blob/master/src/aggsig.rs)

use crate::keychain::{BlindingFactor, Identifier, Keychain};
use crate::libtx::error::{Error, ErrorKind};
use crate::util::secp::key::{PublicKey, SecretKey};
use crate::util::secp::pedersen::Commitment;
use crate::util::secp::{self, aggsig, Message, Secp256k1, Signature};
use grin_keychain::SwitchCommitmentType;
use keychain::{BlindingFactor, Identifier, Keychain, SwitchCommitmentType};
use util::secp::key::{PublicKey, SecretKey};
use util::secp::pedersen::Commitment;
use util::secp::{self, aggsig, Message, Secp256k1, Signature};

/// Creates a new secure nonce (as a SecretKey), guaranteed to be usable during
/// aggsig creation.
@@ -34,7 +33,6 @@ use grin_keychain::SwitchCommitmentType;
///
/// ```
/// # extern crate grin_core as core;
/// # extern crate grin_util as util;
/// use core::libtx::aggsig;
/// use util::secp::{ContextFlag, Secp256k1};
/// let secp = Secp256k1::with_caps(ContextFlag::SignOnly);
@@ -68,7 +66,6 @@ pub fn create_secnonce(secp: &Secp256k1) -> Result<SecretKey, Error> {
///
/// ```
/// # extern crate grin_core as core;
/// # extern crate grin_util as util;
/// # extern crate rand;
/// use rand::thread_rng;
/// use core::libtx::aggsig;
@@ -139,7 +136,6 @@ pub fn calculate_partial_sig(
///
/// ```
/// # extern crate grin_core as core;
/// # extern crate grin_util as util;
/// # extern crate rand;
/// use rand::thread_rng;
/// use core::libtx::aggsig;
@@ -223,9 +219,7 @@ pub fn verify_partial_sig(
/// # Example
///
/// ```
/// # extern crate grin_util as util;
/// # extern crate grin_core as core;
/// # extern crate grin_keychain as keychain;
/// use core::consensus::reward;
/// use util::secp::key::{PublicKey, SecretKey};
/// use util::secp::{ContextFlag, Secp256k1};
@@ -291,9 +285,7 @@ where
/// # Example
///
/// ```
/// # extern crate grin_util as util;
/// # extern crate grin_core as core;
/// # extern crate grin_keychain as keychain;
/// use core::consensus::reward;
/// use core::libtx::{aggsig, proof};
/// use util::secp::key::{PublicKey, SecretKey};
@@ -367,7 +359,6 @@ pub fn verify_single_from_commit(
///
/// ```
/// # extern crate grin_core as core;
/// # extern crate grin_util as util;
/// # extern crate rand;
/// use rand::thread_rng;
/// use core::libtx::aggsig;
@@ -22,14 +22,19 @@
//! _transaction_ function.
//!
//! Example:
//! build::transaction(vec![input_rand(75), output_rand(42), output_rand(32),
//! with_fee(1)])

use crate::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use crate::keychain::{BlindSum, BlindingFactor, Identifier, Keychain};
//! build::transaction(
//! KernelFeatures::Plain{ fee: 2 },
//! vec![
//! input_rand(75),
//! output_rand(42),
//! output_rand(32),
//! ]
//! )

use crate::core::{Input, KernelFeatures, Output, OutputFeatures, Transaction, TxKernel};
use crate::libtx::proof::{self, ProofBuild};
use crate::libtx::{aggsig, Error};
use grin_keychain::SwitchCommitmentType;
use keychain::{BlindSum, BlindingFactor, Identifier, Keychain, SwitchCommitmentType};

/// Context information available to transaction combinators.
pub struct Context<'a, K, B>
@@ -44,11 +49,12 @@ where
}

/// Function type returned by the transaction combinators. Transforms a
/// (Transaction, BlindSum) pair into another, provided some context.
/// (Transaction, BlindSum) tuple into another, given the provided context.
/// Will return an Err if something went wrong at any point during transaction building.
pub type Append<K, B> = dyn for<'a> Fn(
&'a mut Context<'_, K, B>,
(Transaction, TxKernel, BlindSum),
) -> (Transaction, TxKernel, BlindSum);
Result<(Transaction, BlindSum), Error>,
) -> Result<(Transaction, BlindSum), Error>;

/// Adds an input with the provided value and blinding key to the transaction
/// being built.
@@ -58,17 +64,21 @@ where
B: ProofBuild,
{
Box::new(
move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
let commit = build
.keychain
.commit(value, &key_id, &SwitchCommitmentType::Regular)
.unwrap(); // TODO: proper support for different switch commitment schemes
let input = Input::new(features, commit);
(
tx.with_input(input),
kern,
sum.sub_key_id(key_id.to_value_path(value)),
)
move |build, acc| -> Result<(Transaction, BlindSum), Error> {
if let Ok((tx, sum)) = acc {
let commit =
build
.keychain
.commit(value, &key_id, &SwitchCommitmentType::Regular)?;
// TODO: proper support for different switch commitment schemes
let input = Input::new(features, commit);
Ok((
tx.with_input(input),
sum.sub_key_id(key_id.to_value_path(value)),
))
} else {
acc
}
},
)
}
@@ -105,11 +115,13 @@ where
B: ProofBuild,
{
Box::new(
move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
move |build, acc| -> Result<(Transaction, BlindSum), Error> {
let (tx, sum) = acc?;

// TODO: proper support for different switch commitment schemes
let switch = &SwitchCommitmentType::Regular;

let commit = build.keychain.commit(value, &key_id, switch).unwrap();
let commit = build.keychain.commit(value, &key_id, switch)?;

debug!("Building output: {}, {:?}", value, commit);

@@ -121,44 +133,16 @@ where
switch,
commit,
None,
)
.unwrap();
)?;

(
Ok((
tx.with_output(Output {
features: OutputFeatures::Plain,
commit,
proof: rproof,
}),
kern,
sum.add_key_id(key_id.to_value_path(value)),
)
},
)
}

/// Sets the fee on the transaction being built.
pub fn with_fee<K, B>(fee: u64) -> Box<Append<K, B>>
where
K: Keychain,
B: ProofBuild,
{
Box::new(
move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
(tx, kern.with_fee(fee), sum)
},
)
}

/// Sets the lock_height on the transaction being built.
pub fn with_lock_height<K, B>(lock_height: u64) -> Box<Append<K, B>>
where
K: Keychain,
B: ProofBuild,
{
Box::new(
move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
(tx, kern.with_lock_height(lock_height), sum)
))
},
)
}
@@ -172,53 +156,32 @@ where
B: ProofBuild,
{
Box::new(
move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
(tx, kern, sum.add_blinding_factor(excess.clone()))
},
)
}

/// Sets a known tx "offset". Used in final step of tx construction.
pub fn with_offset<K, B>(offset: BlindingFactor) -> Box<Append<K, B>>
where
K: Keychain,
B: ProofBuild,
{
Box::new(
move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
(tx.with_offset(offset.clone()), kern, sum)
move |_build, acc| -> Result<(Transaction, BlindSum), Error> {
acc.map(|(tx, sum)| (tx, sum.add_blinding_factor(excess.clone())))
},
)
}

/// Sets an initial transaction to add to when building a new transaction.
/// We currently only support building a tx with a single kernel with
/// build::transaction()
pub fn initial_tx<K, B>(mut tx: Transaction) -> Box<Append<K, B>>
pub fn initial_tx<K, B>(tx: Transaction) -> Box<Append<K, B>>
where
K: Keychain,
B: ProofBuild,
{
assert_eq!(tx.kernels().len(), 1);
let kern = tx.kernels_mut().remove(0);
Box::new(
move |_build, (_, _, sum)| -> (Transaction, TxKernel, BlindSum) {
(tx.clone(), kern.clone(), sum)
move |_build, acc| -> Result<(Transaction, BlindSum), Error> {
acc.map(|(_, sum)| (tx.clone(), sum))
},
)
}

/// Builds a new transaction by combining all the combinators provided in a
/// Vector. Transactions can either be built "from scratch" with a list of
/// inputs or outputs or from a pre-existing transaction that gets added to.
/// Takes an existing transaction and partially builds on it.
///
/// Example:
/// let (tx1, sum) = build::transaction(vec![input_rand(4), output_rand(1),
/// with_fee(1)], keychain).unwrap();
/// let (tx2, _) = build::transaction(vec![initial_tx(tx1), with_excess(sum),
/// output_rand(2)], keychain).unwrap();
/// let (tx, sum) = build::transaction(tx, vec![input_rand(4), output_rand(1))], keychain)?;
///
pub fn partial_transaction<K, B>(
tx: Transaction,
elems: Vec<Box<Append<K, B>>>,
keychain: &K,
builder: &B,
@@ -228,22 +191,16 @@ where
B: ProofBuild,
{
let mut ctx = Context { keychain, builder };
let (tx, kern, sum) = elems.iter().fold(
(Transaction::empty(), TxKernel::empty(), BlindSum::new()),
|acc, elem| elem(&mut ctx, acc),
);
let (tx, sum) = elems
.iter()
.fold(Ok((tx, BlindSum::new())), |acc, elem| elem(&mut ctx, acc))?;
let blind_sum = ctx.keychain.blind_sum(&sum)?;

// we only support building a tx with a single kernel via build::transaction()
assert!(tx.kernels().is_empty());

let tx = tx.with_kernel(kern);

Ok((tx, blind_sum))
}

/// Builds a complete transaction.
pub fn transaction<K, B>(
features: KernelFeatures,
elems: Vec<Box<Append<K, B>>>,
keychain: &K,
builder: &B,
@@ -253,50 +210,50 @@ where
B: ProofBuild,
{
let mut ctx = Context { keychain, builder };
let (mut tx, mut kern, sum) = elems.iter().fold(
(Transaction::empty(), TxKernel::empty(), BlindSum::new()),
|acc, elem| elem(&mut ctx, acc),
);
let (mut tx, sum) = elems
.iter()
.fold(Ok((Transaction::empty(), BlindSum::new())), |acc, elem| {
elem(&mut ctx, acc)
})?;
let blind_sum = ctx.keychain.blind_sum(&sum)?;

// Split the key so we can generate an offset for the tx.
let split = blind_sum.split(&keychain.secp())?;
let k1 = split.blind_1;
let k2 = split.blind_2;

let mut kern = TxKernel::with_features(features);

// Construct the message to be signed.
let msg = kern.msg_to_sign()?;

// Generate kernel excess and excess_sig using the split key k1.
let skey = k1.secret_key(&keychain.secp())?;
kern.excess = ctx.keychain.secp().commit(0, skey)?;
let pubkey = &kern.excess.to_pubkey(&keychain.secp())?;
kern.excess_sig =
aggsig::sign_with_blinding(&keychain.secp(), &msg, &k1, Some(&pubkey)).unwrap();
kern.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &k1, Some(&pubkey))?;

// Store the kernel offset (k2) on the tx.
// Commitments will sum correctly when accounting for the offset.
tx.offset = k2.clone();

// Set the kernel on the tx (assert this is now a single-kernel tx).
assert!(tx.kernels().is_empty());
let tx = tx.with_kernel(kern);
assert_eq!(tx.kernels().len(), 1);
// Set the kernel on the tx.
let tx = tx.replace_kernel(kern);

Ok(tx)
}

// Just a simple test, most exhaustive tests in the core.
#[cfg(test)]
mod test {
use crate::util::RwLock;
use std::sync::Arc;
use util::RwLock;

use super::*;
use crate::core::transaction::Weighting;
use crate::core::verifier_cache::{LruVerifierCache, VerifierCache};
use crate::keychain::{ExtKeychain, ExtKeychainPath};
use crate::libtx::ProofBuilder;
use keychain::{ExtKeychain, ExtKeychainPath};

fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
@@ -313,12 +270,8 @@ mod test {
let vc = verifier_cache();

let tx = transaction(
vec![
input(10, key_id1),
input(12, key_id2),
output(20, key_id3),
with_fee(2),
],
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&keychain,
&builder,
)
@@ -338,12 +291,8 @@ mod test {
let vc = verifier_cache();

let tx = transaction(
vec![
input(10, key_id1),
input(12, key_id2),
output(20, key_id3),
with_fee(2),
],
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&keychain,
&builder,
)
@@ -362,7 +311,8 @@ mod test {
let vc = verifier_cache();

let tx = transaction(
vec![input(6, key_id1), output(2, key_id2), with_fee(4)],
KernelFeatures::Plain { fee: 4 },
vec![input(6, key_id1), output(2, key_id2)],
&keychain,
&builder,
)
@@ -13,12 +13,11 @@
// limitations under the License.

//! libtx specific errors
use crate::core::transaction;
use failure::{Backtrace, Context, Fail};
use keychain;
use std::fmt::{self, Display};

use crate::core::transaction;
use crate::keychain;
use crate::util::secp;
use util::secp;

/// Lib tx error definition
#[derive(Debug)]
@@ -44,6 +43,9 @@ pub enum ErrorKind {
/// Rangeproof error
#[fail(display = "Rangeproof Error")]
RangeProof(String),
/// Other error
#[fail(display = "Other Error")]
Other(String),
}

impl Fail for Error {
@@ -14,15 +14,15 @@

//! Rangeproof library functions

use crate::blake2::blake2b::blake2b;
use crate::keychain::extkey_bip32::BIP32GrinHasher;
use crate::keychain::{Identifier, Keychain, SwitchCommitmentType, ViewKey};
use crate::libtx::error::{Error, ErrorKind};
use crate::util::secp::key::SecretKey;
use crate::util::secp::pedersen::{Commitment, ProofMessage, RangeProof};
use crate::util::secp::{self, Secp256k1};
use crate::zeroize::Zeroize;
use blake2::blake2b::blake2b;
use keychain::extkey_bip32::BIP32GrinHasher;
use keychain::{Identifier, Keychain, SwitchCommitmentType, ViewKey};
use std::convert::TryFrom;
use util::secp::key::SecretKey;
use util::secp::pedersen::{Commitment, ProofMessage, RangeProof};
use util::secp::{self, Secp256k1};
use zeroize::Zeroize;

/// Create a bulletproof
pub fn create<K, B>(
@@ -440,8 +440,8 @@ impl ProofBuild for ViewKey {
#[cfg(test)]
mod tests {
use super::*;
use crate::keychain::ExtKeychain;
use grin_keychain::ChildNumber;
use keychain::ChildNumber;
use keychain::ExtKeychain;
use rand::{thread_rng, Rng};

#[test]
@@ -16,14 +16,13 @@
//! reward.
use crate::consensus::reward;
use crate::core::{KernelFeatures, Output, OutputFeatures, TxKernel};
use crate::keychain::{Identifier, Keychain};
use crate::libtx::error::Error;
use crate::libtx::{
aggsig,
proof::{self, ProofBuild},
};
use crate::util::{secp, static_secp_instance};
use grin_keychain::SwitchCommitmentType;
use keychain::{Identifier, Keychain, SwitchCommitmentType};
use util::{secp, static_secp_instance};

/// output a reward output
pub fn output<K, B>(
@@ -14,16 +14,16 @@

//! Sane serialization & deserialization of cryptographic structs into hex

use crate::keychain::BlindingFactor;
use crate::serde::{Deserialize, Deserializer, Serializer};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::{from_hex, to_hex};
use keychain::BlindingFactor;
use serde::{Deserialize, Deserializer, Serializer};
use util::secp::pedersen::{Commitment, RangeProof};
use util::{from_hex, to_hex};

/// Serializes a secp PublicKey to and from hex
pub mod pubkey_serde {
use crate::serde::{Deserialize, Deserializer, Serializer};
use crate::util::secp::key::PublicKey;
use crate::util::{from_hex, static_secp_instance, to_hex};
use serde::{Deserialize, Deserializer, Serializer};
use util::secp::key::PublicKey;
use util::{from_hex, static_secp_instance, to_hex};

///
pub fn serialize<S>(key: &PublicKey, serializer: S) -> Result<S::Ok, S::Error>
@@ -55,8 +55,8 @@ pub mod pubkey_serde {
/// Serializes an Option<secp::Signature> to and from hex
pub mod option_sig_serde {
use crate::serde::{Deserialize, Deserializer, Serializer};
use crate::util::{from_hex, secp, static_secp_instance, to_hex};
use serde::de::Error;
use util::{from_hex, secp, static_secp_instance, to_hex};

///
pub fn serialize<S>(sig: &Option<secp::Signature>, serializer: S) -> Result<S::Ok, S::Error>
@@ -93,14 +93,13 @@ pub mod option_sig_serde {
None => Ok(None),
})
}

}

/// Serializes an Option<secp::SecretKey> to and from hex
pub mod option_seckey_serde {
use crate::serde::{Deserialize, Deserializer, Serializer};
use crate::util::{from_hex, secp, static_secp_instance, to_hex};
use serde::de::Error;
use util::{from_hex, secp, static_secp_instance, to_hex};

///
pub fn serialize<S>(
@@ -141,8 +140,8 @@ pub mod option_seckey_serde {
/// Serializes a secp::Signature to and from hex
pub mod sig_serde {
use crate::serde::{Deserialize, Deserializer, Serializer};
use crate::util::{from_hex, secp, static_secp_instance, to_hex};
use serde::de::Error;
use util::{from_hex, secp, static_secp_instance, to_hex};

///
pub fn serialize<S>(sig: &secp::Signature, serializer: S) -> Result<S::Ok, S::Error>
@@ -175,9 +174,9 @@ pub mod sig_serde {
/// Serializes an Option<secp::Commitment> to and from hex
pub mod option_commitment_serde {
use crate::serde::{Deserialize, Deserializer, Serializer};
use crate::util::secp::pedersen::Commitment;
use crate::util::{from_hex, to_hex};
use serde::de::Error;
use util::secp::pedersen::Commitment;
use util::{from_hex, to_hex};

///
pub fn serialize<S>(commit: &Option<Commitment>, serializer: S) -> Result<S::Ok, S::Error>
@@ -202,7 +201,6 @@ pub mod option_commitment_serde {
None => Ok(None),
})
}

}
/// Creates a BlindingFactor from a hex string
pub fn blind_from_hex<'de, D>(deserializer: D) -> Result<BlindingFactor, D::Error>
@@ -350,9 +348,9 @@ pub mod opt_string_or_u64 {
mod test {
use super::*;
use crate::libtx::aggsig;
use crate::util::secp::key::{PublicKey, SecretKey};
use crate::util::secp::{Message, Signature};
use crate::util::static_secp_instance;
use util::secp::key::{PublicKey, SecretKey};
use util::secp::{Message, Signature};
use util::static_secp_instance;

use serde_json;

@@ -66,30 +66,30 @@ macro_rules! tee {
/// Eliminate some of the boilerplate of deserialization (package ser) by
/// passing just the list of reader function (with optional single param)
/// Example before:
/// let foo = try!(reader.read_u64());
/// let bar = try!(reader.read_u32());
/// let fixed_byte_var = try!(reader.read_fixed_bytes(64));
/// let foo = reader.read_u64()?;
/// let bar = reader.read_u32()?;
/// let fixed_byte_var = reader.read_fixed_bytes(64)?;
/// Example after:
/// let (foo, bar, fixed_byte_var) = ser_multiread!(reader, read_u64,
/// read_u32, read_fixed_bytes(64));
#[macro_export]
macro_rules! ser_multiread {
($rdr:ident, $($read_call:ident $(($val:expr)),*),*) => {
( $(r#try!($rdr.$read_call($($val),*))),* )
( $($rdr.$read_call($($val),*)?),* )
}
}

/// Eliminate some of the boilerplate of serialization (package ser) by
/// passing directly pairs of writer function and data to write.
/// Example before:
/// try!(reader.write_u64(42));
/// try!(reader.write_u32(100));
/// reader.write_u64(42)?;
/// reader.write_u32(100)?;
/// Example after:
/// ser_multiwrite!(writer, [write_u64, 42], [write_u32, 100]);
#[macro_export]
macro_rules! ser_multiwrite {
($wrtr:ident, $([ $write_call:ident, $val:expr ]),* ) => {
$( r#try!($wrtr.$write_call($val)) );*
$($wrtr.$write_call($val)? );*
}
}

@@ -28,31 +28,32 @@
#![deny(unused_mut)]
#![warn(missing_docs)]

pub use self::common::EdgeType;
pub use self::types::*;
use crate::core::{Block, BlockHeader};
use crate::genesis;
use crate::global;
use chrono;
use num;

#[macro_use]
mod common;
pub mod cuckaroo;
pub mod cuckarood;
pub mod cuckaroom;
pub mod cuckatoo;
mod error;
#[allow(dead_code)]
pub mod lean;
mod siphash;
mod types;

use crate::core::{Block, BlockHeader};
use crate::genesis;
use crate::global;
use chrono::prelude::{DateTime, NaiveDateTime, Utc};

pub use self::common::EdgeType;
pub use self::types::*;
pub use crate::pow::cuckaroo::{new_cuckaroo_ctx, CuckarooContext};
pub use crate::pow::cuckarood::{new_cuckarood_ctx, CuckaroodContext};
pub use crate::pow::cuckaroom::{new_cuckaroom_ctx, CuckaroomContext};
pub use crate::pow::cuckatoo::{new_cuckatoo_ctx, CuckatooContext};
pub use crate::pow::error::Error;
use chrono::prelude::{DateTime, NaiveDateTime, Utc};

const MAX_SOLS: u32 = 10;

@@ -14,12 +14,11 @@

//! Common types and traits for cuckoo family of solvers

use crate::blake2::blake2b::blake2b;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

use crate::pow::error::{Error, ErrorKind};
use crate::pow::num::{PrimInt, ToPrimitive};
use crate::pow::siphash::siphash24;
use blake2::blake2b::blake2b;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::hash::Hash;
use std::io::Cursor;
@@ -85,7 +85,10 @@ where
return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
}
// 21 is standard siphash rotation constant
let edge = to_edge!(T, siphash_block(&self.params.siphash_keys, nonces[n], 21));
let edge = to_edge!(
T,
siphash_block(&self.params.siphash_keys, nonces[n], 21, false)
);
uvs[2 * n] = to_u64!(edge & self.params.edge_mask);
uvs[2 * n + 1] = to_u64!((edge >> 32) & self.params.edge_mask);
xor0 ^= uvs[2 * n];
@@ -89,7 +89,10 @@ where
if n > 0 && nonces[n] <= nonces[n - 1] {
return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
}
let edge = to_edge!(T, siphash_block(&self.params.siphash_keys, nonces[n], 25));
let edge = to_edge!(
T,
siphash_block(&self.params.siphash_keys, nonces[n], 25, false)
);
let idx = 4 * ndir[dir] + 2 * dir;
uvs[idx] = to_u64!(edge & nodemask);
uvs[idx + 1] = to_u64!((edge >> 32) & nodemask);
@@ -0,0 +1,192 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of Cuckaroom Cycle, based on Cuckoo Cycle designed by
//! John Tromp. Ported to Rust from https://github.com/tromp/cuckoo.
//!
//! Cuckaroom is a variation of Cuckaroo that's tweaked at the second HardFork
//! to maintain ASIC-Resistance, as introduced in
//! https://www.grin-forum.org/t/mid-december-pow-hardfork-cuckarood29-cuckaroom29
//! It uses a tweaked edge block generation where states are xored with all later
//! states, reverts to standard siphash, and most importantly, identifies cycles
//! in a mono-partite graph, from which it derives the letter 'm'.

use crate::global;
use crate::pow::common::{CuckooParams, EdgeType};
use crate::pow::error::{Error, ErrorKind};
use crate::pow::siphash::siphash_block;
use crate::pow::{PoWContext, Proof};

/// Instantiate a new CuckaroomContext as a PowContext. Note that this can't
/// be moved in the PoWContext trait as this particular trait needs to be
/// convertible to an object trait.
pub fn new_cuckaroom_ctx<T>(
	edge_bits: u8,
	proof_size: usize,
) -> Result<Box<dyn PoWContext<T>>, Error>
where
	T: EdgeType + 'static,
{
	// Build the shared cuckoo parameters first, then box the context up as a
	// trait object so callers can treat all PoW variants uniformly.
	let ctx = CuckaroomContext {
		params: CuckooParams::new(edge_bits, proof_size)?,
	};
	Ok(Box::new(ctx))
}

/// Cuckaroom cycle context. Only includes the verifier for now.
pub struct CuckaroomContext<T>
where
	T: EdgeType,
{
	// Shared cuckoo-family parameters; verify() reads the siphash keys and
	// the edge mask from here, both set via CuckooParams::new / reset_header_nonce.
	params: CuckooParams<T>,
}

impl<T> PoWContext<T> for CuckaroomContext<T>
where
	T: EdgeType,
{
	/// Re-seed the siphash keys from the given header bytes and optional
	/// nonce. `_solve` is ignored: this context is verification-only.
	fn set_header_nonce(
		&mut self,
		header: Vec<u8>,
		nonce: Option<u32>,
		_solve: bool,
	) -> Result<(), Error> {
		self.params.reset_header_nonce(header, nonce)
	}

	/// Solving is not implemented for cuckaroom; only the verifier exists.
	fn find_cycles(&mut self) -> Result<Vec<Proof>, Error> {
		unimplemented!()
	}

	/// Verify that `proof` encodes a single cycle of the globally expected
	/// length in the mono-partite cuckaroom graph derived from the current
	/// siphash keys.
	fn verify(&self, proof: &Proof) -> Result<(), Error> {
		let proofsize = proof.proof_size();
		if proofsize != global::proofsize() {
			return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
		}
		let nonces = &proof.nonces;
		// from[n] / to[n] hold the two endpoints of the n-th edge.
		let mut from = vec![0u32; proofsize];
		let mut to = vec![0u32; proofsize];
		// Running xors over all endpoints; they must agree for a closed cycle.
		let mut xor_from: u32 = 0;
		let mut xor_to: u32 = 0;
		// Node space is half the edge space (one bit fewer than edge_mask).
		let nodemask = self.params.edge_mask >> 1;

		for n in 0..proofsize {
			// Each edge index must lie within the edge space...
			if nonces[n] > to_u64!(self.params.edge_mask) {
				return Err(ErrorKind::Verification("edge too big".to_owned()))?;
			}
			// ...and the proof must list edges in strictly ascending order.
			if n > 0 && nonces[n] <= nonces[n - 1] {
				return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
			}
			// Derive both endpoints from one 64-bit siphash block value
			// (rot_e = 21, xor_all = true is the cuckaroom tweak).
			let edge = to_edge!(
				T,
				siphash_block(&self.params.siphash_keys, nonces[n], 21, true)
			);
			from[n] = to_u32!(edge & nodemask);
			xor_from ^= from[n];
			to[n] = to_u32!((edge >> 32) & nodemask);
			xor_to ^= to[n];
		}
		// In a closed cycle every node appears equally often as a 'from' and
		// as a 'to', so the xors of the two endpoint sets must match.
		if xor_from != xor_to {
			return Err(ErrorKind::Verification(
				"endpoints don't match up".to_owned(),
			))?;
		}
		let mut visited = vec![false; proofsize];
		let mut n = 0;
		let mut i = 0;
		loop {
			// follow cycle
			// Revisiting an edge before returning to the start means the
			// walk branched rather than forming a simple cycle.
			if visited[i] {
				return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
			}
			visited[i] = true;
			// Find the (first) edge whose 'from' node continues from to[i].
			let mut nexti = 0;
			while from[nexti] != to[i] {
				nexti += 1;
				if nexti == proofsize {
					return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
				}
			}
			i = nexti;
			n += 1;
			if i == 0 {
				// must cycle back to start or find branch
				break;
			}
		}
		// The walk must have consumed every edge exactly once.
		if n == proofsize {
			Ok(())
		} else {
			Err(ErrorKind::Verification("cycle too short".to_owned()))?
		}
	}
}

#[cfg(test)]
mod test {
	use super::*;

	// Known-good 19-bit solution: empty header, nonce 64.
	static V1_19_HASH: [u64; 4] = [
		0xdb7896f799c76dab,
		0x352e8bf25df7a723,
		0xf0aa29cbb1150ea6,
		0x3206c2759f41cbd5,
	];
	static V1_19_SOL: [u64; 42] = [
		0x0413c, 0x05121, 0x0546e, 0x1293a, 0x1dd27, 0x1e13e, 0x1e1d2, 0x22870, 0x24642, 0x24833,
		0x29190, 0x2a732, 0x2ccf6, 0x302cf, 0x32d9a, 0x33700, 0x33a20, 0x351d9, 0x3554b, 0x35a70,
		0x376c1, 0x398c6, 0x3f404, 0x3ff0c, 0x48b26, 0x49a03, 0x4c555, 0x4dcda, 0x4dfcd, 0x4fbb6,
		0x50275, 0x584a8, 0x5da0d, 0x5dbf1, 0x6038f, 0x66540, 0x72bbd, 0x77323, 0x77424, 0x77a14,
		0x77dc9, 0x7d9dc,
	];

	// Known-good 29-bit solution: empty header, nonce 15.
	static V2_29_HASH: [u64; 4] = [
		0xe4b4a751f2eac47d,
		0x3115d47edfb69267,
		0x87de84146d9d609e,
		0x7deb20eab6d976a1,
	];
	static V2_29_SOL: [u64; 42] = [
		0x04acd28, 0x29ccf71, 0x2a5572b, 0x2f31c2c, 0x2f60c37, 0x317fe1d, 0x32f6d4c, 0x3f51227,
		0x45ee1dc, 0x535eeb8, 0x5e135d5, 0x6184e3d, 0x6b1b8e0, 0x6f857a9, 0x8916a0f, 0x9beb5f8,
		0xa3c8dc9, 0xa886d94, 0xaab6a57, 0xd6df8f8, 0xe4d630f, 0xe6ae422, 0xea2d658, 0xf7f369b,
		0x10c465d8, 0x1130471e, 0x12049efb, 0x12f43bc5, 0x15b493a6, 0x16899354, 0x1915dfca,
		0x195c3dac, 0x19b09ab6, 0x1a1a8ed7, 0x1bba748f, 0x1bdbf777, 0x1c806542, 0x1d201b53,
		0x1d9e6af7, 0x1e99885e, 0x1f255834, 0x1f9c383b,
	];

	/// Both reference solutions must verify, and the all-zero proof must be
	/// rejected, at both edge sizes.
	#[test]
	fn cuckaroom19_29_vectors() {
		let mut ctx19 = new_impl::<u64>(19, 42);
		// [u64; 4] is Copy, so no clone() is needed here (nor on the Vec
		// already produced by to_vec() below) — clippy::clone_on_copy /
		// redundant_clone.
		ctx19.params.siphash_keys = V1_19_HASH;
		assert!(ctx19.verify(&Proof::new(V1_19_SOL.to_vec())).is_ok());
		assert!(ctx19.verify(&Proof::zero(42)).is_err());
		let mut ctx29 = new_impl::<u64>(29, 42);
		ctx29.params.siphash_keys = V2_29_HASH;
		assert!(ctx29.verify(&Proof::new(V2_29_SOL.to_vec())).is_ok());
		assert!(ctx29.verify(&Proof::zero(42)).is_err());
	}

	// Test-only constructor returning the concrete context type so the test
	// can set siphash_keys directly (the public API only hands out a boxed
	// trait object).
	fn new_impl<T>(edge_bits: u8, proof_size: usize) -> CuckaroomContext<T>
	where
		T: EdgeType,
	{
		let params = CuckooParams::new(edge_bits, proof_size).unwrap();
		CuckaroomContext { params }
	}
}
@@ -12,16 +12,14 @@
// limitations under the License.

//! Implementation of Cuckatoo Cycle designed by John Tromp.
use std::mem;

use byteorder::{BigEndian, WriteBytesExt};
use croaring::Bitmap;

use crate::global;
use crate::pow::common::{CuckooParams, EdgeType, Link};
use crate::pow::error::{Error, ErrorKind};
use crate::pow::{PoWContext, Proof};
use crate::util;
use byteorder::{BigEndian, WriteBytesExt};
use croaring::Bitmap;
use std::mem;
use util;

struct Graph<T>
where
@@ -53,6 +51,9 @@ where
{
/// Create a new graph with given parameters
pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
if to_u64!(max_edges) >= u64::max_value() / 2 {
return Err(ErrorKind::Verification(format!("graph is to big to build")))?;
}
let max_nodes = 2 * to_u64!(max_edges);
Ok(Graph {
max_edges,
@@ -483,5 +484,4 @@ mod test {
}
Ok(())
}

}
@@ -39,26 +39,29 @@ pub fn siphash24(v: &[u64; 4], nonce: u64) -> u64 {
/// Builds a block of siphash values by repeatedly hashing from the nonce
/// truncated to its closest block start, up to the end of the block. Returns
/// the resulting hash at the nonce's position.
pub fn siphash_block(v: &[u64; 4], nonce: u64, rot_e: u8) -> u64 {
pub fn siphash_block(v: &[u64; 4], nonce: u64, rot_e: u8, xor_all: bool) -> u64 {
// beginning of the block of hashes
let nonce0 = nonce & !SIPHASH_BLOCK_MASK;
let mut nonce_hash = 0;
let nonce_i = nonce & SIPHASH_BLOCK_MASK;
let mut nonce_hash = vec![0u64; SIPHASH_BLOCK_SIZE as usize];

// repeated hashing over the whole block
let mut siphash = SipHash24::new(v);
for n in nonce0..(nonce0 + SIPHASH_BLOCK_SIZE) {
siphash.hash(n, rot_e);
if n == nonce {
nonce_hash = siphash.digest();
}
for i in 0..SIPHASH_BLOCK_SIZE {
siphash.hash(nonce0 + i, rot_e);
nonce_hash[i as usize] = siphash.digest();
}
// xor the nonce with the last hash to force hashing the whole block
// unless the nonce is last in the block
if nonce == nonce0 + SIPHASH_BLOCK_MASK {
return siphash.digest();
// xor the hash at nonce_i < SIPHASH_BLOCK_MASK with some or all later hashes to force hashing the whole block
let mut xor: u64 = nonce_hash[nonce_i as usize];
let xor_from = if xor_all || nonce_i == SIPHASH_BLOCK_MASK {
nonce_i + 1
} else {
return nonce_hash ^ siphash.digest();
SIPHASH_BLOCK_MASK
};
for i in xor_from..SIPHASH_BLOCK_SIZE {
xor ^= nonce_hash[i as usize];
}
return xor;
}

/// Implements siphash 2-4 specialized for a 4 u64 array key and a u64 nonce
@@ -130,8 +133,17 @@ mod test {

#[test]
fn hash_block() {
assert_eq!(siphash_block(&[1, 2, 3, 4], 10, 21), 1182162244994096396);
assert_eq!(siphash_block(&[1, 2, 3, 4], 123, 21), 11303676240481718781);
assert_eq!(siphash_block(&[9, 7, 6, 7], 12, 21), 4886136884237259030);
assert_eq!(
siphash_block(&[1, 2, 3, 4], 10, 21, false),
1182162244994096396
);
assert_eq!(
siphash_block(&[1, 2, 3, 4], 123, 21, false),
11303676240481718781
);
assert_eq!(
siphash_block(&[9, 7, 6, 7], 12, 21, false),
4886136884237259030
);
}
}
@@ -12,22 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

/// Types for a Cuck(at)oo proof of work and its encapsulation as a fully usable
/// proof of work within a block header.
use std::cmp::{max, min};
use std::ops::{Add, Div, Mul, Sub};
use std::{fmt, iter};

use rand::{thread_rng, Rng};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

use crate::consensus::{graph_weight, MIN_DIFFICULTY, SECOND_POW_EDGE_BITS};
use crate::core::hash::{DefaultHashable, Hashed};
use crate::global;
use crate::ser::{self, FixedLength, Readable, Reader, Writeable, Writer};

use crate::pow::common::EdgeType;
use crate::pow::error::Error;
use crate::ser::{self, FixedLength, Readable, Reader, Writeable, Writer};
use rand::{thread_rng, Rng};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
/// Types for a Cuck(at)oo proof of work and its encapsulation as a fully usable
/// proof of work within a block header.
use std::cmp::{max, min};
use std::ops::{Add, Div, Mul, Sub};
use std::{fmt, iter};

/// Generic trait for a solver/verifier providing common interface into Cuckoo-family PoW
/// Mostly used for verification, but also for test mining if necessary
@@ -429,7 +426,7 @@ fn read_number(bits: &Vec<u8>, bit_start: usize, bit_count: usize) -> u64 {
impl Readable for Proof {
fn read(reader: &mut dyn Reader) -> Result<Proof, ser::Error> {
let edge_bits = reader.read_u8()?;
if edge_bits == 0 || edge_bits > 64 {
if edge_bits == 0 || edge_bits > 63 {
return Err(ser::Error::CorruptedData);
}

@@ -509,7 +506,7 @@ mod tests {

#[test]
fn test_proof_rw() {
for edge_bits in 10..64 {
for edge_bits in 10..63 {
let mut proof = Proof::new(gen_proof(edge_bits as u32));
proof.edge_bits = edge_bits;
let mut buf = Cursor::new(Vec::new());
@@ -541,5 +538,4 @@ mod tests {
}
v
}

}
@@ -21,20 +21,20 @@

use crate::core::hash::{DefaultHashable, Hash, Hashed};
use crate::global::PROTOCOL_VERSION;
use crate::keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
use crate::util::secp::constants::{
AGG_SIGNATURE_SIZE, COMPRESSED_PUBLIC_KEY_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE,
SECRET_KEY_SIZE,
};
use crate::util::secp::key::PublicKey;
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::secp::Signature;
use crate::util::secp::{ContextFlag, Secp256k1};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
use std::fmt::{self, Debug};
use std::io::{self, Read, Write};
use std::marker;
use std::{cmp, error};
use util::secp::constants::{
AGG_SIGNATURE_SIZE, COMPRESSED_PUBLIC_KEY_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE,
SECRET_KEY_SIZE,
};
use util::secp::key::PublicKey;
use util::secp::pedersen::{Commitment, RangeProof};
use util::secp::Signature;
use util::secp::{ContextFlag, Secp256k1};

/// Possible errors deriving from serializing or deserializing.
#[derive(Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
@@ -927,17 +927,17 @@ impl AsFixedBytes for crate::core::hash::Hash {
32
}
}
impl AsFixedBytes for crate::util::secp::pedersen::RangeProof {
impl AsFixedBytes for util::secp::pedersen::RangeProof {
fn len(&self) -> usize {
self.plen
}
}
impl AsFixedBytes for crate::util::secp::Signature {
impl AsFixedBytes for util::secp::Signature {
fn len(&self) -> usize {
64
}
}
impl AsFixedBytes for crate::util::secp::pedersen::Commitment {
impl AsFixedBytes for util::secp::pedersen::Commitment {
fn len(&self) -> usize {
PEDERSEN_COMMITMENT_SIZE
}
@@ -947,7 +947,7 @@ impl AsFixedBytes for BlindingFactor {
SECRET_KEY_SIZE
}
}
impl AsFixedBytes for crate::keychain::Identifier {
impl AsFixedBytes for keychain::Identifier {
fn len(&self) -> usize {
IDENTIFIER_SIZE
}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod common;
mod common;
use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
use crate::core::consensus::BLOCK_OUTPUT_WEIGHT;
use crate::core::core::block::Error;
@@ -24,18 +24,16 @@ use crate::core::core::Committed;
use crate::core::core::{
Block, BlockHeader, CompactBlock, HeaderVersion, KernelFeatures, OutputFeatures,
};
use crate::core::libtx::build::{self, input, output, with_fee};
use crate::core::libtx::build::{self, input, output};
use crate::core::libtx::ProofBuilder;
use crate::core::{global, ser};
use crate::keychain::{BlindingFactor, ExtKeychain, Keychain};
use crate::util::secp;
use crate::util::RwLock;
use chrono::Duration;
use grin_core as core;
use grin_core::global::ChainTypes;
use grin_keychain as keychain;
use grin_util as util;
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use std::sync::Arc;
use util::secp;
use util::RwLock;

fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
@@ -58,8 +56,9 @@ fn too_large_block() {
parts.push(output(5, pks.pop().unwrap()));
}

parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]);
let tx = build::transaction(parts, &keychain, &builder).unwrap();
parts.append(&mut vec![input(500000, pks.pop().unwrap())]);
let tx =
build::transaction(KernelFeatures::Plain { fee: 2 }, parts, &keychain, &builder).unwrap();

let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
@@ -92,7 +91,8 @@ fn block_with_cut_through() {

let mut btx1 = tx2i1o();
let mut btx2 = build::transaction(
vec![input(7, key_id1), output(5, key_id2.clone()), with_fee(2)],
KernelFeatures::Plain { fee: 2 },
vec![input(7, key_id1), output(5, key_id2.clone())],
&keychain,
&builder,
)
@@ -211,7 +211,7 @@ fn serialize_deserialize_header_version() {
ser::serialize_default(&mut vec1, &1_u16).expect("serialization failed");

let mut vec2 = Vec::new();
ser::serialize_default(&mut vec2, &HeaderVersion::default()).expect("serialization failed");
ser::serialize_default(&mut vec2, &HeaderVersion(1)).expect("serialization failed");

// Check that a header_version serializes to a
// single u16 value with no extraneous bytes wrapping it.
@@ -477,12 +477,8 @@ fn same_amount_outputs_copy_range_proof() {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);

let tx = build::transaction(
vec![
input(7, key_id1),
output(3, key_id2),
output(3, key_id3),
with_fee(1),
],
KernelFeatures::Plain { fee: 1 },
vec![input(7, key_id1), output(3, key_id2), output(3, key_id3)],
&keychain,
&builder,
)
@@ -527,23 +523,19 @@ fn wrong_amount_range_proof() {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);

let tx1 = build::transaction(
KernelFeatures::Plain { fee: 1 },
vec![
input(7, key_id1.clone()),
output(3, key_id2.clone()),
output(3, key_id3.clone()),
with_fee(1),
],
&keychain,
&builder,
)
.unwrap();
let tx2 = build::transaction(
vec![
input(7, key_id1),
output(2, key_id2),
output(4, key_id3),
with_fee(1),
],
KernelFeatures::Plain { fee: 1 },
vec![input(7, key_id1), output(2, key_id2), output(4, key_id3)],
&keychain,
&builder,
)
@@ -14,20 +14,19 @@

//! Common test functions

use crate::keychain::{Identifier, Keychain};
use grin_core::core::{
block::{Block, BlockHeader},
Transaction,
};
use grin_core::core::hash::DefaultHashable;
use grin_core::core::{Block, BlockHeader, KernelFeatures, Transaction};
use grin_core::libtx::{
build::{self, input, output, with_fee},
build::{self, input, output},
proof::{ProofBuild, ProofBuilder},
reward,
};
use grin_core::pow::Difficulty;
use grin_keychain as keychain;
use grin_core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use keychain::{Identifier, Keychain};

// utility producing a transaction with 2 inputs and a single outputs
#[allow(dead_code)]
pub fn tx2i1o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain);
@@ -36,27 +35,25 @@ pub fn tx2i1o() -> Transaction {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);

build::transaction(
vec![
input(10, key_id1),
input(11, key_id2),
output(19, key_id3),
with_fee(2),
],
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), input(11, key_id2), output(19, key_id3)],
&keychain,
&builder,
)
.unwrap()
}

// utility producing a transaction with a single input and output
#[allow(dead_code)]
pub fn tx1i1o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain);
let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);

build::transaction(
vec![input(5, key_id1), output(3, key_id2), with_fee(2)],
KernelFeatures::Plain { fee: 2 },
vec![input(5, key_id1), output(3, key_id2)],
&keychain,
&builder,
)
@@ -66,6 +63,7 @@ pub fn tx1i1o() -> Transaction {
// utility producing a transaction with a single input
// and two outputs (one change output)
// Note: this tx has an "offset" kernel
#[allow(dead_code)]
pub fn tx1i2o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain);
@@ -74,12 +72,8 @@ pub fn tx1i2o() -> Transaction {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);

build::transaction(
vec![
input(6, key_id1),
output(3, key_id2),
output(1, key_id3),
with_fee(2),
],
KernelFeatures::Plain { fee: 2 },
vec![input(6, key_id1), output(3, key_id2), output(1, key_id3)],
&keychain,
&builder,
)
@@ -88,6 +82,7 @@ pub fn tx1i2o() -> Transaction {

// utility to create a block without worrying about the key or previous
// header
#[allow(dead_code)]
pub fn new_block<K, B>(
txs: Vec<&Transaction>,
keychain: &K,
@@ -112,6 +107,7 @@ where

// utility producing a transaction that spends an output with the provided
// value and blinding key
#[allow(dead_code)]
pub fn txspend1i1o<K, B>(
v: u64,
keychain: &K,
@@ -124,9 +120,47 @@ where
B: ProofBuild,
{
build::transaction(
vec![input(v, key_id1), output(3, key_id2), with_fee(2)],
KernelFeatures::Plain { fee: 2 },
vec![input(v, key_id1), output(3, key_id2)],
keychain,
builder,
)
.unwrap()
}

/// Fixed-size test element (four u32 words) used as a PMMR leaf in tests.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct TestElem(pub [u32; 4]);

impl DefaultHashable for TestElem {}

impl FixedLength for TestElem {
	// 4 x u32 = 16 bytes of serialized data (see the Writeable impl below).
	const LEN: usize = 16;
}

impl PMMRable for TestElem {
	/// TestElem is stored in the MMR as itself — no separate element type.
	type E = Self;

	fn as_elmt(&self) -> Self::E {
		// TestElem is Copy; a plain dereference avoids a needless clone()
		// call (clippy::clone_on_copy).
		*self
	}
}

impl Writeable for TestElem {
	/// Serialize the four u32 words in order, stopping at the first error.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
		for word in &self.0 {
			writer.write_u32(*word)?;
		}
		Ok(())
	}
}

impl Readable for TestElem {
	/// Deserialize four u32 words in the same order Writeable emits them.
	fn read(reader: &mut dyn Reader) -> Result<TestElem, ser::Error> {
		let mut words = [0u32; 4];
		for word in words.iter_mut() {
			*word = reader.read_u32()?;
		}
		Ok(TestElem(words))
	}
}
@@ -623,71 +623,89 @@ fn hard_forks() {
{
global::set_mining_mode(global::ChainTypes::Mainnet);
assert_eq!(global::is_floonet(), false);
assert!(valid_header_version(0, HeaderVersion::new(1)));
assert!(valid_header_version(10, HeaderVersion::new(1)));
assert!(!valid_header_version(10, HeaderVersion::new(2)));
assert!(valid_header_version(
YEAR_HEIGHT / 2 - 1,
HeaderVersion::new(1)
));
assert!(valid_header_version(YEAR_HEIGHT / 2, HeaderVersion::new(2)));
assert!(valid_header_version(
YEAR_HEIGHT / 2 + 1,
HeaderVersion::new(2)
));
assert!(valid_header_version(0, HeaderVersion(1)));
assert!(valid_header_version(10, HeaderVersion(1)));
assert!(!valid_header_version(10, HeaderVersion(2)));
assert!(valid_header_version(YEAR_HEIGHT / 2 - 1, HeaderVersion(1)));
assert!(valid_header_version(YEAR_HEIGHT / 2, HeaderVersion(2)));
assert!(valid_header_version(YEAR_HEIGHT / 2 + 1, HeaderVersion(2)));
assert!(!valid_header_version(YEAR_HEIGHT / 2, HeaderVersion(1)));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion(1)));

assert!(valid_header_version(YEAR_HEIGHT - 1, HeaderVersion(2)));
assert!(valid_header_version(YEAR_HEIGHT, HeaderVersion(3)));
assert!(valid_header_version(YEAR_HEIGHT + 1, HeaderVersion(3)));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion(2)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(2)));
// v4 not active yet
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(4)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(3)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(2)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(1)));
assert!(!valid_header_version(YEAR_HEIGHT * 2, HeaderVersion(3)));
assert!(!valid_header_version(
YEAR_HEIGHT / 2,
HeaderVersion::new(1)
));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(1)));
// v3 not active yet
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(3)));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(2)));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(1)));
assert!(!valid_header_version(
YEAR_HEIGHT * 3 / 2,
HeaderVersion::new(2)
));
assert!(!valid_header_version(
YEAR_HEIGHT + 1,
HeaderVersion::new(2)
YEAR_HEIGHT * 3 / 2 + 1,
HeaderVersion(3)
));
}
// Tests for floonet chain type.
{
global::set_mining_mode(global::ChainTypes::Floonet);
assert_eq!(global::is_floonet(), true);
assert!(valid_header_version(0, HeaderVersion::new(1)));
assert!(valid_header_version(10, HeaderVersion::new(1)));
assert!(!valid_header_version(10, HeaderVersion::new(2)));
assert!(valid_header_version(0, HeaderVersion(1)));
assert!(valid_header_version(10, HeaderVersion(1)));
assert!(!valid_header_version(10, HeaderVersion(2)));
assert!(valid_header_version(
FLOONET_FIRST_HARD_FORK - 1,
HeaderVersion::new(1)
HeaderVersion(1)
));
assert!(valid_header_version(
FLOONET_FIRST_HARD_FORK,
HeaderVersion::new(2)
HeaderVersion(2)
));
assert!(valid_header_version(
FLOONET_FIRST_HARD_FORK + 1,
HeaderVersion::new(2)
HeaderVersion(2)
));
assert!(!valid_header_version(
FLOONET_FIRST_HARD_FORK,
HeaderVersion::new(1)
HeaderVersion(1)
));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion(1)));
assert!(valid_header_version(
FLOONET_SECOND_HARD_FORK - 1,
HeaderVersion(2)
));
assert!(valid_header_version(
FLOONET_SECOND_HARD_FORK,
HeaderVersion(3)
));
assert!(valid_header_version(
FLOONET_SECOND_HARD_FORK + 1,
HeaderVersion(3)
));
assert!(!valid_header_version(
FLOONET_SECOND_HARD_FORK,
HeaderVersion(2)
));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(1)));
// v3 not active yet
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(3)));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(2)));
assert!(!valid_header_version(YEAR_HEIGHT, HeaderVersion::new(1)));
assert!(!valid_header_version(
YEAR_HEIGHT * 3 / 2,
HeaderVersion::new(2)
FLOONET_SECOND_HARD_FORK,
HeaderVersion(1)
));

assert!(!valid_header_version(YEAR_HEIGHT - 1, HeaderVersion(2)));
assert!(valid_header_version(YEAR_HEIGHT - 1, HeaderVersion(3)));
assert!(valid_header_version(YEAR_HEIGHT, HeaderVersion(3)));
assert!(valid_header_version(YEAR_HEIGHT + 1, HeaderVersion(3)));
// v4 not active yet
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(4)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(3)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(2)));
assert!(!valid_header_version(YEAR_HEIGHT * 3 / 2, HeaderVersion(1)));
assert!(!valid_header_version(YEAR_HEIGHT * 2, HeaderVersion(3)));
assert!(!valid_header_version(
YEAR_HEIGHT + 1,
HeaderVersion::new(2)
YEAR_HEIGHT * 3 / 2 + 1,
HeaderVersion(3)
));
}
}
@@ -20,20 +20,18 @@ use self::core::core::block::BlockHeader;
use self::core::core::block::Error::KernelLockHeight;
use self::core::core::hash::{Hashed, ZERO_HASH};
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{aggregate, deaggregate, KernelFeatures, Output, Transaction, Weighting};
use self::core::libtx::build::{
self, initial_tx, input, output, with_excess, with_fee, with_lock_height,
use self::core::core::{
aggregate, deaggregate, KernelFeatures, Output, Transaction, TxKernel, Weighting,
};
use self::core::libtx::build::{self, initial_tx, input, output, with_excess};
use self::core::libtx::ProofBuilder;
use self::core::ser;
use self::keychain::{BlindingFactor, ExtKeychain, Keychain};
use self::util::static_secp_instance;
use self::util::RwLock;
use crate::common::{new_block, tx1i1o, tx1i2o, tx2i1o};
use grin_core as core;
use grin_keychain as keychain;
use grin_util as util;
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use std::sync::Arc;
use util::static_secp_instance;
use util::RwLock;

#[test]
fn simple_tx_ser() {
@@ -99,6 +97,7 @@ fn test_zero_commit_fails() {

// blinding should fail as signing with a zero r*G shouldn't work
build::transaction(
KernelFeatures::Plain { fee: 0 },
vec![input(10, key_id1.clone()), output(10, key_id1.clone())],
&keychain,
&builder,
@@ -120,12 +119,8 @@ fn build_tx_kernel() {

// first build a valid tx with corresponding blinding factor
let tx = build::transaction(
vec![
input(10, key_id1),
output(5, key_id2),
output(3, key_id3),
with_fee(2),
],
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), output(5, key_id2), output(3, key_id3)],
&keychain,
&builder,
)
@@ -373,12 +368,8 @@ fn hash_output() {
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);

let tx = build::transaction(
vec![
input(75, key_id1),
output(42, key_id2),
output(32, key_id3),
with_fee(1),
],
KernelFeatures::Plain { fee: 1 },
vec![input(75, key_id1), output(42, key_id2), output(32, key_id3)],
&keychain,
&builder,
)
@@ -439,12 +430,11 @@ fn tx_build_exchange() {

// Alice builds her transaction, with change, which also produces the sum
// of blinding factors before they're obscured.
let (tx, sum) = build::partial_transaction(
vec![in1, in2, output(1, key_id3), with_fee(2)],
&keychain,
&builder,
)
.unwrap();
let tx = Transaction::empty()
.with_kernel(TxKernel::with_features(KernelFeatures::Plain { fee: 2 }));
let (tx, sum) =
build::partial_transaction(tx, vec![in1, in2, output(1, key_id3)], &keychain, &builder)
.unwrap();

(tx, sum)
};
@@ -453,6 +443,7 @@ fn tx_build_exchange() {
// blinding factors. He adds his output, finalizes the transaction so it's
// ready for broadcast.
let tx_final = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![
initial_tx(tx_alice),
with_excess(blind_sum),
@@ -547,12 +538,11 @@ fn test_block_with_timelocked_tx() {
// first check we can add a timelocked tx where lock height matches current
// block height and that the resulting block is valid
let tx1 = build::transaction(
vec![
input(5, key_id1.clone()),
output(3, key_id2.clone()),
with_fee(2),
with_lock_height(1),
],
KernelFeatures::HeightLocked {
fee: 2,
lock_height: 1,
},
vec![input(5, key_id1.clone()), output(3, key_id2.clone())],
&keychain,
&builder,
)
@@ -572,12 +562,11 @@ fn test_block_with_timelocked_tx() {
// now try adding a timelocked tx where lock height is greater than current
// block height
let tx1 = build::transaction(
vec![
input(5, key_id1.clone()),
output(3, key_id2.clone()),
with_fee(2),
with_lock_height(2),
],
KernelFeatures::HeightLocked {
fee: 2,
lock_height: 2,
},
vec![input(5, key_id1.clone()), output(3, key_id2.clone())],
&keychain,
&builder,
)
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod vec_backend;
mod common;

use self::core::core::merkle_proof::MerkleProof;
use self::core::core::pmmr::PMMR;
use self::core::core::pmmr::{VecBackend, PMMR};
use self::core::ser::{self, PMMRIndexHashable};
use crate::vec_backend::{TestElem, VecBackend};
use crate::common::TestElem;
use grin_core as core;

#[test]
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod vec_backend;
mod common;

use self::core::core::hash::Hash;
use self::core::core::pmmr::{self, PMMR};
use self::core::core::pmmr::{self, VecBackend, PMMR};
use self::core::ser::PMMRIndexHashable;
use crate::vec_backend::{TestElem, VecBackend};
use crate::common::TestElem;
use chrono::prelude::Utc;
use grin_core as core;
use std::u64;
@@ -433,7 +433,7 @@ fn pmmr_prune() {

// First check the initial numbers of elements.
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 0);
assert_eq!(ba.removed.len(), 0);

// pruning a leaf with no parent should do nothing
{
@@ -442,7 +442,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 1);
assert_eq!(ba.removed.len(), 1);

// pruning leaves with no shared parent just removes 1 element
{
@@ -451,15 +451,15 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 2);
assert_eq!(ba.removed.len(), 2);

{
let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(4).unwrap();
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 3);
assert_eq!(ba.removed.len(), 3);

// pruning a non-leaf node has no effect
{
@@ -468,7 +468,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 3);
assert_eq!(ba.removed.len(), 3);

// TODO - no longer true (leaves only now) - pruning sibling removes subtree
{
@@ -477,7 +477,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 4);
assert_eq!(ba.removed.len(), 4);

// TODO - no longer true (leaves only now) - pruning all leaves under level >1
// removes all subtree
@@ -487,7 +487,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 5);
assert_eq!(ba.removed.len(), 5);

// pruning everything should only leave us with a single peak
{
@@ -498,7 +498,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap());
}
assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 9);
assert_eq!(ba.removed.len(), 9);
}

#[test]
@@ -514,46 +514,48 @@ fn check_insertion_to_pmmr_index() {
}

#[test]
fn check_elements_from_insertion_index() {
fn check_elements_from_pmmr_index() {
let mut ba = VecBackend::new();
let mut pmmr = PMMR::new(&mut ba);
for x in 1..1000 {
// 20 elements should give max index 38
for x in 1..21 {
pmmr.push(&TestElem([0, 0, 0, x])).unwrap();
}

// Normal case
let res = pmmr.readonly_pmmr().elements_from_insertion_index(1, 100);
assert_eq!(res.0, 100);
assert_eq!(res.1.len(), 100);
let res = pmmr.readonly_pmmr().elements_from_pmmr_index(1, 1000, None);
assert_eq!(res.0, 38);
assert_eq!(res.1.len(), 20);
assert_eq!(res.1[0].0[3], 1);
assert_eq!(res.1[99].0[3], 100);
assert_eq!(res.1[19].0[3], 20);

// middle of pack
let res = pmmr.readonly_pmmr().elements_from_insertion_index(351, 70);
assert_eq!(res.0, 420);
assert_eq!(res.1.len(), 70);
assert_eq!(res.1[0].0[3], 351);
assert_eq!(res.1[69].0[3], 420);
let res = pmmr
.readonly_pmmr()
.elements_from_pmmr_index(8, 1000, Some(34));
assert_eq!(res.0, 34);
assert_eq!(res.1.len(), 14);
assert_eq!(res.1[0].0[3], 5);
assert_eq!(res.1[13].0[3], 18);

// past the end
// bounded
let res = pmmr
.readonly_pmmr()
.elements_from_insertion_index(650, 1000);
assert_eq!(res.0, 999);
assert_eq!(res.1.len(), 350);
assert_eq!(res.1[0].0[3], 650);
assert_eq!(res.1[349].0[3], 999);
.elements_from_pmmr_index(8, 7, Some(34));
assert_eq!(res.0, 19);
assert_eq!(res.1.len(), 7);
assert_eq!(res.1[0].0[3], 5);
assert_eq!(res.1[6].0[3], 11);

// pruning a few nodes should get consistent results
pmmr.prune(pmmr::insertion_to_pmmr_index(650)).unwrap();
pmmr.prune(pmmr::insertion_to_pmmr_index(651)).unwrap();
pmmr.prune(pmmr::insertion_to_pmmr_index(800)).unwrap();
pmmr.prune(pmmr::insertion_to_pmmr_index(900)).unwrap();
pmmr.prune(pmmr::insertion_to_pmmr_index(998)).unwrap();
pmmr.prune(pmmr::insertion_to_pmmr_index(5)).unwrap();
pmmr.prune(pmmr::insertion_to_pmmr_index(20)).unwrap();

let res = pmmr
.readonly_pmmr()
.elements_from_insertion_index(650, 1000);
assert_eq!(res.0, 999);
assert_eq!(res.1.len(), 345);
assert_eq!(res.1[0].0[3], 652);
assert_eq!(res.1[344].0[3], 999);
.elements_from_pmmr_index(8, 7, Some(34));
assert_eq!(res.0, 20);
assert_eq!(res.1.len(), 7);
assert_eq!(res.1[0].0[3], 6);
assert_eq!(res.1[6].0[3], 12);
}
@@ -19,9 +19,8 @@ pub mod common;
use self::core::core::{Output, OutputFeatures};
use self::core::libtx::proof;
use self::core::ser;
use self::keychain::{ExtKeychain, Keychain};
use grin_core as core;
use grin_keychain as keychain;
use keychain::{ExtKeychain, Keychain};

#[test]
fn test_output_ser_deser() {
@@ -12,135 +12,56 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fs::File;
mod common;

use self::core::core::hash::{DefaultHashable, Hash};
use self::core::core::pmmr::{self, Backend};
use self::core::core::BlockHeader;
use self::core::ser;
use self::core::ser::{FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use croaring;
use croaring::Bitmap;
use self::core::core::pmmr::{VecBackend, PMMR};
use crate::common::TestElem;
use grin_core as core;

/// Fixed-size test element used to exercise the PMMR: four u32 words.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct TestElem(pub [u32; 4]);

// Opt in to the crate's default hashing behaviour for this type.
impl DefaultHashable for TestElem {}

impl FixedLength for TestElem {
	// 4 x u32 = 16 bytes when serialized (matches the write/read impls below).
	const LEN: usize = 16;
}

impl PMMRable for TestElem {
	// The stored element type is the element itself.
	type E = Self;

	/// Return the element to be stored in the MMR.
	fn as_elmt(&self) -> Self::E {
		// TestElem is Copy: dereference instead of an explicit clone
		// (clippy::clone_on_copy).
		*self
	}
}

impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
r#try!(writer.write_u32(self.0[0]));
r#try!(writer.write_u32(self.0[1]));
r#try!(writer.write_u32(self.0[2]));
writer.write_u32(self.0[3])
}
}

impl Readable for TestElem {
	/// Deserialize four consecutive u32s into a TestElem.
	fn read(reader: &mut dyn Reader) -> Result<TestElem, ser::Error> {
		let mut words = [0u32; 4];
		for word in words.iter_mut() {
			*word = reader.read_u32()?;
		}
		Ok(TestElem(words))
	}
}

/// Simple MMR backend implementation based on a Vector. Pruning does not
/// compact the Vec itself.
#[derive(Clone, Debug)]
pub struct VecBackend<T: PMMRable> {
	/// Backend elements
	pub data: Vec<T>,
	/// Node hashes; MMR position `p` (1-based) maps to index `p - 1`
	/// (see `get_from_file` below).
	pub hashes: Vec<Hash>,
	/// Positions of removed elements
	pub remove_list: Vec<u64>,
}

impl<T: PMMRable> Backend<T> for VecBackend<T> {
/// Append an element and its associated hashes to the backend.
fn append(&mut self, data: &T, hashes: Vec<Hash>) -> Result<(), String> {
	self.data.push(data.clone());
	// We own `hashes`, so move it in directly; the previous version
	// cloned the vec only to drain the clone.
	self.hashes.extend(hashes);
	Ok(())
}

fn get_hash(&self, position: u64) -> Option<Hash> {
if self.remove_list.contains(&position) {
None
} else {
self.get_from_file(position)
}
#[test]
fn leaf_pos_and_idx_iter_test() {
let elems = [
TestElem([0, 0, 0, 1]),
TestElem([0, 0, 0, 2]),
TestElem([0, 0, 0, 3]),
TestElem([0, 0, 0, 4]),
TestElem([0, 0, 0, 5]),
];
let mut backend = VecBackend::new();
let mut pmmr = PMMR::new(&mut backend);
for x in &elems {
pmmr.push(x).unwrap();
}

/// Data element at `position`, or None if the position has been removed.
fn get_data(&self, position: u64) -> Option<T::E> {
	// A pruned position yields no data.
	if self.remove_list.contains(&position) {
		return None;
	}
	self.get_data_from_file(position)
}

/// Hash at `position` regardless of the remove list.
/// Positions are 1-based; the hash for position `p` is stored at index `p - 1`.
fn get_from_file(&self, position: u64) -> Option<Hash> {
	// Guard against position 0 (would underflow) and out-of-range
	// positions instead of panicking on a bad index; the Option return
	// already expresses "not present".
	if position == 0 {
		return None;
	}
	self.hashes.get((position - 1) as usize).cloned()
}

/// Data element at `position` regardless of the remove list.
fn get_data_from_file(&self, position: u64) -> Option<T::E> {
	// n_leaves gives the (1-based) leaf insertion index for this position.
	let idx = pmmr::n_leaves(position);
	if idx == 0 {
		return None;
	}
	// Checked lookup: return None rather than panic for out-of-range
	// positions. `as_elmt` borrows, so no intermediate clone is needed.
	let data = self.data.get((idx - 1) as usize)?;
	Some(data.as_elmt())
}

fn data_as_temp_file(&self) -> Result<File, String> {
	// The in-memory Vec backend has no file representation; the tests
	// using this backend never call this.
	unimplemented!()
}

fn leaf_pos_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
	// Leaf-position iteration is not supported by this simple backend.
	unimplemented!()
}

fn remove(&mut self, position: u64) -> Result<(), String> {
	// Pruning only records the position; the underlying vecs are not
	// compacted (see the struct-level comment).
	self.remove_list.push(position);
	Ok(())
}

/// Rewind the backend so that `position` is the last MMR position retained.
fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
	// Number of leaves up to and including `position`.
	let idx = pmmr::n_leaves(position);
	// Positions are 1-based: the hash for position p lives at index p - 1
	// (see get_from_file), so retaining through `position` means keeping
	// exactly `position` hashes and `idx` data elements. The previous
	// version's `+ 1` kept one extra trailing element in each vec.
	// `truncate` also avoids the slice-and-reallocate of `to_vec`.
	self.data.truncate(idx as usize);
	self.hashes.truncate(position as usize);
	Ok(())
}

fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> {
	// Snapshotting is a no-op for the in-memory test backend.
	Ok(())
}

// No file handles are held, so there is nothing to release.
fn release_files(&mut self) {}

// No stats worth reporting for a Vec-backed MMR.
fn dump_stats(&self) {}
assert_eq!(
vec![0, 1, 2, 3, 4],
pmmr.leaf_idx_iter(0).collect::<Vec<_>>()
);
assert_eq!(
vec![1, 2, 4, 5, 8],
pmmr.leaf_pos_iter().collect::<Vec<_>>()
);
}

impl<T: PMMRable> VecBackend<T> {
/// Instantiates a new VecBackend<T>
pub fn new() -> VecBackend<T> {
VecBackend {
data: vec![],
hashes: vec![],
remove_list: vec![],
}
#[test]
fn leaf_pos_and_idx_iter_hash_only_test() {
	// Build a hash-only MMR over five simple elements.
	let mut backend = VecBackend::new_hash_only();
	let mut pmmr = PMMR::new(&mut backend);
	for i in 1..=5u32 {
		pmmr.push(&TestElem([0, 0, 0, i])).unwrap();
	}
	// Leaf insertion indices are contiguous from zero...
	let leaf_indices = pmmr.leaf_idx_iter(0).collect::<Vec<_>>();
	assert_eq!(leaf_indices, vec![0, 1, 2, 3, 4]);
	// ...while leaf MMR positions skip the interior parent nodes.
	let leaf_positions = pmmr.leaf_pos_iter().collect::<Vec<_>>();
	assert_eq!(leaf_positions, vec![1, 2, 4, 5, 8]);
}
@@ -17,12 +17,10 @@ pub mod common;
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{Output, OutputFeatures};
use self::core::libtx::proof;
use self::keychain::{ExtKeychain, Keychain, SwitchCommitmentType};
use self::util::RwLock;
use grin_core as core;
use grin_keychain as keychain;
use grin_util as util;
use keychain::{ExtKeychain, Keychain, SwitchCommitmentType};
use std::sync::Arc;
use util::RwLock;

fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
@@ -1,6 +1,6 @@
# Grin - Build, Configuration, and Running

*Read this in other languages: [Español](build_ES.md), [Korean](build_KR.md), [日本語](build_JP.md).*
*Read this in other languages: [Español](build_ES.md), [Korean](build_KR.md), [日本語](build_JP.md), [简体中文](build_ZH-CN.md).*

## Supported Platforms

@@ -98,7 +98,7 @@ For help on grin commands and their switches, try:

```sh
grin help
grin wallet --help
grin server --help
grin client --help
```

@@ -137,4 +137,4 @@ Please note that all mining functions for Grin have moved into a separate, stand
you can start mining by building and running grin-miner against your running Grin node.

For grin-miner to be able to communicate with your grin node, make sure that you have `enable_stratum_server = true`
in your `grin-server.toml` configuration file and you have a wallet listener running (`grin wallet listen`).
in your `grin-server.toml` configuration file and you have a wallet listener running (`grin-wallet listen`).
@@ -1,5 +1,7 @@
# Grin - Compilación, configuración y ejecución

*Lea esto en otros idiomas: [English](build.md), [日本語](build_JP.md), [Korean](build_KR.md), [简体中文](build_ZH-CN.md).*

## Plataformas soportadas

En un largo plazo, es probable que la mayoría de las plataformas sean compatibles en cierta medida.
@@ -87,7 +89,7 @@ Para obtener ayuda sobre los comandos de grin y sus cambios intente:

```sh
grin help
grin wallet --help
grin server --help
grin client --help
```

@@ -1,6 +1,6 @@
# grin - ビルド、設定、動作確認

*Read this in other languages: [Español](build_ES.md), [Korean](build_KR.md), [日本語](build_JP.md).*
*この文章を他の言語で読む: [English](build.md), [Español](build_ES.md), [Korean](build_KR.md), [简体中文](build_ZH-CN.md).*

## 動作環境

@@ -93,7 +93,7 @@ grinは気の利いたデフォルト設定で起動するようになってお

```sh
grin help
grin wallet --help
grin server --help
grin client --help
```

@@ -126,4 +126,4 @@ x86のLinux上で`grin`をクロスコンパイルしARMバイナリを作成し

grinのマイニングに関する全ての機能は[grin-miner](https://github.com/mimblewimble/grin-miner)と呼ばれるスタンドアローンなパッケージに分離されていることに注意。

grin-minerをgrinノードと通信させるためには、`grin-server.toml`の設定ファイルで`enable_stratum_server = true`と設定し、ウォレットリスナーを起動(`grin wallet listen`)しておく必要がある。
grin-minerをgrinノードと通信させるためには、`grin-server.toml`の設定ファイルで`enable_stratum_server = true`と設定し、ウォレットリスナーを起動(`grin-wallet listen`)しておく必要がある。
@@ -1,6 +1,6 @@
# Grin - Build, Configuration, and Running

*다른 언어로 되어있는 문서를 읽으려면:[에스파냐어](build_ES.md).
*다른 언어로 되어있는 문서를 읽으려면: [English](build.md), [Español](build_ES.md), [日本語](build_JP.md), [简体中文](build_ZH-CN.md).*

## 지원하는 플랫폼들에 대해서

@@ -94,7 +94,7 @@ Grin을 작동시키는 명령어에 대한 도움말은 다음 명령어를 실

```sh
grin help
grin wallet --help
grin server --help
grin client --help
```

@@ -128,4 +128,4 @@ Rust(Cargo)는 여러 플랫폼에서 Grin을 빌드 할 수 있습니다. 그
Grin의 모든 마이닝 기능은 분리된 독랍형 패키지인 [grin-miner](https://github.com/mimblewimble/grin-miner)로 옮겨졌습니다.
일단 Grin 노드가 실행되면 실행중인 노드에 대해 grin-miner를 빌드하고 실행하여 마이닝을 시작할 수 있습니다.

grin-miner가 grin 노드와 통신 할 수 있게 하려면, `grin-server.toml` 설정 파일에서`enable_stratum_server = true`가 설정되어 있는지 확인하세요. 그 다음 Wallet listener인 `grin wallet listen` 명령어를 실행하세요 .
grin-miner가 grin 노드와 통신 할 수 있게 하려면, `grin-server.toml` 설정 파일에서`enable_stratum_server = true`가 설정되어 있는지 확인하세요. 그 다음 Wallet listener인 `grin-wallet listen` 명령어를 실행하세요 .
@@ -0,0 +1,126 @@
# Grin - 构建,配置和运行

*阅读其它语言版本: [English](build.md), [Español](build_ES.md), [日本語](build_JP.md), [Korean](build_KR.md).*

## 支持的平台

从长远来看,大多数平台都可能会得到一定程度的支持。Grin 的编写语言 `rust` 已为大多数平台建立了目标。

到目前为止进度:

* Linux x86\_64 and macOS [grin + 挖矿 + 部署]
* 暂时不支持 Windows 10 [部分 grin。暂时不支持挖矿。希望得到帮助!]

## 要求

* rust 1.34+ (使用 [rustup](https://www.rustup.rs/) - i.e. `curl https://sh.rustup.rs -sSf | sh; source $HOME/.cargo/env`)
* 如果已经安装过了 rust,只需要运行 `rustup update` 升级版本
* clang
* ncurses 和 libs (ncurses, ncursesw5)
* zlib libs (zlib1g-dev or zlib-devel)
* pkg-config
* libssl-dev
* linux-headers (有报告指出在 Alpine linux 上是必需的)
* llvm

对于基于 Debian 的发行版(Debian,Ubuntu,Mint 等),一行就可以搞定(rust 的安装除外):

```sh
apt install build-essential cmake git libgit2-dev clang libncurses5-dev libncursesw5-dev zlib1g-dev pkg-config libssl-dev llvm
```

macOS 用户:

```sh
xcode-select --install
brew install --with-toolchain llvm
brew install pkg-config
brew install openssl
```

## 构建步骤

```sh
git clone https://github.com/mimblewimble/grin.git
cd grin
cargo build --release
```

Grin也可以在 debug 模式下构建(不带 `--release` 参数,或是使用 `--debug` 或 `--verbose` 参数),但是由于加密的开销很大,这将影响快速同步的性能。

## 构建错误

详见 [故障排除](https://github.com/mimblewimble/docs/wiki/Troubleshooting)

## 构建得到了什么?

成功的构建可以提供给您:

* `target/release/grin` - grin 主要的二进制文件

默认情况下,grin 创建和使用的所有数据,配置和日志文件都位于隐藏的 `~/.grin` 目录中(位于用户主目录下)。
您可以通过编辑文件 `~/.grin/main/grin-server.toml` 来修改所有配置。

也可以让 grin 在当前目录中创建其数据文件。只需要运行

```sh
grin server config
```

它将在当前目录中生成一个 `grin-server.toml` 文件,该文件已预先配置为使用当前目录中的所有数据。
在包含 `grin-server.toml` 文件所在的目录下运行 grin 将使用该文件中的配置,而不是默认的 `~/.grin/main/grin-server.toml`。

在测试时,将 grin 二进制文件放在您的 `PATH` 中,如下所示:

```sh
export PATH=`pwd`/target/release:$PATH
```

假设您从 Grin 安装的根目录运行。

然后您可以直接运行 `grin`(尝试使用 `grin help` 获得更多选项)。

## 配置

Grin 尝试使用合理的默认值运行,并且可以通过 `grin-server.toml` 文件进行进一步配置。
该文件是在首次运行时由 grin 生成的,并且包含有关每个可用选项的文档。

虽然建议您通过 `grin-server.toml` 配置 grin 服务器,但也可以提供命令行开关以覆盖文件中的任何设置。

有关 grin 命令及其开关的帮助,请尝试:

```sh
grin help
grin wallet --help
grin client --help
```

## Docker

```sh
docker build -t grin -f etc/Dockerfile .
```
对于 floonet, 使用 `etc/Dockerfile.floonet` 代替

您可以绑定安装您的 grin 缓存以在容器中运行。

```sh
docker run -it -d -v $HOME/.grin:/root/.grin grin
```
如果您更喜欢使用 docker 命名卷,则可以传递 `-v dotgrin:/root/.grin` 以替换。
使用命名卷在创建卷时会复制默认配置。

## 跨平台构建

Rust(cargo)可以在许多平台上构建 grin,因此从理论上讲,可以在低功耗设备上运行 `grin` 作为验证节点。要在 x86 Linux 平台上交叉编译 `grin` 并生成 ARM 二进制文件,例如,为一个 Raspberry Pi。

## 使用 grin

Wiki页面 [Wallet User Guide](https://github.com/mimblewimble/docs/wiki/Wallet-User-Guide) 和链接页面提供了有关我们提供的功能,故障排除等更多信息。

## 在 Grin 中挖矿

请注意,针对 Grin 的所有挖矿功能已移至一个名为 [grin-miner](https://github.com/mimblewimble/grin-miner) 的独立软件包中。
一旦您的 Grin 代码节点启动并运行,就可以通过针对正在运行的 Grin 节点构建并运行 grin-miner 开始挖矿。

为了使 grin-miner 能够与您的 grin 节点进行通信,请确保在您的 `grin-server.toml` 配置文件中有 `enable_stratum_server = true`,并且您正在运行钱包监听器(`grin wallet listen`)。
@@ -4,7 +4,7 @@

We describe here the different methods used by a new node when joining the network
to catch up with the latest chain state. We start with reminding the reader of the
following assumptions, which are all characteristics of Grin or MimbleWimble:
following assumptions, which are all characteristics of Grin or Mimblewimble:

* All block headers include the root hash of all unspent outputs in the chain at
the time of that block.
@@ -27,7 +27,7 @@ its peers.

The security model here is similar to bitcoin. We're able to verify the whole
chain, the total work, the validity of each block, their full content, etc.
In addition, with MimbleWimble and full UTXO set commitments, even more integrity
In addition, with Mimblewimble and full UTXO set commitments, even more integrity
validation can be performed.

We do not try to do any space or bandwidth optimization in this mode (for example,
@@ -1,7 +1,7 @@
# 블록체인의 동기화

최신 노드 상태를 따라 가기 위해 네트워크에 참여할 때 새 노드가 사용하는 여러 가지 방법을 설명합니다.
먼저, 독자에게 다음과 같은 Grin 또는 MimbleWimble의 특성을 먼저 전제 하고 설명하겠습니다.
먼저, 독자에게 다음과 같은 Grin 또는 Mimblewimble의 특성을 먼저 전제 하고 설명하겠습니다.

* 해당 블록 안의 모든 블록 헤더는 체인 안에 사용하지 않는 출력값의 모든 루트해시를 가지고 있습니다.
* 입력 또는 출력은 전체 블록 상태를 무효화하지 않고선 변조되거나 위조 될 수 없습니다
@@ -14,7 +14,7 @@

이 모델은 대부분의 메이저 퍼블릭 블록체인 에서 "풀 노드"가 사용하는 모델입니다. 새로운 노드는 제네시스 블록에 대한 사전 정보를 가지고 있습니다. 노드는 네트워크의 다른 피어와 연결되어 피어에게 알려진 최신 블록(호라이즌 블록)에 도달 할 때까지 블록을 요청하기 시작합니다.

보안 모델은 비트 코인과 비슷합니다. 전체 체인, 총 작업, 각 블록의 유효성, 전체 내용 등을 검증 할 수 있습니다. 또한 MimbleWimble 및 전체 UTXO 세트 실행들을 통해 훨씬 더 무결성 검증이 잘 수행될 수 있습니다.
보안 모델은 비트 코인과 비슷합니다. 전체 체인, 총 작업, 각 블록의 유효성, 전체 내용 등을 검증 할 수 있습니다. 또한 Mimblewimble 및 전체 UTXO 세트 실행들을 통해 훨씬 더 무결성 검증이 잘 수행될 수 있습니다.

이 모드에서는 저장공간 최적화 또는 대역폭 최적화를 시도하지 않습니다 (예를 들자면 유효성 검증 후 Range proof 가 삭제 될 수 있습니다). 여기서 중요한 것은 기록 아카이브를 제공하고 나중에 확인 및 증명을 하게 하는 것입니다.

@@ -1,5 +1,7 @@
# Grin code structure

*Read this in other languages: [简体中文](code_structure_ZH-CN.md).*

Grin is built in [Rust](https://www.rust-lang.org/), a memory safe, compiled language. Performance critical parts like the Cuckoo mining algorithm are built as plugins, making it easy to swap between algorithm implementations for various hardware. Grin comes with CPU and experimental GPU support.

## Files in project root
@@ -0,0 +1,64 @@
# Grin 代码结构

*阅读其它语言版本: [English](code_structure.md).*

Grin 使用 [Rust](https://www.rust-lang.org/)编写,这是一个内存安全的编译语言。诸如 Cuckoo 挖掘算法之类的性能关键部分都是作为插件构建的,因此可以轻松地在各种硬件的算法实现之间进行交换。Grin 带有 CPU 和实验性 GPU 支持。

## 项目根目录中的文件

List of files tracked in `git` and some files you'll create when you use grin.
`git` 中跟踪的文件列表以及使用 grin 时将创建的一些文件。

- [CODE_OF_CONDUCT](../CODE_OF_CONDUCT.md) - 如果您想参与到其中,该做些什么。取自 rust,并稍作修改。
- [CONTRIBUTING](../CONTRIBUTING.md) - 如何帮助并参与其中成为 grin 的一部分。
- [Cargo.toml](../Cargo.toml) 和 Cargo.lock(本地创建,*不*在 git 中)- 定义如何编译和构建项目代码。
- [LICENSE](../LICENSE) - Apache 2.0 license
- [README](../README.md) - 您应该阅读的第一个文档,同时它列出了包含更多详细信息的进阶阅读。
- [rustfmt.toml](../rustfmt.toml) - rustfmt 的配置文件。在提交*新*代码之前需要。

## 文件夹结构

在检查了 grin,构建和使用之后,这些是您的文件夹将会有以下内容:

- `api`\
可通过 REST 访问的 ApiEndpoints 代码。
- `chain`\
区块链实现,接受一个块(请参阅 pipe.rs)并将其添加到链中,或拒绝它。
- `config`\
用于处理配置的代码。
- `core`\
所有核心类型:哈希,块,输入,输出,以及如何对其进行序列化。核心挖掘算法等。
- `doc`\
所有文档。
- `servers`\
grin 服务的许多组成部分(adapters, lib, miner, seed, server, sync, types),包括挖矿服务器。
- `keychain`\
Code for working safely with keys and doing blinding.
- `p2p`\
所有点对点连接和与协议相关的逻辑(握手,块传播等)。
- `pool`\
交易池实现的代码。
- `server`\
在启动服务器之前,您[要创建的文件夹](build_ZH-CN.md):cd 到项目根目录;mkdir server;cd server;grin server start(或 run),它将创建一个子文件夹 .grin
- `.grin`
- `chain` - 具有区块链块和相关信息的数据库
- `peers` - 一个数据库,其中包含您连接的 Grin peers 节点的列表
- `txhashset` - 包含内核,范围证明和输出的文件夹,每个文件夹都有一个 pmmr_dat.bin 文件
- `src`\
构建 grin 可执行文件的代码。
- `store`\
数据存储 - Grin 在 LMDB(键值嵌入式数据存储)周围使用了接近零成本的 Rust 包装器。
- `target`\
在编译和构建过程完成之后,grin 的二进制文件所在的位置。
万一遇到麻烦,请参阅[troubleshooting](https://github.com/mimblewimble/docs/wiki/Troubleshooting)
- `util`\
底层 rust 工具。
- `wallet`\
简单的命令行钱包实现。将会创建:
- `wallet_data` - 储存您“输出”的数据库,一旦被确认并到期,就可以通过 [`grin wallet send`](wallet/usage.md) 命令来花费掉。(本地创建,*不*包含在 git 中)
- `wallet.seed` - 您的钱包种子。(本地创建,*不*包含在 git 中)

## grin 依赖

- [secp256k1](https://github.com/mimblewimble/rust-secp256k1-zkp)
libsecp256k1 的集成和 rust 绑定,还有一些更动等待更新。在 util/Cargo.toml 中被导入。
@@ -38,8 +38,8 @@ Grin은 동시에 출력 셋에 중복된 실행값(commitment)이 존재하는
아래와 같은 몇 가지가 이런 상황을 복잡하게 만듭니다.

1. 특히 빈 블록의 경우 두 블록이 동일한 보상을 받을 수 있습니다. 뿐만 아니라 거래 수수료가 있는 비어 있지 않은 블록의 경우에도 가능합니다.
2. 코인베이스 출력이 아닌 출력값이 코인베이스 출력과 동일한 값을 가질 수 있습니다.
3. 권장되진 않지만 마이너가 비밀키(private key)를 재사용 할 수 있습니다.
1. 코인베이스 출력이 아닌 출력값이 코인베이스 출력과 동일한 값을 가질 수 있습니다.
1. 권장되진 않지만 마이너가 비밀키(private key)를 재사용 할 수 있습니다.

Grin은 동시에 출력 셋에 중복된 실행값(commitment)이 존재하는 것을 허용하지 않습니다. 그러나 출력 셋은 특정 체인 분리(fork)의 상태에 따라 다릅니다.
같은 순간에 있는 서로 다른 체인에 중복 된 *동일한* 실행값(commitment)가 동시에 *존재할 수 있습니다*. 그리고 이러한 중복된 실행값은 다른 "lock height"를 가질 수 있습니다. 그리고 각각 다른 체인에서 이런 실행값들은 코인베이스 만기가 다 되어서 소비 할 수 있수도 있습니다.
@@ -15,7 +15,7 @@ Note that this requires both chains to support hash preimages: all Bitcoin scrip

(So far this is the same as the classic Bitcoin atomic swap by Tier Nolan [3]; the difference in locktimes is because during part of the protocol Igno can take his coins but I can't yet take mine, so I want to be sure he can't do this and simultaneously back out. This way if he takes the coins, I can take mine, but if he backs out then I've long since backed out, and these are his only possibilities.)

2. Igno and I construct transactions that move the locked coins to their final destinations. We agree on the kernels and signature nonces, and in particular on signature challenges e and e'.
2. Igno and I construct transactions that move the locked coins to their final destinations. We agree on the kernels and signature nonces, and in particular on signature challenges e and e'.

3. Igno sends me a "conversion" keys sconv which satisfies

@@ -248,7 +248,7 @@ changes (as does the nonce). Because it depends on the other party's nonce,
this might require an additional round of interaction per channel update.

Note also that nothing I've said depends at all on what's being signed. This
means this works just as well for MimbleWimble as it would for Bitcoin+Schnorr
means this works just as well for Mimblewimble as it would for Bitcoin+Schnorr
as it would for Monero (with a multisig ring-CT construction) as it would
for Ethereum+Schnorr. Further, it can link transactions across chains." - Andrew Poelstra
- https://lists.launchpad.net/mimblewimble/msg00086.html
@@ -1,6 +1,6 @@
# Fast Sync

*Read this in other languages: [Español](fast-sync_ES.md), [Korean](fast-sync_KR.md).*
*Read this in other languages: [Español](fast-sync_ES.md), [Korean](fast-sync_KR.md), [简体中文](fast-sync_ZH-CN.md).*

In Grin, we call "sync" the process of synchronizing a new node or a node that
hasn't been keeping up with the chain for a while, and bringing it up to the
@@ -1,5 +1,7 @@
# Sincronización rápida

*Lea esto en otros idiomas: [English](fast-sync.md), [简体中文](fast-sync_ZH-CN.md), [Korean](fast-sync_KR.md).*

En Grin, llamamos "sync" al proceso de sincronizar un nuevo nodo o un nodo que no ha estado al día con la cadena durante un
tiempo, y llevarlo hasta el último bloque conocido. La Descarga Inicial de Bloques (o IBD) es usada a menudo por otras cadenas
de bloques, pero esto es problemático para Grin ya que típicamente no descarga bloques completos..
@@ -1,6 +1,6 @@
# 빠른 동기화

*이 문서를 다른 언어로 읽으시려면: [에스파냐어](fast-sync_ES.md).*
*다른 언어로 되어있는 문서를 읽으려면: [English](fast-sync.md), [Español](fast-sync_ES.md), [简体中文](fast-sync_ZH-CN.md).*

Grin에서는 새로 네트워크에 참여하는 노드나 얼마 동안 체인을 따라 잡지 않은 노드(의 상태)를 알려진 최신 블록으로( 원문에서는 most-worked block 이라고 표현- 역자 주 ) 가져 오는 프로세스를 "동기화"라고 부릅니다. Initial Block Download (또는 IBD)는 다른 블록 체인에서 자주 사용되지만 빠른 동기화를 사용하는 Grin에서는 일반적으로 전체 블록을 다운로드하지 않으므로 문제가 됩니다.

@@ -0,0 +1,15 @@
# 快速同步

*阅读其它语言版本: [English](fast-sync.md), [Español](fast-sync_ES.md), [Korean](fast-sync_KR.md).*

在 Grin 中,我们把同步一个新节点或一段时间未跟上链的节点,并将其升级到最新的已知工作量最大的块的过程称为“同步”("sync")。 初始块下载(或 IBD)通常在其他区块链中被采用,但这对 Grin 来说这是有问题的,因为它通常不会下载完整的块。

简而言之,在 Grin 中的快速同步会执行以下操作:

1. 按照其他节点的建议,在最有效的链上按块下载所有块头(block header)。
1. 找到距链头(chain head)足够靠后的一个头(header)。这称为节点视界(node horizon),因为它是节点可以在不触发另一个新的完整同步的情况下在新分支上重组其链的最远位置。
1. 下载处于视界的完整状态,包括未花费(unspent)状态输出,范围证明(range proof)和内核数据(kernel data),以及所有相应的 MMR。这些其实只是一个大的 zip 文件。
1. 验证完整状态。
1. 从视界开始下载完整的块直到链头。

在本节的其余部分,我们将详细阐述每个步骤。
@@ -1,4 +1,4 @@
# Grin/MimbleWimble for Bitcoiners
# Grin/Mimblewimble for Bitcoiners

*Read this in other languages: [Korean](grin4bitcoiners_KR.md).*

@@ -7,24 +7,24 @@
There are 3 main properties of Grin transactions that make them private:

1. There are no addresses.
2. There are no amounts.
3. 2 transactions, one spending the other, can be merged in a block to form only one, removing all intermediary information.
1. There are no amounts.
1. 2 transactions, one spending the other, can be merged in a block to form only one, removing all intermediary information.

The 2 first properties mean that all transactions are indistinguishable from one another. Unless you directly participated in the transaction, all inputs and outputs look like random pieces of data (in lingo, they're all random curve points).

Moreover, there are no more transactions in a block. A Grin block looks just like one giant transaction and all original association between inputs and outputs is lost.

## Scalability

As explained in the previous section, thanks to the MimbleWimble transaction and block format we can merge transactions when an output is directly spent by the input of another. It's as if when Alice gives money to Bob, and then Bob gives it all to Carol, Bob was never involved and his transaction is actually never even seen on the blockchain.
As explained in the previous section, thanks to the Mimblewimble transaction and block format we can merge transactions when an output is directly spent by the input of another. It's as if when Alice gives money to Bob, and then Bob gives it all to Carol, Bob was never involved and his transaction is actually never even seen on the blockchain.

Pushing that further, between blocks, most outputs end up being spent sooner or later by another input. So *all spent outputs can be safely removed*. And the whole blockchain can be stored, downloaded and fully verified in just a few gigabytes or less (assuming a number of transactions similar to bitcoin).

This means that the Grin blockchain scales with the number of users (unspent outputs), not the number of transactions. At the moment, there is one caveat to that: a small piece of data (called a *kernel*, about 100 bytes) needs to stay around for each transaction. But we're working on optimizing that as well.

## Scripting

Maybe you've heard that MimbleWimble doesn't support scripts. And in some way, that's true. But thanks to cryptographic trickery, many contracts that in Bitcoin would require a script can be achieved with Grin using properties of Elliptic Curve Cryptography. So far, we know how to do:
Maybe you've heard that Mimblewimble doesn't support scripts. And in some way, that's true. But thanks to cryptographic trickery, many contracts that in Bitcoin would require a script can be achieved with Grin using properties of Elliptic Curve Cryptography. So far, we know how to do:

* Multi-signature transactions.
* Atomic swaps.
@@ -43,7 +43,7 @@ Nope, no address. All outputs in Grin are unique and have no common data with an

### If transaction information gets removed, can I just cheat and create money?

No, and this is where MimbleWimble and Grin shine. Confidential transactions are a form of [homomorphic encryption](https://en.wikipedia.org/wiki/Homomorphic_encryption). Without revealing any amount, Grin can verify that the sum of all transaction inputs equal the sum of transaction outputs, plus the fee. Going even further, comparing the sum of all money created by mining with the total sum of money that's being held, Grin nodes can check the correctness of the total money supply.
No, and this is where Mimblewimble and Grin shine. Confidential transactions are a form of [homomorphic encryption](https://en.wikipedia.org/wiki/Homomorphic_encryption). Without revealing any amount, Grin can verify that the sum of all transaction inputs equal the sum of transaction outputs, plus the fee. Going even further, comparing the sum of all money created by mining with the total sum of money that's being held, Grin nodes can check the correctness of the total money supply.

### If I listen to transaction relay, can't I just figure out who they belong to before being cut-through?

@@ -1,28 +1,28 @@
# Bitcoiner를 위한 Grin/MimbleWimble
# Bitcoiner를 위한 Grin/Mimblewimble

## 프라이버시와 대체가능성(Fungibility)

Grin 트랜잭션에는 트랜잭션을 프라이빗하게 만드는 3 가지 주요 속성이 있습니다.

1. 주소가 없습니다.
2. 금액은 없습니다.
3. 하나는 다른 트랜잭션을 사용하는 2 개의 트랜잭션을 하나의 블록으로 병합하여 모든 중간 정보를 제거 할 수 있습니다.
1. 금액은 없습니다.
1. 하나는 다른 트랜잭션을 사용하는 2 개의 트랜잭션을 하나의 블록으로 병합하여 모든 중간 정보를 제거 할 수 있습니다.

처음두 가지 속성은 모든 트랜잭션을 서로 구별 할 수 없음을 의미합니다. 거래에 직접 참여하지 않는 한 모든 입력과 출력은 임의의 데이터 조각처럼 보입니다 (말하자면 출력값과 입력값 모두 랜덤한 곡선 위의 점입니다).

또한 블록에 트랜잭션이 없습니다. Grin 블록은 마치 하나의 거대한 트랜잭션처럼 보이고 입력과 출력 사이의 모든 연관성이 사라집니다.

## 확장성(Scalability)

이전 섹션에서 설명한 것처럼 MimbleWimble 트랜잭션과 블록 포맷 때문에 출력이 다른 트랜잭션의 입력에 의해 직접 소비(spent) 될 때 트랜잭션을 합칠 수 있습니다. (예를 들어 - 문맥의 부드러움을 위해 첨가함, 역자 주 )앨리스가 밥에게 돈을 주고 밥이 캐럴에게 돈을 주면 밥은 결코 연관되지 않은것처럼 보이고 실제로 밥의 트랜잭션은 블록체인에서 보이지 않습니다.
이전 섹션에서 설명한 것처럼 Mimblewimble 트랜잭션과 블록 포맷 때문에 출력이 다른 트랜잭션의 입력에 의해 직접 소비(spent) 될 때 트랜잭션을 합칠 수 있습니다. (예를 들어 - 문맥의 부드러움을 위해 첨가함, 역자 주 )앨리스가 밥에게 돈을 주고 밥이 캐럴에게 돈을 주면 밥은 결코 연관되지 않은것처럼 보이고 실제로 밥의 트랜잭션은 블록체인에서 보이지 않습니다.

더 많은 트랜잭션들을 블록에 밀어 넣으면 대부분의 출력이 다른 입력에 의해 조만간 소비됩니다. 따라서 *모든 소비 출력값을(spent outputs) 안전하게 제거 할 수 있습니다*. 그리고 (bitcoin과 유사한 트랜잭션의 수를 가정 한다면)몇 GB 이하로 전체 블록 체인을 저장하고, 다운로드하고, 완벽하게 검증 할 수 있습니다.

즉, Grin 블록 체인은 트랜잭션 수가 아닌 사용자 수 (사용되지 않은 출력)에 따라 확장됩니다. 그러나 현재 하나 주의하자면 (kernel 이라고 불리는 약 100 바이트의 데이터) 작은 데이터 조각은 각 트랜잭션마다 기다릴 필요가 있습니다. 그러나 이를 최적화하기 위해 노력하고 있습니다.

## 스크립팅(Scripting)

아마도 MimbleWimble은 스크립트(Script)를 지원하지 않는다는 말을 들었을 겁니다. 어떤면에서 이 말은 사실입니다. 그러나 암호화 기법 덕분에 Bitcoin에서 스크립트를 필요로 하는 많은 계약은 Elliptic Curve Cryptography의 속성을 사용하여 Grin으로 작성 할 수 있습니다. 지금까지 아래와 같은 구현을 어떻게 하는지 하는지 알고 있습니다. :
아마도 Mimblewimble은 스크립트(Script)를 지원하지 않는다는 말을 들었을 겁니다. 어떤면에서 이 말은 사실입니다. 그러나 암호화 기법 덕분에 Bitcoin에서 스크립트를 필요로 하는 많은 계약은 Elliptic Curve Cryptography의 속성을 사용하여 Grin으로 작성 할 수 있습니다. 지금까지 아래와 같은 구현을 어떻게 하는지 하는지 알고 있습니다. :

* Multi-signature transactions.
* 아토믹 스왑 (Atomic swap).
@@ -41,7 +41,7 @@ Grin 트랜잭션에는 트랜잭션을 프라이빗하게 만드는 3 가지

### 트랜잭션 정보가 제거된다면 사기는 치거나 코인을 만들어 낼수 있지 않나요?

아니요, MimbleWimble과 Grin의 장점이 돋보이는것이 바로 이런 점 입니다. Confidential transaction은 [동형(homomorphic)암호](https://en.wikipedia.org/wiki/Homomophic_encryption)의 한 형태입니다. 금액을 드러내지 않고 Grin은 모든 거래의 입력값의 합계가 거래의 출력값의 합계 + 수수료를 합한 것과 일치하는지 확인이 가능합니다. 더해서 마이닝으로 만들어진 모든 코인의 합계와 보유하고 있는 총 금액과 비교하여, Grin노드는 코인의 모두 얼마나 공급 되었는지 그 정확성을 확인할 수 있습니다.
아니요, Mimblewimble과 Grin의 장점이 돋보이는것이 바로 이런 점 입니다. Confidential transaction은 [동형(homomorphic)암호](https://en.wikipedia.org/wiki/Homomorphic_encryption)의 한 형태입니다. 금액을 드러내지 않고 Grin은 모든 거래의 입력값의 합계가 거래의 출력값의 합계 + 수수료를 합한 것과 일치하는지 확인이 가능합니다. 더해서 마이닝으로 만들어진 모든 코인의 합계와 보유하고 있는 총 금액과 비교하여, Grin노드는 코인의 모두 얼마나 공급 되었는지 그 정확성을 확인할 수 있습니다.

### 만약 트랜잭션 릴레이를 받는다면 컷 쓰루 전에는(cut-through) 누구에게 트랜잭션이 속하는지 알 수 없지 않나요?

@@ -0,0 +1,60 @@
# Grin/Mimblewimble 致比特币持有者

* 阅读其他语言版本:[Korean](grin4bitcoiners_KR.md)、 [English](grin4bitcoiners.md)

## 隐私和可互换性

Grin 链上交易有三个隐私特性:

1. 没有交易地址;
2. 没有交易金额;
3. 两笔交易,一笔输入是另一笔输出,可以在一个区块中混合成为一笔交易,清除所有中间信息。

前两个特性意味着所有交易不可识别。只要直接进行交易,所有输入、输出数据都显示为随机数(专业术语就是“随机曲线点”)。

另外,单个区块中可以添加更多交易。Grin 区块内数据就像是一笔大交易,原始交易所有输入和输出数据都被删除。

## 扩展性

如上所述,由于 Mimblewimble 交易和区块格式,交易可以合并,一笔输出可以从另一笔的输入直接花费。例如,甲给乙转账,乙之后转账给丙。这一系列交易中可以去除乙的数据,乙的交易数据绝对不会在链上显示。

详细来说,区块间多数交易输出迟早都会变成另笔交易的输入。因此,*所有输出花费都可以安全删除*。而且假设 Grin 交易量与比特币相当,仅需几个 GB 或更少容量即可存储、下载和验证完整区块。

这就意味着 Grin 区块链可扩展用户数量,而不是交易数量。目前有个问题是:每笔交易需要保留小量数据(“内核”,大约 100 字节)。但开发团队在努力优化。

## 脚本

或许你听说过 Mimblewimble 协议不支持脚本 (Script)。某种程度上这是事实。但利用密码学方法,许多需要脚本的比特币合约在 Grin 上可以使用椭圆曲线密码学 (Elliptic Curve Cryptography) 实现。迄今为止已知的方法有:

* 多签交易
* 原子交换
* 时间锁定交易和输出
* 闪电网络

## 发行率

比特币出块时间为 10 分钟,初始每个块奖励 50 btc,每四年减半,直到 2100 万比特币全部挖出。Grin 的货币发行率为线性增长,也就是说不会降低。目前为每 60 秒出块,每个区块奖励 60 Grin。这种发行方也有效,因为 1)稀释率逐渐为零;2)每年丢失或销毁的币数量也不小。

## 常见问答集

### 什么?没有地址?

没有地址。Grin 交易中所有输出是单独数据,与之前的输出不共享数据。不用已知地址发送货币,取而代之的是交互式交易,两个(或更多)钱包间彼此交换数据。这种交互方式不需要双方保持同时在线。实际上,有很多方式可以在两个程序间私密安全地进行交互。也可以使用电子邮件或 Signal(或“信鸽”)来进行此种交互通讯。

### 如果删除交易信息,是不是就能欺骗并造新币?

不可以,这就是 Mimblewimble 协议和 Grin 的出众之处。机密交易是一种[同态加密](https://en.wikipedia.org/wiki/Homomorphic_encryption)形式。Grin 不用公开交易金额,即可验证交易输入总额等于交易输出与交易费总和。也就是说,比较挖矿产生的货币总量与现有货币总量,Grin 节点来检查货币总量是否准确。

### 如果监听交易中继,我是不是就能在交易核销前分析出持币人信息?

你可以分析出哪笔交易的输出花费,但也仅限这么多信息。所有输入与输出都是随机数据,因此你没法识别钱是不是被转走,是不是还是同一个人持有,哪笔输出是真正的转账,及哪笔是找零等等。Grin 交易完成*没有可识别信息*。

另外,Grin 利用[蒲公英中继](dandelion/dandelion.md)技术来隐藏交易 IP 地址和客户端,并允许汇集交易,从而提高匿名性。

### 出现量子计算机怎么办?

每笔 Grin 的输出交易中,也包含哈希数据。这些数据抗量子计算。如果出现量子计算机,我们可以安全引入其他验证方式,来保护现有货币系统不会遭到黑客攻击。

### 所有魔法是怎么实现的?

详情请参阅[技术简介](intro.md)。
@@ -1,13 +1,13 @@
# Introduction to MimbleWimble and Grin
# Introduction to Mimblewimble and Grin

*Read this in other languages: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*

MimbleWimble is a blockchain format and protocol that provides
Mimblewimble is a blockchain format and protocol that provides
extremely good scalability, privacy and fungibility by relying on strong
cryptographic primitives. It addresses gaps existing in almost all current
blockchain implementations.

Grin is an open source software project that implements a MimbleWimble
Grin is an open source software project that implements a Mimblewimble
blockchain and fills the gaps required for a full blockchain and
cryptocurrency deployment.

@@ -18,7 +18,7 @@ The main goal and characteristics of the Grin project are:
* Scales mostly with the number of users and minimally with the number of
transactions (<100 byte `kernel`), resulting in a large space saving compared
to other blockchains.
* Strong and proven cryptography. MimbleWimble only relies on Elliptic Curve
* Strong and proven cryptography. Mimblewimble only relies on Elliptic Curve
Cryptography which has been tried and tested for decades.
* Design simplicity that makes it easy to audit and maintain over time.
* Community driven, encouraging mining decentralization.
@@ -29,20 +29,20 @@ A detailed post on the step-by-step of how Grin transactions work (with graphics

This document is targeted at readers with a good
understanding of blockchains and basic cryptography. With that in mind, we attempt
to explain the technical buildup of MimbleWimble and how it's applied in Grin. We hope
to explain the technical buildup of Mimblewimble and how it's applied in Grin. We hope
this document is understandable to most technically-minded readers. Our objective is
to encourage you to get interested in Grin and contribute in any way possible.

To achieve this objective, we will introduce the main concepts required for a good
understanding of Grin as a MimbleWimble implementation. We will start with a brief
understanding of Grin as a Mimblewimble implementation. We will start with a brief
description of some relevant properties of Elliptic Curve Cryptography (ECC) to lay the
foundation on which Grin is based and then describe all the key elements of a
MimbleWimble blockchain's transactions and blocks.
Mimblewimble blockchain's transactions and blocks.

### Tiny Bits of Elliptic Curves

We start with a brief primer on Elliptic Curve Cryptography, reviewing just the
properties necessary to understand how MimbleWimble works and without
properties necessary to understand how Mimblewimble works and without
delving too much into the intricacies of ECC. For readers who would want to
dive deeper into those assumptions, there are other opportunities to
[learn more](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).
@@ -67,15 +67,15 @@ The previous formula `(k+j)*H = k*H + j*H`, with _k_ and _j_ both private
keys, demonstrates that a public key obtained from the addition of two private
keys (`(k+j)*H`) is identical to the addition of the public keys for each of those
two private keys (`k*H + j*H`). In the Bitcoin blockchain, Hierarchical
Deterministic wallets heavily rely on this principle. MimbleWimble and the Grin
Deterministic wallets heavily rely on this principle. Mimblewimble and the Grin
implementation do as well.

### Transacting with MimbleWimble
### Transacting with Mimblewimble

The structure of transactions demonstrates a crucial tenet of MimbleWimble:
The structure of transactions demonstrates a crucial tenet of Mimblewimble:
strong privacy and confidentiality guarantees.

The validation of MimbleWimble transactions relies on two basic properties:
The validation of Mimblewimble transactions relies on two basic properties:

* **Verification of zero sums.** The sum of outputs minus inputs always equals zero,
proving that the transaction did not create new funds, _without revealing the actual amounts_.
@@ -142,7 +142,7 @@ Which as a consequence requires that:

ri1 + ri2 = ro3

This is the first pillar of MimbleWimble: the arithmetic required to validate a
This is the first pillar of Mimblewimble: the arithmetic required to validate a
transaction can be done without knowing any of the values.

As a final note, this idea is actually derived from Greg Maxwell's
@@ -154,7 +154,7 @@ applied to Bitcoin.
#### Ownership

In the previous section we introduced a private key as a blinding factor to obscure the
transaction's values. The second insight of MimbleWimble is that this private
transaction's values. The second insight of Mimblewimble is that this private
key can be leveraged to prove ownership of the value.

Alice sends you 3 coins and to obscure that amount, you chose 28 as your
@@ -219,7 +219,7 @@ fees), is called a _transaction kernel_ and is checked by all validators.

This section elaborates on the building of transactions by discussing how change is
introduced and the requirement for range proofs so all values are proven to be
non-negative. Neither of these are absolutely required to understand MimbleWimble and
non-negative. Neither of these are absolutely required to understand Mimblewimble and
Grin, so if you're in a hurry, feel free to jump straight to
[Putting It All Together](#putting-it-all-together).

@@ -249,7 +249,7 @@ and -3 and still obtain a well-balanced transaction, following the definition in
the previous sections. This can't be easily detected because even if _x_ is
negative, the corresponding point `x*H` on the curve looks like any other.

To solve this problem, MimbleWimble leverages another cryptographic concept (also
To solve this problem, Mimblewimble leverages another cryptographic concept (also
coming from Confidential Transactions) called
range proofs: a proof that a number falls within a given range, without revealing
the number. We won't elaborate on the range proof, but you just need to know
@@ -262,12 +262,12 @@ The requirement to know both values to generate valid rangeproofs is an importan
Carol's UTXO: 113*G + 2*H
Attacker's output: (113 + 99)*G + 2*H

which can be signed by the attacker since Carols private key of 113 cancels due to the adverserial choice of keys. The new output could only be spent by both the attacker and Carol together. However, while the attacker can provide a valid signature for the transaction, it is impossible to create a valid rangeproof for the new output invalidating this attack.
which can be signed by the attacker since Carol's private key of 113 cancels due to the adversarial choice of keys. The new output could only be spent by both the attacker and Carol together. However, while the attacker can provide a valid signature for the transaction, it is impossible to create a valid rangeproof for the new output, invalidating this attack.


#### Putting It All Together

A MimbleWimble transaction includes the following:
A Mimblewimble transaction includes the following:

* A set of inputs, that reference and spend a set of previous outputs.
* A set of new outputs that include:
@@ -280,13 +280,13 @@ A MimbleWimble transaction includes the following:

### Blocks and Chain State

We've explained above how MimbleWimble transactions can provide
We've explained above how Mimblewimble transactions can provide
strong anonymity guarantees while maintaining the properties required for a valid
blockchain, i.e., a transaction does not create money and proof of ownership
is established through private keys.

The MimbleWimble block format builds on this by introducing one additional
concept: _cut-through_. With this addition, a MimbleWimble chain gains:
The Mimblewimble block format builds on this by introducing one additional
concept: _cut-through_. With this addition, a Mimblewimble chain gains:

* Extremely good scalability, as the great majority of transaction data can be
eliminated over time, without compromising security.
@@ -318,11 +318,11 @@ The same holds true for blocks themselves once we realize a block is simply a se

sum(outputs) - sum(inputs) = sum(kernel_excess)

Simplifying slightly, (again ignoring transaction fees) we can say that MimbleWimble blocks can be treated exactly as MimbleWimble transactions.
Simplifying slightly, (again ignoring transaction fees) we can say that Mimblewimble blocks can be treated exactly as Mimblewimble transactions.

##### Kernel Offsets

There is a subtle problem with MimbleWimble blocks and transactions as described above. It is possible (and in some cases trivial) to reconstruct the constituent transactions in a block. This is clearly bad for privacy. This is the "subset" problem - given a set of inputs, outputs and transaction kernels a subset of these will recombine to reconstruct a valid transaction.
There is a subtle problem with Mimblewimble blocks and transactions as described above. It is possible (and in some cases trivial) to reconstruct the constituent transactions in a block. This is clearly bad for privacy. This is the "subset" problem - given a set of inputs, outputs and transaction kernels a subset of these will recombine to reconstruct a valid transaction.

For example, given the following two transactions -

@@ -401,7 +401,7 @@ A block is simply built from:
* The signatures generated using the excess value.
* The mining fee.

When structured this way, a MimbleWimble block offers extremely good privacy
When structured this way, a Mimblewimble block offers extremely good privacy
guarantees:

* Intermediate (cut-through) transactions will be represented only by their transaction kernels.
@@ -424,17 +424,17 @@ Generalizing, we conclude that the chain state (excluding headers) at any point
in time can be summarized by just these pieces of information:

1. The total amount of coins created by mining in the chain.
2. The complete set of unspent outputs.
3. The transactions kernels for each transaction.
1. The complete set of unspent outputs.
1. The transactions kernels for each transaction.

The first piece of information can be deduced just using the block
height (its distance from the genesis block). And both the unspent outputs and the
transaction kernels are extremely compact. This has 2 important consequences:

* The state a given node in a MimbleWimble blockchain needs to maintain is very
* The state a given node in a Mimblewimble blockchain needs to maintain is very
small (on the order of a few gigabytes for a bitcoin-sized blockchain, and
potentially optimizable to a few hundreds of megabytes).
* When a new node joins a network building up a MimbleWimble chain, the amount of
* When a new node joins a network building up a Mimblewimble chain, the amount of
information that needs to be transferred is also very small.

In addition, the complete set of unspent outputs cannot be tampered with, even
@@ -444,7 +444,7 @@ factors in the outputs.

### Conclusion

In this document we covered the basic principles that underlie a MimbleWimble
In this document we covered the basic principles that underlie a Mimblewimble
blockchain. By using the addition properties of Elliptic Curve Cryptography, we're
able to build transactions that are completely opaque but can still be properly
validated. And by generalizing those properties to blocks, we can eliminate a large
@@ -1,42 +1,42 @@
# Einführung in MimbleWimble und Grin
# Einführung in Mimblewimble und Grin

*In anderen Sprachen lesen: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md)*
*In anderen Sprachen lesen: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md)*

MimbleWimble ist ein Blockchain-Format und Protokoll, welches auf starke kryptographische Primitiven setzt und dadurch äußerst gute Skalierbarkeit, Privatsphäre und Fungibilität bietet. Es befasst sich mit Lücken, die in fast allen gegenwärtigen Blockchainimplementierungen existieren.
Mimblewimble ist ein Blockchain-Format und Protokoll, welches auf starke kryptographische Primitiven setzt und dadurch äußerst gute Skalierbarkeit, Privatsphäre und Fungibilität bietet. Es befasst sich mit Lücken, die in fast allen gegenwärtigen Blockchainimplementierungen existieren.

Grin ist ein Open-Source-Softwareprojekt, dass eine MimbleWimble-Blockchain implementiert und die für den Einsatz einer vollständigen Blockchain und Kryptowährung nötigen Lücken schließt.
Grin ist ein Open-Source-Softwareprojekt, das eine Mimblewimble-Blockchain implementiert und die für den Einsatz einer vollständigen Blockchain und Kryptowährung nötigen Lücken schließt.

Das Hauptziel und die Charakteristika des Grin-Projekts sind wie folgt:

* Standardmäßige Privatsphäre. Dies ermöglicht volle Fungibilität, ohne die Fähigkeit auszuschließen, Informationen nach Bedarf selektiv preisgeben zu können.
* Skaliert hauptsächlich mit der Anzahl der Nutzer und minimal mit der Anzahl an Transaktionen (<100 byte `kernel`), was zu hoher Platzsparung im Vergleich zu anderen Blockchains führt.
* Starke und bewährte Kryptografie. MimbleWimble setzt nur auf seit Jahrzehnten erprobte Elliptische-Kurven-Kryptografie.
* Starke und bewährte Kryptografie. Mimblewimble setzt nur auf seit Jahrzehnten erprobte Elliptische-Kurven-Kryptografie.
* Einfachheit des Designs, die das dauerhafte Auditieren und Aufrechterhalten leicht gestaltet.
* Von der Gemeinschaft gelenkt, die Dezentralisierung des Minings fördernd.

## Tongue Tying für Jedermann

Dieses Dokument richtet sich an Leser, die ein gutes Verständnis von Blockchain und grundlegender Kryptografie haben. Vor diesem Hintergrund sind wir bestrebt, den technischen Aufbau von MimbleWimble, sowie dessen Einsatz in Grin zu erklären. Wir hoffen, dass dieses Dokument für die meisten technikbegeisterten Leser verständlich ist. Unser Ziel ist es, dich für Grin zu begeistern und dein Interesse zu wecken, dich in jeder möglichen Weise einzubringen.
Dieses Dokument richtet sich an Leser, die ein gutes Verständnis von Blockchain und grundlegender Kryptografie haben. Vor diesem Hintergrund sind wir bestrebt, den technischen Aufbau von Mimblewimble, sowie dessen Einsatz in Grin zu erklären. Wir hoffen, dass dieses Dokument für die meisten technikbegeisterten Leser verständlich ist. Unser Ziel ist es, dich für Grin zu begeistern und dein Interesse zu wecken, dich in jeder möglichen Weise einzubringen.

Um dieses Ziel zu erreichen, führen wir die für ein gutes Verständnis von Grin als MimbleWimble-Umsetzung nötigen Hauptkonzepte ein. Wir beginnen mit einer kurzen Erläutering einiger relevanter Eigenschaften der Elliptischen-Kurven-Kryptografie (ECC), um die Grundlagen für Grin zu legen und anschließend die Kernelemente von Transaktionen und Blocks im MimbleWimble-Blockchain zu beschreiben.
Um dieses Ziel zu erreichen, führen wir die für ein gutes Verständnis von Grin als Mimblewimble-Umsetzung nötigen Hauptkonzepte ein. Wir beginnen mit einer kurzen Erläuterung einiger relevanter Eigenschaften der Elliptischen-Kurven-Kryptografie (ECC), um die Grundlagen für Grin zu legen und anschließend die Kernelemente von Transaktionen und Blocks in der Mimblewimble-Blockchain zu beschreiben.

### Tiny Bits of Elliptic Curves

Wir beginnen mit einer kurzen Einführung in Elliptische-Kurven-Kryptografie, wobei wir nur die Eigenschaften betrachten, die für das Verständnis von MimbleWimbles Funktionsweise nötig sind, ohne die Feinheiten von ECC eingehend zu vertiefen. Für Leser, die tiefer in diese Vorraussetzungen einzutauchen wünschen, gibt es weitere Möglichkeiten [mehr zu lernen](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).
Wir beginnen mit einer kurzen Einführung in Elliptische-Kurven-Kryptografie, wobei wir nur die Eigenschaften betrachten, die für das Verständnis von Mimblewimbles Funktionsweise nötig sind, ohne die Feinheiten von ECC eingehend zu vertiefen. Für Leser, die tiefer in diese Voraussetzungen einzutauchen wünschen, gibt es weitere Möglichkeiten [mehr zu lernen](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).

Eine elliptische Kurve zum Zwecke der Kryptografie ist ein großes Set an Punkten, die wir _C_ nennen. Diese Punkte können von Integern (auch Skalare genannt) addiert, substrahiert, oder multipliziert werden. Mit einem Integer _k_ und mittels einer Operation der skalaren Multiplikation können wir `k*H` errechnen, was auch einen Punkt auf der Kurve _C_ darstellt. Mit einem weiteren Integer _j_ können wir ferner `(k+j)*H` errechnen, was `k*H + j*H` gleicht. Diese Addition- und Skalarmultiplikationsoperationen auf einer elliptischen Kurve behalten die kommutativen und assoziativen Eigenschaften der Addition und Multiplikation bei:

(k+j)*H = k*H + j*H

Wenn wir in ECC eine sehr große Zahl _k_ als privaten Schlüssel wählen, gilt `k*H` als der korrespondierende öffentliche Schlüssel. Selbst wenn der Wert des öffentlichen Schlüssels `k*H` bekannt ist, ist die Ableitung von _k_ nahezu unmöglich (oder anders ausgedrückt, während die Multiplikation trivial ist, ist die "Division" durch Kurvenpunkte extrem schwierig).

Die vorherige Formel `(k+j)*H = k*H + j*H`, mit jeweils _k_ und _j_ als privaten Schlüsseln, demonstriert, dass ein aus der Addition zweier privater Schlüssel (`(k+j)*H`) erhaltener öffentlicher Schlüssel identisch mit der Addition der öffentlichen Schlüssel für jeden der zwei privaten Schlüssel (`k*H + j*H`) ist. In der Bitcoin-Blockchain stützen sich Hierarchical Deterministic Wallets stark auf dieses Prinzip. Gleiches gilt auch für MimbleWimble und die Grin-Implementierung.
Die vorherige Formel `(k+j)*H = k*H + j*H`, mit jeweils _k_ und _j_ als privaten Schlüsseln, demonstriert, dass ein aus der Addition zweier privater Schlüssel (`(k+j)*H`) erhaltener öffentlicher Schlüssel identisch mit der Addition der öffentlichen Schlüssel für jeden der zwei privaten Schlüssel (`k*H + j*H`) ist. In der Bitcoin-Blockchain stützen sich Hierarchical Deterministic Wallets stark auf dieses Prinzip. Gleiches gilt auch für Mimblewimble und die Grin-Implementierung.

### Transaktionen mit MimbleWimble
### Transaktionen mit Mimblewimble

Die Struktur von Transaktionen veranschaulicht einen wesentlichen Grundsatz von MimbleWimble: starke Privatsphäre und Garantie der Vertraulichkeit.
Die Struktur von Transaktionen veranschaulicht einen wesentlichen Grundsatz von Mimblewimble: starke Privatsphäre und Garantie der Vertraulichkeit.

Die Validierung von MimbleWimble-Transaktionen hängt von zwei grundlegenden Eigenschaften ab:
Die Validierung von Mimblewimble-Transaktionen hängt von zwei grundlegenden Eigenschaften ab:

* **Verifizierung von Zero Sums.** Die Summe der Outputs minus Inputs ergibt immer Null, was beweist, dass die Transaktion keine neuen Gelder erschaffen hat, _ohne dabei die tatsächlichen Beträge zu enthüllen._
* **Besitz von privaten Schlüsseln.** Wie bei den meisten anderen Kryptowährungen ist das Eigentum der Transtaktionsoutputs durch den Besitz der ECC-Privatschlüssel garantiert. Jedoch wird der Beweis, dass eine Entität jene privaten Schlüssel besitzt, nicht durch das direkte Signieren der Transaktion erreicht.
@@ -81,13 +81,13 @@ Was als Konsequenz vorraussetzt, dass:

ri1 + ri2 = ro3

Dies ist der erste Grundpfeiler von MimbleWimble: die für die Validierung einer Transaktion erforderliche Arithmetik kann durchgeführt werden, ohne Kenntnis über die Werte zu haben.
Dies ist der erste Grundpfeiler von Mimblewimble: die für die Validierung einer Transaktion erforderliche Arithmetik kann durchgeführt werden, ohne Kenntnis über die Werte zu haben.

Zum Schluss sei erwähnt, dass diese Idee von Greg Maxwells [Confidential Transactions](https://elementsproject.org/features/confidential-transactions/investigation) abgeleitet wurde, und jene wiederum von Adam Backs Vorschlag für an Bitcoin angepasste homomorphe Werte.

#### Besitz

In den vorherigen Abschnitten haben wir private Schlüssel als Blinding Factor, um die Transaktionswerte zu verbergen, eingeführt. Die zweite Erkenntnis von MimbleWimble ist, dass private Schlüssel zum Einsatz kommen können, um den privaten Besitz des Wertes zu beweisen.
In den vorherigen Abschnitten haben wir private Schlüssel als Blinding Factor, um die Transaktionswerte zu verbergen, eingeführt. Die zweite Erkenntnis von Mimblewimble ist, dass private Schlüssel zum Einsatz kommen können, um den privaten Besitz des Wertes zu beweisen.

Alice schickt dir 3 Coins. Um diesen Betrag zu verbergen, wählst du 28 als Blinding Factor (es sei angemerkt, dass der Blinding Factor als privater Schlüssel in der Praxis eine sehr große Zahl darstellt). Irgendwo auf der Blockchain erscheint der folgende Output, der nur von dir ausgebbar sein sollte:

@@ -122,7 +122,7 @@ Diese Signatur, die jeder Transaktion zusammen mit weiteren Daten (wie Mininggeb

#### Einige Feinheiten

Dieser Abschnitt führt die Erstellung von Transaktionen weiter aus und erörtert wie Wechselgeld eingeführt wird, sowie ferner die Voraussetzung von Range Proofs, sodass alle Werte nachweislich nicht negativ sind. Keines der beiden ist für das Verständnis von MimbleWimble und Grin absolut von Nöten, falls du es also eilig hast, spring einfach gleich zur [Zusammenfassung](#zusammenfassung).
Dieser Abschnitt führt die Erstellung von Transaktionen weiter aus und erörtert wie Wechselgeld eingeführt wird, sowie ferner die Voraussetzung von Range Proofs, sodass alle Werte nachweislich nicht negativ sind. Keines der beiden ist für das Verständnis von Mimblewimble und Grin absolut von Nöten, falls du es also eilig hast, spring einfach gleich zur [Zusammenfassung](#zusammenfassung).

#### Wechselgeld

@@ -141,13 +141,13 @@ In allen obigen Berechnungen stützen wir uns darauf, dass die Transaktionswerte

Zum Beispiel könnten Transaktionen mit einem Input von 2 und Outputs von 5 und -3 erstellt werden, die trotzdem ausgeglichen sind, folgend der Definition in den vorherigen Abschnitten. Dies ist nicht einfach festzustellen, da sogar wenn _x_ Negativ ist, der korrespondierende Punkt `x.H` auf der Kurve so aussieht wie jeder andere.

Um dieses Problem zu lösen, setzt MimbleWimble ein anderes kryptographisches Konzept (ebenso stammend von Confidential Transactions) namens Range Proofs ein. Wir werden Range Proofs nicht ausführlich behandeln, du solltest nur wissen, dass wir für jedes `r.G + v.H` einen Beweis erstellen können, der zeigt, dass _v_ größer als Null ist und nicht zu Overflow führt.
Um dieses Problem zu lösen, setzt Mimblewimble ein anderes kryptographisches Konzept (ebenso stammend von Confidential Transactions) namens Range Proofs ein. Wir werden Range Proofs nicht ausführlich behandeln, du solltest nur wissen, dass wir für jedes `r.G + v.H` einen Beweis erstellen können, der zeigt, dass _v_ größer als Null ist und nicht zu Overflow führt.

Es ist auch wichtig anzumerken, dass um einen gültigen Range Proof der obigen Beispiele zu erstellen, die beiden Werte 113 und 28, die für die Erstellung und Signierung des Wertüberschusses genutzt werden, bekannt sein müssen. Der Grund dafür, sowie eine genauere Beschreibung von Range Proofs, wird im [Range Proof Paper](https://eprint.iacr.org/2017/1066.pdf) behandelt.

#### Zusammenfassung

Eine MimbleWimble-Transaktion beinhaltet wie folgt:
Eine Mimblewimble-Transaktion beinhaltet wie folgt:

* Eine Reihe von Inputs, die referenzieren, sowie eine Reihe an vorherigen Outputs ausgeben.
* Eine Reihe an neuen Outputs, die Folgendes umfassen:
@@ -158,9 +158,9 @@ Es ist auch wichtig anzumerken, dass um einen gültigen Range Proof der obigen B

### Blocks und Chainstate

Wir haben oben beschrieben, wie MimbleWimble-Transaktionen starke Anonymität gewährleisten können, während die Eigenschaften für eine gültigen Blockchain beibehalten werden, das heißt, dass eine Transaktion kein Geld erstellt und der Eigentumsnachweis über private Schlüssel erfolgt.
Wir haben oben beschrieben, wie Mimblewimble-Transaktionen starke Anonymität gewährleisten können, während die Eigenschaften für eine gültige Blockchain beibehalten werden, das heißt, dass eine Transaktion kein Geld erstellt und der Eigentumsnachweis über private Schlüssel erfolgt.

Das MimbleWimble-Blockformat baut darauf auf, indem es ein weiteres Konzept einführt: _cut-through_. Mit dieser Erweiterung erlangt eine MimbleWimble-Blockchain:
Das Mimblewimble-Blockformat baut darauf auf, indem es ein weiteres Konzept einführt: _cut-through_. Mit dieser Erweiterung erlangt eine Mimblewimble-Blockchain:

* äußerst gute Skalierbarkeit, da die große Mehrzahl der Transaktionsdaten über Zeit entfernt werden können, ohne dabei Sicherheit zu beeinträchtigen.
* Weitergehende Anonymität durch das Vermischen und Löschen von Transaktionsdaten.
@@ -190,11 +190,11 @@ Das gleiche gilt auch für Blocks, sobald wir realisieren, dass ein Block ledigl

sum(outputs) - sum(inputs) = sum(kernel_excess)

Leicht vereinfacht (weiterhin Transaktionsgebühren ignorierend) können wir sagen, dass MimbleWimble-Blocks genau wie MimbleWimble-Transaktionen behandelt werden können.
Leicht vereinfacht (weiterhin Transaktionsgebühren ignorierend) können wir sagen, dass Mimblewimble-Blocks genau wie Mimblewimble-Transaktionen behandelt werden können.

##### Kernel-Offsets

In den wie oben beschriebenen MimbleWimble-Blocks und Transaktionen gibt es ein subtiles Problem. Es ist möglich (und in manchen Fällen trivial) die konstituierende Transaktion in einem Block zu rekonstruieren. Dies ist eindeutig schlecht für die Privatsphäre. Es handelt sich um ein "subset"-Problem - bei einer gegebenen Reihe an Inputs, Outputs, und Transaktionskerneln, wird ein Subset dieser Reihe eine gültige Transaktion rekombinieren.
In den wie oben beschriebenen Mimblewimble-Blocks und Transaktionen gibt es ein subtiles Problem. Es ist möglich (und in manchen Fällen trivial) die konstituierende Transaktion in einem Block zu rekonstruieren. Dies ist eindeutig schlecht für die Privatsphäre. Es handelt sich um ein "subset"-Problem - bei einer gegebenen Reihe an Inputs, Outputs, und Transaktionskerneln, wird ein Subset dieser Reihe eine gültige Transaktion rekombinieren.

Beispielsweise seien die folgenden beiden Transaktionen gegeben -

@@ -260,7 +260,7 @@ Ein Block ist einfach ausgebaut aus:
* Die Signaturen die durch die excess value generiert werden.
* Die Mininggebühr.

Sofern so strukturiert, bietet ein MimbleWimble-Block äußerst gute Garantie der Vertraulichkeit:
Sofern so strukturiert, bietet ein Mimblewimble-Block äußerst gute Garantie der Vertraulichkeit:

* Intermediäre (cut-through) Transaktionen werden nur von ihren Transaktionskerneln repräsentiert.
* Alle Outputs sehen gleich aus: nur sehr große Zahlen, die unmöglich voneinander differenzierbar sind. Um einige Outputs auszuschließen, müssten alle ausgeschlossen werden.
@@ -275,16 +275,16 @@ Bezug nehmend auf den vorherigen Beispielblock, müssen die Outputs x1 und x2, a
Verallgemeinernd können wir schlussfolgern, dass der Chainstate (ausgenommen Header) zu jedem Zeitpunkt durch lediglich die folgenden Informationsstücke zusammengefasst werden kann:

1. Die Gesamtanzahl an Coins, die durch Mining in der Chain erstellt wurden.
2. Das komplette Set nicht verwendeter Outputs.
3. Die Transaktionskernel für jede Transaktion.
1. Das komplette Set nicht verwendeter Outputs.
1. Die Transaktionskernel für jede Transaktion.

Das erste Informationsstück kann nur mittels der Blockhöhe (seiner Distanz zum Genesisblock), abgeleitet werden. Beide nicht verwendeten Outputs und die Transaktionskernel sind höchst kompakt. Dies hat 2 wichtige Konsequenzen:

* Der Zustand, den eine gegebene Node in einer MimbleWimble-Blockchain aufrechterhalten muss, ist sehr klein (von etwa einigen Gigabytes für eine Blockchain in der Größe von Bitcoin, und potentiall optimierbar auf wenige hundert Megabytes).
* Wenn eine neue Node einem neuen Netzwerk beitritt, dass eine MimbleWimble-Chain aufbaut, ist die Menge an Informationen, die transferiert werden müssen, ebenfalls sehr klein.
* Der Zustand, den eine gegebene Node in einer Mimblewimble-Blockchain aufrechterhalten muss, ist sehr klein (von etwa einigen Gigabytes für eine Blockchain in der Größe von Bitcoin, und potenziell optimierbar auf wenige hundert Megabytes).
* Wenn eine neue Node einem neuen Netzwerk beitritt, das eine Mimblewimble-Chain aufbaut, ist die Menge an Informationen, die transferiert werden müssen, ebenfalls sehr klein.

Darüber hinaus kann das vollständige Set an nicht verwendeten Outputs nicht manipuliert werden, selbst nicht durch das Hinzufügen oder Entfernen eines Outputs. Würde dies getan werden, führe es dazu, dass die Summierung aller Blinding Factors in den Transaktionskerneln von der Summierung der Blinding Factors in den Outputs abweichen würde.

### Fazit

In diesem Dokument haben wir die grundlegenden Prinzipien abgedeckt, die einer MimbleWimble-Blockchain unterliegen. Durch die Nutzung von Additionseigenschaften der Elliptischen-Kurven-Kryptografie können wir Transaktionen erstellen, die völlig undurchsichtig sind, aber dennoch korrekt validiert werden können. Durch die Verallgemeinerung dieser Eigenschaften auf Blocks, können wir große Mengen an Blockchaindaten entfernen, was hohe Skalierbarkeit und schnelle Synchronisierung neuer Peers erlaubt.
In diesem Dokument haben wir die grundlegenden Prinzipien abgedeckt, die einer Mimblewimble-Blockchain unterliegen. Durch die Nutzung von Additionseigenschaften der Elliptischen-Kurven-Kryptografie können wir Transaktionen erstellen, die völlig undurchsichtig sind, aber dennoch korrekt validiert werden können. Durch die Verallgemeinerung dieser Eigenschaften auf Blocks, können wir große Mengen an Blockchaindaten entfernen, was hohe Skalierbarkeit und schnelle Synchronisierung neuer Peers erlaubt.
@@ -1,12 +1,12 @@
# Introducción a MimbleWimble y Grin
# Introducción a Mimblewimble y Grin

*Lea esto en otros idiomas: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*Lea esto en otros idiomas: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md).*

MimbleWimble es un formato y un protocolo de cadena de bloques que proporciona una escalabilidad, privacidad y funcionalidad
Mimblewimble es un formato y un protocolo de cadena de bloques que proporciona una escalabilidad, privacidad y fungibilidad
extremadamente buenas al basarse en fuertes algoritmos criptográficos. Aborda los vacíos existentes en casi todas las
implementaciones actuales de cadenas de bloques.

Grin es un proyecto de software de código abierto que implementa una cadena de bloques MimbleWimble y rellena los espacios
Grin es un proyecto de software de código abierto que implementa una cadena de bloques Mimblewimble y rellena los espacios
necesarios para una implementación completa de la cadena de bloques y moneda criptográfica.

El objetivo principal y las características del proyecto Grin son:
@@ -15,7 +15,7 @@ El objetivo principal y las características del proyecto Grin son:
selectiva cuando sea necesario.
* Se escala principalmente con el número de usuarios y mínimamente con el número de transacciones (`<100 bytes kernel`), lo
que resulta en un gran ahorro de espacio en comparación con otras cadenas de bloques.
* Criptografía robusta y probada. MimbleWimble sólo se basa en la criptografía de curvas elípticas que ha sido probada y
* Criptografía robusta y probada. Mimblewimble sólo se basa en la criptografía de curvas elípticas que ha sido probada y
comprobada durante décadas.
* Simplicidad de diseño que facilita la auditoría y el mantenimiento a lo largo del tiempo.
* Dirigido por la comunidad, utilizando un algoritmo de minería resistente a la ASICs (Cuckoo Cycle) que fomenta la
@@ -24,19 +24,19 @@ El objetivo principal y las características del proyecto Grin son:
## Tongue Tying para todos

Este documento está dirigido a lectores con un buen conocimiento de cadenas de bloques y de la criptografía básica. Con
esto en mente, tratamos de explicar el desarrollo técnico de MimbleWimble y cómo se aplica en Grin. Esperamos que este
esto en mente, tratamos de explicar el desarrollo técnico de Mimblewimble y cómo se aplica en Grin. Esperamos que este
documento sea comprensible para la mayoría de los lectores con visión técnica. Nuestro objetivo es animarles a interesarse en
Grin y contribuir de cualquier manera posible.

Para lograr este objetivo, presentaremos los principales conceptos necesarios para una buena comprensión de Grin como
implementación de MimbleWimble. Comenzaremos con una breve descripción de algunas propiedades relevantes de la Criptografía
implementación de Mimblewimble. Comenzaremos con una breve descripción de algunas propiedades relevantes de la Criptografía
de Curva Elíptica (ECC) para sentar las bases sobre las que se basa Grin y luego describir todos los elementos clave de las
transacciones y bloques de una cadena de bloques MimbleWimble.
transacciones y bloques de una cadena de bloques Mimblewimble.

### Pequeños Bits de Curvas Elípticas

Comenzamos con una breve introducción a la Criptografía de Curva Elíptica, revisando sólo las propiedades necesarias para
entender cómo funciona MimbleWimbleWimble y sin profundizar demasiado en las complejidades de ECC. Para los lectores que
entender cómo funciona Mimblewimble y sin profundizar demasiado en las complejidades de ECC. Para los lectores que
deseen profundizar en estos supuestos, existen otras opciones para [aprender más](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).

Una curva elíptica con el objetivo de criptografía es simplemente un gran conjunto de puntos que llamaremos _C_. Estos puntos
@@ -54,14 +54,14 @@ la multiplicación es trivial, la "división" por puntos de curva es extremadame
La fórmula anterior `(k+j)*H = k*H + j*H`, con _k_ y _j_ ambas claves privadas, demuestra que una clave pública obtenida de
la adición de dos claves privadas (`(k+j)*H`) es idéntica a la adición de las claves públicas para cada una de esas dos
claves privadas (`k*H + j*H`). En la cadena de bloques Bitcoin, las carteras jerárquicas deterministas se basan en gran
medida en este principio. MimbleWimble y la implementación de Grin también lo hacen.
medida en este principio. Mimblewimble y la implementación de Grin también lo hacen.

### Transacciones con MimbleWimble
### Transacciones con Mimblewimble

La estructura de las transacciones demuestra un principio crucial de MimbleWimble:
La estructura de las transacciones demuestra un principio crucial de Mimblewimble:
fuertes garantías de privacidad y confidencialidad.

La validación de las transacciones de MimbleWimbleWimble se basa en dos propiedades básicas:
La validación de las transacciones de Mimblewimble se basa en dos propiedades básicas:

* **Verificación de importes nulos.** La suma de las salidas menos las entradas siempre es igual a cero, lo que demuestra que
la transacción no creó nuevos fondos, _sin revelar los importes reales_.
@@ -118,7 +118,7 @@ Lo cual, requiere como consecuencia que:

ri1 + ri2 = ro3

Este es el primer pilar de MimbleWimble: la aritmética necesaria para validar una transacción se puede hacer sin conocer
Este es el primer pilar de Mimblewimble: la aritmética necesaria para validar una transacción se puede hacer sin conocer
ninguno de los valores.

Como nota final, esta idea se deriva en realidad de Greg Maxwell's
@@ -128,7 +128,7 @@ que a su vez se deriva de una propuesta de Adam Back para valores homomórficos
#### Propiedad

En la sección anterior introducimos una clave privada como factor de ocultación para cubrir los valores de la transacción. La
segunda idea de MimbleWimble es que esta clave privada puede ser utilizada para probar la propiedad del valor.
segunda idea de Mimblewimble es que esta clave privada puede ser utilizada para probar la propiedad del valor.

Alice te envía 3 monedas y para ocultar esa cantidad, elegiste 28 como tu factor de ocultación (nota que en la práctica,
siendo el factor de ocultación una llave privada, es un número extremadamente grande). En algún lugar de la cadena de
@@ -177,7 +177,7 @@ denomina _transacción de kernel_ y es comprobada por todos los validadores.

#### Algunos puntos más precisos

Esta sección explica con más detalle la creación de transacciones discutiendo cómo se introduce el cambio y el requisito de pruebas de rango para que se demuestre que todos los valores no son negativos. Ninguno de los dos es absolutamente necesario para entender MimbleWimbleWimble y Grin, así que si tienes prisa, no dudes en ir directamente a
Esta sección explica con más detalle la creación de transacciones discutiendo cómo se introduce el cambio y el requisito de pruebas de rango para que se demuestre que todos los valores no son negativos. Ninguno de los dos es absolutamente necesario para entender Mimblewimble y Grin, así que si tienes prisa, no dudes en ir directamente a
[Poniendo todo junto](https://github.com/wimel/grin/blob/master/doc/intro.md#putting-it-all-together).

##### Cambio
@@ -203,7 +203,7 @@ Por ejemplo, se podría crear una transacción con una entrada de 2 y salidas de
equilibrada, siguiendo la definición de las secciones anteriores. Esto no puede ser fácilmente detectado porque incluso si
_x_ es negativo, el punto correspondiente `x.H` en la curva se ve como cualquier otro.

Para resolver este problema, MimbleWimble utiliza otro concepto criptográfico (también procedente de Transacciones
Para resolver este problema, Mimblewimble utiliza otro concepto criptográfico (también procedente de Transacciones
Confidenciales) llamado pruebas de rango: una prueba de que un número está dentro de un rango dado, sin revelar el número. No
daremos más detalles sobre la prueba de rango, pero sólo necesitas saber que para cualquier `r.G + v.H` podemos construir una
prueba que demuestre que _v_ es mayor que cero y no se sobrecarga.
@@ -214,7 +214,7 @@ descripción más detallada de las pruebas de rango, se detallan en la sección

#### Poniendo todo junto

Una transacción MimbleWimble incluye lo siguiente:
Una transacción Mimblewimble incluye lo siguiente:

* Un conjunto de entradas, que hacen referencia y consumen un conjunto de salidas anteriores.
* Un conjunto de nuevos resultados que incluyen:
@@ -227,11 +227,11 @@ Una transacción MimbleWimble incluye lo siguiente:

### Bloques y estado de la cadena

Hemos explicado anteriormente cómo las transacciones de MimbleWimble pueden proporcionar fuertes garantías de anonimato a la
Hemos explicado anteriormente cómo las transacciones de Mimblewimble pueden proporcionar fuertes garantías de anonimato a la
vez que mantienen las propiedades requeridas para una cadena de bloques válida, es decir, una transacción no crea dinero y la
prueba de la propiedad se establece a través de claves privadas.

El formato de bloques MimbleWimble se basa en esto introduciendo un concepto adicional: _cut-through_. Con esta incorporación, una cadena MimbleWimble gana:
El formato de bloques Mimblewimble se basa en esto introduciendo un concepto adicional: _cut-through_. Con esta incorporación, una cadena Mimblewimble gana:

* Extremadamente buena escalabilidad, ya que la gran mayoría de los datos de las transacciones pueden ser eliminados con el
tiempo, sin comprometer la seguridad.
@@ -264,12 +264,12 @@ compromiso resultante de Pedersen con la suma de los excesos del núcleo. -

sum(outputs) - sum(inputs) = sum(kernel_excess)

Simplificando un poco, (de nuevo ignorando las tarifas de transacción) podemos decir que los bloques MimbleWimble pueden ser
tratados exactamente como transacciones MimbleWimble.
Simplificando un poco, (de nuevo ignorando las tarifas de transacción) podemos decir que los bloques Mimblewimble pueden ser
tratados exactamente como transacciones Mimblewimble.

#### Kernel Offsets

Hay un problema leve con los bloques y las transacciones de MimbleWimble como se describe anteriormente. Es posible (y en
Hay un problema leve con los bloques y las transacciones de Mimblewimble como se describe anteriormente. Es posible (y en
algunos casos trivial) reconstruir las transacciones constituyentes en un bloque. Esto es claramente malo para la privacidad.
Este es el problema del "subconjunto" - dado un conjunto de entradas, salidas y núcleos de transacción, un subconjunto de
estos se recombinará para reconstruir una transacción válida.
@@ -354,7 +354,7 @@ Un bloque se contruye simplemente a partir de:
* Las firmas creadas utilizando el exceso de valor.
* La tasa minera.

Con esta estructura, un bloque MimbleWimble ofrece unas garantías de privacidad muy buenas:
Con esta estructura, un bloque Mimblewimble ofrece unas garantías de privacidad muy buenas:

* Las transacciones intermedias (cut-through) estarán representadas únicamente por sus núcleos de transacciones.
* Todas las salidas se ven iguales: sólo números muy grandes que son imposibles de diferenciar entre sí. Si uno quisiera
@@ -374,16 +374,16 @@ Concluimos que, generalizando, el estado de la cadena (excluyendo las cabeceras)
por estas piezas de información:

1. La cantidad total de monedas creadas por la minería en la cadena.
2. El conjunto completo de resultados no utilizados.
3. Los núcleos de transacciones para cada transacción.
1. El conjunto completo de resultados no utilizados.
1. Los núcleos de transacciones para cada transacción.

La primera información se puede deducir simplemente usando la altura del bloque (su distancia del bloque génesis). Y tanto
las salidas no utilizadas como los núcleos de transacción son extremadamente compactos. Esto tiene dos consecuencias
importantes:

* El estado que un nodo dado en una cadena de bloques MimbleWimble necesita mantener es muy pequeño (del orden de unos pocos
* El estado que un nodo dado en una cadena de bloques Mimblewimble necesita mantener es muy pequeño (del orden de unos pocos
gigabytes para una cadena del tamaño de Bitcoin, y potencialmente configurable a unos pocos centenares de megabytes).
* Cuando un nuevo nodo se une a una red formando una cadena MimbleWimble, la cantidad de información que necesita ser
* Cuando un nuevo nodo se une a una red formando una cadena Mimblewimble, la cantidad de información que necesita ser
transferida es también muy pequeña.

Además, el conjunto completo de resultados no utilizados no puede ser alterado, ni siquiera añadiendo o quitando un
@@ -392,7 +392,7 @@ suma de la ocultación, factores que influyen en los resultados.

### Conclusión

En este documento tratamos los principios básicos que subyacen a una cadena de bloques MimbleWimble. Utilizando las
En este documento tratamos los principios básicos que subyacen a una cadena de bloques Mimblewimble. Utilizando las
propiedades de suma de la Criptografía de Curva Elíptica, somos capaces de construir transacciones que son completamente
opacas pero que todavía pueden ser validadas adecuadamente. Y al generalizar esas propiedades a bloques, podemos eliminar una
gran cantidad de datos de la cadena de bloques, lo que permite una gran escalabilidad y una sincronización rápida de nuevos
@@ -1,9 +1,9 @@
# MimbleWimble と Grin 概論
# Mimblewimble と Grin 概論

*この文章を他の言語で読む: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*この文章を他の言語で読む: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md).*

MimbleWimble は、極めてよいスケーラビリティ、プライバシー、そして代替可能性(fungibility)の解決法を提供
するブロックチェーンのフォーマット・プロトコルである。MimbleWimble は、ほとんどすべてのブロックチェーンの
Mimblewimble は、極めてよいスケーラビリティ、プライバシー、そして代替可能性(fungibility)の解決法を提供
するブロックチェーンのフォーマット・プロトコルである。Mimblewimble は、ほとんどすべてのブロックチェーンの
実装に存在する課題を解決する。

Grin は、その Mimblewimble ブロックチェーンを実装する OSS プロジェクトであり、完全なブロックチェーン、
@@ -14,7 +14,7 @@ Grin プロジェクトの主なゴールと特徴は以下:
* デフォルトでプライバシーが保たれていること。これにより、選択的に必要な情報を開示する能力を排除することなく、完全な代替可能性を実現できる。
* ユーザー数を最大限に、トランザクション数を最小限にスケールさせることができるため、 [TODO: enhance translation]
他のブロックチェーンに比べて大きなスペース節約が可能である。
* 強固で証明された暗号技術。 MimbleWimble は、数十年にも渡る攻撃やテストに耐えてきた楕円曲線暗号に
* 強固で証明された暗号技術。 Mimblewimble は、数十年にも渡る攻撃やテストに耐えてきた楕円曲線暗号に
のみ依存する。
* デザインがシンプルなため、長年に渡って監査・メンテナンスが容易である。
* コミュニティドリブンであり、ASIC耐性のあるマイニングアルゴリズム(Cuckoo Cycle)
@@ -23,19 +23,19 @@ Grin プロジェクトの主なゴールと特徴は以下:
## Tongue Tying for Everyone

本ドキュメントは、ブロックチェーンと基礎的な暗号学の理解を前提としている。
それに留意しながら、我々は MimbleWimble の技術的なビルドアップと、それがどのように Grin に
それに留意しながら、我々は Mimblewimble の技術的なビルドアップと、それがどのように Grin に
応用されているかの説明を試みる。本ドキュメントが多くの技術に慣れ親しんだ読者にとって理解しやすい
ものであることを願う。我々の目的は、読者に Grin について興味をもってもらい、可能な方法で Grin に
貢献していただくことである。

この目的を達成するため、MimbleWimble の実装としての Grin を理解するための主要なコンセプトを
この目的を達成するため、Mimblewimble の実装としての Grin を理解するための主要なコンセプトを
いくつか紹介する。まずはじめに、Grin を基礎づける楕円曲線暗号(ECC)のいくつかの関係する性質についての簡単な説明を
し、その後、MimbleWimble ブロックチェーンのトランザクションとブロクについてのキーとなる要素について説明する。
し、その後、Mimblewimble ブロックチェーンのトランザクションとブロクについてのキーとなる要素について説明する。

### Tiny Bits of Elliptic Curves

まずは楕円曲線暗号の概要から見ていく。ECCの複雑な部分を掘り下げすぎないように、MimbleWimble の仕組みを理解する
のに必要な分のみ。より詳細な掘り下げた部分を知りたい読者は、[こちらのリンク](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/)が助けになるだろう。
まずは楕円曲線暗号の概要から見ていく。Mimblewimble の仕組みを理解するのに必要な分のみ検討し、ECCの複雑な部分を掘り下げすぎないようにする。
より詳細な掘り下げた部分を知りたい読者は、[こちらのリンク](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/)が助けになるだろう。

楕円曲線を暗号にもちいる目的は、単純に大きな点の集合のためである。これを _C_ と呼ぶことにする。
この集合に属する点は、足し算、引き算、またスカラー倍の演算をすることが可能です。 _k_ を整数としたとき、
@@ -51,14 +51,14 @@ _j_ をもう一つの整数としたとき、`(k+j)*H` を計算することも

上の式 `(k+j)*H = k*H + j*H` を見ると、 _k_, _j_ を共に秘密鍵とした時、秘密鍵同士の和から
得られる公開鍵は、それぞれの秘密鍵から得られる公開鍵の和と等しいことが分かる。Bitcoin では、
HDウォレットがこの原理を大いに利用している。そして MimbleWimble と Grin もこの性質を利用する。
HDウォレットがこの原理を大いに利用している。そして Mimblewimble と Grin もこの性質を利用する。

### Transacting with MimbleWimble
### Transacting with Mimblewimble

トランザクションの構造には MimbleWimble の核となる信条を見ることができる:
トランザクションの構造には Mimblewimble の核となる信条を見ることができる:
強固なプライバシーと機密性の保証だ。

MimbleWimble トランザクションのバリデーションは以下の2つの性質に依る:
Mimblewimble トランザクションのバリデーションは以下の2つの性質に依る:

* **ゼロサムの検証** outputs に -inputs を足した値は常にゼロであること。
これはトランザクションが新たなコインを作っていないことを証明する。 _実際の送金額を露呈することなしに_。
@@ -83,7 +83,7 @@ _v_ を トランザクション input または output の value とし、_H_
実際の value を知ることなく検証できる。しかし、使用できる value の数は有限であり、それらをすべて試すことでトランザクションの
value を推測することは可能である。加えて、 v1 とそれから導出できる `v*H` を知っているということは、ブロックチェーン上のすべての
value が v1 の output からわかってしまうということを意味する。これらの理由から、2つ目の楕円曲線 _G_ (実際には、 _H_ と同じ
曲線上の他の生成元である)と秘密鍵 _r_ を *目くらまし因子(blinding factor)*として導入することにする。
曲線上の他の生成元である)と秘密鍵 _r_ を *目くらまし因子(blinding factor)* として導入することにする。

今、トランザクションの input/output value は次のように表すことができる:

@@ -115,7 +115,7 @@ value が v1 の output からわかってしまうということを意味す

ri1 + ri2 = ro3

これが MimbleWimble の第一の柱である。すなわち、トランザクションを検証するための演算は
これが Mimblewimble の第一の柱である。すなわち、トランザクションを検証するための演算は
value を知ることなくして実行可能であるということだ。

最後に、本アイディアは実際には Greg Maxwell の [Confidential Transactions](https://elementsproject.org/features/confidential-transactions/investigation), から派生したものである。
@@ -124,7 +124,7 @@ value を知ることなくして実行可能であるということだ。
#### 所有権

前の章において、秘密鍵を blinding factor として導入することでトランザクションの value を
秘匿化した。MimbleWimble の第二のポイントは、この秘密鍵を活用して value の所有権を証明できる
秘匿化した。Mimblewimble の第二のポイントは、この秘密鍵を活用して value の所有権を証明できる
ということです。

Alice があなたに 3 coin を送金し、その額を秘匿化するために、あなたは 28 を blinding factor
@@ -180,7 +180,7 @@ _transaction kernel_ と呼ばれ、バリデーターにチェックされる

この章では、お釣りがどのように導入されるか、またすべての値が非負であることの証明となる range proof
の要件について議論することで、トランザクションの構成についての詳細を詳しく述べる。これらの両方とも、
MimbleWimble と Grin を理解するために必ず必要というわけではない。したがって、もし急いでいるのならば
Mimblewimble と Grin を理解するために必ず必要というわけではない。したがって、もし急いでいるのならば
[まとめ](#まとめ) まで読み飛ばしてもらって構わない。

##### お釣り
@@ -209,7 +209,7 @@ blinding factor として生成しお釣りの output を保護する。Carol
もし _x_ が負だとしても対応する `x*H` を他と見分けることはできないので、
これを容易に検知することはできない。

この問題を解決するため、MimbleWimble は(これも Confidential Transaction に由来しているが)
この問題を解決するため、Mimblewimble は(これも Confidential Transaction に由来しているが)
range proofs と呼ばれるさらに他の暗号学的概念を利用する。 range proofs とは、その数字を知ること無く
その数字が特定の範囲の大きさであることを証明するものである。ここでは range proof について詳細には
記さないが、任意の `r*G + v*H` について _v_ がゼロ以上であること、さらにオーバーフローしないこと
@@ -221,7 +221,7 @@ blinding factor として生成しお釣りの output を保護する。Carol

#### まとめ

MimbeWimbe のトランザクションは以下の要素を持つ:
Mimblewimble のトランザクションは以下の要素を持つ:

* inputs の組。1つ前の outputs を参照・使用する。
* 以下を含む新しい outputs の組:
@@ -234,13 +234,13 @@ MimbeWimbe のトランザクションは以下の要素を持つ:

### Block と Chain State

これまでに、MimbleWimble のトランザクションが、有効なブロックチェーンに
これまでに、Mimblewimble のトランザクションが、有効なブロックチェーンに
必要な性質を満たす一方で、強い匿名性を提供する方法を説明しました(
i.e. トランザクションは新たなお金を作らない、所有権の証明は秘密鍵によって
行われる)。

MimbleWimble のブロックフォーマットはこれに新たなコンセプトを1つ追加することで導入
される: _cut-through_ である。これを追加することによって、MimbleWimble chain は
Mimblewimble のブロックフォーマットはこれに新たなコンセプトを1つ追加することで導入
される: _cut-through_ である。これを追加することによって、Mimblewimble chain は
以下の特徴を得る:

* 極めて良いスケーラビリティ。これは、セキュリティを犠牲にすることなく大量のトランザクションデータ
@@ -274,11 +274,11 @@ tx outputs の和から tx inputs の和を引いて、その結果である Ped

sum(outputs) - sum(inputs) = sum(kernel_excess)

少し単純化して言うならば(ここでも手数料は無視することにする)、MimbleWimble のブロックはトランザクションと同様に扱うことが可能である・
少し単純化して言うならば(ここでも手数料は無視することにする)、Mimblewimble のブロックはトランザクションと同様に扱うことが可能である・

##### Kernel Offsets

上で説明した MimbleWimble のブロックとトランザクションには、小さな問題がある。ブロックの中の構成要素から、トランザクションを再構成することが可能(ときには容易)であるということだ。これは明らかにプライバシーの観点から良くないことだ。これは、「部分集合」の問題である - inputs, outputs, transaction kernels の集合が与えられた時、これらの部分集合によって有効なトランザクションが再構築できてしまう。
上で説明した Mimblewimble のブロックとトランザクションには、小さな問題がある。ブロックの中の構成要素から、トランザクションを再構成することが可能(ときには容易)であるということだ。これは明らかにプライバシーの観点から良くないことだ。これは、「部分集合」の問題である - inputs, outputs, transaction kernels の集合が与えられた時、これらの部分集合によって有効なトランザクションが再構築できてしまう。

例えば、次の2つのトランザクションがあるとする -

@@ -301,7 +301,7 @@ tx outputs の和から tx inputs の和を引いて、その結果である Ped

sum(outputs) - sum(inputs) = kernel_excess + kernel_offset

トランザクションをブロックに集約する時、ブロックヘッダに_一つだけ_ aggregate offset を保存する。これにより、一つ一つの transaction kernel offsets に分解できない一つの offset を作ることができた。そしてトランザクションは再構成できない -
トランザクションをブロックに集約する時、ブロックヘッダに _一つだけ_ aggregate offset を保存する。これにより、一つ一つの transaction kernel offsets に分解できない一つの offset を作ることができた。そしてトランザクションは再構成できない -

sum(outputs) - sum(inputs) = sum(kernel_excess) + kernel_offset

@@ -353,7 +353,7 @@ inputs はそれらが使用する outputs への参照である。以前のブ
* excess value によって生成された署名
* マイニング手数料

このように構成された時、MimbleWimble ブロックは極めて良いプライバシーを提供する:
このように構成された時、Mimblewimble ブロックは極めて良いプライバシーを提供する:

* transaction kernel のみによって中間(cut-through)トランザクションが表現される
* すべての outputs が同じに見える:互いに区別することのできないとても大きな数だけである。
@@ -372,16 +372,16 @@ inputs はそれらが使用する outputs への参照である。以前のブ
一般化すると、任意の時点において(ヘッダーを除いて)チェーンの状態は以下の情報によって要約される:

1. マイニングによって作られたコインの総額
2. utxo の完全な集合
3. それぞれのトランザクションに対する transaction kernel
1. utxo の完全な集合
1. それぞれのトランザクションに対する transaction kernel

はじめの情報はブロック高(ジェネシスブロックからの距離)から推定することができる。
そして、utxo と transaction kernel は共に極めてコンパクトだ。これは2つの重要な結果を持っている:

* MimbleWimble ブロックチェーンのノードが保持する必要のある状態はとても少量である(
* Mimblewimble ブロックチェーンのノードが保持する必要のある状態はとても少量である(
bitcoinサイズのブロックチェーンでは数ギガバイトのオーダーなのが、数百メガバイトにまで最適化
され得る)。
* MimbleWimble チェーンのネットワークに新しいノードがジョインする時、送信する必要のある
* Mimblewimble チェーンのネットワークに新しいノードがジョインする時、送信する必要のある
情報の量もまたとても少量である。

さらに、output を足したり除いたりするだけでも、 utxo の完全な集合は改ざんできない。そうすると
@@ -390,7 +390,7 @@ transaction kernel 内の blinding factor の和と outputs 内の blinding fact

### 結論

このドキュメントでは、MimbleWimble ブロックチェーン根底にある基本的な原理を
説明した。楕円曲線暗号の加算の性質を用いることで、完全に不透明な、しかし validate
このドキュメントでは、Mimblewimble ブロックチェーンの根底にある基本的な原理を
説明した。楕円曲線暗号の加算の性質を用いることで、完全に不透明な、しかしバリデート
することのできるトランザクションを構築することができた。そしてこの性質をブロックに
一般化することによって、大量のブロックチェーンのデータを節約することができ、素晴らしいスケーリング、新しいピアの高速な同期が可能となった。
@@ -1,9 +1,9 @@
# MimbleWimble 과 Grin 에 대한 소개
# Mimblewimble 과 Grin 에 대한 소개

*다른 언어로 Intro를 읽으시려면: [English](intro.md), [简体中文](intro.zh-cn.md), [Español](intro_ES.md), [Русский](intro.ru.md), [日本語](intro.jp.md).*
*다른 언어로 되어있는 문서를 읽으려면: [English](intro.md), [Español](intro_ES.md), [Русский](intro.ru.md), [日本語](intro.jp.md), [简体中文](intro.zh-cn.md).*

Mimblewimble은 블록체인 포맷이면서 프로토콜 입니다.
MimbleWimble은 암호학적 기반에 의해서 극대화된 좋은 확장성, 프라이버시, 그리고 대체가능성을 제공합니다. 이러한 특성은 지금 현존하는 모든 블록체인 구현체에 존재하는 문제점들을 처리합니다.
Mimblewimble은 암호학적 기반에 의해서 극대화된 좋은 확장성, 프라이버시, 그리고 대체가능성을 제공합니다. 이러한 특성은 지금 현존하는 모든 블록체인 구현체에 존재하는 문제점들을 처리합니다.

Grin 은 Mimblewimble 블록체인을 구현한 오픈소스 프로젝트 입니다. 또한 완전한 블록체인과 크립토 커런시의 배포에 필요한 갭을 채워줍니다.
Grin 프로젝트의 주요 목적과 특성들은 아래 설명을 참고하십시오.
@@ -15,11 +15,11 @@ Grin 프로젝트의 주요 목적과 특성들은 아래 설명을 참고하십
* 커뮤니티가 주도하며, 채굴 탈중앙화가 권장됩니다.

## 모두의 혀를 묶자.
이 문서는 블록체인에 대해 어느정도 이해가 있고 암호학에 대한 기본적인 이해가 있는 독자들을 대상으로 합니다. 이것을 염두에 두고 우리는 MimbleWimble의 기술적인 발전과 어떻게 Grin에 적용되었는지 관해 설명 할 것입니다.
이 문서는 블록체인에 대해 어느정도 이해가 있고 암호학에 대한 기본적인 이해가 있는 독자들을 대상으로 합니다. 이것을 염두에 두고 우리는 Mimblewimble의 기술적인 발전과 어떻게 Grin에 적용되었는지 관해 설명 할 것입니다.
저희는 이 문서가 대부분의 기술적인 성격을 가진 독자들을 이해시킬 수 있길 바랍니다. 우리의 목적은 독자가 Grin에 대해 흥미를 느끼게 하고 어떤 방식으로든 Grin에 기여할 수 있게 이끄는 것입니다.
이러한 목적을 이루기 위해, 우리는 MimbleWimble 의 구현체인 Grin을 이해하는데 필요한 주요 컨셉들에 대해서 소개할것입니다.
이러한 목적을 이루기 위해, 우리는 Mimblewimble 의 구현체인 Grin을 이해하는데 필요한 주요 컨셉들에 대해서 소개할것입니다.

우선 Grin이 어디에서 부터 기초로 하고 있는지에 대해 이해하기 위해서 타원 곡선 암호 (ECC)의 몇몇 속성들에 대한 간단한 설명으로 시작하겠습니다. 그 다음, MimbleWimble 블록체인의 트랜잭션과 블록에 한 모든 요소들을 설명하겠습니다.
우선 Grin이 어디에서 부터 기초로 하고 있는지에 대해 이해하기 위해서 타원 곡선 암호 (ECC)의 몇몇 속성들에 대한 간단한 설명으로 시작하겠습니다. 그 다음, Mimblewimble 블록체인의 트랜잭션과 블록에 한 모든 요소들을 설명하겠습니다.

### 타원곡선에 대한 조그마한 조각들
ECC의 너무 복잡한 사항을 캐지 않고 Mimblewimble 이 어떻게 작동하는지에 대해 이해하는데 필요한 요소들만 리뷰할 것입니다. 이런 가정들을 좀 더 알고싶은 독자들은 [이 링크](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/)를 참고하세요.
@@ -36,12 +36,12 @@ ECC의 너무 복잡한 사항을 캐지 않고 어떻게 mimble wimble 이 어

ECC 안에서 우리가 매우 큰 숫자인 _k_ 를 프라이빗 키로 가정할 때 `k*H` 는 해당하는 퍼블릭 키로 해당되어 집니다. 누군가 공개키인 `k*H`의 값을 알더라도 _k_ 를 추론해 내는것은 불가능에 가깝습니다. ( 달리 얘기하자면, 곱셉은 쉬우나 곡선 좌표에 의한 "나눗셈"은 정말 어렵습니다. )

_k_ 와 _j_ 둘다 비밀키인 이전 공식 `(k+j)*H = k*H + j*H` 는 두개의 비밀키를 더해서 얻은 한 개의 공개키 (`(k+j)*H`) 와 각각 두개의 비밀키에 공개키를 더한것과 같습니다. Bitcoin blockchain에서도 HD 지갑은 이 원칙에 의존하고 있습니다. MimbleWimble 과 Grin의 구현또한 마찬가지 입니다.
_k_ 와 _j_ 둘다 비밀키인 이전 공식 `(k+j)*H = k*H + j*H` 는 두개의 비밀키를 더해서 얻은 한 개의 공개키 (`(k+j)*H`) 와 각각 두개의 비밀키에 공개키를 더한것과 같습니다. Bitcoin blockchain에서도 HD 지갑은 이 원칙에 의존하고 있습니다. Mimblewimble 과 Grin의 구현또한 마찬가지 입니다.

### MimbleWimble 함께 거래하기
트랜잭션의 구조는 MimbleWimble의 강력한 프라이버시와 비밀이 유지된다라고 하는 중요한 규칙을 나타냅니다.
### Mimblewimble 과 함께 거래하기
트랜잭션의 구조는 Mimblewimble의 강력한 프라이버시와 비밀이 유지된다라고 하는 중요한 규칙을 나타냅니다.

MimbleWimble 트랜잭션의 확인은 두가지 기본적인 성격을 전제로 합니다.
Mimblewimble 트랜잭션의 확인은 두가지 기본적인 성격을 전제로 합니다.

* **제로섬의 검증:** 결과값에서 입력값을 뺀 합은 항상 0과 같습니다. 이것은 실제 전송되는 코인의 양을 드러내지 않고도 트랜잭션이 새로운 코인을 만들지 않았다는 것을 증명합니다.
* **비밀키의 소유:** 다른 많은 크립토 커런시 들처럼 , 트랜잭션의 소유권은 ECC 비밀키에 의해 보장됩니다. 그러나 어떤 실체가 이런 비밀키들을 소유하고 있다고 증명하는것이 직접적으로 트랜잭션에 사인한다고해서 얻어지는 것은 아닙니다.
@@ -89,13 +89,13 @@ MimbleWimble 트랜잭션의 확인은 두가지 기본적인 성격을 전제

ri1 + ri2 = ro3

이것이 MimbleWimble의 첫번째 특징입니다. 트랜잭션을 검증하는 산술적인 연산은 아무런 값을 알지 못해도 가능합니다.
이것이 Mimblewimble의 첫번째 특징입니다. 트랜잭션을 검증하는 산술적인 연산은 아무런 값을 알지 못해도 가능합니다.

이 아이디어는 Greg Maxwell 의 [Confidential Transactions](https://elementsproject.org/features/confidential-transactions/investigation) 에서 유래했습니다. Confidential transaction은 Adam back의 비트코인에 동형암호를 적용하자는 제안에서 비롯되었습니다.

#### 소유권

이전의 섹션에서 트랜잭션의 값을 보기 어렵게 하는 Blinding factor로서 비밀키를 소개했습니다. MimbleWimble 의 두번째 통찰은 비밀키가 어떤 값의 소유권을 증명하는데 사용할 수 있다는 것입니다.
이전의 섹션에서 트랜잭션의 값을 보기 어렵게 하는 Blinding factor로서 비밀키를 소개했습니다. Mimblewimble 의 두번째 통찰은 비밀키가 어떤 값의 소유권을 증명하는데 사용할 수 있다는 것입니다.

Alice는 당신에게 3 코인을 보내면서 그 양을 가렸고, 당신은 28을 당신의 blinding factor로 선택했습니다. ( 실제로 blinding factor는 비밀키로 정말 무진장 큰 숫자 입니다.)

@@ -140,7 +140,7 @@ _X_ 는 덧셈의 결과이면서 모두에게 다 보여집니다. 3은 당신

#### 몇몇 더 좋은 점들

이 섹션은 트랜잭션을 만들때 잔돈이 어떻게 보여지고 범위 증명(range proofs)의 요구사항에 대해서 모든 값이 음수가 아닌지에 대해서 좀 더 자세하게 설명하려고 합니다. 이러한 개념들 역시 MimbleWimble 과 Grin 에 대한 이해가 당연히 필요합니다. 만약 당신이 조급하다면 [이 링크를 참고하세요.](#putting-it-all-together).
이 섹션은 트랜잭션을 만들때 잔돈이 어떻게 보여지고 범위 증명(range proofs)의 요구사항에 대해서 모든 값이 음수가 아닌지에 대해서 좀 더 자세하게 설명하려고 합니다. 이러한 개념들 역시 Mimblewimble 과 Grin 에 대한 이해가 당연히 필요합니다. 만약 당신이 조급하다면 [이 링크를 참고하세요.](#putting-it-all-together).

##### 잔돈에 대해서

@@ -159,7 +159,7 @@ _X_ 는 덧셈의 결과이면서 모두에게 다 보여집니다. 3은 당신

예를 들어 입력값이 3이고 출력값이 5와 -3인 트랜잭션을 만들수 있으며 이것은 이전 섹션의 정의에 따라 잘 구성된 트랜잭션입니다. 적절한 좌표 `x.H`가 다른 좌표처럼 곡선위에 있어서 _x_가 음수이더라도 찾기가 쉽지 않습니다.

이 문제점을 해결하기 위해서, MimbleWimble 은 Range proofs 라는 다른 암호학 개념을 사용합니다. ( 이 또한 Confidential Transaction 에서 유래했습니다.)
이 문제점을 해결하기 위해서, Mimblewimble 은 Range proofs 라는 다른 암호학 개념을 사용합니다. ( 이 또한 Confidential Transaction 에서 유래했습니다.)
Range proof 란 숫자를 밝히지 않고 어떤 숫자가 주어진 범위안에 있는지 증명하는 것입니다.
Range proof 에 대해서 자세히 설명하지 않을것이지만은, 그래도 어떤 `r.G + v.H` 의 결과가 _v_ 가 0보다 크고 오버플로우가 일어나지 않는다는 것을 증명할 수 있습니다. 또한 위의 예에서 유효한 Range proof 를 만들기 위해서 트랜잭션을 만들고 Signing 할때 사용된 초과값인 113과 28 두 값이 알려지는것은 중요합니다. 그 이유에 대해선 [range proof paper](https://eprint.iacr.org/2017/1066.pdf) 안에 Range proof에 대해 좀더 자세한 설명이 있습니다.

@@ -176,9 +176,9 @@ MimbleWimlbe 트랜잭션은 다음을 포함합니다.

### 블록들과 체인 state에 대해서

위에서 MimbleWimble 트랜잭션이 유요한 블록체인에 필요한 속성을 유지하면서 어떻게 강한 익명성을 보장하는지 설명했습니다.예를 들면 트랜잭션이 더이상 코인을 만들지 않으면서 비밀키를 통해 소유권을 증명하지 않는 방법들 같은것 말이죠.
위에서 Mimblewimble 트랜잭션이 유효한 블록체인에 필요한 속성을 유지하면서 어떻게 강한 익명성을 보장하는지 설명했습니다. 예를 들면 트랜잭션이 더이상 코인을 만들지 않으면서 비밀키를 통해 소유권을 증명하는 방법들 같은것 말이죠.

추가적으로 _cut-through_ 라는 개념이 MimbleWimble 블록 포멧에 사용 됩니다. 이로 인해 MimbleWimble 체인은 아래와 같은 장점을 얻습니다.
추가적으로 _cut-through_ 라는 개념이 Mimblewimble 블록 포멧에 사용 됩니다. 이로 인해 Mimblewimble 체인은 아래와 같은 장점을 얻습니다.

* 대부분의 트랜잭션 데이터는 보안을 희생하지 않고서도 시간이 지나면 없어 질 수 있으므로 엄청나게 좋은 확장성을 얻게 됩니다.
* 트랜잭션 데이터를 섞고 없애서 익명성을 추가로 획득합니다.
@@ -207,11 +207,11 @@ Signature 공개키로서 트랜잭션의 합이 0임을 증명하는 _kernel ex

출력값의 합 - 입력값의 합 = kernel_excess의 합

약간 단순화 시켜서 ( 트랜잭션 수수료를 무시하고) 우리는 MimbleWimbl block 이 MimbleWimble 트랜잭션들로 다뤄진다고 말 할 수 있습니다.
약간 단순화 시켜서 ( 트랜잭션 수수료를 무시하고) 우리는 Mimblewimble block 이 Mimblewimble 트랜잭션들로 다뤄진다고 말 할 수 있습니다.

##### Kernel 오프셋들

위에 설명했던겉 처럼 MimbleWimble 블록과 트랜잭션에 조그마한 문제가 있습니다. 그것은 블록에 있는 구성 트랜잭션을 재구성하는것이 가능합다는 겁니다.(그리고 어떤 사소한 경우에도요).
위에 설명했던 것 처럼 Mimblewimble 블록과 트랜잭션에 조그마한 문제가 있습니다. 그것은 블록에 있는 구성 트랜잭션을 재구성하는것이 가능하다는 겁니다.(그리고 어떤 사소한 경우에도요).
이것은 분명히 프라이버시에는 좋지 않습니다. 이걸 "subset" 문제 라고 합니다.
"Subset" 문제란 주어진 입력값들, 출력값들과 트랜잭션 kernel들의 Subset 들이 재조합되어서 유효한 트랜잭션을 다시 만든다는 것입니다.

@@ -302,17 +302,17 @@ Signature 공개키로서 트랜잭션의 합이 0임을 증명하는 _kernel ex


1. 체인안에서 채굴에 의해서 만들어진 코인의 총량
2. 쓰지 않은 출력값의 모든 세트
3. 각 트랜잭션의 트랜잭션 kernel
1. 쓰지 않은 출력값의 모든 세트
1. 각 트랜잭션의 트랜잭션 kernel

첫번째 정보는 Genesis 블록으로부터의 거리인 블록 높이를 가지고 유추 될 수 있습니다. 그리고 쓰지 않는 출력값과 트랜잭션 kernel은 매우 작습니다. 이것에는 아래와 같이 2가지 중요한 결과를 가지고 있습니다.

* MimbleWimble 블록체인에 있는 노드가 유지해야 되는 스테이트가 매우 작습니다.(비트코인 사이즈의 Blockchain의 경우 수 기가 바이트이고 잠재젹으로 수백 메가바이트까지 최적화 될 수 있습니다.)
* 새로운 노드가 MimbleWimble 체인에 가입히면, 전달해야 하는 정보의 양이 매우 적습니다.
* Mimblewimble 블록체인에 있는 노드가 유지해야 되는 스테이트가 매우 작습니다.(비트코인 사이즈의 Blockchain의 경우 수 기가 바이트이고 잠재적으로 수백 메가바이트까지 최적화 될 수 있습니다.)
* 새로운 노드가 Mimblewimble 체인에 가입하면, 전달해야 하는 정보의 양이 매우 적습니다.

덧붙여서 출력값을 더하거나 제거하더라도 쓰지 않는 출력값의 모든 세트를 조작할 순 없습니다. 그렇게 하면 트랜잭션 kernel 내의 모든 blinding factor의 합과 출력값 내의 blinding factor의 합이 달라집니다.

### 결론 내리기

이 문서에서는 MimbleWimble 블록체인 안의 기본적인 원리에 대해서 다루었습니다.
이 문서에서는 Mimblewimble 블록체인 안의 기본적인 원리에 대해서 다루었습니다.
타원 곡선 암호의 다른 성질을 사용해서 알아보기 어려우나 적절하게 입증될 수 있는 트랜잭션을 만들수 있습니다. 블록에 이러한 성질들을 일반화 시키면 큰 용량의 블록체인 데이터를 없앨 수 있고 새로운 피어들에게 높은 확장성과 빠른 동기화를 가능하게 할 수 있습니다.
@@ -1,10 +1,10 @@
# Inleiding tot MimbleWimble en Grin
# Inleiding tot Mimblewimble en Grin

*Lees dit in andere talen: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*Lees dit in andere talen: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md).*

MimbleWimble is een blockchain formaat en protocol die extreem goede schaalbaarheid, privacy en fungibiliteit biedt door zich te berusten op sterke cryptografische primiteven. Het adresseert de lacunes die in bijna alle huidige blockchain-implementaties bestaan.
Mimblewimble is een blockchain formaat en protocol die extreem goede schaalbaarheid, privacy en fungibiliteit biedt door zich te berusten op sterke cryptografische primitieven. Het adresseert de lacunes die in bijna alle huidige blockchain-implementaties bestaan.

Grin is een open source softwareproject dat een MimbleWimble blockchain
Grin is een open source softwareproject dat een Mimblewimble blockchain
implementeert en de lacunes vult die nodig zijn voor een
volledige blockchain en
cryptovaluta inzet
@@ -16,7 +16,7 @@ Het belangrijkste doel en eigenschappen van het Grin project zijn:
* Schaalt meestal met het aantal gebruikers en minimaal met het aantal transacties
(<100 byte `kernel), wat resulteert in een grotere ruimtebesparing
vergeleken met andere blockchains.
* Sterk en bewezen cryptografie. MimbleWimble rust enkel op Elliptic Curve
* Sterk en bewezen cryptografie. Mimblewimble rust enkel op Elliptic Curve
Cryptografie die al decennia beproefd en getest wordt.
* Eenvoud van het ontwerp die het makkelijk maakt om na verloop van tijd te
controleren en onderhouden.
@@ -27,22 +27,22 @@ Het belangrijkste doel en eigenschappen van het Grin project zijn:

Dit document is bedoeld voor lezers met een sterke achtergrond
van blockchains en elementaire cryptografie. Met dat in ons achterhoofd, proberen we
de technische opbouw van MimbleWimble en hoe het in Grin is toegepast uit te leggen.
de technische opbouw van Mimblewimble en hoe het in Grin is toegepast uit te leggen.
We hopen dat dit document verstaanbaar is voor de meeste technische lezers
Ons doel is om u aan te moedigen geïnteresseerd te raken in Grin en
op welke manier mogelijk dan ook bij te dragen.

Om dit doel te bereiken, zullen we de belangrijkste concepten introduceren die vereist
zijn voor een goed begrip van Grin als een MimbleWimble-implementatie. We beginnen met een beknopte beschrijving
zijn voor een goed begrip van Grin als een Mimblewimble-implementatie. We beginnen met een beknopte beschrijving
van enkele relevante eigenschappen van Elliptic Curve Cryptografie (ECC) om de basis
waarop Grin gebaseerd is en vervolgens alle belangrijke elementen van
MimbleWimble blockchain's transacties en -blokken te beschrijven.
Mimblewimble blockchain's transacties en -blokken te beschrijven.

### Minuscule Databits van Elliptic Curves

We beginnen met een korte inleiding van Elliptic Curve Cryptografie, waarbij we alleen
de eigenschappen evalueren die nodig zijn om om te begrijpen hoe
MimbleWimble werkt zonder te diep op de complexiteit van ECC in te gaan.
Mimblewimble werkt zonder te diep op de complexiteit van ECC in te gaan.
Voor lezer die dat wel zouden willen, zijn er andere mogelijkheden om
[er meer over te weten te komen](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).

@@ -67,14 +67,14 @@ demonstreert dat een openbare sleutel verkregen is door de toevoeging van
twee privésleutels (`(k+j)*H`) zijn identiek aan de toevoeging van de
openbare sleutels voor elk van die twee privésleutels (`k*H + j*H`).
In de Bitcoin blockchain, zijn Hiërarchische Deterministische portefeuilles
sterk afhankelijk van dit principe. Alsook MimbleWimble en de Grin-implementatie.
sterk afhankelijk van dit principe. Alsook Mimblewimble en de Grin-implementatie.

### Transacties met MimbleWimble
### Transacties met Mimblewimble

De structuur van transacties toont een cruciaal principe van MimbleWimble:
De structuur van transacties toont een cruciaal principe van Mimblewimble:
sterke privacy- en vertrouwelijkheidsgaranties.

De validatie van MimbleWimble transacties zijn gebaseerd op twee basiseigenschappen:
De validatie van Mimblewimble transacties zijn gebaseerd op twee basiseigenschappen:

* **Verificatie van zero sums.** De som van de uitkomsten min de ingaven is altijd
gelijk aan nul, welke bewijst dat de transactie geen nieuw geld gecreëerd heeft, _zonder de werkelijke bedragen te onthullen_.
@@ -140,7 +140,7 @@ Wat als gevolg vereist dat:

ri1 + ri2 = ro3

Dit is de eerste pijler van MimbleWimble: de arithmetische vereist om een transactie te valideren gedaan kan worden
Dit is de eerste pijler van Mimblewimble: de rekenkunde die vereist is om een transactie te valideren, kan gedaan worden
zonder dat één van de waarden gekend is.

Tot slot, is dit idee eigenlijk afgeleid van Greg Maxwell's
@@ -151,7 +151,7 @@ aan Bitcoin.
#### Eigendom

In het vorige gedeelte hebben we een privésleutel geïntroduceerd als een blinding factor om de transactiewaarden te verdoezelen.
Het tweede inzicht van MimbleWimble is dat deze privésleutel
Het tweede inzicht van Mimblewimble is dat deze privésleutel
gebruikt kan worden om het eigendom van de waarde aan te tonen.

Alice stuurt je 3 munten en om dat bedrag te verdoezelen, kies je 28 als jouw
@@ -205,7 +205,7 @@ mining fees), wordt een _transaction kernel_ genoemd en wordt gecontroleerd door

Dit gedeelte gaat in op het maken van transacties door te bespreken hoe verandering geïntroduceerd is
en de vereiste voor range proofs zodat alle waarden bewezen zijn als niet-negatieve.
Geen van beide zijn absoluut vereist om MimbleWimble en
Geen van beide zijn absoluut vereist om Mimblewimble en
Grin te begrijpen, dus als je gehaast bent, voel je vrij om meteen over te gaan naar
[Alles bij elkaar samenbrengen](#putting-it-all-together).

@@ -234,7 +234,7 @@ Bijvoorbeeld, kan iemand een transactie creëren met een invoer van 2 en uitvoer
en -3 en nog steeds een goed gebalanceerde transactie krijgen, volgens de definitie in de vorige secties. Dit kan niet makkelijk gedetecteerd worden, zelfs als _x_
negatief is, het overeenkomstige punt `x.H` op de curve lijkt op een ander.

Om dit te probleem te verhelpen, maakt MimbleWimble gebruik van een ander cryptografisch concept (ook afkomstig
Om dit probleem te verhelpen, maakt Mimblewimble gebruik van een ander cryptografisch concept (ook afkomstig
vanuit Confidential Transactions) genaamd
range proofs: een bewijs dat een getal binnen een gegeven bereik valt, zonder het nummer te onthullen.
We gaan niet uitweiden op de range proof, maar u moet gewoon weten
@@ -245,7 +245,7 @@ Het is ook belangrijk om op te merken dat om een geldige range proof te maken ui

#### Alles bij elkaar samenbrengen

Een MimbleWimble transactie omvat het volgende:
Een Mimblewimble transactie omvat het volgende:

* Een reeks invoeren, die verwijzen naar en een vorige reeks aan uitvoeren spendeert.
* Een reeks van nieuwe uitvoeren met:
@@ -257,11 +257,11 @@ Een MimbleWimble transactie omvat het volgende:

### Blocks en Chain State

We hebben hierboven uitgelegd hoe MimbleWimble transacties sterke anonimiteit kunnen garanderen terwijl de eigenschappen die vereist zijn voor een geldige blockchain handhaaft,
We hebben hierboven uitgelegd hoe Mimblewimble transacties sterke anonimiteit kunnen garanderen terwijl de eigenschappen die vereist zijn voor een geldige blockchain handhaaft,
d.w.z. een transactie creëert geen geld en een bewijs van eigendom wordt vastgelegd met privésleutels.

Het MimbleWimble blockformaat bouwt hierop voort door een aanvullend concept te introduceren: _cut-through_.
Met deze aanvulling, verkrijgt een MimbleWimble chain:
Het Mimblewimble blockformaat bouwt hierop voort door een aanvullend concept te introduceren: _cut-through_.
Met deze aanvulling, verkrijgt een Mimblewimble chain:

* Zeer goede schaalbaarheid, zoals de grote meerderheid van transactiegegevens geëlimineerd kunnen worden met de tijd,
zonder de beveiliging in gevaar te brengen.
@@ -292,11 +292,11 @@ Hetzelfde geldt voor blokken zelf als we eenmaal realiseren dat een blok eenvoud

som(uitvoeren) - som(invoeren) = sum(kernel_excess)

Enigszins vereenvoudigd, (wederom negeren we de transactiekosten) kunnen we zeggen dat MimbleWimble-blokken behandeld kunnen worden als MimbleWimble-transacties.
Enigszins vereenvoudigd, (wederom negeren we de transactiekosten) kunnen we zeggen dat Mimblewimble-blokken behandeld kunnen worden als Mimblewimble-transacties.

##### Kernel Offsets

Er is een subtiel probleem met MimbleWimble-blokken en transacties zoals hierboven beschreven. Het is mogelijk (en in sommige gevallen triviaal) om de constituerende transacties in een blok te reconstrueren. Dit is duidelijk slecht voor privacy. Dit is een "subset" probleem - gegeven een verzameling van invoeren, uitvoeren en transactiekernels zal een subnet van dit formaat recombineren om een geldige transactie te reconstrueren.
Er is een subtiel probleem met Mimblewimble-blokken en transacties zoals hierboven beschreven. Het is mogelijk (en in sommige gevallen triviaal) om de constituerende transacties in een blok te reconstrueren. Dit is duidelijk slecht voor privacy. Dit is een "subset" probleem - gegeven een verzameling van invoeren, uitvoeren en transactiekernels zal een subset hiervan recombineren om een geldige transactie te reconstrueren.

Bijvoorbeeld, gegeven zijn de volgende twee transacties -

@@ -370,7 +370,7 @@ Een blok is eenvoudigweg opgebouwd uit:
* De handtekeningen gegenereerd door middel van de overtollige waarde.
* De miningkost.

Wanneer het op deze manier geconstructureerd wordt, biedt een MimbleWimble-blok buitengewoon goede privacygaranties:
Wanneer het op deze manier geconstructureerd wordt, biedt een Mimblewimble-blok buitengewoon goede privacygaranties:

* Intermediaire (cut-through) transacties worden alleen weergegeven door hun transactiekernels.
* Alle uitvoeren zien er hetzelfde uit: gewoon hele grote getallen die onmogelijk van elkaar te differentiëren zijn.
@@ -390,17 +390,17 @@ Veralgemeend, concluderen we dat de ketenstatus (met uitzondering van headers) o
samengevat kunnen worden door alleen deze stukjes informatie:

1. Het totale aantal munten gecreëerd door mining in de keten.
2. De volledige verzameling aan niet-bestede uitgaven.
3. De transactiekernels voor elke transactie.
1. De volledige verzameling aan niet-bestede uitgaven.
1. De transactiekernels voor elke transactie.

Het eerste stuk informatie kan afgeleid worden door alleen de blokhoogte
(de afstand tot het genesisblok) te gebruiken. En zowel de niet-bestede uitgaven als de
transactiekernels zijn ontzettend compact. Dit heeft 2 belangrijke gevolgen:

* De stand die een bepaalde node in een MimbleWimble blockchain moet behouden blijven is zeer klein
* De stand die een bepaalde node in een Mimblewimble blockchain moet behouden is zeer klein
(in de volgorde van enkele gigabytes voor een bitcoin-sized blockchain en
potentieel optimaliseerbaar tot enkele honderden megabytes).
* Wanneer een nieuwe node zich aansluit bij een netwerk ter bijdrage aan de MimbleWimble chain, is het aantal informatie die overdragen moet worden ook enorm klein.
* Wanneer een nieuwe node zich aansluit bij een netwerk ter bijdrage aan de Mimblewimble chain, is de hoeveelheid informatie die overgedragen moet worden ook enorm klein.

Bovendien kan er niet met de gehele verzameling aan ongebruikte uitgaven gesjoemeld worden, zelfs
niet door een uitgave toe te voegen of te verwijderen. Daarmee zou de sommatie van alle
@@ -409,7 +409,7 @@ factors in de uitgaven.

### Conclusie

In dit document hebben we de basisprincipes behandeld die ten grondslag liggen van een MimbleWimble
In dit document hebben we de basisprincipes behandeld die ten grondslag liggen van een Mimblewimble
blockchain. Door de aanvullende eigenschappen te gebruiken van Elliptic Curve Cryptografie, zijn
we in staat om transacties te bouwen die geheel ondoorzichtig zijn maar nog steeds goed gevalideerd kunnen worden.
En door deze eigenschappen te generaliseren naar blokken, kunnen we een grote hoeveelheid aan blockchaingegevens elimineren,
@@ -1,42 +1,42 @@
# Introdução ao MimbleWimble e ao Grin
# Introdução ao Mimblewimble e ao Grin

*Leia isto em outros idiomas: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*Leia isto em outros idiomas: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md).*

O MimbleWimble é um formato e protocolo blockchain que fornece ótima escalabilidade, privacidade e fungibilidade, para isso contando com primitivas criptográficas fortes. Ele aborda as lacunas existentes em quase todos as implementações blockchain atuais.
O Mimblewimble é um formato e protocolo blockchain que fornece ótima escalabilidade, privacidade e fungibilidade, para isso contando com primitivas criptográficas fortes. Ele aborda as lacunas existentes em quase todas as implementações blockchain atuais.

O Grin é um projeto de software de código aberto que implementa um blockchain MimbleWimble e preenche os vãos necessários para se construir um blockchain e uma criptomoeda completos.
O Grin é um projeto de software de código aberto que implementa um blockchain Mimblewimble e preenche os vãos necessários para se construir um blockchain e uma criptomoeda completos.

O principal objetivo e as características do projeto Grin são:

* Privacidade por padrão. Isto permite fungibilidade completa sem impedir a capacidade de divulgação seletiva de informações quando necessário.
* Escalabilidade, sobretudo quanto ao número de usuários e minimamente com relação ao número de transações (<100 byte `núcleo`), resultando em uma grande economia de espaço quando comparado a outros blockchains.
* Criptografia forte e comprovada. O MimbleWimble se baseia apenas em Criptografia de Curva Elíptica testada e experimentada há décadas.
* Criptografia forte e comprovada. O Mimblewimble se baseia apenas em Criptografia de Curva Elíptica testada e experimentada há décadas.
* Simplicidade no design o que facilita a auditoria e manutenção com o tempo.
* Direcionado pela comunidade, incentivando a descentralização da mineração.

## Amarra-Língua para Todos

Este documento destina-se a leitores com uma boa compreensão de blockchains e criptografia básica. Tendo isto em mente, tentamos explicar o desenvolvimento técnico do MimbleWimble e como ele é aplicado no Grin. Acreditamos que este documento seja compreensível para a maioria dos leitores tecnicamente conscientes. Nosso objetivo é incentivá-los a se interessar pelo Grin e contribuir da maneira que for possível.
Este documento destina-se a leitores com uma boa compreensão de blockchains e criptografia básica. Tendo isto em mente, tentamos explicar o desenvolvimento técnico do Mimblewimble e como ele é aplicado no Grin. Acreditamos que este documento seja compreensível para a maioria dos leitores tecnicamente conscientes. Nosso objetivo é incentivá-los a se interessar pelo Grin e contribuir da maneira que for possível.

Para alcançar este objetivo, apresentaremos os principais conceitos necessários para uma boa compreensão do Grin, sendo esta uma implementação do MimbleWimble. Vamos começar com uma breve descrição de algumas propriedades relevantes da Criptografia de Curva Elíptica (CCE) de forma a sedimentar a fundação em que o Grin é baseado e, em seguida, descrever todos os elementos-chave de transações e blocos do blockchain MimbleWimble.
Para alcançar este objetivo, apresentaremos os principais conceitos necessários para uma boa compreensão do Grin, sendo esta uma implementação do Mimblewimble. Vamos começar com uma breve descrição de algumas propriedades relevantes da Criptografia de Curva Elíptica (CCE) de forma a sedimentar a fundação em que o Grin é baseado e, em seguida, descrever todos os elementos-chave de transações e blocos do blockchain Mimblewimble.

### Um Pouquinho sobre Curvas Elípticas

Começamos com uma breve cartilha sobre Criptografia de Curva Elíptica, revisando apenas propriedades necessárias para entender como o MimbleWimble funciona e sem se aprofundar muito nos meandros da CCE. Para os leitores que gostariam de mergulhar mais fundo nesses pressupostos, existem outras oportunidades para [aprender mais](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).
Começamos com uma breve cartilha sobre Criptografia de Curva Elíptica, revisando apenas propriedades necessárias para entender como o Mimblewimble funciona e sem se aprofundar muito nos meandros da CCE. Para os leitores que gostariam de mergulhar mais fundo nesses pressupostos, existem outras oportunidades para [aprender mais](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).

Uma Curva Elíptica para fins de criptografia é simplesmente um grande conjunto de pontos que nós chamaremos de _C_. Estes pontos podem ser adicionados, subtraídos ou multiplicados por inteiros (também chamados de escalares). Dado um inteiro _k_ e usando a operação de multiplicação escalar, podemos calcular `k*H`, que também é um ponto da curva _C_. Dado outro inteiro _j_ também podemos calcular `(k+j)*H`, que é igual a `k*H + j*H`. As operações de adição e multiplicação escalar em uma curva elíptica mantem as propriedades comutativa e associativa da adição e multiplicação:

(k+j)*H = k*H + j*H

Em CCE, se escolhermos um número muito grande _k_ como uma chave privada, `k*H` é considerada a chave pública correspondente. Mesmo tendo conhecimento do valor da chave pública `k*H`, deduzir _k_ é quase impossível (ou em outras palavras, enquanto a multiplicação é trivial, a "divisão" por pontos da curva é extremamente difícil).

A fórmula anterior `(k+j)*H = k*H + j*H`, com _k_ e _j_ ambos sendo chaves privadas, demonstra que uma chave pública obtida a partir da adição de duas chaves privadas (`(k+j)*H`) é idêntica à adição das chaves públicas para cada uma dessas duas chaves privadas (`k*H + j*H`). No blockchain do Bitcoin, as carteiras Hierárquicas Determinísticas dependem fortemente desse princípio. O MimbleWimble e a implementação do Grin dependem também.
A fórmula anterior `(k+j)*H = k*H + j*H`, com _k_ e _j_ ambos sendo chaves privadas, demonstra que uma chave pública obtida a partir da adição de duas chaves privadas (`(k+j)*H`) é idêntica à adição das chaves públicas para cada uma dessas duas chaves privadas (`k*H + j*H`). No blockchain do Bitcoin, as carteiras Hierárquicas Determinísticas dependem fortemente desse princípio. O Mimblewimble e a implementação do Grin dependem também.

### Transacionando com o MimbleWimble
### Transacionando com o Mimblewimble

A estrutura das transações demonstra um princípio crucial do MimbleWimble: a garantia forte de privacidade e confidencialidade.
A estrutura das transações demonstra um princípio crucial do Mimblewimble: a garantia forte de privacidade e confidencialidade.

A validação das transações do MimbleWimble depende de duas propriedades básicas:
A validação das transações do Mimblewimble depende de duas propriedades básicas:

* **Verificação de somas zero.** A soma das saídas menos as entradas é sempre igual a zero, provando que a transação não criou novos fundos, _sem revelar os montantes reais_.
* **Posse de chaves privadas.** Como na maioria das outras criptomoedas, a propriedade sobre as saídas das transações é garantida pela posse de chaves privadas CCE. Contudo, a prova de que uma entidade possui essas chaves privadas não é obtida através da assinatura direta da transação.
@@ -81,13 +81,13 @@ Que, como consequência, requer:

re1 + re2 = rs3

Este é o primeiro pilar do MimbleWimble: a aritmética necessária para validar uma transação pode ser feita sem conhecer nenhum dos montantes.
Este é o primeiro pilar do Mimblewimble: a aritmética necessária para validar uma transação pode ser feita sem conhecer nenhum dos montantes.

Como nota final, esta ideia é, na verdade, derivada das [Transações Confidenciais](https://elementsproject.org/features/confidential-transactions/investigation) de Greg Maxwell, que por si derivou de uma proposta de Adam Back para montantes homomórficos aplicados ao Bitcoin.

#### Propriedade

Na seção anterior, introduzimos uma chave privada como um fator de cegueira para obscurecer os montantes da transação. A segunda perspicácia do MimbleWimble é que esta chave privada pode ser aproveitada para provar a propriedade do montante.
Na seção anterior, introduzimos uma chave privada como um fator de cegueira para obscurecer os montantes da transação. A segunda perspicácia do Mimblewimble é que esta chave privada pode ser aproveitada para provar a propriedade do montante.

Alice lhe envia 3 moedas e, para obscurecer essa quantia, você escolheu 28 como seu fator de cegueira (note que, na prática, o fator de cegueira sendo uma chave privada, é um número extremamente grande). Em algum lugar no blockchain, a seguinte saída aparece e só pode ser gasta por você:

@@ -122,7 +122,7 @@ Esta assinatura, anexada a todas as transações, juntamente com alguns dados ad

#### Alguns Pontos Refinados

Esta seção detalha a construção de transações discutindo como o troco deve ser introduzido e a exigência de provas de intervalo para que todos os montantes sejam comprovadamente não-negativos. Nenhum destes pontos é absolutamente necessário para entender o MimbleWimble e o Grin, por isso, se estiver com pressa, sinta-se à vontade para pular direto para [Juntando Tudo] (#juntando-tudo).
Esta seção detalha a construção de transações discutindo como o troco deve ser introduzido e a exigência de provas de intervalo para que todos os montantes sejam comprovadamente não-negativos. Nenhum destes pontos é absolutamente necessário para entender o Mimblewimble e o Grin, por isso, se estiver com pressa, sinta-se à vontade para pular direto para [Juntando Tudo](#juntando-tudo).

##### Troco

@@ -141,13 +141,13 @@ Em todos os cálculos acima, trabalhamos com montantes de transação sempre pos

Por exemplo, pode-se criar uma transação com uma entrada de montante 2 e saídas de montantes 5 e -3 obtendo uma transação devidamente estruturada, de acordo com a definição das seções anteriores. Isso não pode ser detectado facilmente, porque mesmo sendo _x_ negativo, o ponto correspondente `x.H` na curva se assemelha a qualquer outro.

Para resolver este problema, o MimbleWimble utiliza outro conceito criptográfico (também proveniente de transações confidenciais) chamado prova de intervalo: uma prova que um número se enquadra dentro de um determinado intervalo, sem revelar este número. Nós não iremos elaborar sobre a prova de intervalo, basta saber que para qualquer `r.G + v.H` podemos construir uma prova que mostrará que _v_ é maior que zero e não sofre estouro numérico.
Para resolver este problema, o Mimblewimble utiliza outro conceito criptográfico (também proveniente de transações confidenciais) chamado prova de intervalo: uma prova que um número se enquadra dentro de um determinado intervalo, sem revelar este número. Nós não iremos elaborar sobre a prova de intervalo, basta saber que para qualquer `r.G + v.H` podemos construir uma prova que mostrará que _v_ é maior que zero e não sofre estouro numérico.

Também é importante notar que, para criar uma prova de intervalo válida a partir do exemplo acima, ambos os montantes 113 e 28 usados na criação e assinatura do montante excedente devem ser conhecidos. A razão disto, assim como uma descrição aprofundada da prova de intervalo, estão mais detalhadas no [artigo sobre provas de intervalo](https://eprint.iacr.org/2017/1066.pdf).

#### Juntando Tudo

Uma transação MimbleWimble inclui o seguinte:
Uma transação Mimblewimble inclui o seguinte:

* Um conjunto de entradas, que referencia e gasta um conjunto de saídas anteriores.
* Um conjunto de novas saídas que incluem:
@@ -158,9 +158,9 @@ Uma transação MimbleWimble inclui o seguinte:

### Blocos e Estado da Cadeia

Nós explicamos acima como as transações do MimbleWimble podem fornecer forte garantia de anonimato, mantendo as propriedades necessárias de um blockchain válido, ou seja, uma transação não cria dinheiro e a prova de propriedade é estabelecida através de chaves privadas.
Nós explicamos acima como as transações do Mimblewimble podem fornecer forte garantia de anonimato, mantendo as propriedades necessárias de um blockchain válido, ou seja, uma transação não cria dinheiro e a prova de propriedade é estabelecida através de chaves privadas.

O formato de bloco MimbleWimble se baseia nisso, introduzindo um conceito: _corte-completo_ (cut-through). Com esta adição, uma cadeia MimbleWimble ganha:
O formato de bloco Mimblewimble se baseia nisso, introduzindo um conceito: _corte-completo_ (cut-through). Com esta adição, uma cadeia Mimblewimble ganha:

* Ótima escalabilidade, já que a grande maioria dos dados de transações podem ser eliminados com o tempo, sem comprometer a segurança.
* Mais anonimato, misturando e removendo dados de transações.
@@ -190,11 +190,11 @@ O mesmo vale para os próprios blocos, uma vez que percebemos que um bloco é si

soma(saídas) - soma(entradas) = soma(excedente do núcleo)

Simplificando um pouco (ignorando novamente as taxas de transação), podemos dizer que os blocos MimbleWimble podem ser tratados exatamente como transações MimbleWimble.
Simplificando um pouco (ignorando novamente as taxas de transação), podemos dizer que os blocos Mimblewimble podem ser tratados exatamente como transações Mimblewimble.

##### Deslocamentos do Núcleo

Há um problema sutil nos blocos e transações do MimbleWimble, conforme descrito acima. É possível (e em alguns casos, é trivial) reconstruir as transações constituintes de um bloco. Isso é claramente ruim para a privacidade. Este é o problema do "subconjunto" - dado um conjunto de entradas, saídas e núcleos de transação, um subconjunto destes recombinará para reconstruir uma transação válida.
Há um problema sutil nos blocos e transações do Mimblewimble, conforme descrito acima. É possível (e em alguns casos, é trivial) reconstruir as transações constituintes de um bloco. Isso é claramente ruim para a privacidade. Este é o problema do "subconjunto" - dado um conjunto de entradas, saídas e núcleos de transação, um subconjunto destes recombinará para reconstruir uma transação válida.

Por exemplo, dadas as duas transações a seguir -

@@ -260,7 +260,7 @@ Um bloco é simplesmente constituído de:
  * As assinaturas geradas usando o montante excedente.
  * A taxa de mineração.

Quando estruturado dessa maneira, um bloco MimbleWimble oferece garantias extremamente boas de privacidade:
Quando estruturado dessa maneira, um bloco Mimblewimble oferece garantias extremamente boas de privacidade:

* Transações intermediárias (corte-completo) serão representadas apenas por seus núcleos de transação.
* Todas as saídas se assemelham: apenas números muito grandes que são impossíveis de distinguir um do outro. Se alguém quisesse excluir algumas saídas, este teria que excluir todas.
@@ -275,17 +275,17 @@ Voltando ao bloco do exemplo anterior, as saídas x1 e x2, gastas por I1 e I2, d
Generalizando, concluímos que o estado da cadeia (excluindo cabeçalhos) a qualquer momento pode ser resumido simplesmente pelas seguintes informações:

1. A quantidade total de moedas criadas pela mineração na cadeia.
2. O conjunto completo de saídas não gastas.
3. Os núcleos de transações para cada transação.
1. O conjunto completo de saídas não gastas.
1. Os núcleos de transações para cada transação.

A primeira informação pode ser deduzida usando apenas a altura do bloco (sua distância do bloco de gênese). E tanto as saídas não gastas quanto os núcleos de transação são extremamente compactos. Isso tem 2 consequências importantes:

* O estado que um determinado nó do blockchain MimbleWimble precisa manter é muito pequeno (na ordem de alguns gigabytes para um blockchain do tamanho do bitcoin, e potencialmente otimizável para algumas centenas de megabytes).
* Quando um novo nó se une à rede que constrói uma cadeia MimbleWimble, a quantidade de informação que precisa ser transferida também é muito pequena.
* O estado que um determinado nó do blockchain Mimblewimble precisa manter é muito pequeno (na ordem de alguns gigabytes para um blockchain do tamanho do bitcoin, e potencialmente otimizável para algumas centenas de megabytes).
* Quando um novo nó se une à rede que constrói uma cadeia Mimblewimble, a quantidade de informação que precisa ser transferida também é muito pequena.

Além disso, o conjunto completo de saídas não gastas não pode ser adulterado, mesmo somente adicionando ou removendo uma saída. Isso faria com que a soma de todos os fatores de cegueira nos núcleos de transação diferissem da soma dos fatores de cegueira nas saídas.

### Conclusão

Neste documento, cobrimos os princípios básicos subjacentes a um blockchain MimbleWimble. Usando as propriedades de adição da Criptografia de Curva Elíptica, construímos transações completamente opacas, mas que ainda assim podem ser corretamente validadas. E ao generalizar essas propriedades em blocos, podemos eliminar uma grande quantidade de dados do blockchain, permitindo uma grande escalabilidade bem como a rápida sincronização de novos pares.
Neste documento, cobrimos os princípios básicos subjacentes a um blockchain Mimblewimble. Usando as propriedades de adição da Criptografia de Curva Elíptica, construímos transações completamente opacas, mas que ainda assim podem ser corretamente validadas. E ao generalizar essas propriedades em blocos, podemos eliminar uma grande quantidade de dados do blockchain, permitindo uma grande escalabilidade bem como a rápida sincronização de novos pares.

@@ -1,6 +1,6 @@
# Введение в МимблВимбл и Grin

*На других языках: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*На других языках: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md).*

МимблВимбл это формат и протокол блокчейна, предоставляющий
исключительную масштабируемость, приватность и обезличенность криптовалюты,
@@ -407,8 +407,8 @@ range proof-ов можно найти в [публикации о range proof-
сведено к следующей информации:

1. Общее количество монет, созданное майнингом цепочки.
2. Полный набор непотраченых выходов транзакций.
3. Ядра каждой из транзакций.
1. Полный набор непотраченых выходов транзакций.
1. Ядра каждой из транзакций.

Информацию для первого пункта мы можем получить просто из высоты (расстояние от генезиса) блока.
А, в свою очередь, непотраченые выходы и ядра транзакций довольно компактны.
@@ -1,12 +1,12 @@
# Introduktion till MimbleWimble och Grin
# Introduktion till Mimblewimble och Grin

*Läs detta på andra språk: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*Läs detta på andra språk: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md), [简体中文](intro_ZH-CN.md).*

MimbleWimble är ett blockkedjeformat och protokoll som erbjuder extremt bra
Mimblewimble är ett blockkedjeformat och protokoll som erbjuder extremt bra
skalbarhet, integritet, och fungibilitet genom starka kryptografiska primitiver.
Den angriper brister som existerar i nästan alla nuvarande blockkedjeimplementationer.

Grin är ett mjukvaruprojekt med öppen källkod som implementerar en MimbleWimble-blockkedja
Grin är ett mjukvaruprojekt med öppen källkod som implementerar en Mimblewimble-blockkedja
och fyller igen luckorna för att skapa en fullständig blockkedja och kryptovaluta.

Grin-projektets huvudsakliga mål och kännetecken är:
@@ -15,28 +15,28 @@ Grin-projektets huvudsakliga mål och kännetecken är:
förhindra förmågan att selektivt uppdaga information efter behov.
* Växer mestadels med antal användare och minimalt med antal transaktioner (< 100 bytes transaktionskärna),
vilket resulterar i stora utrymmesbesparingar i jämförelse med andra blockkedjor.
* Stark och bevisad kryptografi. MimbleWimble förlitar sig endast på kryptografi med
* Stark och bevisad kryptografi. Mimblewimble förlitar sig endast på kryptografi med
elliptiska kurvor (ECC) vilket har beprövats i decennier.
* Simplistisk design som gör det enkelt att granska och underhålla på lång sikt.
* Gemenskapsdriven, uppmuntrar mining och decentralisering.

## Tungknytande för alla

Detta dokument är riktat mot läsare med en bra förståelse för blockkedjor och grundläggande kryptografi.
Med det i åtanke försöker vi förklara den tekniska uppbyggnaden av MimbleWimble och hur det appliceras i Grin.
Med det i åtanke försöker vi förklara den tekniska uppbyggnaden av Mimblewimble och hur det appliceras i Grin.
Vi hoppas att detta dokument är föreståeligt för de flesta tekniskt inriktade läsare. Vårt mål är att
uppmuntra er att bli intresserade i Grin och bidra på något möjligt sätt.

För att uppnå detta mål kommer vi att introducera de huvudsakliga begrepp som krävs för en
bra förståelse för Grin som en MimbleWimble-implementation. Vi kommer att börja med en kort
bra förståelse för Grin som en Mimblewimble-implementation. Vi kommer att börja med en kort
beskrivning av några av elliptiska kurvornas relevanta egenskaper för att lägga grunden som Grin
är baserat på och därefter beskriva alla viktiga element i en MimbleWimble-blockkedjas
är baserat på och därefter beskriva alla viktiga element i en Mimblewimble-blockkedjas
transaktioner och block.

### Småbitar av elliptiska kurvor

Vi börjar med en kort undervisning i kryptografi med elliptiska kurvor (ECC) där vi endast
går igenom de nödvändiga egenskaper för att förstå hur MimbleWimble fungerar utan att
går igenom de nödvändiga egenskaper för att förstå hur Mimblewimble fungerar utan att
gå djupt in på dess krångligheter. För läsare som vill fördjupa sig i detta finns andra
möjligheter att [lära sig mer](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).

@@ -55,14 +55,14 @@ multiplikation med kurvpunkter är trivialt är "division" extremt svårt).
Den föregående formeln `(k+j)*H = k*H + j*H`, med _k_ och _j_ båda som privata nycklar, demonstrerar att en publik nyckel
erhållen av att ha adderat de två privata nycklarna är identisk med de två privata nycklarnas respektive
publika nycklar adderade (`k*H + j*H`). I Bitcoin-blockkedjan använder hierarkiska deterministiska plånböcker (HD wallets)
sig flitigt av denna princip. MimbleWimble och Grin-implementationer gör det också.
sig flitigt av denna princip. Mimblewimble och Grin-implementationer gör det också.

### Transaktioner med MimbleWimble
### Transaktioner med Mimblewimble

Transaktionernas struktur demonstrerar en av MimbleWimbles kritiska grundsatser:
Transaktionernas struktur demonstrerar en av Mimblewimbles kritiska grundsatser:
starka garantier av integritet och konfidentialitet.

Valideringen av MimbleWimble-transaktioner använder sig av två grundläggande egenskaper:
Valideringen av Mimblewimble-transaktioner använder sig av två grundläggande egenskaper:

* **Kontroll av nollsummor.** Summan av outputs minus inputs är alltid lika med noll, vilket bevisar—utan att
avslöja beloppen—att transaktionen inte skapade nya pengar.
@@ -120,7 +120,7 @@ Vilket som följd kräver att:

ri1 + ri2 = ro3

Detta är MimbleWimbles första pelare: de beräkningar som är nödvändiga för att validera en transaktion
Detta är Mimblewimbles första pelare: de beräkningar som är nödvändiga för att validera en transaktion
kan göras utan att veta några belopp.

Denna idé härstammar faktiskt från Greg Maxwells
@@ -130,7 +130,7 @@ som i sin tur härstammar från ett förslag av Adam Back för homomorfiska belo
#### Ägande

I föregående stycke introducerade vi en privat nyckel som en förblindningsfaktor för att dölja transaktionens belopp.
MimbleWimbles andra insikt är att denna privata nyckel kan användas för att bevisa ägande av beloppet.
Mimblewimbles andra insikt är att denna privata nyckel kan användas för att bevisa ägande av beloppet.

Alice skickar 3 mynt till dig och för att dölja beloppet väljer du 28 som din förblindningsfaktor (notera att förblindningsfaktorn i praktiken
är ett extremt stort tal). Någonstans i blockkedjan dyker följande output upp och ska endast kunna spenderas av dig:
@@ -178,7 +178,7 @@ för _transaktionskärna_ och kontrolleras av alla validerare.
#### Några finare punkter

Detta stycke detaljerar byggandet av transaktioner genom att diskutera hur växel införs och kravet för "range proofs"
så att alla belopp är bevisade att vara icke-negativa. Inget av detta är absolut nödvändigt för att förstå MimbleWimble
så att alla belopp är bevisade att vara icke-negativa. Inget av detta är absolut nödvändigt för att förstå Mimblewimble
och Grin, så om du har bråttom känn dig fri att hoppa direkt till [Sammanställningen av allt](#sammanställningen-av-allt).

#### Växel
@@ -204,7 +204,7 @@ Till exempel skulle man kunna skapa en transaktion med input-belopp 2 och output
ha en balanserad transaktion. Detta kan inte upptäckas enkelt eftersom punkten `x*H` ser ut som vilken annan punkt
som helst på kurvan även om _x_ är negativt.

För att lösa detta problem använder MimbleWimble sig av ett kryptografiskt koncept som kallas "range proofs" (som också härstammar
För att lösa detta problem använder Mimblewimble sig av ett kryptografiskt koncept som kallas "range proofs" (som också härstammar
från Confidential Transactions): ett bevis på att ett tal befinner sig inom ett visst intervall utan att avslöja talet.
Vi kommer inte att förklara range proofs; du behöver endast veta att vi för varje `r*G + v*H` kan skapa ett bevis som visar
att _v_ är större än noll och inte orsakar overflow.
@@ -223,7 +223,7 @@ Denna output (`(113 + 99)*G + 2*H`) kräver att både talen 113 och 99 är känd

#### Sammanställningen av allt

En MimbleWimble-transaktion inkluderar följande:
En Mimblewimble-transaktion inkluderar följande:

* En mängd inputs som refererar till och spenderar en mängd föregående outputs.
* En mängd nya outputs som inkluderar:
@@ -236,12 +236,12 @@ avgiften minus inputs).

### Block och kedjetillstånd

Vi förklarade ovan hur MimbleWimble-transaktioner kan erbjuda starka anonymitetsgarantier samtidigt som de
Vi förklarade ovan hur Mimblewimble-transaktioner kan erbjuda starka anonymitetsgarantier samtidigt som de
upprätthåller egenskaperna för en giltig blockkedja, d v s att en transaktion inte skapar pengar och att ägandebevis
fastställs med privata nycklar.

MimbleWimble-blockformatet bygger på detta genom att introducera ett till koncept: _cut-through_. Med detta
får en MimbleWimble-kedja:
Mimblewimble-blockformatet bygger på detta genom att introducera ett till koncept: _cut-through_. Med detta
får en Mimblewimble-kedja:

* Extremt bra skalbarhet då den stora majoriteten av transaktionsinformation kan elimineras på lång sikt utan att
kompromissa säkerhet.
@@ -273,12 +273,12 @@ transaktionskärnor. Vi kan summera alla outputs, subtrahera det med summan av a
(summan av outputs) - (summan av inputs) = (summan av kärnöverskott)


Något förenklat (återigen utan hänsyn till transaktionsavgifter) kan vi säga att MimbleWimble-block kan betraktas precis som
MimbleWimble-transaktioner.
Något förenklat (återigen utan hänsyn till transaktionsavgifter) kan vi säga att Mimblewimble-block kan betraktas precis som
Mimblewimble-transaktioner.

##### Kärn-offset

Det finns ett subtilt problem med MimbleWimble-block och transaktioner som beskrivet ovan. Det är möjligt (och i vissa fall
Det finns ett subtilt problem med Mimblewimble-block och transaktioner som beskrivet ovan. Det är möjligt (och i vissa fall
trivialt) att rekonstruera de konstituerande transaktionerna i ett block. Detta är naturligtvis dåligt för integriteten.
Detta kallas för "delmängdsproblemet": givet en mängd inputs, outputs, och transaktionskärnor kommer någon delmängd av detta
kunna kombineras för att rekonstruera en giltig transaktion.
@@ -351,7 +351,7 @@ Ett block består av:
* Signaturen genererad av överskottsbeloppet.
* Mining-avgiften

Med denna struktur erbjuder ett MimbleWimble-block extremt bra integritetsgarantier:
Med denna struktur erbjuder ett Mimblewimble-block extremt bra integritetsgarantier:

* Mellanliggande (genomskurna) transaktioner är endast representerade av sina transaktionskärnor.
* Alla outputs ser likadana ut: väldigt stora tal som inte går att skilja åt på något meningsfullt sätt.
@@ -369,21 +369,21 @@ I1 och I2 alla tas bort från blockkedjan eftersom de nu är mellanliggande tran
Vi slutleder att kedjetillståndet kan (bortsett från block headers) vid varje tidspunkt sammanfattas med endast dessa tre ting:

1. Den totala mängden mynt skapade genom mining.
2. Den kompletta mängden av UTXOs.
3. Transaktionskärnorna för varje transaktion.
1. Den kompletta mängden av UTXOs.
1. Transaktionskärnorna för varje transaktion.

Det första kan härledas genom att endast observera blockhöjden.

Både mängden av UTXOs och transaktionskärnorna är extremt kompakta. Detta har två följder:

* En nod i en MimbleWimble-blockkedja får en väldigt liten kedja att behöva ta vara på.
* En nod i en Mimblewimble-blockkedja får en väldigt liten kedja att behöva ta vara på.
* När en ny nod ansluter sig till nätverket krävs det väldigt lite information för att den ska bygga kedjan.

Dessutom kan man inte manipulera mängden av UTXOs. Tar man bort ett element ändras summan av transaktionerna och är längre inte lika med noll.

### Slutsats

I detta dokument gick vi igenom de grundläggande principerna för en MimbleWimble-blockkedja. Genom att använda egenskaperna
I detta dokument gick vi igenom de grundläggande principerna för en Mimblewimble-blockkedja. Genom att använda egenskaperna
för addition i kryptografi med elliptiska kurvor kan vi skapa fullständigt förmörkade transaktioner som ändå kan valideras.
Genom att generalisera dessa egenskaper till block kan vi eliminera en stor mängd blockkedjeinformation vilket medför
väldigt bra skalbarhet.
@@ -1,32 +1,32 @@
MimbleWimble 和 Grin 简介
Mimblewimble 和 Grin 简介
=====================================

*阅读其它语言版本: [English](intro.md), [简体中文](intro_ZH-CN.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*
*阅读其它语言版本: [English](intro.md), [Español](intro_ES.md), [Nederlands](intro_NL.md), [Русский](intro_RU.md), [日本語](intro_JP.md), [Deutsch](intro_DE.md), [Portuguese](intro_PT-BR.md), [Korean](intro_KR.md).*

MimbleWimble是一个区块链格式和协议,依托于健壮的加密原语,提供非常好的可扩展性、隐私和可替代性。它解决了当前几乎所有实现的区块链(与现实需求之间)差距。MimbleWimble 的白皮书在[本项目的WiKi](https://github.com/mimblewimble/docs/wiki/A-Brief-History-of-MinbleWimble-White-Paper)中可以找到,WiKi是开放的。
Mimblewimble是一个区块链格式和协议,依托于健壮的加密原语,提供非常好的可扩展性、隐私和可替代性。它解决了当前几乎所有实现的区块链(与现实需求之间)差距。Mimblewimble 的白皮书在[本项目的WiKi](https://github.com/mimblewimble/docs/wiki/A-Brief-History-of-MinbleWimble-White-Paper)中可以找到,WiKi是开放的。

Grin是一个实现MimbleWimble区块链的开源软件项目,并填补了(MimbleWimble协议所缺失的)实现一个完整的区块链和加密货币必需的一些东西。
Grin是一个实现Mimblewimble区块链的开源软件项目,并填补了(Mimblewimble协议所缺失的)实现一个完整的区块链和加密货币必需的一些东西。

Grin 项目的主要目的和特性如下:

* 隐私保护的缺省特性。 这使它具备了完全可替代性,且保留了按需选择性披露信息的能力。
* 区块大小与交易量相适配,历史交易仅保留约100字节的交易核(_transaction kernel_), 相比其它区块链节省了大量空间。
* 强大且经过验证的密码学。 MimbleWimble只采用椭圆曲线密码,该密码技术已经过了数十年的试用和测试。
* 强大且经过验证的密码学。 Mimblewimble只采用椭圆曲线密码,该密码技术已经过了数十年的试用和测试。
* 简单的设计使得日后的代码审查和维护变得容易。
* 社区驱动。采用一种抗拒ASIC的挖矿算法(Cuckoo Cycle算法),借此来鼓励去中心化的挖矿。

# Tongue Tying for Everyone

**备注**:MimbleWimble 出自《哈利波特》中的一句咒语,详见:[Tongue-Tying Curse](http://harrypotter.wikia.com/wiki/Tongue-Tying_Curse),这个标题的涵义应该是希望所有读到这篇介绍的人都可以来为这个开放社区做点贡献,真心希望如此。
**备注**:Mimblewimble 出自《哈利波特》中的一句咒语,详见:[Tongue-Tying Curse](http://harrypotter.wikia.com/wiki/Tongue-Tying_Curse),这个标题的涵义应该是希望所有读到这篇介绍的人都可以来为这个开放社区做点贡献,真心希望如此。

本文针对的读者是已经了解过区块链并了解一些基本的密码学知识的人群。我们尝试解释MimbleWimble的技术构建,以及它如何应用于Grin。我们希望这篇介绍能够浅显易懂,我们的目的是鼓励您对Grin产生兴趣,并加入Grin的开放社区,以任何您可能的方式对其做出贡献。
本文针对的读者是已经了解过区块链并了解一些基本的密码学知识的人群。我们尝试解释Mimblewimble的技术构建,以及它如何应用于Grin。我们希望这篇介绍能够浅显易懂,我们的目的是鼓励您对Grin产生兴趣,并加入Grin的开放社区,以任何您可能的方式对其做出贡献。

为了实现这个目标,我们将介绍一个主要概念:Grin是一个MimbleWimble实现。我们将从椭圆曲线密码(ECC)的简短描述开始,这是Grin的重要基础。然后描述MimbleWimble区块链交易和区块的所有关键要素
为了实现这个目标,我们将介绍一个主要概念:Grin是一个Mimblewimble实现。我们将从椭圆曲线密码(ECC)的简短描述开始,这是Grin的重要基础。然后描述Mimblewimble区块链交易和区块的所有关键要素


## 椭圆曲线简介

我们首先简要介绍一下椭圆曲线密码学(后面简称为:ECC),只是简单说明一下理解MimbleWimble如何工作所必需了解的ECC属性,这里并不深入研究和讨论ECC。对于想要更多一点了解ECC的读者,可以参考这个介绍:
我们首先简要介绍一下椭圆曲线密码学(后面简称为:ECC),只是简单说明一下理解Mimblewimble如何工作所必需了解的ECC属性,这里并不深入研究和讨论ECC。对于想要更多一点了解ECC的读者,可以参考这个介绍:
[了解更多](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).

用于密码学目的的椭圆曲线只是一大组我们称之为 _C_ 的点。这些点可以被加、减或乘以整数(也称为标量)。 给定一个整数 _k_ 并使用标量乘法运算,我们可以计算`k * H`,这也是曲线 _C_ 上的一个点。
@@ -36,16 +36,16 @@ Grin 项目的主要目的和特性如下:

在ECC中,如果我们选择一个非常大的数字 _k_ 作为私钥,则`k * H`被作为相应的公钥。 即使人们知道公钥`k * H`的值,推导 _k_ 几乎不可能(或者换句话说,椭圆曲线点的乘法计算是微不足道的,然而曲线点的“除法”计算却极其困难。参见:[椭圆曲线密码学](https://zh.wikipedia.org/wiki/椭圆曲线密码学)。

先前的公式`(k + j)* H = k * H + j * H`中, _k_ 和 _j_ 都是私钥,演示了从两个私钥的加和获取公钥`(k + j)* H`,等价于每个私钥的对应公钥加和(`k * H + j * H`)。在比特币区块链中,[分层确定性钱包(HD Wallets/BIP32)](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)严重依赖于这个原则。 MimbleWimble和Grin也是如此
先前的公式`(k + j)* H = k * H + j * H`中, _k_ 和 _j_ 都是私钥,演示了从两个私钥的加和获取公钥`(k + j)* H`,等价于每个私钥的对应公钥加和(`k * H + j * H`)。在比特币区块链中,[分层确定性钱包(HD Wallets/BIP32)](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)严重依赖于这个原则。 Mimblewimble和Grin也是如此

## MimbleWimble 交易
## Mimblewimble 交易

交易结构的设计显示了MimbleWimble的一个关键原则:强大的隐私性和保密性。
交易结构的设计显示了Mimblewimble的一个关键原则:强大的隐私性和保密性。

MimbleWimble的交易确认依赖于两个基本属性:
Mimblewimble的交易确认依赖于两个基本属性:

* **0和验证。** 输出总和减去输入总是等于零,证明交易没有凭空创造新的资金,而且**不会显示实际金额**。
* **拥有私钥即拥有交易输出的所有权。** 像大多数其他加密货币一样,交易输出通过拥有ECC私钥来保证其所有权。 然而,在MimbleWimble中,证明一个所有者拥有这些私钥并不是通过直接签署交易来实现的。
* **拥有私钥即拥有交易输出的所有权。** 像大多数其他加密货币一样,交易输出通过拥有ECC私钥来保证其所有权。 然而,在Mimblewimble中,证明一个所有者拥有这些私钥并不是通过直接签署交易来实现的。

下面介绍账户余额、所有权、变更和证明,并借此说明上面的这两个基本属性是如何得以实现的。

@@ -88,13 +88,13 @@ MimbleWimble的交易确认依赖于两个基本属性:

ri1 + ri2 = ro3

这是MimbleWimble的第一个支柱:验证交易的算术运算可以在完全不知道任何实际交易值的情况下完成。
这是Mimblewimble的第一个支柱:验证交易的算术运算可以在完全不知道任何实际交易值的情况下完成。

补充最后一点说明,这个想法实际上派生自Greg Maxwell的[机密交易](https://elementsproject.org/features/confidential-transactions/investigation),机密交易本身是从Adam Back提出的用于比特币的同态值提议中发展而来。

### 所有权

在前面的章节中,我们介绍了一个私钥作为致盲因子来掩盖实际交易值。MimbleWimble的第二个见解就是这个私钥可以用来证明值的所有权
在前面的章节中,我们介绍了一个私钥作为致盲因子来掩盖实际交易值。Mimblewimble的第二个见解就是这个私钥可以用来证明值的所有权

Alice 给你发了3个币并且隐藏了这个数字,你选择了28作为你的致盲因子(请注意,在实践中,致盲因子是一个私钥,是一个非常大的数字)。 区块链上的某处显示以下交易输出,并只能由你来用(做交易输入):

@@ -133,7 +133,7 @@ _X_, 上述加法的输出值,是对所有人可见的。 但是值3只有你

### 一些更深入的细节

本节阐述创建交易,通过讨论交易的找零机制和范围证明的要求以便所有值都被证明为非负。 这些都不是了解MimbleWimble和Grin的必需内容,所以如果你想快速了解,随时可以直接跳过本节内容,直接到[Putting It All Together](#transaction-conclusion).
本节阐述创建交易,通过讨论交易的找零机制和范围证明的要求以便所有值都被证明为非负。 这些都不是了解Mimblewimble和Grin的必需内容,所以如果你想快速了解,随时可以直接跳过本节内容,直接到[Putting It All Together](#transaction-conclusion).

#### 找零

@@ -155,15 +155,15 @@ _X_, 上述加法的输出值,是对所有人可见的。 但是值3只有你

例如,可以创建一个输入为2并且输出为5和-3的交易,并且依照前面章节中的定义仍然可以获得平衡的事务。 这是不容易被检测到的,因为即使x是负数,ECDSA曲线上的对应点x.H看起来也是任何值。

为了解决这个问题,MimbleWimble利用了另一个加密概念(也来自机密交易),称为范围证明:一个数字落在给定范围内的证明,而不会泄露数字。
为了解决这个问题,Mimblewimble利用了另一个加密概念(也来自机密交易),称为范围证明:一个数字落在给定范围内的证明,而不会泄露数字。
我们不会详细说明范围证明,您只需要知道,对于任何`r.G + v.H`,我们都可以创建一个证明,证明 _v_ 大于零且不会溢出。

同样重要的是要注意,为了从上面的示例中创建有效的范围证明,必须知道在创建和签署excess value时使用的值113和28。 其原因以及范围证明的更详细描述在[range proof paper](https://eprint.iacr.org/2017/1066.pdf)中进一步详述。

<a name="transaction-conclusion"></a>
### 小结

MimbleWimble交易包括以下内容
Mimblewimble交易包括以下内容

* 一组输入,参考和花费一组以前的输出。
* 一组新的输出包括:
@@ -174,9 +174,9 @@ MimbleWimble交易包括以下内容:

## 区块状态和链状态

我们已经在上面解释了MimbleWimble交易如何在保持有效区块链所需的属性的同时提供强大的匿名性保证,即交易不会凭空捏造出货币,并且通过私钥建立所有权证明。
我们已经在上面解释了Mimblewimble交易如何在保持有效区块链所需的属性的同时提供强大的匿名性保证,即交易不会凭空捏造出货币,并且通过私钥建立所有权证明。

MimbleWimble区块格式通过引入一个附加概念来构建:核销(_cut-through_)。 有了这个补充,一个MimbleWimble链可获得
Mimblewimble区块格式通过引入一个附加概念来构建:核销(_cut-through_)。 有了这个补充,一个Mimblewimble链可获得

* 极大的可扩展性,因为绝大部分交易数据主体可以随时间消除,而不会影响安全性。
* 通过混合和删除交易数据进一步匿名。
@@ -206,11 +206,11 @@ MimbleWimble区块格式通过引入一个附加概念来构建:核销(_cut-

sum(outputs) - sum(inputs) = sum(kernel_excess)

简单来说,(依然忽略交易费部分)我们可以认为,对MimbleWimble区块的处理方法和对MimbleWimble交易的处理方法是严格一致的
简单来说,(依然忽略交易费部分)我们可以认为,对Mimblewimble区块的处理方法和对Mimblewimble交易的处理方法是严格一致的

#### 交易核偏移因子(Kernel Offsets)

上面描述的MimbleWimble区块和交易设计有一个小问题,有可能从一个区块中的数据来重建交易(即找出一笔或几笔完整的交易,分辨哪一笔交易输入对应哪一笔交易输出)。这个对于隐私而言当然是不好的事情。这个问题也被称为子集问题("subset" problem) - 给定一系列交易输入、交易输出和交易核,有可能能够从中分辨出一个子集来重新拼出对应的完整的交易(很像拼图游戏)。
上面描述的Mimblewimble区块和交易设计有一个小问题,有可能从一个区块中的数据来重建交易(即找出一笔或几笔完整的交易,分辨哪一笔交易输入对应哪一笔交易输出)。这个对于隐私而言当然是不好的事情。这个问题也被称为子集问题("subset" problem) - 给定一系列交易输入、交易输出和交易核,有可能能够从中分辨出一个子集来重新拼出对应的完整的交易(很像拼图游戏)。

例如,假如有下面的两笔交易:

@@ -277,7 +277,7 @@ MimbleWimble区块格式通过引入一个附加概念来构建:核销(_cut-
* 使用excess value生成的签名。
* 挖矿费用 (fee)。

当区块以这种方式构建时,MimbleWimble区块提供了非常好的隐私保证
当区块以这种方式构建时,Mimblewimble区块提供了非常好的隐私保证

* 更多的交易可能已经完成,但不会显式出现(在区块中)。
* 所有的输出看起来都是一样的:只是一些非常大的数字,不可能相互区分。 如果有人想排除某些输出,他们将不得不排除所有输出。
@@ -291,18 +291,18 @@ MimbleWimble区块格式通过引入一个附加概念来构建:核销(_cut-

总而言之,我们得出结论:任何时间点的链状态(不包括区块头)都可以通过这些信息来概括:

1. 链中采矿产生的硬币总量
2. 未使用的交易输出(即UTXO)的完整集合。
3. 每笔交易的交易内核。
1. 链中采矿产生的代币总量
1. 未使用的交易输出(即UTXO)的完整集合。
1. 每笔交易的交易内核。

第一条信息可以使用块高度(与起始块的距离)推导出来。未使用的输出和交易内核都非常紧凑。这有两个重要的后果:

* MimbleWimble区块链中给定的节点需要维护的状态非常小(对于比特币大小的区块链,几个G字节大小的数量级,可能优化到几百兆字节)。
* 当新节点加入构建MimbleWimble链的网络时,需要传输的信息量也非常小。
* Mimblewimble区块链中给定的节点需要维护的状态非常小(对于比特币大小的区块链,几个G字节大小的数量级,可能优化到几百兆字节)。
* 当新节点加入构建Mimblewimble链的网络时,需要传输的信息量也非常小。

另外,未使用的交易输出(即UTXO)组成的完整集是不可篡改的,即使只是想去添加或删除一些交易输出。这样做会导致交易内核中所有致盲因因子的总和与输出中致盲因素的总和不同。

## 结论

在本文中,我们介绍了基于MimbleWimble区块链的基本原则。 通过使用椭圆曲线密码的附加属性,我们能够构建完全不透明但仍可以正确验证的交易。
在本文中,我们介绍了基于Mimblewimble区块链的基本原则。 通过使用椭圆曲线密码的附加属性,我们能够构建完全不透明但仍可以正确验证的交易。
通过应用这些属性,我们可以消除大量区块链数据,从而实现新对等点的大规模部署和快速同步。
@@ -1,8 +1,8 @@
# Merkle Structures

*Read this in other languages:[Korean](merkle_KR.md)
*Read this in other languages: [Korean](merkle_KR.md), [简体中文](merkle_ZH-CN.md).*

MimbleWimble is designed for users to verify the state of the system given
Mimblewimble is designed for users to verify the state of the system given
only pruned data. To achieve this goal, all transaction data is committed
to the blockchain by means of Merkle trees which should support efficient
updates and serialization even when pruned.
@@ -12,7 +12,7 @@ proofs) have the ability to be summed in some way, so it makes sense to
treat Merkle sum trees as the default option, and address the sums here.

A design goal of Grin is that all structures be as easy to implement and
as simple as possible. MimbleWimble introduces a lot of new cryptography
as simple as possible. Mimblewimble introduces a lot of new cryptography
so it should be made as easy to understand as possible. Its validation rules
are simple to specify (no scripts) and Grin is written in a language with
very explicit semantics, so simplicity is also good to achieve well-understood
@@ -34,12 +34,12 @@ The root sum should be equal to the sum of all excesses since the genesis.
Design requirements:

1. Efficient additions and updating from unspent to spent.
2. Efficient proofs that a specific output was spent.
3. Efficient storage of diffs between UTXO roots.
4. Efficient tree storage even with missing data, even with millions of entries.
5. If a node commits to NULL, it has no unspent children and its data should
1. Efficient proofs that a specific output was spent.
1. Efficient storage of diffs between UTXO roots.
1. Efficient tree storage even with missing data, even with millions of entries.
1. If a node commits to NULL, it has no unspent children and its data should
eventually be able to be dropped forever.
6. Support for serialization and efficient merging of pruned trees from partial archival nodes.
1. Support for serialization and efficient merging of pruned trees from partial archival nodes.

### Output witnesses

@@ -66,7 +66,9 @@ The root sum should be equal to the sum of excesses for this block. See the
next section.

In general, validators will see either 100% of this Merkle tree or 0% of it,
so it is compatible with any design. Design requirements:
so it is compatible with any design.

Design requirements:

1. Efficient inclusion proofs, for proof-of-publication.

@@ -1,11 +1,11 @@
# 머클의 구조

MimbleWimble은 Pruning 데이터만 있는 시스템의 상태를 사용자가 증명하도록 설계되었습니다. 이러한 목표를 달성하기 위해 모든 트랜잭션 데이터는 pruning 된 경우라도 효율적인 업데이트와 serialization을 지원하는 Merkle 트리를 사용하여 블록 체인에 커밋됩니다.
Mimblewimble은 Pruning 데이터만 있는 시스템의 상태를 사용자가 증명하도록 설계되었습니다. 이러한 목표를 달성하기 위해 모든 트랜잭션 데이터는 pruning 된 경우라도 효율적인 업데이트와 serialization을 지원하는 Merkle 트리를 사용하여 블록 체인에 커밋됩니다.

또한 거의 모든 거래 데이터 (입력, 출력, Excess 및 Excess proof)는 어떤 방식으로 합산 될 수 있으므로 Merkle sum 트리를 기본 옵션으로 처리하고 여기에서 합계를 처리하는 것이 좋습니다.

Grin의 디자인 목표는 모든 구조를 구현하기 쉽고 가능한 한 간단하게 만드는 것입니다.
MimbleWimble은 많은 새로운 암호화 방식을 내 놓았고 이러한 방식을 가능한 한 쉽게 이해할 수 있도록 만들어야합니다.
Mimblewimble은 많은 새로운 암호화 방식을 내 놓았고 이러한 방식을 가능한 한 쉽게 이해할 수 있도록 만들어야합니다.
새로운 암호화 방식의 입증 규칙은 스크립트가 없이도 구체화 하기 쉽고 Grin은 매우 명확한 의미론을 가진 프로그래밍 언어로 작성되기 때문에 단순함은 잘 알려진 컨센서스 룰을 달성하는 것에도 좋습니다.

## Merkle Trees
@@ -0,0 +1,128 @@
# Merkle Structures

*阅读其它语言版本: [English](merkle.md), [Korean](merkle_KR.md).*

Mimblewimble 是设计给用户在仅给出修剪后的数据的情况下也能验证系统状态。
为了实现此目标,所有交易数据都通过 Merkle trees 提交给了区块链,该 Merkle trees 即使在被修剪后也需要在更新和序列化操作上有良好的效率。

同样,几乎所有交易数据(输入(inputs),输出(outputs),超额(excesses) 和 超额证明(excess proofs))都可以某种方式进行求和,因此将 Merkle sum trees 作为默认选项并在这里处理总和是完全合理的。

Grin 的设计目标是使所有结构都易于实现尽可能的简单。Mimblewimble 引入了许多新的加密技术,应该会使其更易于理解。
它的验证规则很容易指定(没有脚本),并且 Grin 是用一种具有非常明确的语义的语言编写的,因此简单性也有助于达成易于理解的共识规则。

## Merkle Trees

每个块有四棵 Merkle trees:

### 总输出集(Total Output Set)

每个对象都是以下两项之一:用一个承诺来表示未花费的输出(unspent output)或用一个 NULL 标记表示一个已经被花费(spent)的。
它是所有未花费输出的总和树(已花费输出对总和没有任何贡献)。输出集应反映出在当前块*生效后*链的状态。

自创世起,根总和应等于所有超额的总和。

设计要求:

1. 高效地添加和更新从未花费的到已花费的。
1. 高效地证明一个特定的输出被花费了。
1. 高效的 UTXO roots 之间的差异存储。
1. 高效的树存储即使丢失数据、有数百万个条目。
1. 如果节点提交为 NULL,表示它没有未花费的子节点,并且最终其数据应该是可以被清除的。
1. 支持序列化并且能够高效的合并来自部分存档的节点中的被修剪过的树。

### 输出见证(Output witnesses)

该树反映了总输出集,但具有范围证明来代替承诺。它从不更新,仅做附加,并且不对任何内容求和。
当一个输出被花费时,从树上修剪它的范围证明就足够了而不用删除它。

设计要求:

1. 支持序列化并且能够高效的合并来自部分存档的节点中的被修剪过的树。

### 输入和输出(Inputs and Outputs)

每个对象都是这两件事之一:输入(对旧交易(transaction)输出的明确引用)或输出(一对(承诺,范围证明))。
它是一颗输出承诺的总和树,也就是输入承诺的对立面。

输入引用是旧承诺的哈希值。这是一个共识规则,所有未花费的输出必须是唯一的。

根总和应等于此块的超额总和。请参阅下一节。

通常,验证者要么会看到此 Merkle trees 的 100% 要么 0%,因此它与任何设计都兼容。

设计要求:

1. 有效的包含证明,用于发布证明。(Efficient inclusion proofs, for proof-of-publication.)

### 超额(Excesses)

每个对象的形式为(超额,签名)。它是一颗关于超额的总和树。

通常,验证者总是会看到该树的 100%,因此甚至完全没有必要使用 Merkle 结构。
但是,为了将来支持部分存档的节点,我们希望支持高效地修剪。

设计要求:

1. 支持序列化并且能够高效的合并来自部分存档的节点中的被修剪过的树。

## 提议的 Merkle 结构

**针对所有树提出了以下设计:对于一个 sum-MMR,其中每个节点应该将其子节点的数量以及数据求和然后相加。**
**结果是,每个节点都会提交其所有子节点的计数。**

[MMRs,或 Merkle Mountain Ranges](https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md)

输出集的六个设计标准是:

### 高效地插入/更新

立即(包含证明)。对于任何平衡 Merkle tree 的设计都是如此。

### 高效的花费证明

Grin 本身不需要花费证明,但支持是一件好事以便将来应用于 SPV 客户端。

子计数(children-counts)表示树中每个对象的索引,该索引不会变动,因为插入仅发生在树的最右侧。

这同时也允许永久的花费证明,即使稍后将相同的输出添加到树中,对于相同的输出也可以防止错误的证明。
对于非插入顺序的树,这些属性很难实现。

### 高效的差异存储

对于这个来说,存储完整的块应该足够了。
显然,更新和撤消操作一样容易,并且由于总是按顺序处理块,因此在重组期间回滚它们就像从树的右侧删除一组连续的输出一样简单。
(这比通过重复删除来支持删除操作的树中要快得多。)

### 高效的树存储即使丢失数据

要在随机输出被花费时候更新根哈希,我们不需要存储或计算整个树。取而代之的是,我们只能存储深度为 20 的哈希,其中最多不超过一百万。
然后,每个更新仅需要重新计算高于该深度的哈希(根据比特币的历史记录可知它现在的输出数量少于 2^29 个,这意味着只需要为每个更新计算大小为 2^9 = 512 的树),在完成所有更新后,根哈希可以被重新计算出来。

这个深度是可配置的,并且可以随着输出集的增长或可用磁盘空间而改变。

这对任何 Merkle tree 都是可行的,但可能会因 PATRICIA tree 或其他前缀树而变得复杂,具体取决于如何计算深度。

### 丢弃已花费的代币(Dropping spent coins)

由于代币永远不会从已花费变为未花费,因此对于已花费的代币上的数据不再需要进行任何更新或查找。

### 高效地序列化已被修剪的树

由于每个节点都有其子节点数,因此验证人无需所有哈希就可以确定树的结构,并且可以确定哪些节点是兄弟节点,依此类推。

在输出集中,每个节点还提交其未花费子项的总和,因此验证人通过检查已被修剪节点上的总和是否为零来知道它是否缺少未花费代币的数据。

## 算法

(To appear alongside an implementation.)

## 存储

求和树数据结构允许高效地存储输出集和输出见证,同时允许立即检索根哈希或根和(适用时)。
但是,该树必须包含系统中的每个输出承诺和见证哈希。
这些数据太大,无法永久存储在内存中,即使我们考虑修剪,因为开销太大也无法在每次重新启动时从头开始进行重建(目前,比特币有超过 5000 万个 UTXO,它们需要至少 3.2 GB,假设每个 UTXO 都有几个哈希)。
因此,我们需要一种高效的方法来将这些数据结构存储在磁盘上。

哈希树的另一个限制是,给定一个键(即输出承诺),不可能在与该键关联的树中找到叶子。我们不能以任何有意义的方式从树根上走下来。
因此,需要在整个键空间上附加索引。由于 MMR 是 append-only 的二叉树,因此我们可以通过其插入位置在树中找到该键。
因此,还需要插入到树中的键的完整索引(即输出承诺)。
@@ -1,6 +1,6 @@
# Merkle Mountain Ranges

*Read this in other languages:[Korean](mmr_KR.md)
*Read this in other languages: [Korean](mmr_KR.md), [简体中文](mmr_ZH-CN.md).*

## Structure

@@ -103,7 +103,7 @@ until we have a node that exists in our MMR. Once we find that next peak,
keep repeating the process until we're at the last node.

All these operations are very simple. Jumping to the right sibling of a node at
height `h` is adding `2^(h+1) - 1` to its position. Taking its left sibling is
height `h` is adding `2^(h+1) - 1` to its position. Taking its left child is
subtracting `2^h`.

Finally, once all the positions of the peaks are known, "bagging" the peaks
@@ -128,8 +128,8 @@ Pruning a MMR relies on a simple iterative process. `X` is first initialized as
the leaf we wish to prune.

1. Prune `X`.
2. If `X` has a sibling, stop here.
3. If 'X' has no sibling, assign the parent of `X` as `X`.
1. If `X` has a sibling, stop here.
1. If 'X' has no sibling, assign the parent of `X` as `X`.

To visualize the result, starting from our first MMR example and removing leaves
[0, 3, 4, 8, 16] leads to the following pruned MMR:
@@ -1,5 +1,7 @@
# Merkle Mountain Ranges

*다른 언어로 되어있는 문서를 읽으려면: [English](mmr.md), [简体中文](mmr_ZH-CN.md).*

## MMR의 구조

Merkle Mountain Ranges [1]은 Merkle trees [2]의 대안입니다. 후자는 완벽하게 균형 잡힌 이진 트리를 사용하지만 전자는 완벽하게 균형잡힌 binary tree list 거나 오른쪽 상단에서 잘린 single binary tree로 볼 수 있습니다. Merkle Mountain Range (MMR)는 엄격하게 append 에서만 사용됩니다. 원소는 왼쪽에서 오른쪽으로 추가되고, 두 하위 원소가 있는 즉시 부모를 추가하여 그에 따라 범위를 채웁니다.
@@ -91,8 +93,8 @@ Grin에서는 해시되고 MMR에 저장되는 많은 데이터가 결국 제거
MMR의 pruning은 간단한 반복 프로세스에 의존합니다. `X`는 우선 첫번째 제거할 리프로 초기화됩니다.

1. `X`를 Pruning 한다.
2. 만약 `X`가 형제 노드가 있다면 여기서 pruning을 중단한다.
3. 만약 `X`가 형제 노드가 없다면 `X`의 부모 노드는 `X`라고 배정된다.
1. 만약 `X`가 형제 노드가 있다면 여기서 pruning을 중단한다.
1. 만약 `X`가 형제 노드가 없다면 `X`의 부모 노드는 `X`라고 배정된다.

결과를 시각화하기 위해 첫 번째 MMR 예시에서 시작하여 리프[0, 3, 4, 8, 16]을 제거하면 다음과 같은 pruning MMR이 발생합니다.

@@ -0,0 +1,133 @@
# Merkle Mountain Ranges

*阅读其它语言版本: [English](mmr.md), [Korean](mmr_KR.md).*

## 结构

Merkle Mountain Ranges [1] 是 Merkle trees [2] 的替代品。
后者依赖于完美二叉树,前者既可以看作是由许多完美二叉树组成的一个列表或着看作是从一个右上角被截断的二叉树。
Merkle Mountain Ranges(MMR)是 append-only 的:
元素自左向右添加,如果有 2 个孩子节点,则立即添加一个父节点,并将相应的 range 填满/补完整。

下面展示了一个 range,有 11 个叶子,总大小为 19(个节点),其中每个节点都按照插入顺序编号被标记。

```
高度

3 14
/ \
/ \
/ \
/ \
2 6 13
/ \ / \
1 2 5 9 12 17
/ \ / \ / \ / \ / \
0 0 1 3 4 7 8 10 11 15 16 18
```

下面是这个 range 以平面列表的方式展示出来的样子,存储了每个节点在其插入位置对应的高度:

```
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0
```

我们可以轻松地从其大小(19)来对这个结构做一个全面的描述。
同时还带来了另一个好处,可以很容易地使用快速二进制操作在 MMR 中导航。
给定一个节点的位置 `n`,我们可以计算它的高度、父节点、兄弟节点等等。

## Hashing 与 Bagging

就像 Merkle trees 一样,MMR 中的父节点的值由其两个孩子节点 hash 得到。
Grin 总是使用 Blake2b 哈希函数,在进行 hashing 之前会预先设置节点在 MMR 中的位置以此来避免冲突。
所以对于索引 `n` 处的叶子 `l` 存储的数据 `D`(在输出的情况下,数据是其 Pedersen 承诺,例如),我们有:

```
Node(l) = Blake2b(n | D)
```

对于任何处于索引 `m` 的父节点 `p`:

```
Node(p) = Blake2b(m | Node(left_child(p)) | Node(right_child(p)))
```

与 Merkle tree 相反,MMR 通常在构造时没有单独的根所以我们需要一种方法来计算一个(不然就违背了使用哈希树的本意)。
由于某些原因,此过程称为 “bagging the peaks” 详见 [1] 中描述。

首先,我们确定 MMR 所有的峰;在这里我们将定义这样一种方法。
首先编写另一个小示例 MMR,但将其索引用二进制表示出来(而不是十进制),从 1 开始:

```
高度

2 111
/ \
1 11 110 1010
/ \ / \ / \
0 1 10 100 101 1000 1001 1011
```

这个 MMR 一共有 11 个节点,它所有的峰分别位于索引 111(7),1010(10)和 1011(11)处。
我们首先会注意到最左边的第一个峰以二进制表示时,总是最高的并且其所有位 “都是 1”。
因此该最高峰的索引总是遵循 `2^n - 1` 的规律且处于这个位置的峰一定是 MMR 内部最大的峰(其索引值小于 MMR 的大小即节点个数)。
我们针对大小为 11 的 MMR 进行迭代处理:

```
2^0 - 1 = 0, and 0 < 11
2^1 - 1 = 1, and 1 < 11
2^2 - 1 = 3, and 3 < 11
2^3 - 1 = 7, and 7 < 11
2^4 - 1 = 15, and 15 is not < 11
```

(这个过程可以通过非迭代地计算表示为 `2^(大小的二进制对数 + 1) - 1`)

因此,第一个峰为 7。要找到下一个峰,我们需要 “跳” 到它的右边的兄弟节点。
如果该节点不在 MMR 中(确实不在),走到它的左孩子节点。
如果该孩子节点也不在 MMR 中,继续跟进到其左孩子节点直到发现这个节点存在于我们的 MMR 中。
一旦找到下一个峰,继续重复该过程,直到到达最后一个节点。

所有这些操作都非常的简单。高度 `h` 下跳至节点的右兄弟节点只需要将 `2^(h+1) - 1` 加到其索引上。
减去 `2^h` 即可跳到它的左孩子节点。

最后,一旦知道了峰的所有位置,就对这些峰进行 “bagging” 操作,使用 MMR 的总大小作为前缀从右侧开始迭代地对其进行哈希处理。
对于一个大小为 N 具有 3 个峰 p1,p2 和 p3 的 MMR,我们最终得到的顶部哈希为:

```
P = Blake2b(N | Blake2b(N | Node(p3) | Node(p2)) | Node(p1))
```

## 修剪

在 Grin 中,有很多经过哈希并存储在 MMR 中的数据最后是可以被删除掉的。
发生这种情况时,相应的叶子节点的哈希也就变得不必要了,它们也是可以被删除掉的。
当删除掉足够多的叶子后,它们的父节点也会变得不必要,因此,我们可以通过删除它的叶子来修剪掉 MMR 的一大部分。

修剪 MMR 依赖于一个简单的迭代过程。首先将 `X` 初始化为我们希望修剪的那个叶子。

1. 修剪 `X`。
2. 如果 `X` 有兄弟节点,则在此处停止。
3. 如果 `X` 没有兄弟节点,则将 `X` 的父节点指定为 `X`。

为了可视化结果,从我们的第一个 MMR 示例开始,删除叶子 [0、3、4、8、16] 后得到以下修剪过的 MMR:

```
高度

3 14
/ \
/ \
/ \
/ \
2 6 13
/ / \
1 2 9 12 17
\ / / \ /
0 1 7 10 11 15 18
```

[1] Peter Todd, [merkle-mountain-range](https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md)

[2] [Wikipedia, Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree)
@@ -1,22 +1,22 @@
# Pruning Blockchain Data

*Read this in other languages: [Korean](pruning_KR.md).*
*Read this in other languages: [Korean](pruning_KR.md), [简体中文](pruning_ZH-CN.md).*

One of the principal attractions of MimbleWimble is its theoretical space
One of the principal attractions of Mimblewimble is its theoretical space
efficiency. Indeed, a trusted or pre-validated full blockchain state only
requires unspent transaction outputs, which could be tiny.

The grin blockchain includes the following types of data (we assume prior
understanding of the MimbleWimble protocol):
understanding of the Mimblewimble protocol):

1. Transaction outputs, which include for each output:
1. A Pedersen commitment (33 bytes).
2. A range proof (over 5KB at this time).
2. Transaction inputs, which are just output references (32 bytes).
3. Transaction "proofs", which include for each transaction:
1. A range proof (over 5KB at this time).
1. Transaction inputs, which are just output references (32 bytes).
1. Transaction "proofs", which include for each transaction:
1. The excess commitment sum for the transaction (33 bytes).
2. A signature generated with the excess (71 bytes average).
4. A block header includes Merkle trees and proof of work (about 250 bytes).
1. A signature generated with the excess (71 bytes average).
1. A block header includes Merkle trees and proof of work (about 250 bytes).

Assuming a blockchain of a million blocks, 10 million transactions (2 inputs, 2.5
outputs average) and 100,000 unspent outputs, we get the following approximate
@@ -47,7 +47,7 @@ There may be several contexts in which data can be pruned:
## Validation of Fully Pruned State

Pruning needs to remove as much data as possible while keeping all the
guarantees of a full MimbleWimble-style validation. This is necessary to keep
guarantees of a full Mimblewimble-style validation. This is necessary to keep
a pruning node state's sane, but also on first fast sync, where only the
minimum amount of data is sent to a new node.

@@ -1,8 +1,8 @@
# 블록체인 데이터 프루닝(가지치기)에 대해

MimbleWimble의 주된 매력 중 하나는 이론적인 공간효율성 입니다. 실제로 신뢰 할수 있거나 또는 사전에 입증된 전체 블록체인 스테이트는 아주 작을수도 있는 UTXO(unspent transaction outputs)만 나타냅니다.
Mimblewimble의 주된 매력 중 하나는 이론적인 공간효율성 입니다. 실제로 신뢰 할수 있거나 또는 사전에 입증된 전체 블록체인 스테이트는 아주 작을수도 있는 UTXO(unspent transaction outputs)만 나타냅니다.

Grin의 블록체인에는 다음 유형의 데이터가 포함됩니다 (MimbleWimble 프로토콜에 대한 사전 지식이 있다고 가정합니다).
Grin의 블록체인에는 다음 유형의 데이터가 포함됩니다 (Mimblewimble 프로토콜에 대한 사전 지식이 있다고 가정합니다).

1. 아래를 포함하는 트랜잭션 출력값
1. Pedersen commitment (33 bytes).
@@ -36,7 +36,7 @@ Grin의 블록체인에는 다음 유형의 데이터가 포함됩니다 (Mimble

## 완전히 정리된 스테이트(Fully Pruned State)의 입증에 대해서

(데이터)Pruning은 가능한 한 많은 양의 데이터를 제거하면서 MimbleWimble 스타일의 검증을 보장하는 것이 필요합니다.
(데이터)Pruning은 가능한 한 많은 양의 데이터를 제거하면서 Mimblewimble 스타일의 검증을 보장하는 것이 필요합니다.
이는 pruning 노드 상태를 정상적으로 유지하는 데 필요할 뿐만 아니라 최소한의 양의 데이터만 새 노드로 전송할 첫번째 고속 동기화에서도 필요합니다.

체인 스테이트의 완전한 입증을 위해 아래와 같은 사항들이 필요합니다.
@@ -0,0 +1,61 @@
# 修剪区块链数据

*阅读其它语言版本: [English](pruning.md), [Korean](pruning_KR.md).*

Mimblewimble 的主要吸引力之一是其理论空间效率。确实,一个受信任或预先验证的完整区块链状态仅需要未花费的交易输出,它可以非常小。

grin 的区块链包括以下类型的数据(我们假设对 Mimblewimble 协议有事先了解):

1. 交易输出,其中每个输出包括:
1. 一个 Pedersen 承诺(33 个字节)。
1. 范围证明(目前超过 5KB)。
1. 交易输入,仅作为输出的引用(32 字节)。
1. 交易“证明”,每笔交易包括:
1. 交易的超额承诺总和(33 个字节)。
1. 超额签名生成(平均 71 字节)。
1. 一个块头包括 Merkle trees 和工作量证明(约 250 个字节)。

假设有一个有一百万个区块区块链,一千万笔交易(平均 2 个输入,2.5 个输出)和 100,000 个未花费的输出,我们得到以下近似大小以的一条完整的链(无修剪(no pruning),无核销(no cut-through)):

* 128GB 的交易数据(输入和输出)。
* 1 GB 的交易证明数据。
* 250MB 的块头。
* 总链大小约为 130GB。
* 核销后的总链大小为 1.8GB(但包含块头)。
* UTXO 大小为 520MB。
* 总链大小,不包含 4GB 的范围证明。
* UTXO 大小,不包含 3.3MB 的范围证明。

我们注意到,在所有数据中,一旦对链进行了充分验证,对于节点正常运行,仅严格要求一组 UTXO 承诺即可。

在某些情况下可以修剪数据:

* 完全验证的节点可能会删除一些已经验证过的数据以释放空间。
* 部分验证的节点(类似于 SPV)可能并不会对接收或保留所有数据感兴趣。
* 当一个新节点加入网络时,即使最终是要成为一个完全验证的节点,它也可能暂时充当部分验证的节点,使其可以更快地使用。

## 验证完全修剪后的状态

修剪需要删除尽可能多的数据,但同时保留完整的 Mimblewimble-style 验证的所有保证。
为了保持正在修剪的节点状态的正常运行,这是必需的,而且在第一次快速同步时(仅将最小量的数据发送到新节点)。

完整验证链状态要求:

* 所有内核签名均根据其公共密钥进行验证。
* 所有 UTXO 承诺的总和,减去供给即为有效的公共密钥(可用于对空字符串进行签名)。
* 所有内核发布密钥的总和等于所有 UTXO 承诺的总和减去供应。
* UTXO PMMR 的根哈希,范围证明的 PMMR 和内核的 MMR 将块头与有效的工作量证明链相匹配。
* 所有范围证明均有效。

另外,尽管不必验证整个链状态,但为了能够接受和验证新块,还需要其他数据:

* 输出功能,使所有 UTXO 都需要完整的输出数据。

至少,需要以下数据:

* 块头链。
* 所有内核,按包含在链中的顺序。这也允许重建内核 MMR。
* 所有未花费的输出。
* UTXO MMR 和 范围证明 MMR(以了解修剪后的数据的哈希值)。

请注意,可以通过仅验证一个由验证节点随机选择的范围证明的子集来进行进一步修剪。
@@ -9,8 +9,8 @@ In Grin, we're using [Semantic Versioning 2.0.0](https://semver.org). For a shor
A version number include MAJOR.MINOR.PATCH, and increment the:

1. MAJOR version when you make incompatible API changes,
2. MINOR version when you add functionality in a backwards-compatible manner, and
3. PATCH version when you make backwards-compatible bug fixes.
1. MINOR version when you add functionality in a backwards-compatible manner, and
1. PATCH version when you make backwards-compatible bug fixes.

And **additional labels for pre-release** and **build metadata** are available as extensions to the MAJOR.MINOR.PATCH format.

@@ -1,6 +1,6 @@
# State and Storage

*Read this in other languages: [Korean](state_KR.md), [日本語](state_JP.md).*
*Read this in other languages: [Korean](state_KR.md), [日本語](state_JP.md), [简体中文](state_ZH-CN.md).*

## The Grin State

@@ -1,6 +1,6 @@
# 状態とストレージ

*別の言語で読む: [Korean](state_KR.md), [日本語](state_JP.md).*
*別の言語で読む: [English](state.md), [Korean](state_KR.md), [简体中文](state_ZH-CN.md).*

## Grinの状態

@@ -1,5 +1,7 @@
# 상태와 스토리지

*다른 언어로 되어있는 문서를 읽으려면: [English](state.md), [日本語](state_JP.md), [简体中文](state_ZH-CN.md).*

## Grin의 상태

### 구조
@@ -19,9 +21,9 @@ Grin chain의 모든 상태는 다음 데이터와 같이 이루어져 있습니
완전한 Grin의 상태를 사용해서 우리는 다음과 같은 것들을 인증 할 수 있습니다.

1. Kernel 의 signature 가 Kernel의 실행에 대해 유효하다면 (공개키), 이것은 Kernel이 유효하다는것을 증명합니다.
2. 모든 커밋 실행의 합이 모든 UTXO 실행의 합에서 총 공급량을 뺀 값과 같다면 이것은 Kernel과 출력값의 실행들이 유효하고 코인이 새로이 만들어지지 않았다는 것을 증명합니다.
3. 모든 UTXO, range proof 와 Kernel 해쉬들은 각각의 MMR이 있고 그 MMR 들은 유효한 root 를 해쉬합니다.
4. 특정 시점에 가장 많이 일했다고 알려진 Block header 에는 3개의 MMR에 대한 root 가 포함됩니다. 이것은 전체 상태가 가장 많이 일한 chain (가장 긴 체인)에서 MMR과 증명들이 만들어졌다는 것을 입증합니다.
1. 모든 커밋 실행의 합이 모든 UTXO 실행의 합에서 총 공급량을 뺀 값과 같다면 이것은 Kernel과 출력값의 실행들이 유효하고 코인이 새로이 만들어지지 않았다는 것을 증명합니다.
1. 모든 UTXO, range proof 와 Kernel 해쉬들은 각각의 MMR이 있고 그 MMR 들은 유효한 root 를 해쉬합니다.
1. 특정 시점에 가장 많이 일했다고 알려진 Block header 에는 3개의 MMR에 대한 root 가 포함됩니다. 이것은 전체 상태가 가장 많이 일한 chain (가장 긴 체인)에서 MMR과 증명들이 만들어졌다는 것을 입증합니다.

### MMR 과 Pruning

@@ -0,0 +1,45 @@
# 状态和存储

*阅读其它语言版本: [English](state.md), [Korean](state_KR.md), [日本語](state_JP.md).*

## Grin 的状态

### 结构

一条 Grin 链的完整状态包含以下所有数据:

1. 完整的未花费输出(UTXO)集。
1. 每个输出的范围证明。
1. 所有交易内核。
1. 针对上述每一项的 MMR(除了输出 MMR 以外包括其它所有输出的哈希,不仅仅是未花费的哈希)。

此外,链中的所有头都必须使用有效的工作证明来锚定上述状态(该状态对应于工作量最大的链)。
我们注意到,一旦验证了每个范围证明并计算了所有内核承诺的总和,就不再严格要求范围证明和内核对节点起作用。

### 验证方式

对于一个完整状态的 Grin,我们可以验证以下内容:

1. 内核签名针对其承诺(公钥)有效。 这证明内核是有效的。
1. 所有内核承诺的总和等于所有 UTXO 承诺的总和减去总供应量。这证明内核和输出承诺均有效,并且没有创建任何预期之外的代币。
1. 所有 UTXO,范围证明和内核哈希都存在于它们各自的 MMR 中,并且那些 MMR 哈希到有效根。
1. 在给定的时间点上一个已知的工作量最多的块头包括 3 个 MMR 的根。这验证了 MMR,并证明整个状态是由工作量最多的链产生的。

### MMR 与修剪

用于为每个 MMR 中的叶节点生成的哈希数据(除其位置以外)还包括以下内容:

* 输出 MMR 哈希的特征字段和自创世以来所有输出的承诺。
* 范围证明 MMR,对整个范围证明数据进行哈希处理。
* 内核 MMR 哈希了内核的所有字段:功能(feature),费用(fee),锁高度(lock height),超额承诺(excess commitment)和超额签名(excess signature)。

请注意,所有输出,范围证明和内核均以它们在每个块中出现的顺序添加到其各自的 MMR 中(还有一点需要注意,需要对块数据进行排序)。

随着输出被花费掉,其承诺和范围证明数据都可以被删除掉。此外,相应的输出和范围验证 MMR 可以被修剪。

## 状态存储

Grin 中的输出,范围证明和内核的数据存储很简单:一个 append-only 的文件,通过内存映射来访问数据。
随着输出被花费,删除日志将维护可以删除的职位。这些位置与 MMR 节点位置完全匹配,因为它们均以相同顺序插入。
当删除日志变大时,可以偶尔通过重写相应文件来压缩这些文件,而无需删除它们(同样也是 append-only),并且可以清空删除日志。
对于 MMR,我们需要增加一些复杂性。
@@ -6,14 +6,14 @@

1. [Messages](#메세지_들)
1. [getjobtemplate](#getjobtemplate)
2. [job](#job)
3. [keepalive](#keepalive)
4. [login](#login)
5. [status](#status)
6. [submit](#submit)
2. [에러 메시지들](#error-messages)
3. [채굴자의 행동양식](#miner-behavior)
4. [참고 구현체](#reference-implementation)
1. [job](#job)
1. [keepalive](#keepalive)
1. [login](#login)
1. [status](#status)
1. [submit](#submit)
1. [에러 메시지들](#error-messages)
1. [채굴자의 행동양식](#miner-behavior)
1. [참고 구현체](#reference-implementation)

## 메세지 들

@@ -38,7 +38,7 @@ did not change the secret number during the game.

### Pedersen Commitment

Other, more advanced commitment schemes can have additional properties. For example MimbleWimble
Other, more advanced commitment schemes can have additional properties. For example Mimblewimble
and Confidential Transactions (CT) make heavy use of
_[Pedersen Commitments](https://link.springer.com/content/pdf/10.1007/3-540-46766-1_9.pdf)_,
which are _homomorphic_ commitments. Homomorphic in this context means that (speaking in the
@@ -51,7 +51,7 @@ from _box1_ and _box2_.
While this "box" metaphor no longer seems to be reasonable in the real-world this
is perfectly possible using the properties of operations on elliptic curves.

Look into [Introduction to MimbleWimble](intro.md) for further details on Pedersen Commitments
Look into [Introduction to Mimblewimble](intro.md) for further details on Pedersen Commitments
and how they are used in Grin.


@@ -6,7 +6,7 @@ This should get progressively filled up, until we're ready to advertize it
more widely.

* What is Grin?
* [Introduction to MimbleWimble](intro.md)
* [Introduction to Mimblewimble](intro.md)
* Cryptographic Primitives
* Pedersen Commitments
* Aggregate (Schnorr) Signatures
@@ -1,8 +1,8 @@
[package]
name = "grin_keychain"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -27,4 +27,4 @@ ripemd160 = "0.7"
sha2 = "0.7"
pbkdf2 = "0.2"

grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "3.0.0" }
@@ -122,7 +122,7 @@ impl BIP32Hasher for BIP32GrinHasher {
b"IamVoldemort".to_owned()
}
fn init_sha512(&mut self, seed: &[u8]) {
self.hmac_sha512 = HmacSha512::new_varkey(seed).expect("HMAC can take key of any size");;
self.hmac_sha512 = HmacSha512::new_varkey(seed).expect("HMAC can take key of any size");
}
fn append_sha512(&mut self, value: &[u8]) {
self.hmac_sha512.input(value);
@@ -385,7 +385,7 @@ impl ExtendedPrivKey {
Err(e) => return Err(Error::MnemonicError(e)),
};
let mut hasher = BIP32GrinHasher::new(is_floo);
let key = r#try!(ExtendedPrivKey::new_master(secp, &mut hasher, &seed));
let key = ExtendedPrivKey::new_master(secp, &mut hasher, &seed)?;
Ok(key)
}

@@ -716,7 +716,7 @@ mod tests {
b"Bitcoin seed".to_owned()
}
fn init_sha512(&mut self, seed: &[u8]) {
self.hmac_sha512 = HmacSha512::new_varkey(seed).expect("HMAC can take key of any size");;
self.hmac_sha512 = HmacSha512::new_varkey(seed).expect("HMAC can take key of any size");
}
fn append_sha512(&mut self, value: &[u8]) {
self.hmac_sha512.input(value);
@@ -900,5 +900,4 @@ mod tests {
serde_round_trip!(ChildNumber::from_hardened_idx(1));
serde_round_trip!(ChildNumber::from_hardened_idx((1 << 31) - 1));
}

}
@@ -25,8 +25,6 @@ extern crate serde_derive;
#[macro_use]
extern crate lazy_static;

extern crate sha2;

mod base58;
pub mod extkey_bip32;
pub mod mnemonic;
@@ -72,7 +72,10 @@ pub fn to_entropy(mnemonic: &str) -> Result<Vec<u8>, Error> {
}

// u11 vector of indexes for each word
let mut indexes: Vec<u16> = r#try!(words.iter().map(|x| search(x)).collect());
let mut indexes: Vec<u16> = words
.iter()
.map(|x| search(x))
.collect::<Result<Vec<_>, _>>()?;
let checksum_bits = words.len() / 3;
let mask = ((1 << checksum_bits) - 1) as u8;
let last = indexes.pop().unwrap();
@@ -155,7 +158,7 @@ where
Option<&'a str>: From<T>,
{
// make sure the mnemonic is valid
r#try!(to_entropy(mnemonic));
to_entropy(mnemonic)?;

let salt = ("mnemonic".to_owned() + Option::from(passphrase).unwrap_or("")).into_bytes();
let data = mnemonic.as_bytes();
@@ -220,8 +220,8 @@ impl AsRef<[u8]> for Identifier {

impl ::std::fmt::Debug for Identifier {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
r#try!(write!(f, "{}(", stringify!(Identifier)));
r#try!(write!(f, "{}", self.to_hex()));
write!(f, "{}(", stringify!(Identifier))?;
write!(f, "{}", self.to_hex())?;
write!(f, ")")
}
}
@@ -1,8 +1,8 @@
[package]
name = "grin_p2p"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -13,6 +13,7 @@ edition = "2018"
bitflags = "1"
bytes = "0.4"
enum_primitive = "0.1"
lru-cache = "0.1"
net2 = "0.2"
num = "0.1"
rand = "0.6"
@@ -22,10 +23,10 @@ tempfile = "3.0.5"
log = "0.4"
chrono = { version = "0.4.4", features = ["serde"] }

grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_store = { path = "../store", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_chain = { path = "../chain", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "3.0.0" }
grin_store = { path = "../store", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }
grin_chain = { path = "../chain", version = "3.0.0" }

[dev-dependencies]
grin_pool = { path = "../pool", version = "2.1.0-beta.3" }
grin_pool = { path = "../pool", version = "3.0.0" }
@@ -23,12 +23,11 @@
use crate::core::ser;
use crate::core::ser::{FixedLength, ProtocolVersion};
use crate::msg::{
read_body, read_discard, read_header, read_item, write_to_buf, MsgHeader, MsgHeaderWrapper,
Type,
read_body, read_discard, read_header, read_item, write_message, Msg, MsgHeader,
MsgHeaderWrapper,
};
use crate::types::Error;
use crate::util::{RateCounter, RwLock};
use std::fs::File;
use std::io::{self, Read, Write};
use std::net::{Shutdown, TcpStream};
use std::sync::atomic::{AtomicBool, Ordering};
@@ -39,21 +38,24 @@ use std::{
thread::{self, JoinHandle},
};

const IO_TIMEOUT: Duration = Duration::from_millis(1000);
pub const SEND_CHANNEL_CAP: usize = 100;

const HEADER_IO_TIMEOUT: Duration = Duration::from_millis(2000);
const CHANNEL_TIMEOUT: Duration = Duration::from_millis(1000);
const BODY_IO_TIMEOUT: Duration = Duration::from_millis(60000);

/// A trait to be implemented in order to receive messages from the
/// connection. Allows providing an optional response.
pub trait MessageHandler: Send + 'static {
fn consume<'a>(
&self,
msg: Message<'a>,
writer: &'a mut dyn Write,
stopped: Arc<AtomicBool>,
tracker: Arc<Tracker>,
) -> Result<Option<Response<'a>>, Error>;
) -> Result<Option<Msg>, Error>;
}

// Macro to simplify the boilerplate around async I/O error handling,
// especially with WouldBlock kind of errors.
// Macro to simplify the boilerplate around I/O and Grin error handling
macro_rules! try_break {
($inner:expr) => {
match $inner {
@@ -76,6 +78,15 @@ macro_rules! try_break {
};
}

// Wrap the result of a header read: first arm a short read timeout on the
// connection so the reader loop wakes up regularly (to notice the stop flag
// and channel activity), then delegate error/break handling to try_break!.
macro_rules! try_header {
	($res:expr, $conn: expr) => {{
		$conn
			.set_read_timeout(Some(HEADER_IO_TIMEOUT))
			.expect("set timeout");
		try_break!($res)
	}};
}

/// A message as received by the connection. Provides access to the message
/// header and lazily consumes the message body, handling its deserialization.
pub struct Message<'a> {
@@ -121,66 +132,6 @@ impl<'a> Message<'a> {
}
}

/// Response to a `Message`.
pub struct Response<'a> {
	resp_type: Type,
	body: Vec<u8>,
	version: ProtocolVersion,
	stream: &'a mut dyn Write,
	attachment: Option<File>,
}

impl<'a> Response<'a> {
	/// Build a response of the given type, serializing the body with the
	/// negotiated protocol version. The attachment starts out empty.
	pub fn new<T: ser::Writeable>(
		resp_type: Type,
		version: ProtocolVersion,
		body: T,
		stream: &'a mut dyn Write,
	) -> Result<Response<'a>, Error> {
		let serialized = ser::ser_vec(&body, version)?;
		Ok(Response {
			resp_type,
			body: serialized,
			version,
			stream,
			attachment: None,
		})
	}

	/// Serialize header + body and write them to the stream, then stream
	/// out the attachment (if any), updating the sent-bytes tracker.
	fn write(mut self, tracker: Arc<Tracker>) -> Result<(), Error> {
		// Header carries the body length, so serialize it first and
		// append the already-serialized body.
		let header = MsgHeader::new(self.resp_type, self.body.len() as u64);
		let mut out = ser::ser_vec(&header, self.version)?;
		out.append(&mut self.body);
		self.stream.write_all(&out[..])?;
		tracker.inc_sent(out.len() as u64);

		// Stream the attachment through a fixed-size buffer until EOF.
		if let Some(mut file) = self.attachment {
			let mut chunk = [0u8; 8000];
			loop {
				let n = file.read(&mut chunk[..])?;
				if n == 0 {
					break;
				}
				self.stream.write_all(&chunk[..n])?;
				// Increase sent bytes "quietly" without incrementing the counter.
				// (In a loop here for the single attachment).
				tracker.inc_quiet_sent(n as u64);
			}
		}
		Ok(())
	}

	/// Attach a file to be streamed out after the message body.
	pub fn add_attachment(&mut self, file: File) {
		self.attachment = Some(file);
	}
}

pub const SEND_CHANNEL_CAP: usize = 100;

pub struct StopHandle {
/// Channel to close the connection
stopped: Arc<AtomicBool>,
@@ -220,20 +171,31 @@ impl StopHandle {
}
}

#[derive(Clone)]
pub struct ConnHandle {
/// Channel to allow sending data through the connection
pub send_channel: mpsc::SyncSender<Vec<u8>>,
pub send_channel: mpsc::SyncSender<Msg>,
}

impl ConnHandle {
pub fn send<T>(&self, body: T, msg_type: Type, version: ProtocolVersion) -> Result<u64, Error>
where
T: ser::Writeable,
{
let buf = write_to_buf(body, msg_type, version)?;
let buf_len = buf.len();
self.send_channel.try_send(buf)?;
Ok(buf_len as u64)
/// Send msg via the synchronous, bounded channel (sync_sender).
/// Two possible failure cases -
/// * Disconnected: Propagate this up to the caller so the peer connection can be closed.
/// * Full: Our internal msg buffer is full. This is not a problem with the peer connection
/// and we do not want to close the connection. We drop the msg rather than blocking here.
/// The buffer may be full because of an underlying issue with the peer,
/// and potentially the peer connection; we assume this will be handled at the peer level.
pub fn send(&self, msg: Msg) -> Result<(), Error> {
	// Non-blocking send via the bounded channel. A disconnected channel
	// means the peer connection is gone, so propagate that up; a full
	// buffer is handled locally by dropping the msg rather than blocking.
	self.send_channel.try_send(msg).or_else(|e| match e {
		mpsc::TrySendError::Disconnected(_) => {
			Err(Error::Send("try_send disconnected".to_owned()))
		}
		mpsc::TrySendError::Full(_) => {
			debug!("conn_handle: try_send but buffer is full, dropping msg");
			Ok(())
		}
	})
}
}

@@ -285,22 +247,24 @@ where
{
let (send_tx, send_rx) = mpsc::sync_channel(SEND_CHANNEL_CAP);

stream
.set_read_timeout(Some(IO_TIMEOUT))
.expect("can't set read timeout");
stream
.set_write_timeout(Some(IO_TIMEOUT))
.expect("can't set read timeout");

let stopped = Arc::new(AtomicBool::new(false));

let (reader_thread, writer_thread) =
poll(stream, version, handler, send_rx, stopped.clone(), tracker)?;
let conn_handle = ConnHandle {
send_channel: send_tx,
};

let (reader_thread, writer_thread) = poll(
stream,
conn_handle.clone(),
version,
handler,
send_rx,
stopped.clone(),
tracker,
)?;

Ok((
ConnHandle {
send_channel: send_tx,
},
conn_handle,
StopHandle {
stopped,
reader_thread: Some(reader_thread),
@@ -311,9 +275,10 @@ where

fn poll<H>(
conn: TcpStream,
conn_handle: ConnHandle,
version: ProtocolVersion,
handler: H,
send_rx: mpsc::Receiver<Vec<u8>>,
send_rx: mpsc::Receiver<Msg>,
stopped: Arc<AtomicBool>,
tracker: Arc<Tracker>,
) -> io::Result<(JoinHandle<()>, JoinHandle<()>)>
@@ -323,16 +288,21 @@ where
// Split out tcp stream out into separate reader/writer halves.
let mut reader = conn.try_clone().expect("clone conn for reader failed");
let mut writer = conn.try_clone().expect("clone conn for writer failed");
let mut responder = conn.try_clone().expect("clone conn for writer failed");
let reader_stopped = stopped.clone();

let reader_tracker = tracker.clone();
let writer_tracker = tracker.clone();

let reader_thread = thread::Builder::new()
.name("peer_read".to_string())
.spawn(move || {
loop {
// check the read end
match try_break!(read_header(&mut reader, version)) {
match try_header!(read_header(&mut reader, version), &mut reader) {
Some(MsgHeaderWrapper::Known(header)) => {
reader
.set_read_timeout(Some(BODY_IO_TIMEOUT))
.expect("set timeout");
let msg = Message::from_header(header, &mut reader, version);

trace!(
@@ -342,17 +312,24 @@ where
);

// Increase received bytes counter
tracker.inc_received(MsgHeader::LEN as u64 + msg.header.msg_len);

if let Some(Some(resp)) =
try_break!(handler.consume(msg, &mut responder, tracker.clone()))
{
try_break!(resp.write(tracker.clone()));
reader_tracker.inc_received(MsgHeader::LEN as u64 + msg.header.msg_len);

let resp_msg = try_break!(handler.consume(
msg,
reader_stopped.clone(),
reader_tracker.clone()
));
if let Some(Some(resp_msg)) = resp_msg {
try_break!(conn_handle.send(resp_msg));
}
}
Some(MsgHeaderWrapper::Unknown(msg_len)) => {
Some(MsgHeaderWrapper::Unknown(msg_len, type_byte)) => {
debug!(
"Received unknown message header, type {:?}, len {}.",
type_byte, msg_len
);
// Increase received bytes counter
tracker.inc_received(MsgHeader::LEN as u64 + msg_len);
reader_tracker.inc_received(MsgHeader::LEN as u64 + msg_len);

try_break!(read_discard(msg_len, &mut reader));
}
@@ -379,11 +356,15 @@ where
.name("peer_write".to_string())
.spawn(move || {
let mut retry_send = Err(());
writer
.set_write_timeout(Some(BODY_IO_TIMEOUT))
.expect("set timeout");
loop {
let maybe_data = retry_send.or_else(|_| send_rx.recv_timeout(IO_TIMEOUT));
let maybe_data = retry_send.or_else(|_| send_rx.recv_timeout(CHANNEL_TIMEOUT));
retry_send = Err(());
if let Ok(data) = maybe_data {
let written = try_break!(writer.write_all(&data[..]).map_err(&From::from));
let written =
try_break!(write_message(&mut writer, &data, writer_tracker.clone()));
if written.is_none() {
retry_send = Ok(data);
}
@@ -12,10 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::conn::Tracker;
use crate::core::core::hash::Hash;
use crate::core::pow::Difficulty;
use crate::core::ser::ProtocolVersion;
use crate::msg::{read_message, write_message, Hand, Shake, Type, USER_AGENT};
use crate::msg::{read_message, write_message, Hand, Msg, Shake, Type, USER_AGENT};
use crate::peer::Peer;
use crate::types::{Capabilities, Direction, Error, P2PConfig, PeerAddr, PeerInfo, PeerLiveInfo};
use crate::util::RwLock;
@@ -47,6 +48,7 @@ pub struct Handshake {
genesis: Hash,
config: P2PConfig,
protocol_version: ProtocolVersion,
tracker: Arc<Tracker>,
}

impl Handshake {
@@ -58,6 +60,7 @@ impl Handshake {
genesis,
config,
protocol_version: ProtocolVersion::local(),
tracker: Arc::new(Tracker::new()),
}
}

@@ -99,7 +102,8 @@ impl Handshake {
};

// write and read the handshake response
write_message(conn, hand, Type::Hand, self.protocol_version)?;
let msg = Msg::new(Type::Hand, hand, self.protocol_version)?;
write_message(conn, &msg, self.tracker.clone())?;

let shake: Shake = read_message(conn, self.protocol_version, Type::Shake)?;
if shake.genesis != self.genesis {
@@ -196,7 +200,9 @@ impl Handshake {
user_agent: USER_AGENT.to_string(),
};

write_message(conn, shake, Type::Shake, negotiated_version)?;
let msg = Msg::new(Type::Shake, shake, negotiated_version)?;
write_message(conn, &msg, self.tracker.clone())?;

trace!("Success handshake with {}.", peer_info.addr);

Ok(peer_info)
@@ -14,6 +14,7 @@

//! Message types that transit over the network and related serialization code.

use crate::conn::Tracker;
use crate::core::core::hash::Hash;
use crate::core::core::BlockHeader;
use crate::core::pow::Difficulty;
@@ -25,7 +26,9 @@ use crate::types::{
Capabilities, Error, PeerAddr, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS,
};
use num::FromPrimitive;
use std::fs::File;
use std::io::{Read, Write};
use std::sync::Arc;

/// Grin's user agent with current version
pub const USER_AGENT: &'static str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));
@@ -114,6 +117,33 @@ fn magic() -> [u8; 2] {
}
}

/// A fully serialized protocol message: header, body bytes, the protocol
/// version it was serialized with, and an optional file attachment to be
/// streamed out after the body.
pub struct Msg {
	header: MsgHeader,
	body: Vec<u8>,
	attachment: Option<File>,
	version: ProtocolVersion,
}

impl Msg {
	/// Serialize `msg` with the given protocol version and build the
	/// corresponding header from the resulting body length.
	pub fn new<T: Writeable>(
		msg_type: Type,
		msg: T,
		version: ProtocolVersion,
	) -> Result<Msg, Error> {
		// Body is serialized up front so the header can carry its length.
		let body = ser::ser_vec(&msg, version)?;
		let msg_len = body.len() as u64;
		Ok(Msg {
			header: MsgHeader::new(msg_type, msg_len),
			body,
			attachment: None,
			version,
		})
	}

	/// Attach a file to be streamed after the message body.
	pub fn add_attachment(&mut self, attachment: File) {
		self.attachment = Some(attachment)
	}
}

/// Read a header from the provided stream without blocking if the
/// underlying stream is async. Typically headers will be polled for, so
/// we do not want to block.
@@ -175,39 +205,38 @@ pub fn read_message<T: Readable>(
Err(Error::BadMessage)
}
}
MsgHeaderWrapper::Unknown(msg_len) => {
MsgHeaderWrapper::Unknown(msg_len, _) => {
read_discard(msg_len, stream)?;
Err(Error::BadMessage)
}
}
}

/// Serialize a message (header + body) into a single byte buffer,
/// ready to be written to the wire.
pub fn write_to_buf<T: Writeable>(
	msg: T,
	msg_type: Type,
	version: ProtocolVersion,
) -> Result<Vec<u8>, Error> {
	// Serialize the body first so we know its length for the header.
	let mut body = vec![];
	ser::serialize(&mut body, version, &msg)?;

	// Serialize the header, then append the body to complete the message.
	let header = MsgHeader::new(msg_type, body.len() as u64);
	let mut out = vec![];
	ser::serialize(&mut out, version, &header)?;
	out.append(&mut body);

	Ok(out)
}

pub fn write_message<T: Writeable>(
pub fn write_message(
stream: &mut dyn Write,
msg: T,
msg_type: Type,
version: ProtocolVersion,
msg: &Msg,
tracker: Arc<Tracker>,
) -> Result<(), Error> {
let buf = write_to_buf(msg, msg_type, version)?;
let mut buf = ser::ser_vec(&msg.header, msg.version)?;
buf.extend(&msg.body[..]);
stream.write_all(&buf[..])?;
tracker.inc_sent(buf.len() as u64);
if let Some(file) = &msg.attachment {
let mut file = file.try_clone()?;
let mut buf = [0u8; 8000];
loop {
match file.read(&mut buf[..]) {
Ok(0) => break,
Ok(n) => {
stream.write_all(&buf[..n])?;
// Increase sent bytes "quietly" without incrementing the counter.
// (In a loop here for the single attachment).
tracker.inc_quiet_sent(n as u64);
}
Err(e) => return Err(From::from(e)),
}
}
}
Ok(())
}

@@ -219,7 +248,7 @@ pub enum MsgHeaderWrapper {
/// A "known" msg type with deserialized msg header.
Known(MsgHeader),
/// An unknown msg type with corresponding msg size in bytes.
Unknown(u64),
Unknown(u64, u8),
}

/// Header of any protocol message, used to identify incoming messages.
@@ -302,7 +331,7 @@ impl Readable for MsgHeaderWrapper {
return Err(ser::Error::TooLargeReadErr);
}

Ok(MsgHeaderWrapper::Unknown(msg_len))
Ok(MsgHeaderWrapper::Unknown(msg_len, t))
}
}
}
@@ -21,6 +21,8 @@ use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use lru_cache::LruCache;

use crate::chain;
use crate::conn;
use crate::core::core::hash::{Hash, Hashed};
@@ -29,7 +31,7 @@ use crate::core::ser::Writeable;
use crate::core::{core, global};
use crate::handshake::Handshake;
use crate::msg::{
self, BanReason, GetPeerAddrs, KernelDataRequest, Locator, Ping, TxHashSetRequest, Type,
self, BanReason, GetPeerAddrs, KernelDataRequest, Locator, Msg, Ping, TxHashSetRequest, Type,
};
use crate::protocol::Protocol;
use crate::types::{
@@ -233,12 +235,8 @@ impl Peer {

/// Send a msg with given msg_type to our peer via the connection.
fn send<T: Writeable>(&self, msg: T, msg_type: Type) -> Result<(), Error> {
let bytes = self
.send_handle
.lock()
.send(msg, msg_type, self.info.version)?;
self.tracker.inc_sent(bytes);
Ok(())
let msg = Msg::new(msg_type, msg, self.info.version)?;
self.send_handle.lock().send(msg)
}

/// Send a ping to the remote peer, providing our local difficulty and
@@ -368,10 +366,11 @@ impl Peer {
self.send(&h, msg::Type::GetTransaction)
}

/// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
/// Sends a request for a specific block by hash.
/// Takes opts so we can track if this request was due to our node syncing or otherwise.
pub fn send_block_request(&self, h: Hash, opts: chain::Options) -> Result<(), Error> {
debug!("Requesting block {} from peer {}.", h, self.info.addr);
self.tracking_adapter.push_req(h);
self.tracking_adapter.push_req(h, opts);
self.send(&h, msg::Type::GetBlock)
}

@@ -433,51 +432,35 @@ impl Peer {
#[derive(Clone)]
struct TrackingAdapter {
adapter: Arc<dyn NetAdapter>,
known: Arc<RwLock<Vec<Hash>>>,
requested: Arc<RwLock<Vec<Hash>>>,
received: Arc<RwLock<LruCache<Hash, ()>>>,
requested: Arc<RwLock<LruCache<Hash, chain::Options>>>,
}

impl TrackingAdapter {
fn new(adapter: Arc<dyn NetAdapter>) -> TrackingAdapter {
TrackingAdapter {
adapter: adapter,
known: Arc::new(RwLock::new(Vec::with_capacity(MAX_TRACK_SIZE))),
requested: Arc::new(RwLock::new(Vec::with_capacity(MAX_TRACK_SIZE))),
received: Arc::new(RwLock::new(LruCache::new(MAX_TRACK_SIZE))),
requested: Arc::new(RwLock::new(LruCache::new(MAX_TRACK_SIZE))),
}
}

fn has_recv(&self, hash: Hash) -> bool {
let known = self.known.read();
// may become too slow, an ordered set (by timestamp for eviction) may
// end up being a better choice
known.contains(&hash)
self.received.write().contains_key(&hash)
}

fn push_recv(&self, hash: Hash) {
let mut known = self.known.write();
if known.len() > MAX_TRACK_SIZE {
known.truncate(MAX_TRACK_SIZE);
}
if !known.contains(&hash) {
known.insert(0, hash);
}
self.received.write().insert(hash, ());
}

fn has_req(&self, hash: Hash) -> bool {
let requested = self.requested.read();
// may become too slow, an ordered set (by timestamp for eviction) may
// end up being a better choice
requested.contains(&hash)
/// Track a block or transaction hash requested by us.
/// Track the opts alongside the hash so we know if this was due to us syncing or not.
fn push_req(&self, hash: Hash, opts: chain::Options) {
self.requested.write().insert(hash, opts);
}

fn push_req(&self, hash: Hash) {
let mut requested = self.requested.write();
if requested.len() > MAX_TRACK_SIZE {
requested.truncate(MAX_TRACK_SIZE);
}
if !requested.contains(&hash) {
requested.insert(0, hash);
}
/// Look up the opts we recorded when requesting this hash, if any.
/// Takes the write lock because `LruCache::get_mut` mutates the cache
/// (it refreshes the entry's LRU position) — a read lock would not do.
fn req_opts(&self, hash: Hash) -> Option<chain::Options> {
	self.requested.write().get_mut(&hash).cloned()
}
}

@@ -522,11 +505,17 @@ impl ChainAdapter for TrackingAdapter {
&self,
b: core::Block,
peer_info: &PeerInfo,
_was_requested: bool,
opts: chain::Options,
) -> Result<bool, chain::Error> {
let bh = b.hash();
self.push_recv(bh);
self.adapter.block_received(b, peer_info, self.has_req(bh))

// If we are currently tracking a request for this block then
// use the opts specified when we made the request.
// If we requested this block as part of sync then we want to
// let our adapter know this when we receive it.
let req_opts = self.req_opts(bh).unwrap_or(opts);
self.adapter.block_received(b, peer_info, req_opts)
}

fn compact_block_received(
@@ -58,13 +58,10 @@ impl Peers {
/// Adds the peer to our internal peer mapping. Note that the peer is still
/// returned so the server can run it.
pub fn add_connected(&self, peer: Arc<Peer>) -> Result<(), Error> {
let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
Some(peers) => peers,
None => {
error!("add_connected: failed to get peers lock");
return Err(Error::Timeout);
}
};
let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| {
error!("add_connected: failed to get peers lock");
Error::Timeout
})?;
let peer_data = PeerData {
addr: peer.info.addr,
capabilities: peer.info.capabilities,
@@ -102,13 +99,10 @@ impl Peers {
/// and this attempt fails then return an error allowing the caller
/// to decide how best to handle this.
pub fn is_known(&self, addr: PeerAddr) -> Result<bool, Error> {
let peers = match self.peers.try_read_for(LOCK_TIMEOUT) {
Some(peers) => peers,
None => {
error!("is_known: failed to get peers lock");
return Err(Error::Internal);
}
};
let peers = self.peers.try_read_for(LOCK_TIMEOUT).ok_or_else(|| {
error!("is_known: failed to get peers lock");
Error::Internal
})?;
Ok(peers.contains_key(&addr))
}

@@ -253,50 +247,38 @@ impl Peers {
}
false
}

/// Ban a peer, disconnecting it if we're currently connected
pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) {
if let Err(e) = self.update_state(peer_addr, State::Banned) {
error!("Couldn't ban {}: {:?}", peer_addr, e);
return;
}

if let Some(peer) = self.get_connected_peer(peer_addr) {
debug!("Banning peer {}", peer_addr);
// setting peer status will get it removed at the next clean_peer
match peer.send_ban_reason(ban_reason) {
Err(e) => error!("failed to send a ban reason to{}: {:?}", peer_addr, e),
Ok(_) => debug!("ban reason {:?} was sent to {}", ban_reason, peer_addr),
};
peer.set_banned();
peer.stop();

let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
Some(peers) => peers,
None => {
pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
self.update_state(peer_addr, State::Banned)?;

match self.get_connected_peer(peer_addr) {
Some(peer) => {
debug!("Banning peer {}", peer_addr);
// setting peer status will get it removed at the next clean_peer
peer.send_ban_reason(ban_reason)?;
peer.set_banned();
peer.stop();
let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| {
error!("ban_peer: failed to get peers lock");
return;
}
};
peers.remove(&peer.info.addr);
Error::PeerException
})?;
peers.remove(&peer.info.addr);
Ok(())
}
None => return Err(Error::PeerNotFound),
}
}

/// Unban a peer, checks if it exists and banned then unban
pub fn unban_peer(&self, peer_addr: PeerAddr) {
pub fn unban_peer(&self, peer_addr: PeerAddr) -> Result<(), Error> {
debug!("unban_peer: peer {}", peer_addr);
match self.get_peer(peer_addr) {
Ok(_) => {
if self.is_banned(peer_addr) {
if let Err(e) = self.update_state(peer_addr, State::Healthy) {
error!("Couldn't unban {}: {:?}", peer_addr, e);
}
} else {
error!("Couldn't unban {}: peer is not banned", peer_addr);
}
}
Err(e) => error!("Couldn't unban {}: {:?}", peer_addr, e),
};
// check if peer exist
self.get_peer(peer_addr)?;
if self.is_banned(peer_addr) {
return self.update_state(peer_addr, State::Healthy);
} else {
return Err(Error::PeerNotBanned);
}
}

fn broadcast<F>(&self, obj_name: &str, inner: F) -> u32
@@ -595,17 +577,22 @@ impl ChainAdapter for Peers {
&self,
b: core::Block,
peer_info: &PeerInfo,
was_requested: bool,
opts: chain::Options,
) -> Result<bool, chain::Error> {
let hash = b.hash();
if !self.adapter.block_received(b, peer_info, was_requested)? {
if !self.adapter.block_received(b, peer_info, opts)? {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(
"Received a bad block {} from {}, the peer will be banned",
hash, peer_info.addr,
);
self.ban_peer(peer_info.addr, ReasonForBan::BadBlock);
self.ban_peer(peer_info.addr, ReasonForBan::BadBlock)
.map_err(|e| {
let err: chain::Error =
chain::ErrorKind::Other(format!("ban peer error :{:?}", e)).into();
err
})?;
Ok(false)
} else {
Ok(true)
@@ -625,7 +612,12 @@ impl ChainAdapter for Peers {
"Received a bad compact block {} from {}, the peer will be banned",
hash, peer_info.addr
);
self.ban_peer(peer_info.addr, ReasonForBan::BadCompactBlock);
self.ban_peer(peer_info.addr, ReasonForBan::BadCompactBlock)
.map_err(|e| {
let err: chain::Error =
chain::ErrorKind::Other(format!("ban peer error :{:?}", e)).into();
err
})?;
Ok(false)
} else {
Ok(true)
@@ -640,7 +632,12 @@ impl ChainAdapter for Peers {
if !self.adapter.header_received(bh, peer_info)? {
// if the peer sent us a block header that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader);
self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader)
.map_err(|e| {
let err: chain::Error =
chain::ErrorKind::Other(format!("ban peer error :{:?}", e)).into();
err
})?;
Ok(false)
} else {
Ok(true)
@@ -655,7 +652,12 @@ impl ChainAdapter for Peers {
if !self.adapter.headers_received(headers, peer_info)? {
// if the peer sent us a block header that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader);
self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader)
.map_err(|e| {
let err: chain::Error =
chain::ErrorKind::Other(format!("ban peer error :{:?}", e)).into();
err
})?;
Ok(false)
} else {
Ok(true)
@@ -701,7 +703,12 @@ impl ChainAdapter for Peers {
"Received a bad txhashset data from {}, the peer will be banned",
peer_info.addr
);
self.ban_peer(peer_info.addr, ReasonForBan::BadTxHashSet);
self.ban_peer(peer_info.addr, ReasonForBan::BadTxHashSet)
.map_err(|e| {
let err: chain::Error =
chain::ErrorKind::Other(format!("ban peer error :{:?}", e)).into();
err
})?;
Ok(true)
} else {
Ok(false)
@@ -12,19 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::conn::{Message, MessageHandler, Response, Tracker};
use crate::chain;
use crate::conn::{Message, MessageHandler, Tracker};
use crate::core::core::{self, hash::Hash, hash::Hashed, CompactBlock};

use crate::msg::{
BanReason, GetPeerAddrs, Headers, KernelDataResponse, Locator, PeerAddrs, Ping, Pong,
BanReason, GetPeerAddrs, Headers, KernelDataResponse, Locator, Msg, PeerAddrs, Ping, Pong,
TxHashSetArchive, TxHashSetRequest, Type,
};
use crate::types::{Error, NetAdapter, PeerInfo};
use chrono::prelude::Utc;
use rand::{thread_rng, Rng};
use std::cmp;
use std::fs::{self, File, OpenOptions};
use std::io::{BufWriter, Seek, SeekFrom, Write};
use std::io::{BufWriter, Seek, SeekFrom};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Instant;
@@ -51,12 +52,12 @@ impl Protocol {
}

impl MessageHandler for Protocol {
fn consume<'a>(
fn consume(
&self,
mut msg: Message<'a>,
writer: &'a mut dyn Write,
mut msg: Message,
stopped: Arc<AtomicBool>,
tracker: Arc<Tracker>,
) -> Result<Option<Response<'a>>, Error> {
) -> Result<Option<Msg>, Error> {
let adapter = &self.adapter;

// If we received a msg from a banned peer then log and drop it.
@@ -75,14 +76,13 @@ impl MessageHandler for Protocol {
let ping: Ping = msg.body()?;
adapter.peer_difficulty(self.peer_info.addr, ping.total_difficulty, ping.height);

Ok(Some(Response::new(
Ok(Some(Msg::new(
Type::Pong,
self.peer_info.version,
Pong {
total_difficulty: adapter.total_difficulty()?,
height: adapter.total_height()?,
},
writer,
self.peer_info.version,
)?))
}

@@ -116,11 +116,10 @@ impl MessageHandler for Protocol {
);
let tx = adapter.get_transaction(h);
if let Some(tx) = tx {
Ok(Some(Response::new(
Ok(Some(Msg::new(
Type::Transaction,
self.peer_info.version,
tx,
writer,
self.peer_info.version,
)?))
} else {
Ok(None)
@@ -157,12 +156,7 @@ impl MessageHandler for Protocol {

let bo = adapter.get_block(h);
if let Some(b) = bo {
return Ok(Some(Response::new(
Type::Block,
self.peer_info.version,
b,
writer,
)?));
return Ok(Some(Msg::new(Type::Block, b, self.peer_info.version)?));
}
Ok(None)
}
@@ -172,23 +166,24 @@ impl MessageHandler for Protocol {
"handle_payload: received block: msg_len: {}",
msg.header.msg_len
);
let b: core::Block = msg.body()?;
let b: core::UntrustedBlock = msg.body()?;

// we can't know at this level whether we requested the block or not,
// the boolean should be properly set in higher level adapter
adapter.block_received(b, &self.peer_info, false)?;
// We default to NONE opts here as we do not know yet why this block was
// received.
// If we requested this block from a peer due to our node syncing then
// the peer adapter will override opts to reflect this.
adapter.block_received(b.into(), &self.peer_info, chain::Options::NONE)?;
Ok(None)
}

Type::GetCompactBlock => {
let h: Hash = msg.body()?;
if let Some(b) = adapter.get_block(h) {
let cb: CompactBlock = b.into();
Ok(Some(Response::new(
Ok(Some(Msg::new(
Type::CompactBlock,
self.peer_info.version,
cb,
writer,
self.peer_info.version,
)?))
} else {
Ok(None)
@@ -200,9 +195,9 @@ impl MessageHandler for Protocol {
"handle_payload: received compact block: msg_len: {}",
msg.header.msg_len
);
let b: core::CompactBlock = msg.body()?;
let b: core::UntrustedCompactBlock = msg.body()?;

adapter.compact_block_received(b, &self.peer_info)?;
adapter.compact_block_received(b.into(), &self.peer_info)?;
Ok(None)
}

@@ -212,19 +207,18 @@ impl MessageHandler for Protocol {
let headers = adapter.locate_headers(&loc.hashes)?;

// serialize and send all the headers over
Ok(Some(Response::new(
Ok(Some(Msg::new(
Type::Headers,
self.peer_info.version,
Headers { headers },
writer,
self.peer_info.version,
)?))
}

// "header first" block propagation - if we have not yet seen this block
// we can go request it from some of our peers
Type::Header => {
let header: core::BlockHeader = msg.body()?;
adapter.header_received(header, &self.peer_info)?;
let header: core::UntrustedBlockHeader = msg.body()?;
adapter.header_received(header.into(), &self.peer_info)?;
Ok(None)
}

@@ -240,8 +234,9 @@ impl MessageHandler for Protocol {
for chunk in (0..count).collect::<Vec<_>>().chunks(chunk_size) {
let mut headers = vec![];
for _ in chunk {
let (header, bytes_read) = msg.streaming_read()?;
headers.push(header);
let (header, bytes_read) =
msg.streaming_read::<core::UntrustedBlockHeader>()?;
headers.push(header.into());
total_bytes_read += bytes_read;
}
adapter.headers_received(&headers, &self.peer_info)?;
@@ -258,11 +253,10 @@ impl MessageHandler for Protocol {
Type::GetPeerAddrs => {
let get_peers: GetPeerAddrs = msg.body()?;
let peers = adapter.find_peer_addrs(get_peers.capabilities);
Ok(Some(Response::new(
Ok(Some(Msg::new(
Type::PeerAddrs,
self.peer_info.version,
PeerAddrs { peers },
writer,
self.peer_info.version,
)?))
}

@@ -277,11 +271,10 @@ impl MessageHandler for Protocol {
let kernel_data = self.adapter.kernel_data_read()?;
let bytes = kernel_data.metadata()?.len();
let kernel_data_response = KernelDataResponse { bytes };
let mut response = Response::new(
let mut response = Msg::new(
Type::KernelDataResponse,
self.peer_info.version,
&kernel_data_response,
writer,
self.peer_info.version,
)?;
response.add_attachment(kernel_data);
Ok(Some(response))
@@ -337,15 +330,14 @@ impl MessageHandler for Protocol {

if let Some(txhashset) = txhashset {
let file_sz = txhashset.reader.metadata()?.len();
let mut resp = Response::new(
let mut resp = Msg::new(
Type::TxHashSetArchive,
self.peer_info.version,
&TxHashSetArchive {
height: txhashset_header.height as u64,
hash: txhashset_header_hash,
bytes: file_sz,
},
writer,
self.peer_info.version,
)?;
resp.add_attachment(txhashset.reader);
Ok(Some(resp))
@@ -408,7 +400,13 @@ impl MessageHandler for Protocol {
}
// Increase received bytes quietly (without affecting the counters).
// Otherwise we risk banning a peer as "abusive".
tracker.inc_quiet_received(size as u64)
tracker.inc_quiet_received(size as u64);

// check the close channel
if stopped.load(Ordering::Relaxed) {
debug!("stopping txhashset download early");
return Err(Error::ConnectionClose);
}
}
debug!(
"handle_payload: txhashset archive: {}/{} ... DONE",
@@ -84,6 +84,14 @@ impl Server {

match listener.accept() {
Ok((stream, peer_addr)) => {
// We want our TCP stream to be in blocking mode.
// The TCP listener is in nonblocking mode so we *must* explicitly
// move the accepted TCP stream into blocking mode (or all kinds of
// bad things can and will happen).
// A nonblocking TCP listener will accept nonblocking TCP streams which
// we do not want.
stream.set_nonblocking(false)?;

let peer_addr = PeerAddr(peer_addr);

if self.check_undesirable(&stream) {
@@ -296,7 +304,12 @@ impl ChainAdapter for DummyAdapter {
) -> Result<bool, chain::Error> {
Ok(true)
}
fn block_received(&self, _: core::Block, _: &PeerInfo, _: bool) -> Result<bool, chain::Error> {
fn block_received(
&self,
_: core::Block,
_: &PeerInfo,
_: chain::Options,
) -> Result<bool, chain::Error> {
Ok(true)
}
fn headers_received(
@@ -18,8 +18,6 @@ use std::fs::File;
use std::io::{self, Read};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::path::PathBuf;

use std::sync::mpsc;
use std::sync::Arc;

use chrono::prelude::*;
@@ -80,6 +78,8 @@ pub enum Error {
peer: Hash,
},
Send(String),
PeerNotFound,
PeerNotBanned,
PeerException,
Internal,
}
@@ -104,11 +104,6 @@ impl From<io::Error> for Error {
Error::Connection(e)
}
}
impl<T> From<mpsc::TrySendError<T>> for Error {
fn from(e: mpsc::TrySendError<T>) -> Error {
Error::Send(e.to_string())
}
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct PeerAddr(pub SocketAddr);
@@ -527,7 +522,7 @@ pub trait ChainAdapter: Sync + Send {
&self,
b: core::Block,
peer_info: &PeerInfo,
was_requested: bool,
opts: chain::Options,
) -> Result<bool, chain::Error>;

fn compact_block_received(
@@ -1,8 +1,8 @@
[package]
name = "grin_pool"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -19,10 +19,10 @@ chrono = "0.4.4"
failure = "0.1"
failure_derive = "0.1"

grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_keychain = { path = "../keychain", version = "2.1.0-beta.3" }
grin_store = { path = "../store", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "3.0.0" }
grin_keychain = { path = "../keychain", version = "3.0.0" }
grin_store = { path = "../store", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }

[dev-dependencies]
grin_chain = { path = "../chain", version = "2.1.0-beta.3" }
grin_chain = { path = "../chain", version = "3.0.0" }
@@ -446,6 +446,12 @@ impl Pool {
self.entries.len()
}

/// Total number of tx kernels across all entries in the pool.
/// Can differ from the pool size (number of transactions) because a single
/// entry may hold an aggregated transaction with multiple kernels.
pub fn kernel_count(&self) -> usize {
	self.entries
		.iter()
		.fold(0, |acc, entry| acc + entry.tx.kernels().len())
}

/// Is the pool empty?
pub fn is_empty(&self) -> bool {
self.entries.is_empty()
@@ -105,21 +105,21 @@ pub struct PoolConfig {
/// Base fee for a transaction to be accepted by the pool. The transaction
/// weight is computed from its number of inputs, outputs and kernels and
/// multiplied by the base fee to compare to the actual transaction fee.
#[serde = "default_accept_fee_base"]
#[serde(default = "default_accept_fee_base")]
pub accept_fee_base: u64,

/// Maximum capacity of the pool in number of transactions
#[serde = "default_max_pool_size"]
#[serde(default = "default_max_pool_size")]
pub max_pool_size: usize,

/// Maximum capacity of the pool in number of transactions
#[serde = "default_max_stempool_size"]
#[serde(default = "default_max_stempool_size")]
pub max_stempool_size: usize,

/// Maximum total weight of transactions that can get selected to build a
/// block from. Allows miners to restrict the maximum weight of their
/// blocks.
#[serde = "default_mineable_max_weight"]
#[serde(default = "default_mineable_max_weight")]
pub mineable_max_weight: usize,
}

@@ -149,7 +149,7 @@ fn default_mineable_max_weight() -> usize {

/// Represents a single entry in the pool.
/// A single (possibly aggregated) transaction.
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PoolEntry {
/// Info on where this tx originated from.
pub src: TxSource,
@@ -165,7 +165,7 @@ pub struct PoolEntry {
///
/// Most likely this will evolve to contain some sort of network identifier,
/// once we get a better sense of what transaction building might look like.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TxSource {
PushApi,
Broadcast,
@@ -18,7 +18,7 @@ use self::chain::store::ChainStore;
use self::chain::types::Tip;
use self::core::core::hash::{Hash, Hashed};
use self::core::core::verifier_cache::VerifierCache;
use self::core::core::{Block, BlockHeader, BlockSums, Committed, Transaction};
use self::core::core::{Block, BlockHeader, BlockSums, Committed, KernelFeatures, Transaction};
use self::core::libtx;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::*;
@@ -59,7 +59,6 @@ impl ChainAdapter {

batch.save_block_header(header).unwrap();
batch.save_body_head(&tip).unwrap();
batch.save_header_head(&tip).unwrap();

// Retrieve previous block_sums from the db.
let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) {
@@ -194,9 +193,13 @@ where
tx_elements.push(libtx::build::output(output_value, key_id));
}

tx_elements.push(libtx::build::with_fee(fees as u64));

libtx::build::transaction(tx_elements, keychain, &libtx::ProofBuilder::new(keychain)).unwrap()
libtx::build::transaction(
KernelFeatures::Plain { fee: fees as u64 },
tx_elements,
keychain,
&libtx::ProofBuilder::new(keychain),
)
.unwrap()
}

pub fn test_transaction<K>(
@@ -224,9 +227,14 @@ where
let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::output(output_value, key_id));
}
tx_elements.push(libtx::build::with_fee(fees as u64));

libtx::build::transaction(tx_elements, keychain, &libtx::ProofBuilder::new(keychain)).unwrap()
libtx::build::transaction(
KernelFeatures::Plain { fee: fees as u64 },
tx_elements,
keychain,
&libtx::ProofBuilder::new(keychain),
)
.unwrap()
}

pub fn test_source() -> TxSource {
@@ -1,8 +1,8 @@
[package]
name = "grin_servers"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -26,11 +26,11 @@ chrono = "0.4.4"
tokio = "0.1.11"
walkdir = "2.2.9"

grin_api = { path = "../api", version = "2.1.0-beta.3" }
grin_chain = { path = "../chain", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_keychain = { path = "../keychain", version = "2.1.0-beta.3" }
grin_p2p = { path = "../p2p", version = "2.1.0-beta.3" }
grin_pool = { path = "../pool", version = "2.1.0-beta.3" }
grin_store = { path = "../store", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_api = { path = "../api", version = "3.0.0" }
grin_chain = { path = "../chain", version = "3.0.0" }
grin_core = { path = "../core", version = "3.0.0" }
grin_keychain = { path = "../keychain", version = "3.0.0" }
grin_p2p = { path = "../p2p", version = "3.0.0" }
grin_pool = { path = "../pool", version = "3.0.0" }
grin_store = { path = "../store", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }
@@ -15,6 +15,6 @@
//! Modules common to all Grin server types

pub mod adapters;
pub mod hooks;
pub mod stats;
pub mod types;
pub mod hooks;
@@ -118,8 +118,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
&self,
b: core::Block,
peer_info: &PeerInfo,
was_requested: bool,
opts: chain::Options,
) -> Result<bool, chain::Error> {
if self.chain().block_exists(b.hash())? {
return Ok(true);
}
debug!(
"Received block {} at {} from {} [in/out/kern: {}/{}/{}] going to process.",
b.hash(),
@@ -129,14 +132,18 @@ impl p2p::ChainAdapter for NetToChainAdapter {
b.outputs().len(),
b.kernels().len(),
);
self.process_block(b, peer_info, was_requested)
self.process_block(b, peer_info, opts)
}

fn compact_block_received(
&self,
cb: core::CompactBlock,
peer_info: &PeerInfo,
) -> Result<bool, chain::Error> {
// No need to process this compact block if we have previously accepted the _full block_.
if self.chain().block_exists(cb.hash())? {
return Ok(true);
}
let bhash = cb.hash();
debug!(
"Received compact_block {} at {} from {} [out/kern/kern_ids: {}/{}/{}] going to process.",
@@ -158,7 +165,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
hook.on_block_received(&block, &peer_info.addr);
}
}
self.process_block(block, peer_info, false)
self.process_block(block, peer_info, chain::Options::NONE)
}
Err(e) => {
debug!("Invalid hydrated block {}: {:?}", cb_hash, e);
@@ -169,7 +176,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
// check at least the header is valid before hydrating
if let Err(e) = self
.chain()
.process_block_header(&cb.header, self.chain_opts(false))
.process_block_header(&cb.header, chain::Options::NONE)
{
debug!("Invalid compact block header {}: {:?}", cb_hash, e.kind());
return Ok(!e.is_bad_data());
@@ -182,15 +189,16 @@ impl p2p::ChainAdapter for NetToChainAdapter {
};

debug!(
"adapter: txs from tx pool - {}, (unknown kern_ids: {})",
"compact_block_received: txs from tx pool - {}, (unknown kern_ids: {})",
txs.len(),
missing_short_ids.len(),
);

// TODO - 3 scenarios here -
// 1) we hydrate a valid block (good to go)
// 2) we hydrate an invalid block (txs legit missing from our pool)
// 3) we hydrate an invalid block (peer sent us a "bad" compact block) - [TBD]
// If we have missing kernels then we know we cannot hydrate this compact block.
if missing_short_ids.len() > 0 {
self.request_block(&cb.header, peer_info, chain::Options::NONE);
return Ok(true);
}

let block = match core::Block::hydrate_from(cb.clone(), txs) {
Ok(block) => {
@@ -213,11 +221,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.is_ok()
{
debug!("successfully hydrated block from tx pool!");
self.process_block(block, peer_info, false)
self.process_block(block, peer_info, chain::Options::NONE)
} else {
if self.sync_state.status() == SyncStatus::NoSync {
debug!("adapter: block invalid after hydration, requesting full block");
self.request_block(&cb.header, peer_info);
self.request_block(&cb.header, peer_info, chain::Options::NONE);
Ok(true)
} else {
debug!("block invalid after hydration, ignoring it, cause still syncing");
@@ -236,6 +244,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
bh: core::BlockHeader,
peer_info: &PeerInfo,
) -> Result<bool, chain::Error> {
// No need to process this header if we have previously accepted the _full block_.
if self.chain().block_exists(bh.hash())? {
return Ok(true);
}
if !self.sync_state.is_syncing() {
for hook in &self.hooks {
hook.on_header_received(&bh, &peer_info.addr);
@@ -244,9 +256,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {

// pushing the new block header through the header chain pipeline
// we will go ask for the block if this is a new header
let res = self
.chain()
.process_block_header(&bh, self.chain_opts(false));
let res = self.chain().process_block_header(&bh, chain::Options::NONE);

if let Err(e) = res {
debug!(
@@ -287,7 +297,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}

// try to add headers to our header chain
match self.chain().sync_block_headers(bhs, self.chain_opts(true)) {
match self.chain().sync_block_headers(bhs, chain::Options::SYNC) {
Ok(_) => Ok(true),
Err(e) => {
debug!("Block headers refused by chain: {:?}", e);
@@ -522,7 +532,7 @@ impl NetToChainAdapter {
&self,
b: core::Block,
peer_info: &PeerInfo,
was_requested: bool,
opts: chain::Options,
) -> Result<bool, chain::Error> {
// We cannot process blocks earlier than the horizon so check for this here.
{
@@ -538,10 +548,7 @@ impl NetToChainAdapter {
let bhash = b.hash();
let previous = self.chain().get_previous_header(&b.header);

match self
.chain()
.process_block(b, self.chain_opts(was_requested))
{
match self.chain().process_block(b, opts) {
Ok(_) => {
self.validate_chain(bhash);
self.check_compact();
@@ -560,7 +567,7 @@ impl NetToChainAdapter {
&& !self.sync_state.is_syncing()
{
debug!("process_block: received an orphan block, checking the parent: {:}", previous.hash());
self.request_block_by_hash(previous.hash(), peer_info)
self.request_block(&previous, peer_info, chain::Options::NONE)
}
}
Ok(true)
@@ -635,12 +642,10 @@ impl NetToChainAdapter {
// it into a full block then fallback to requesting the full block
// from the same peer that gave us the compact block
// consider additional peers for redundancy?
fn request_block(&self, bh: &BlockHeader, peer_info: &PeerInfo) {
self.request_block_by_hash(bh.hash(), peer_info)
}

fn request_block_by_hash(&self, h: Hash, peer_info: &PeerInfo) {
self.send_block_request_to_peer(h, peer_info, |peer, h| peer.send_block_request(h))
fn request_block(&self, bh: &BlockHeader, peer_info: &PeerInfo, opts: Options) {
self.send_block_request_to_peer(bh.hash(), peer_info, |peer, h| {
peer.send_block_request(h, opts)
})
}

// After we have received a block header in "header first" propagation
@@ -692,16 +697,6 @@ impl NetToChainAdapter {
),
}
}

/// Prepare options for the chain pipeline
fn chain_opts(&self, was_requested: bool) -> chain::Options {
let opts = if was_requested {
chain::Options::SYNC
} else {
chain::Options::NONE
};
opts
}
}

/// Implementation of the ChainAdapter for the network. Gets notified when the
@@ -53,7 +53,7 @@ pub struct ServerStats {
/// Chain head
pub chain_stats: ChainStats,
/// sync header head
pub header_stats: ChainStats,
pub header_stats: Option<ChainStats>,
/// Whether we're currently syncing
pub sync_status: SyncStatus,
/// Handle to current stratum server stats
@@ -63,7 +63,7 @@ pub struct ServerStats {
/// Difficulty calculation statistics
pub diff_stats: DiffStats,
/// Transaction pool statistics
pub tx_stats: TxStats,
pub tx_stats: Option<TxStats>,
/// Disk usage in GB
pub disk_usage_gb: String,
}
@@ -85,8 +85,12 @@ pub struct ChainStats {
pub struct TxStats {
/// Number of transactions in the transaction pool
pub tx_pool_size: usize,
/// Number of transaction kernels in the transaction pool.
/// May exceed `tx_pool_size` since pool entries can hold aggregated txs.
pub tx_pool_kernels: usize,
/// Number of transactions in the stem pool
pub stem_pool_size: usize,
/// Number of transaction kernels in the stem pool.
/// May exceed `stem_pool_size` since pool entries can hold aggregated txs.
pub stem_pool_kernels: usize,
}
/// Struct to return relevant information about stratum workers
#[derive(Clone, Serialize, Debug)]
@@ -189,6 +193,24 @@ pub struct PeerStats {
pub received_bytes_per_sec: u64,
}

impl PartialEq for PeerStats {
	/// Peers are considered equal when they share the same address.
	fn eq(&self, other: &PeerStats) -> bool {
		// Compare the addresses directly; the explicit deref (`*self.addr`)
		// in the original was unnecessary and only compiled via the
		// cross-type `str`/`String` PartialEq impls.
		self.addr == other.addr
	}
}

impl PartialEq for WorkerStats {
	/// Workers are considered equal when they share the same id.
	fn eq(&self, other: &WorkerStats) -> bool {
		// Compare ids directly; the explicit deref (`*self.id`) in the
		// original was unnecessary noise.
		self.id == other.id
	}
}

impl PartialEq for DiffBlock {
	/// Two difficulty-window entries are equal exactly when their block
	/// heights match; no other fields participate in the comparison.
	fn eq(&self, other: &DiffBlock) -> bool {
		self.block_height.eq(&other.block_height)
	}
}

impl StratumStats {
/// Calculate network hashrate
pub fn network_hashrate(&self, height: u64) -> f64 {
@@ -145,9 +145,12 @@ pub struct ServerConfig {
/// Network address for the Rest API HTTP server.
pub api_http_addr: String,

/// Location of secret for basic auth on Rest API HTTP server.
/// Location of secret for basic auth on Rest API HTTP and V2 Owner API server.
pub api_secret_path: Option<String>,

/// Location of secret for basic auth on v2 Foreign API server.
pub foreign_api_secret_path: Option<String>,

/// TLS certificate file
pub tls_certificate_file: Option<String>,
/// TLS certificate private key file
@@ -204,6 +207,7 @@ impl Default for ServerConfig {
db_root: "grin_chain".to_string(),
api_http_addr: "127.0.0.1:3413".to_string(),
api_secret_path: Some(".api_secret".to_string()),
foreign_api_secret_path: Some(".foreign_api_secret".to_string()),
tls_certificate_file: None,
tls_certificate_key: None,
p2p_config: p2p::P2PConfig::default(),
@@ -34,15 +34,13 @@ use crate::util::StopState;

// DNS Seeds with contact email associated
const MAINNET_DNS_SEEDS: &'static [&'static str] = &[
"mainnet.seed.grin-tech.org", // igno.peverell@protonmail.com
"mainnet.seed.grin.icu", // gary.peverell@protonmail.com
"mainnet.seed.713.mw", // jasper@713.mw
"mainnet.seed.grin.lesceller.com", // q.lesceller@gmail.com
"mainnet.seed.grin.prokapi.com", // hendi@prokapi.com
"grinseed.yeastplume.org", // yeastplume@protonmail.com
];
const FLOONET_DNS_SEEDS: &'static [&'static str] = &[
"floonet.seed.grin-tech.org", // igno.peverell@protonmail.com
"floonet.seed.grin.icu", // gary.peverell@protonmail.com
"floonet.seed.713.mw", // jasper@713.mw
"floonet.seed.grin.lesceller.com", // q.lesceller@gmail.com
@@ -157,7 +155,9 @@ fn monitor_peers(
let interval = Utc::now().timestamp() - x.last_banned;
// Unban peer
if interval >= config.ban_window() {
peers.unban_peer(x.addr);
if let Err(e) = peers.unban_peer(x.addr) {
error!("failed to unban peer {}: {:?}", x.addr, e);
}
debug!(
"monitor_peers: unbanned {} after {} seconds",
x.addr, interval
@@ -20,10 +20,10 @@ use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::Arc;
use std::sync::{mpsc, Arc};
use std::{
thread::{self, JoinHandle},
time,
time::{self, Duration},
};

use fs2::FileExt;
@@ -52,6 +52,7 @@ use crate::p2p::types::PeerAddr;
use crate::pool;
use crate::util::file::get_first_line;
use crate::util::{RwLock, StopState};
use grin_util::logger::LogEntry;

/// Grin server holding internal structures.
pub struct Server {
@@ -62,12 +63,12 @@ pub struct Server {
/// data store access
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
tx_pool: Arc<RwLock<pool::TransactionPool>>,
pub tx_pool: Arc<RwLock<pool::TransactionPool>>,
/// Shared cache for verification results when
/// verifying rangeproof and kernel signatures.
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
/// Whether we're currently syncing
sync_state: Arc<SyncState>,
pub sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
state_info: ServerStateInfo,
/// Stop flag
@@ -83,9 +84,13 @@ impl Server {
/// Instantiates and starts a new server. Optionally takes a callback
/// for the server to send an ARC copy of itself, to allow another process
/// to poll info about the server status
pub fn start<F>(config: ServerConfig, mut info_callback: F) -> Result<(), Error>
pub fn start<F>(
config: ServerConfig,
logs_rx: Option<mpsc::Receiver<LogEntry>>,
mut info_callback: F,
) -> Result<(), Error>
where
F: FnMut(Server),
F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>),
{
let mining_config = config.stratum_mining_config.clone();
let enable_test_miner = config.run_test_miner;
@@ -111,7 +116,7 @@ impl Server {
}
}

info_callback(serv);
info_callback(serv, logs_rx);
Ok(())
}

@@ -269,7 +274,7 @@ impl Server {

info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());

let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone());
let tls_conf = match config.tls_certificate_file.clone() {
None => None,
Some(file) => {
@@ -285,15 +290,16 @@ impl Server {
};

// TODO fix API shutdown and join this thread
api::start_rest_apis(
config.api_http_addr.clone(),
api::node_apis(
&config.api_http_addr,
shared_chain.clone(),
tx_pool.clone(),
p2p_server.peers.clone(),
sync_state.clone(),
api_secret,
tls_conf,
);
api_secret.clone(),
foreign_api_secret.clone(),
tls_conf.clone(),
)?;

info!("Starting dandelion monitor: {}", &config.api_http_addr);
let dandelion_thread = dandelion_monitor::monitor_transactions(
@@ -475,18 +481,16 @@ impl Server {
.map(|p| PeerStats::from_peer(&p))
.collect();

let (tx_pool_size, stem_pool_size) = {
let tx_pool_lock = self.tx_pool.try_read();
match tx_pool_lock {
Some(l) => (l.txpool.entries.len(), l.stempool.entries.len()),
None => (0, 0),
}
};
// Updating TUI stats should not block any other processing so only attempt to
// acquire various read locks with a timeout.
let read_timeout = Duration::from_millis(500);

let tx_stats = TxStats {
tx_pool_size,
stem_pool_size,
};
let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats {
tx_pool_size: pool.txpool.size(),
tx_pool_kernels: pool.txpool.kernel_count(),
stem_pool_size: pool.stempool.size(),
stem_pool_kernels: pool.stempool.kernel_count(),
});

let head = self.chain.head_header()?;
let head_stats = ChainStats {
@@ -496,13 +500,16 @@ impl Server {
total_difficulty: head.total_difficulty(),
};

let header_tip = self.chain.header_head()?;
let header = self.chain.get_block_header(&header_tip.hash())?;
let header_stats = ChainStats {
latest_timestamp: header.timestamp,
height: header.height,
last_block_h: header.prev_hash,
total_difficulty: header.total_difficulty(),
let header_stats = match self.chain.try_header_head(read_timeout)? {
Some(head) => self.chain.get_block_header(&head.hash()).map(|header| {
Some(ChainStats {
latest_timestamp: header.timestamp,
height: header.height,
last_block_h: header.prev_hash,
total_difficulty: header.total_difficulty(),
})
})?,
_ => None,
};

let disk_usage_bytes = WalkDir::new(&self.config.db_root)
@@ -555,9 +562,10 @@ impl Server {
}
}
// this call is blocking and makes sure all peers stop, however
// we can't be sure that we stoped a listener blocked on accept, so we don't join the p2p thread
// we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread
self.p2p.stop();
let _ = self.lock_file.unlock();
warn!("Shutdown complete");
}

/// Pause the p2p server.
@@ -138,7 +138,7 @@ impl BodySync {
let mut peers_iter = peers.iter().cycle();
for hash in hashes_to_get.clone() {
if let Some(peer) = peers_iter.next() {
if let Err(e) = peer.send_block_request(*hash) {
if let Err(e) = peer.send_block_request(*hash, chain::Options::SYNC) {
debug!("Skipped request to {}: {:?}", peer.info.addr, e);
peer.stop();
} else {
@@ -81,9 +81,6 @@ impl HeaderSync {
// correctly, so reset any previous (and potentially stale) sync_head to match
// our last known "good" header_head.
//
self.chain.reset_sync_head()?;

// Rebuild the sync MMR to match our updated sync_head.
self.chain.rebuild_sync_mmr(&header_head)?;

self.history_locator.retain(|&x| x.0 == 0);
@@ -148,8 +145,12 @@ impl HeaderSync {
if now > *stalling_ts + Duration::seconds(120)
&& header_head.total_difficulty < peer.info.total_difficulty()
{
self.peers
.ban_peer(peer.info.addr, ReasonForBan::FraudHeight);
if let Err(e) = self
.peers
.ban_peer(peer.info.addr, ReasonForBan::FraudHeight)
{
error!("failed to ban peer {}: {:?}", peer.info.addr, e);
}
info!(
"sync: ban a fraud peer: {}, claimed height: {}, total difficulty: {}",
peer.info.addr,
@@ -182,7 +182,18 @@ impl SyncRunner {
// if syncing is needed
let head = unwrap_or_restart_loop!(self.chain.head());
let tail = self.chain.tail().unwrap_or_else(|_| head.clone());
let header_head = unwrap_or_restart_loop!(self.chain.header_head());

// We still do not fully understand what is blocking this but if this blocks here after
// we download and validate the txhashet we do not reliably proceed to block_sync,
// potentially blocking for an extended period of time (> 10 mins).
// Does not appear to be deadlock as it does resolve itself eventually.
// So as a workaround we try_header_head with a relatively short timeout and simply
// retry the syncer loop.
let maybe_header_head =
unwrap_or_restart_loop!(self.chain.try_header_head(time::Duration::from_secs(1)));
let header_head = unwrap_or_restart_loop!(
maybe_header_head.ok_or("failed to obtain lock for try_header_head")
);

// run each sync stage, each of them deciding whether they're needed
// except for state sync that only runs if body sync return true (means txhashset is needed)
@@ -192,7 +203,8 @@ impl SyncRunner {
match self.sync_state.status() {
SyncStatus::TxHashsetDownload { .. }
| SyncStatus::TxHashsetSetup
| SyncStatus::TxHashsetValidation { .. }
| SyncStatus::TxHashsetRangeProofsValidation { .. }
| SyncStatus::TxHashsetKernelsValidation { .. }
| SyncStatus::TxHashsetSave
| SyncStatus::TxHashsetDone => check_state_sync = true,
_ => {
@@ -27,46 +27,53 @@ use crate::core::global;
use crate::p2p::{PeerAddr, Seeding};
use crate::servers;
use crate::tui::ui;
use grin_util::logger::LogEntry;
use std::sync::mpsc;

/// wrap below to allow UI to clean up on stop
pub fn start_server(config: servers::ServerConfig) {
start_server_tui(config);
pub fn start_server(config: servers::ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>) {
start_server_tui(config, logs_rx);
// Just kill process for now, otherwise the process
// hangs around until sigint because the API server
// currently has no shutdown facility
warn!("Shutting down...");
thread::sleep(Duration::from_millis(1000));
warn!("Shutdown complete.");
exit(0);
}

fn start_server_tui(config: servers::ServerConfig) {
fn start_server_tui(config: servers::ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>) {
// Run the UI controller.. here for now for simplicity to access
// everything it might need
if config.run_tui.unwrap_or(false) {
warn!("Starting GRIN in UI mode...");
servers::Server::start(config, |serv: servers::Server| {
let mut controller = ui::Controller::new().unwrap_or_else(|e| {
panic!("Error loading UI controller: {}", e);
});
controller.run(serv);
})
servers::Server::start(
config,
logs_rx,
|serv: servers::Server, logs_rx: Option<mpsc::Receiver<LogEntry>>| {
let mut controller = ui::Controller::new(logs_rx.unwrap()).unwrap_or_else(|e| {
panic!("Error loading UI controller: {}", e);
});
controller.run(serv);
},
)
.unwrap();
} else {
warn!("Starting GRIN w/o UI...");
servers::Server::start(config, |serv: servers::Server| {
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
ctrlc::set_handler(move || {
r.store(false, Ordering::SeqCst);
})
.expect("Error setting handler for both SIGINT (Ctrl+C) and SIGTERM (kill)");
while running.load(Ordering::SeqCst) {
thread::sleep(Duration::from_secs(1));
}
warn!("Received SIGINT (Ctrl+C) or SIGTERM (kill).");
serv.stop();
})
servers::Server::start(
config,
logs_rx,
|serv: servers::Server, _: Option<mpsc::Receiver<LogEntry>>| {
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
ctrlc::set_handler(move || {
r.store(false, Ordering::SeqCst);
})
.expect("Error setting handler for both SIGINT (Ctrl+C) and SIGTERM (kill)");
while running.load(Ordering::SeqCst) {
thread::sleep(Duration::from_secs(1));
}
warn!("Received SIGINT (Ctrl+C) or SIGTERM (kill).");
serv.stop();
},
)
.unwrap();
}
}
@@ -78,6 +85,7 @@ fn start_server_tui(config: servers::ServerConfig) {
pub fn server_command(
server_args: Option<&ArgMatches<'_>>,
mut global_config: GlobalConfig,
logs_rx: Option<mpsc::Receiver<LogEntry>>,
) -> i32 {
global::set_mining_mode(
global_config
@@ -123,7 +131,7 @@ pub fn server_command(
if let Some(a) = server_args {
match a.subcommand() {
("run", _) => {
start_server(server_config);
start_server(server_config, logs_rx);
}
("", _) => {
println!("Subcommand required, use 'grin help server' for details");
@@ -137,7 +145,7 @@ pub fn server_command(
}
}
} else {
start_server(server_config);
start_server(server_config, logs_rx);
}
0
}
@@ -30,6 +30,8 @@ use grin_core as core;
use grin_p2p as p2p;
use grin_servers as servers;
use grin_util as util;
use grin_util::logger::LogEntry;
use std::sync::mpsc;

mod cmd;
pub mod tui;
@@ -136,34 +138,36 @@ fn real_main() -> i32 {
}
}

if let Some(mut config) = node_config.clone() {
let mut l = config.members.as_mut().unwrap().logging.clone().unwrap();
let run_tui = config.members.as_mut().unwrap().server.run_tui;
if let Some(true) = run_tui {
l.log_to_stdout = false;
l.tui_running = Some(true);
}
init_logger(Some(l));
let mut config = node_config.clone().unwrap();
let mut logging_config = config.members.as_mut().unwrap().logging.clone().unwrap();
logging_config.tui_running = config.members.as_mut().unwrap().server.run_tui;

global::set_mining_mode(config.members.unwrap().server.clone().chain_type);
let (logs_tx, logs_rx) = if logging_config.tui_running.unwrap() {
let (logs_tx, logs_rx) = mpsc::sync_channel::<LogEntry>(200);
(Some(logs_tx), Some(logs_rx))
} else {
(None, None)
};
init_logger(Some(logging_config), logs_tx);

if let Some(file_path) = &config.config_file_path {
info!(
"Using configuration file at {}",
file_path.to_str().unwrap()
);
} else {
info!("Node configuration file not found, using default");
}
}
global::set_mining_mode(config.members.unwrap().server.clone().chain_type);

if let Some(file_path) = &config.config_file_path {
info!(
"Using configuration file at {}",
file_path.to_str().unwrap()
);
} else {
info!("Node configuration file not found, using default");
};

log_build_info();

// Execute subcommand
match args.subcommand() {
// server commands and options
("server", Some(server_args)) => {
cmd::server_command(Some(server_args), node_config.unwrap())
cmd::server_command(Some(server_args), node_config.unwrap(), logs_rx)
}

// client commands and options
@@ -177,11 +181,11 @@ fn real_main() -> i32 {
Ok(_) => 0,
Err(_) => 1,
}
},
}

// If nothing is specified, try to just use the config file instead
// this could possibly become the way to configure most things
// with most command line options being phased out
_ => cmd::server_command(None, node_config.unwrap()),
_ => cmd::server_command(None, node_config.unwrap(), logs_rx),
}
}
@@ -1,5 +1,5 @@
name: grin
about: Lightweight implementation of the MimbleWimble protocol.
about: Lightweight implementation of the Mimblewimble protocol.
author: The Grin Team

args:
@@ -28,6 +28,9 @@ pub const SUBMENU_MINING_BUTTON: &str = "mining_submenu_button";
pub const TABLE_MINING_STATUS: &str = "mining_status_table";
pub const TABLE_MINING_DIFF_STATUS: &str = "mining_diff_status_table";

// Logs View
pub const VIEW_LOGS: &str = "logs_view";

// Mining View
pub const VIEW_VERSION: &str = "version_view";

@@ -0,0 +1,104 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use cursive::theme::{BaseColor, Color, ColorStyle};
use cursive::traits::Identifiable;
use cursive::view::View;
use cursive::views::BoxView;
use cursive::{Cursive, Printer};

use crate::tui::constants::VIEW_LOGS;
use cursive::utils::lines::spans::{LinesIterator, Row};
use cursive::utils::markup::StyledString;
use grin_util::logger::LogEntry;
use log::Level;
use std::collections::VecDeque;

/// Namespace-only marker type for the TUI "Logs" tab; carries no state —
/// all functionality lives in its associated functions below.
pub struct TUILogsView;

impl TUILogsView {
	/// Build the logs screen: a full-screen wrapper around a 200-entry
	/// log buffer, addressable via the internal "logs" id and registered
	/// under the VIEW_LOGS constant for the main menu.
	pub fn create() -> Box<dyn View> {
		let buffer = LogBufferView::new(200).with_id("logs");
		let screen = BoxView::with_full_screen(buffer);
		Box::new(screen.with_id(VIEW_LOGS))
	}

	/// Feed a newly received log entry into the buffer view, if it exists.
	pub fn update(c: &mut Cursive, entry: LogEntry) {
		c.call_on_id("logs", move |view: &mut LogBufferView| view.update(entry));
	}
}

/// Fixed-length FIFO of the most recent log entries, newest at the front.
struct LogBufferView {
	// Pre-filled to capacity with blank Info entries by `new`, so the length
	// stays constant and the draw logic never sees a growing buffer.
	buffer: VecDeque<LogEntry>,
}

impl LogBufferView {
fn new(size: usize) -> Self {
let mut buffer = VecDeque::new();
buffer.resize(
size,
LogEntry {
log: String::new(),
level: Level::Info,
},
);

LogBufferView { buffer }
}

fn update(&mut self, entry: LogEntry) {
self.buffer.push_front(entry);
self.buffer.pop_back();
}

fn color(level: Level) -> ColorStyle {
match level {
Level::Info => ColorStyle::new(
Color::Light(BaseColor::Green),
Color::Dark(BaseColor::Black),
),
Level::Warn => ColorStyle::new(
Color::Light(BaseColor::Yellow),
Color::Dark(BaseColor::Black),
),
Level::Error => {
ColorStyle::new(Color::Light(BaseColor::Red), Color::Dark(BaseColor::Black))
}
_ => ColorStyle::new(
Color::Light(BaseColor::White),
Color::Dark(BaseColor::Black),
),
}
}
}

impl View for LogBufferView {
	/// Render the buffered log entries bottom-up: the newest entry is drawn
	/// nearest the bottom edge of the view, with older entries stacked above.
	fn draw(&self, printer: &Printer) {
		// Rows already drawn, counted from the bottom of the printer.
		let mut i = 0;
		// Never consider more entries than there are visible rows.
		for entry in self.buffer.iter().take(printer.size.y) {
			printer.with_color(LogBufferView::color(entry.level), |p| {
				let log_message = StyledString::plain(&entry.log);
				// Wrap the message to the printer width; each Row is one
				// screen line of the (possibly multi-line) entry.
				let mut rows: Vec<Row> = LinesIterator::new(&log_message, printer.size.x).collect();
				rows.reverse(); // So stack traces are in the right order.
				for row in rows {
					for span in row.resolve(&log_message) {
						// Print upward from the bottom edge; saturating_sub
						// guards against underflow once the view is full.
						p.print((0, p.size.y.saturating_sub(i + 1)), span.content);
						i += 1;
					}
				}
			});
		}
	}
}
@@ -25,8 +25,8 @@ use cursive::views::{
use cursive::Cursive;

use crate::tui::constants::{
MAIN_MENU, ROOT_STACK, SUBMENU_MINING_BUTTON, VIEW_BASIC_STATUS, VIEW_MINING, VIEW_PEER_SYNC,
VIEW_VERSION,
MAIN_MENU, ROOT_STACK, SUBMENU_MINING_BUTTON, VIEW_BASIC_STATUS, VIEW_LOGS, VIEW_MINING,
VIEW_PEER_SYNC, VIEW_VERSION,
};

pub fn create() -> Box<dyn View> {
@@ -38,6 +38,7 @@ pub fn create() -> Box<dyn View> {
.get_mut()
.add_item("Peers and Sync", VIEW_PEER_SYNC);
main_menu.get_mut().add_item("Mining", VIEW_MINING);
main_menu.get_mut().add_item("Logs", VIEW_LOGS);
main_menu.get_mut().add_item("Version Info", VIEW_VERSION);
let change_view = |s: &mut Cursive, v: &&str| {
if *v == "" {
@@ -17,6 +17,7 @@ use chrono;
use humansize;
//
mod constants;
mod logs;
mod menu;
mod mining;
mod peers;
@@ -33,7 +33,7 @@ use crate::tui::table::{TableView, TableViewItem};
use crate::tui::types::TUIStatusListener;

#[derive(Copy, Clone, PartialEq, Eq, Hash)]
enum PeerColumn {
pub enum PeerColumn {
Address,
State,
UsedBandwidth,
@@ -31,6 +31,109 @@ const NANO_TO_MILLIS: f64 = 1.0 / 1_000_000.0;

/// Stateless renderer for the basic status screen of the TUI.
pub struct TUIStatusView;

impl TUIStatusView {
	/// Renders the given `SyncStatus` as the single human-readable status line
	/// shown in the TUI, including the "Sync step x/7" progress prefix and a
	/// percentage where one can be computed.
	fn update_sync_status(sync_status: SyncStatus) -> String {
		match sync_status {
			SyncStatus::Initial => "Initializing".to_string(),
			SyncStatus::NoSync => "Running".to_string(),
			SyncStatus::AwaitingPeers(_) => "Waiting for peers".to_string(),
			SyncStatus::HeaderSync {
				current_height,
				highest_height,
			} => {
				// Guard against division by zero before any peer height is known.
				let percent = if highest_height == 0 {
					0
				} else {
					current_height * 100 / highest_height
				};
				format!("Sync step 1/7: Downloading headers: {}%", percent)
			}
			SyncStatus::TxHashsetDownload {
				start_time,
				prev_update_time,
				update_time: _,
				prev_downloaded_size,
				downloaded_size,
				total_size,
			} => {
				if total_size > 0 {
					// total_size > 0 is guaranteed by this branch, so the
					// division is safe (the previous inner check was redundant).
					let percent = downloaded_size * 100 / total_size;
					let start = prev_update_time.timestamp_nanos();
					let fin = Utc::now().timestamp_nanos();
					let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;
					// Bytes received since the last update divided by elapsed
					// milliseconds, i.e. approximately kB/s. saturating_sub
					// guards against a counter that unexpectedly went backwards
					// (plain subtraction would panic on u64 underflow in debug).
					let rate = if dur_ms > 1.0f64 {
						downloaded_size.saturating_sub(prev_downloaded_size) as f64 / dur_ms
					} else {
						0f64
					};

					format!("Sync step 2/7: Downloading {}(MB) chain state for state sync: {}% at {:.1?}(kB/s)",
						total_size / 1_000_000,
						percent,
						rate,
					)
				} else {
					// Download has not started; report how long we have been
					// waiting on the remote peer.
					let start = start_time.timestamp_millis();
					let fin = Utc::now().timestamp_millis();
					let dur_secs = (fin - start) / 1000;

					format!("Sync step 2/7: Downloading chain state for state sync. Waiting remote peer to start: {}s",
						dur_secs,
					)
				}
			}
			SyncStatus::TxHashsetSetup => {
				"Sync step 3/7: Preparing chain state for validation".to_string()
			}
			SyncStatus::TxHashsetRangeProofsValidation {
				rproofs,
				rproofs_total,
			} => {
				let r_percent = if rproofs_total > 0 {
					(rproofs * 100) / rproofs_total
				} else {
					0
				};
				format!(
					"Sync step 4/7: Validating chain state - range proofs: {}%",
					r_percent
				)
			}
			SyncStatus::TxHashsetKernelsValidation {
				kernels,
				kernels_total,
			} => {
				let k_percent = if kernels_total > 0 {
					(kernels * 100) / kernels_total
				} else {
					0
				};
				format!(
					"Sync step 5/7: Validating chain state - kernels: {}%",
					k_percent
				)
			}
			SyncStatus::TxHashsetSave => {
				"Sync step 6/7: Finalizing chain state for state sync".to_string()
			}
			SyncStatus::TxHashsetDone => {
				"Sync step 6/7: Finalized chain state for state sync".to_string()
			}
			SyncStatus::BodySync {
				current_height,
				highest_height,
			} => {
				let percent = if highest_height == 0 {
					0
				} else {
					current_height * 100 / highest_height
				};
				format!("Sync step 7/7: Downloading blocks: {}%", percent)
			}
			SyncStatus::Shutdown => "Shutting down, closing connections".to_string(),
		}
	}
}

impl TUIStatusListener for TUIStatusView {
/// Create basic status view
fn create() -> Box<dyn View> {
@@ -109,12 +212,18 @@ impl TUIStatusListener for TUIStatusView {
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new("Transaction Pool Size: "))
.child(TextView::new(" ").with_id("tx_pool_size")),
.child(TextView::new("0").with_id("tx_pool_size"))
.child(TextView::new(" ("))
.child(TextView::new("0").with_id("tx_pool_kernels"))
.child(TextView::new(")")),
)
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new("Stem Pool Size: "))
.child(TextView::new(" ").with_id("stem_pool_size")),
.child(TextView::new("0").with_id("stem_pool_size"))
.child(TextView::new(" ("))
.child(TextView::new("0").with_id("stem_pool_kernels"))
.child(TextView::new(")")),
)
.child(
LinearLayout::new(Orientation::Horizontal).child(TextView::new(
@@ -137,137 +246,9 @@ impl TUIStatusListener for TUIStatusView {
Box::new(basic_status_view.with_id(VIEW_BASIC_STATUS))
}

/// update
fn update(c: &mut Cursive, stats: &ServerStats) {
//find and update here as needed
let basic_status = {
match stats.sync_status {
SyncStatus::Initial => "Initializing".to_string(),
SyncStatus::NoSync => "Running".to_string(),
SyncStatus::AwaitingPeers(_) => "Waiting for peers".to_string(),
SyncStatus::HeaderSync {
current_height,
highest_height,
} => {
let percent = if highest_height == 0 {
0
} else {
current_height * 100 / highest_height
};
format!("Downloading headers: {}%, step 1/4", percent)
}
SyncStatus::TxHashsetDownload {
start_time,
prev_update_time,
update_time: _,
prev_downloaded_size,
downloaded_size,
total_size,
} => {
if total_size > 0 {
let percent = if total_size > 0 {
downloaded_size * 100 / total_size
} else {
0
};
let start = prev_update_time.timestamp_nanos();
let fin = Utc::now().timestamp_nanos();
let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;
let basic_status = TUIStatusView::update_sync_status(stats.sync_status);

format!("Downloading {}(MB) chain state for state sync: {}% at {:.1?}(kB/s), step 2/4",
total_size / 1_000_000,
percent,
if dur_ms > 1.0f64 { (downloaded_size - prev_downloaded_size) as f64 / dur_ms as f64 } else { 0f64 },
)
} else {
let start = start_time.timestamp_millis();
let fin = Utc::now().timestamp_millis();
let dur_secs = (fin - start) / 1000;

format!("Downloading chain state for state sync. Waiting remote peer to start: {}s, step 2/4",
dur_secs,
)
}
}
SyncStatus::TxHashsetSetup => {
"Preparing chain state for validation, step 3/4".to_string()
}
SyncStatus::TxHashsetValidation {
kernels,
kernel_total,
rproofs,
rproof_total,
} => {
// 10% of overall progress is attributed to kernel validation
// 90% to range proofs (which are much longer)
let mut percent = if kernel_total > 0 {
kernels * 10 / kernel_total
} else {
0
};
percent += if rproof_total > 0 {
rproofs * 90 / rproof_total
} else {
0
};
format!("Validating chain state: {}%, step 3/4", percent)
}
SyncStatus::TxHashsetSave => {
"Finalizing chain state for state sync, step 3/4".to_string()
}
SyncStatus::TxHashsetDone => {
"Finalized chain state for state sync, step 3/4".to_string()
}
SyncStatus::BodySync {
current_height,
highest_height,
} => {
let percent = if highest_height == 0 {
0
} else {
current_height * 100 / highest_height
};
format!("Downloading blocks: {}%, step 4/4", percent)
}
SyncStatus::Shutdown => "Shutting down, closing connections".to_string(),
}
};
/*let basic_mining_config_status = {
if stats.mining_stats.is_enabled {
"Configured as mining node"
} else {
"Configured as validating node only (not mining)"
}
};
let (basic_mining_status, basic_network_info) = {
if stats.mining_stats.is_enabled {
if stats.is_syncing {
(
"Mining Status: Paused while syncing".to_string(),
" ".to_string(),
)
} else if stats.mining_stats.combined_gps == 0.0 {
(
"Mining Status: Starting miner and awaiting first solution...".to_string(),
" ".to_string(),
)
} else {
(
format!(
"Mining Status: Mining at height {} at {:.*} GPS",
stats.mining_stats.block_height, 4, stats.mining_stats.combined_gps
),
format!(
"Cuckoo {} - Network Difficulty {}",
stats.mining_stats.edge_bits,
stats.mining_stats.network_difficulty.to_string()
),
)
}
} else {
(" ".to_string(), " ".to_string())
}
};*/
c.call_on_id("basic_current_status", |t: &mut TextView| {
t.set_content(basic_status);
});
@@ -289,32 +270,53 @@ impl TUIStatusListener for TUIStatusView {
c.call_on_id("chain_timestamp", |t: &mut TextView| {
t.set_content(stats.chain_stats.latest_timestamp.to_string());
});
c.call_on_id("basic_header_tip_hash", |t: &mut TextView| {
t.set_content(stats.header_stats.last_block_h.to_string() + "...");
});
c.call_on_id("basic_header_chain_height", |t: &mut TextView| {
t.set_content(stats.header_stats.height.to_string());
});
c.call_on_id("basic_header_total_difficulty", |t: &mut TextView| {
t.set_content(stats.header_stats.total_difficulty.to_string());
});
c.call_on_id("basic_header_timestamp", |t: &mut TextView| {
t.set_content(stats.header_stats.latest_timestamp.to_string());
});
c.call_on_id("tx_pool_size", |t: &mut TextView| {
t.set_content(stats.tx_stats.tx_pool_size.to_string());
});
c.call_on_id("stem_pool_size", |t: &mut TextView| {
t.set_content(stats.tx_stats.stem_pool_size.to_string());
});
/*c.call_on_id("basic_mining_config_status", |t: &mut TextView| {
t.set_content(basic_mining_config_status);
});
c.call_on_id("basic_mining_status", |t: &mut TextView| {
t.set_content(basic_mining_status);
});
c.call_on_id("basic_network_info", |t: &mut TextView| {
t.set_content(basic_network_info);
});*/
if let Some(header_stats) = &stats.header_stats {
c.call_on_id("basic_header_tip_hash", |t: &mut TextView| {
t.set_content(header_stats.last_block_h.to_string() + "...");
});
c.call_on_id("basic_header_chain_height", |t: &mut TextView| {
t.set_content(header_stats.height.to_string());
});
c.call_on_id("basic_header_total_difficulty", |t: &mut TextView| {
t.set_content(header_stats.total_difficulty.to_string());
});
c.call_on_id("basic_header_timestamp", |t: &mut TextView| {
t.set_content(header_stats.latest_timestamp.to_string());
});
}
if let Some(tx_stats) = &stats.tx_stats {
c.call_on_id("tx_pool_size", |t: &mut TextView| {
t.set_content(tx_stats.tx_pool_size.to_string());
});
c.call_on_id("stem_pool_size", |t: &mut TextView| {
t.set_content(tx_stats.stem_pool_size.to_string());
});
c.call_on_id("tx_pool_kernels", |t: &mut TextView| {
t.set_content(tx_stats.tx_pool_kernels.to_string());
});
c.call_on_id("stem_pool_kernels", |t: &mut TextView| {
t.set_content(tx_stats.stem_pool_kernels.to_string());
});
}
}
}

#[test]
fn test_status_txhashset_kernels() {
	// 201 of 5000 kernels validated => 4% (integer division of 201*100/5000).
	let status = SyncStatus::TxHashsetKernelsValidation {
		kernels: 201,
		kernels_total: 5000,
	};
	let basic_status = TUIStatusView::update_sync_status(status);
	// `assert!(cond, msg)` with a non-literal message is deprecated and a hard
	// error in the 2021 edition; pass an explicit format string instead.
	assert!(basic_status.contains("4%"), "{}", basic_status);
}

#[test]
fn test_status_txhashset_rproofs() {
	// 643 of 1000 range proofs validated => 64% (integer division).
	let status = SyncStatus::TxHashsetRangeProofsValidation {
		rproofs: 643,
		rproofs_total: 1000,
	};
	let basic_status = TUIStatusView::update_sync_status(status);
	// Explicit "{}" format string: a bare non-literal message in assert! is
	// deprecated and rejected by the 2021 edition.
	assert!(basic_status.contains("64%"), "{}", basic_status);
}
@@ -145,7 +145,7 @@ where
/// .default_column(BasicColumn::Name);
/// # }
/// ```
pub struct TableView<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> {
pub struct TableView<T: TableViewItem<H> + PartialEq, H: Eq + Hash + Copy + Clone + 'static> {
enabled: bool,
scrollbase: ScrollBase,
last_size: Vec2,
@@ -165,7 +165,7 @@ pub struct TableView<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static>
on_select: Option<Rc<dyn Fn(&mut Cursive, usize, usize)>>,
}

impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H> {
impl<T: TableViewItem<H> + PartialEq, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H> {
/// Creates a new empty `TableView` without any columns.
///
/// A TableView should be accompanied by a enum of type `H` representing
@@ -233,8 +233,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
pub fn sort_by(&mut self, column: H, order: Ordering) {
if self.column_indices.contains_key(&column) {
for c in &mut self.columns {
c.selected = c.column == column;
if c.selected {
if c.column == column {
c.order = order;
} else {
c.order = Ordering::Equal;
@@ -437,8 +436,19 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
/// Sets the contained items of the table.
///
/// The currently active sort order is preserved and will be applied to all
/// items.
/// items. The selected item will also be preserved.
pub fn set_items(&mut self, items: Vec<T>) {
let mut new_location = 0;
if let Some(old_item_location) = self.item() {
let old_item = self.items.get(old_item_location).unwrap();
for (i, new_item) in items.iter().enumerate() {
if old_item == new_item {
new_location = i;
break;
}
}
}

self.items = items;
self.rows_to_items = Vec::with_capacity(self.items.len());

@@ -453,7 +463,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
self.scrollbase
.set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len());

self.set_selected_row(0);
self.set_selected_item(new_location);
}

/// Sets the contained items of the table.
@@ -492,7 +502,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
/// Returns the index of the currently selected item within the underlying
/// storage vector.
pub fn item(&self) -> Option<usize> {
if self.items.is_empty() {
if self.items.is_empty() || self.focus >= self.rows_to_items.len() {
None
} else {
Some(self.rows_to_items[self.focus])
@@ -580,7 +590,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
}
}

impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H> {
impl<T: TableViewItem<H> + PartialEq, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H> {
fn draw_columns<C: Fn(&Printer<'_, '_>, &TableColumn<H>)>(
&self,
printer: &Printer<'_, '_>,
@@ -604,7 +614,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>

fn sort_items(&mut self, column: H, order: Ordering) {
if !self.is_empty() {
let old_item = self.item().unwrap();
let old_item = self.item();

let mut rows_to_items = self.rows_to_items.clone();
rows_to_items.sort_by(|a, b| {
@@ -616,7 +626,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
});
self.rows_to_items = rows_to_items;

self.set_selected_item(old_item);
old_item.map(|o| self.set_selected_item(o));
}
}

@@ -689,7 +699,7 @@ impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H>
}
}

impl<T: TableViewItem<H> + 'static, H: Eq + Hash + Copy + Clone + 'static> View
impl<T: TableViewItem<H> + PartialEq + 'static, H: Eq + Hash + Copy + Clone + 'static> View
for TableView<T, H>
{
fn draw(&self, printer: &Printer<'_, '_>) {
@@ -981,3 +991,75 @@ impl<H: Copy + Clone + 'static> TableColumn<H> {
printer.print((0, 0), value.as_str());
}
}

#[cfg(test)]
mod test {
	use crate::tui::peers::PeerColumn;
	use crate::tui::table::TableView;
	use chrono::Utc;
	use grin_core::ser::ProtocolVersion;
	use grin_servers::PeerStats;
	use std::cmp::Ordering;

	// After replacing the items with a reordered list, the selection should
	// follow the originally selected item (matched by PartialEq), not stay at
	// the same row index.
	#[test]
	pub fn test_set_items_preserves_selected_item() {
		let mut table = TableView::<PeerStats, PeerColumn>::new();
		let ps1 = PeerStats {
			addr: "123.0.0.1".to_string(),
			..TestPeerStats::default()
		};
		let ps2 = PeerStats {
			addr: "123.0.0.2".to_string(),
			..TestPeerStats::default()
		};

		let mut items = vec![ps1, ps2];
		table.set_items(items.clone());
		// Initial selection defaults to the first item.
		assert_eq!(table.item().unwrap(), 0);

		items.reverse();
		table.set_items(items);
		// ps1 is now at index 1; the selection should have followed it.
		assert_eq!(table.item().unwrap(), 1);
	}

	// Sorting by a column should reorder rows_to_items accordingly.
	#[test]
	pub fn test_set_items_preserves_order() {
		let mut table = TableView::<PeerStats, PeerColumn>::new();
		let ps1 = PeerStats {
			addr: "123.0.0.1".to_string(),
			received_bytes_per_sec: 10,
			..TestPeerStats::default()
		};
		let ps2 = PeerStats {
			addr: "123.0.0.2".to_string(),
			received_bytes_per_sec: 80,
			..TestPeerStats::default()
		};

		let items = vec![ps1, ps2];
		table.set_items(items);
		// Unsorted: rows map to items in insertion order.
		assert_eq!(table.rows_to_items[0], 0);
		table.sort_by(PeerColumn::UsedBandwidth, Ordering::Greater);

		// Descending bandwidth puts ps2 (80 B/s) first.
		assert_eq!(table.rows_to_items[0], 1);
	}

	// Helper newtype whose only purpose is the `default()` fixture below;
	// the wrapped PeerStats field itself is never read.
	struct TestPeerStats(PeerStats);

	impl TestPeerStats {
		// Baseline PeerStats fixture; tests override individual fields via
		// struct-update syntax.
		fn default() -> PeerStats {
			PeerStats {
				state: "Connected".to_string(),
				addr: "127.0.0.1".to_string(),
				version: ProtocolVersion::local(),
				user_agent: "".to_string(),
				total_difficulty: 0,
				height: 0,
				direction: "Outbound".to_string(),
				last_seen: Utc::now(),
				sent_bytes_per_sec: 0,
				received_bytes_per_sec: 0,
			}
		}
	}
}
@@ -26,21 +26,23 @@ use cursive::theme::{BaseColor, BorderStyle, Color, Theme};
use cursive::traits::Boxable;
use cursive::traits::Identifiable;
use cursive::utils::markup::StyledString;
use cursive::views::{LinearLayout, Panel, StackView, TextView, ViewBox};
use cursive::views::{CircularFocus, Dialog, LinearLayout, Panel, StackView, TextView, ViewBox};
use cursive::Cursive;
use std::sync::mpsc;

use crate::built_info;
use crate::servers::Server;
use crate::tui::constants::ROOT_STACK;
use crate::tui::types::{TUIStatusListener, UIMessage};
use crate::tui::{menu, mining, peers, status, version};
use crate::tui::{logs, menu, mining, peers, status, version};
use grin_util::logger::LogEntry;

/// Top-level TUI state: the cursive instance plus the channels wiring it to
/// the controller and the logger.
pub struct UI {
	// Root cursive instance driving all views.
	cursive: Cursive,
	// Receiving end for UI update messages (stats refreshes etc.).
	ui_rx: mpsc::Receiver<UIMessage>,
	// Sending end handed out to producers of UI update messages.
	ui_tx: mpsc::Sender<UIMessage>,
	// Forwards user actions (e.g. shutdown) to the controller.
	controller_tx: mpsc::Sender<ControllerMessage>,
	// Stream of log entries rendered by the logs view.
	logs_rx: mpsc::Receiver<LogEntry>,
}

fn modify_theme(theme: &mut Theme) {
@@ -57,19 +59,25 @@ fn modify_theme(theme: &mut Theme) {

impl UI {
/// Create a new UI
pub fn new(controller_tx: mpsc::Sender<ControllerMessage>) -> UI {
pub fn new(
controller_tx: mpsc::Sender<ControllerMessage>,
logs_rx: mpsc::Receiver<LogEntry>,
) -> UI {
let (ui_tx, ui_rx) = mpsc::channel::<UIMessage>();

let mut grin_ui = UI {
cursive: Cursive::default(),
ui_tx: ui_tx,
ui_rx: ui_rx,
controller_tx: controller_tx,
ui_tx,
ui_rx,
controller_tx,
logs_rx,
};

// Create UI objects, etc
let status_view = status::TUIStatusView::create();
let mining_view = mining::TUIMiningView::create();
let peer_view = peers::TUIPeerView::create();
let logs_view = logs::TUILogsView::create();
let version_view = version::TUIVersionView::create();

let main_menu = menu::create();
@@ -78,6 +86,7 @@ impl UI {
.layer(version_view)
.layer(mining_view)
.layer(peer_view)
.layer(logs_view)
.layer(status_view)
.with_id(ROOT_STACK)
.full_height();
@@ -108,7 +117,11 @@ impl UI {

// Configure a callback (shutdown, for the first test)
let controller_tx_clone = grin_ui.controller_tx.clone();
grin_ui.cursive.add_global_callback('q', move |_| {
grin_ui.cursive.add_global_callback('q', move |c| {
let content = StyledString::styled("Shutting down...", Color::Light(BaseColor::Yellow));
c.add_layer(CircularFocus::wrap_tab(Dialog::around(TextView::new(
content,
))));
controller_tx_clone
.send(ControllerMessage::Shutdown)
.unwrap();
@@ -124,6 +137,10 @@ impl UI {
return false;
}

while let Some(message) = self.logs_rx.try_iter().next() {
logs::TUILogsView::update(&mut self.cursive, message);
}

// Process any pending UI messages
while let Some(message) = self.ui_rx.try_iter().next() {
match message {
@@ -158,13 +175,14 @@ pub enum ControllerMessage {

impl Controller {
/// Create a new controller
pub fn new() -> Result<Controller, String> {
pub fn new(logs_rx: mpsc::Receiver<LogEntry>) -> Result<Controller, String> {
let (tx, rx) = mpsc::channel::<ControllerMessage>();
Ok(Controller {
rx: rx,
ui: UI::new(tx),
rx,
ui: UI::new(tx, logs_rx),
})
}

/// Run the controller
pub fn run(&mut self, server: Server) {
let stat_update_interval = 1;
@@ -173,8 +191,8 @@ impl Controller {
while let Some(message) = self.rx.try_iter().next() {
match message {
ControllerMessage::Shutdown => {
warn!("Shutdown in progress, please wait");
self.ui.stop();
println!("Shutdown in progress, please wait");
server.stop();
return;
}
@@ -1,15 +1,16 @@
[package]
name = "grin_store"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
workspace = ".."
edition = "2018"

[dependencies]
bit-vec = "0.6"
byteorder = "1"
croaring = "0.3.9"
env_logger = "0.5"
@@ -23,8 +24,8 @@ serde = "1"
serde_derive = "1"
log = "0.4"

grin_core = { path = "../core", version = "2.1.0-beta.3" }
grin_util = { path = "../util", version = "2.1.0-beta.3" }
grin_core = { path = "../core", version = "3.0.0" }
grin_util = { path = "../util", version = "3.0.0" }

[dev-dependencies]
chrono = "0.4.4"
@@ -115,10 +115,8 @@ impl LeafSet {

// First remove pos from leaf_set that were
// added after the point we are rewinding to.
let marker_from = cutoff_pos;
let marker_to = self.bitmap.maximum() as u64;
let rewind_add_pos: Bitmap = ((marker_from + 1)..=marker_to).map(|x| x as u32).collect();
bitmap.andnot_inplace(&rewind_add_pos);
let to_remove = ((cutoff_pos + 1) as u32)..bitmap.maximum();
bitmap.remove_range_closed(to_remove);

// Then add back output pos to the leaf_set
// that were removed.
@@ -136,10 +134,8 @@ impl LeafSet {
pub fn rewind(&mut self, cutoff_pos: u64, rewind_rm_pos: &Bitmap) {
// First remove pos from leaf_set that were
// added after the point we are rewinding to.
let marker_from = cutoff_pos;
let marker_to = self.bitmap.maximum() as u64;
let rewind_add_pos: Bitmap = ((marker_from + 1)..=marker_to).map(|x| x as u32).collect();
self.bitmap.andnot_inplace(&rewind_add_pos);
let to_remove = ((cutoff_pos + 1) as u32)..self.bitmap.maximum();
self.bitmap.remove_range_closed(to_remove);

// Then add back output pos to the leaf_set
// that were removed.
@@ -140,6 +140,36 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
}
}

/// Number of leaves still present (i.e. not pruned away).
/// A non-prunable backend keeps every leaf, so the count is derived directly
/// from the total unpruned MMR size; a prunable one tracks it via the leaf_set.
fn n_unpruned_leaves(&self) -> u64 {
	if !self.prunable {
		return pmmr::n_leaves(self.unpruned_size());
	}
	self.leaf_set.len() as u64
}

/// Returns an iterator over all the leaf insertion indices (0-indexed).
/// If our pos are [1,2,4,5,8] (first 5 leaf pos) then our insertion indices are [0,1,2,3,4]
///
/// # Panics
/// Panics if this backend is not prunable (no leaf_set is maintained in that
/// case, so the iteration cannot be answered from here).
fn leaf_idx_iter(&self, from_idx: u64) -> Box<dyn Iterator<Item = u64> + '_> {
	// Convert the 0-based insertion index to its 1-based MMR position
	// (insertion index 0 corresponds to pmmr pos 1) so we can skip every
	// leaf_set entry prior to it.
	let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1);

	if self.prunable {
		Box::new(
			self.leaf_set
				.iter()
				.skip_while(move |x| *x < from_pos)
				// Map each pos back to its 0-based insertion index.
				.map(|x| pmmr::n_leaves(x).saturating_sub(1)),
		)
	} else {
		panic!("leaf_idx_iter not implemented for non-prunable PMMR")
	}
}

fn data_as_temp_file(&self) -> Result<File, String> {
self.data_file
.as_temp_file()
@@ -28,13 +28,43 @@ use crate::core::ser::{
Writer,
};

#[test]
fn pmmr_leaf_idx_iter() {
	let (data_dir, elems) = setup("leaf_idx_iter");
	{
		let mut backend = store::pmmr::PMMRBackend::new(
			data_dir.to_string(),
			true,
			false,
			ProtocolVersion(1),
			None,
		)
		.unwrap();

		// Add the first 5 elements and sync (elems[0..5] is five items).
		let mmr_size = load(0, &elems[0..5], &mut backend);
		backend.sync().unwrap();

		{
			let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
			let leaf_idx = pmmr.leaf_idx_iter(0).collect::<Vec<_>>();
			let leaf_pos = pmmr.leaf_pos_iter().collect::<Vec<_>>();

			// The first 5 leaves [0,1,2,3,4] are at pos [1,2,4,5,8] in the MMR.
			assert_eq!(leaf_idx, vec![0, 1, 2, 3, 4]);
			assert_eq!(leaf_pos, vec![1, 2, 4, 5, 8]);
		}
	}
	teardown(data_dir);
}

#[test]
fn pmmr_append() {
let (data_dir, elems) = setup("append");
{
let mut backend = store::pmmr::PMMRBackend::new(
data_dir.to_string(),
true,
false,
false,
ProtocolVersion(1),
None,
@@ -53,6 +83,7 @@ fn pmmr_append() {
// Note: 1-indexed PMMR API
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);

assert_eq!(pmmr.n_unpruned_leaves(), 4);
assert_eq!(pmmr.get_data(1), Some(elems[0]));
assert_eq!(pmmr.get_data(2), Some(elems[1]));

@@ -88,6 +119,8 @@ fn pmmr_append() {
// Note: 1-indexed PMMR API
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);

assert_eq!(pmmr.n_unpruned_leaves(), 9);

// First pair of leaves.
assert_eq!(pmmr.get_data(1), Some(elems[0]));
assert_eq!(pmmr.get_data(2), Some(elems[1]));
@@ -132,6 +165,7 @@ fn pmmr_compact_leaf_sibling() {
let mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();

assert_eq!(backend.n_unpruned_leaves(), 19);
// On far left of the MMR -
// pos 1 and 2 are leaves (and siblings)
// the parent is pos 3
@@ -159,6 +193,8 @@ fn pmmr_compact_leaf_sibling() {
{
let pmmr = PMMR::at(&mut backend, mmr_size);

assert_eq!(pmmr.n_unpruned_leaves(), 17);

// check that pos 1 is "removed"
assert_eq!(pmmr.get_hash(1), None);

@@ -1,8 +1,8 @@
[package]
name = "grin_util"
version = "2.1.0-beta.3"
version = "3.0.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
description = "Simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
@@ -43,7 +43,7 @@ pub mod secp_static;
pub use crate::secp_static::static_secp_instance;

pub mod types;
pub use crate::types::{LogLevel, LoggingConfig, ZeroingString};
pub use crate::types::ZeroingString;

pub mod macros;

@@ -18,9 +18,7 @@ use std::ops::Deref;
use backtrace::Backtrace;
use std::{panic, thread};

use crate::types::{self, LogLevel, LoggingConfig};

use log::{LevelFilter, Record};
use log::{Level, Record};
use log4rs;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::file::FileAppender;
@@ -32,17 +30,12 @@ use log4rs::append::rolling_file::{
use log4rs::append::Append;
use log4rs::config::{Appender, Config, Root};
use log4rs::encode::pattern::PatternEncoder;
use log4rs::encode::writer::simple::SimpleWriter;
use log4rs::encode::Encode;
use log4rs::filter::{threshold::ThresholdFilter, Filter, Response};

/// Translates our configuration `LogLevel` into the `log` crate's
/// `LevelFilter` equivalent.
fn convert_log_level(in_level: &LogLevel) -> LevelFilter {
	// Match ergonomics let us match the reference directly.
	match in_level {
		LogLevel::Error => LevelFilter::Error,
		LogLevel::Warning => LevelFilter::Warn,
		LogLevel::Info => LevelFilter::Info,
		LogLevel::Debug => LevelFilter::Debug,
		LogLevel::Trace => LevelFilter::Trace,
	}
}
use std::error::Error;
use std::sync::mpsc;
use std::sync::mpsc::SyncSender;

lazy_static! {
/// Flag to observe whether logging was explicitly initialised (don't output otherwise)
@@ -56,6 +49,57 @@ lazy_static! {

const LOGGING_PATTERN: &str = "{d(%Y%m%d %H:%M:%S%.3f)} {h({l})} {M} - {m}{n}";

/// 32 log files to rotate over by default
// The literal is already typed by the const declaration; `32 as u32` was a
// redundant cast (clippy: unnecessary_cast).
const DEFAULT_ROTATE_LOG_FILES: u32 = 32;

/// Log Entry
#[derive(Clone, Serialize, Debug)]
pub struct LogEntry {
	/// The formatted log message
	pub log: String,
	/// The log level
	pub level: Level,
}

/// Logging config
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct LoggingConfig {
	/// whether to log to stdout
	pub log_to_stdout: bool,
	/// logging level for stdout
	pub stdout_log_level: Level,
	/// whether to log to file
	pub log_to_file: bool,
	/// log file level
	pub file_log_level: Level,
	/// Log file path
	pub log_file_path: String,
	/// Whether to append to log or replace
	pub log_file_append: bool,
	/// Size of the log in bytes to rotate over (optional)
	pub log_max_size: Option<u64>,
	/// Number of the log files to rotate over (optional)
	pub log_max_files: Option<u32>,
	/// Whether the tui is running (optional); when true, stdout logging is
	/// routed to the TUI log view instead of the terminal
	pub tui_running: Option<bool>,
}

impl Default for LoggingConfig {
	/// Sensible defaults: warnings to stdout, info-level records appended to
	/// `grin.log`, rotating across 32 files of 16 MB each.
	fn default() -> LoggingConfig {
		LoggingConfig {
			log_to_stdout: true,
			stdout_log_level: Level::Warn,
			log_to_file: true,
			file_log_level: Level::Info,
			log_file_path: "grin.log".to_string(),
			log_file_append: true,
			log_max_size: Some(16 * 1024 * 1024), // 16 megabytes default
			log_max_files: Some(DEFAULT_ROTATE_LOG_FILES),
			tui_running: None,
		}
	}
}

/// This filter is rejecting messages that doesn't start with "grin"
/// in order to save log space for only Grin-related records
#[derive(Debug)]
@@ -73,8 +117,32 @@ impl Filter for GrinFilter {
}
}

/// log4rs appender that formats each record and forwards it over a channel,
/// feeding the TUI log view instead of stdout.
#[derive(Debug)]
struct ChannelAppender {
	// Sender side of the log channel, wrapped in a Mutex because
	// `Append::append` only receives `&self`.
	// NOTE(review): `.lock()` below is used without unwrap, which suggests a
	// parking_lot-style Mutex re-export — confirm against the file's imports.
	output: Mutex<SyncSender<LogEntry>>,
	// Encoder that renders a record into its display string.
	encoder: Box<dyn Encode>,
}

impl Append for ChannelAppender {
	/// Renders `record` with the configured encoder and forwards it over the
	/// channel as a `LogEntry`. Channel errors (full or disconnected) are
	/// deliberately ignored: logging must never block or crash the node.
	fn append(&self, record: &Record) -> Result<(), Box<dyn Error + Sync + Send>> {
		let mut writer = SimpleWriter(Vec::new());
		self.encoder.encode(&mut writer, record)?;

		let entry = LogEntry {
			log: String::from_utf8_lossy(&writer.0).into_owned(),
			level: record.level(),
		};
		// Best effort: drop the entry rather than block when the channel is full.
		let _ = self.output.lock().try_send(entry);

		Ok(())
	}

	fn flush(&self) {}
}

/// Initialize the logger with the given configuration
pub fn init_logger(config: Option<LoggingConfig>) {
pub fn init_logger(config: Option<LoggingConfig>, logs_tx: Option<mpsc::SyncSender<LogEntry>>) {
if let Some(c) = config {
let tui_running = c.tui_running.unwrap_or(false);
if tui_running {
@@ -86,8 +154,8 @@ pub fn init_logger(config: Option<LoggingConfig>) {
let mut config_ref = LOGGING_CONFIG.lock();
*config_ref = c.clone();

let level_stdout = convert_log_level(&c.stdout_log_level);
let level_file = convert_log_level(&c.file_log_level);
let level_stdout = c.stdout_log_level.to_level_filter();
let level_file = c.file_log_level.to_level_filter();

// Determine minimum logging level for Root logger
let level_minimum = if level_stdout > level_file {
@@ -105,15 +173,26 @@ pub fn init_logger(config: Option<LoggingConfig>) {

let mut appenders = vec![];

if c.log_to_stdout && !tui_running {
let filter = Box::new(ThresholdFilter::new(level_stdout));
if tui_running {
let channel_appender = ChannelAppender {
encoder: Box::new(PatternEncoder::new(&LOGGING_PATTERN)),
output: Mutex::new(logs_tx.unwrap()),
};

appenders.push(
Appender::builder()
.filter(filter)
.filter(Box::new(ThresholdFilter::new(level_stdout)))
.filter(Box::new(GrinFilter))
.build("tui", Box::new(channel_appender)),
);
root = root.appender("tui");
} else if c.log_to_stdout {
appenders.push(
Appender::builder()
.filter(Box::new(ThresholdFilter::new(level_stdout)))
.filter(Box::new(GrinFilter))
.build("stdout", Box::new(stdout)),
);

root = root.appender("stdout");
}

@@ -123,9 +202,7 @@ pub fn init_logger(config: Option<LoggingConfig>) {
let filter = Box::new(ThresholdFilter::new(level_file));
let file: Box<dyn Append> = {
if let Some(size) = c.log_max_size {
let count = c
.log_max_files
.unwrap_or_else(|| types::DEFAULT_ROTATE_LOG_FILES);
let count = c.log_max_files.unwrap_or_else(|| DEFAULT_ROTATE_LOG_FILES);
let roller = FixedWindowRoller::builder()
.build(&format!("{}.{{}}.gz", c.log_file_path), count)
.unwrap();
@@ -188,13 +265,13 @@ pub fn init_test_logger() {
}
let mut logger = LoggingConfig::default();
logger.log_to_file = false;
logger.stdout_log_level = LogLevel::Debug;
logger.stdout_log_level = Level::Debug;

// Save current logging configuration
let mut config_ref = LOGGING_CONFIG.lock();
*config_ref = logger;

let level_stdout = convert_log_level(&config_ref.stdout_log_level);
let level_stdout = config_ref.stdout_log_level.to_level_filter();
let level_minimum = level_stdout; // minimum logging level for Root logger

// Start logger
@@ -12,64 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Logging configuration types

/// Log level types
/// Log level types, as written in the configuration file; converted to the
/// `log` crate's filter levels at logger initialization.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum LogLevel {
	/// Error
	Error,
	/// Warning
	Warning,
	/// Info
	Info,
	/// Debug
	Debug,
	/// Trace
	Trace,
}

/// 32 log files to rotate over by default
// The const declaration already types the literal; `32 as u32` was a
// redundant cast (clippy: unnecessary_cast).
pub const DEFAULT_ROTATE_LOG_FILES: u32 = 32;

/// Logging config
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct LoggingConfig {
	/// whether to log to stdout
	pub log_to_stdout: bool,
	/// logging level for stdout
	pub stdout_log_level: LogLevel,
	/// whether to log to file
	pub log_to_file: bool,
	/// log file level
	pub file_log_level: LogLevel,
	/// Log file path
	pub log_file_path: String,
	/// Whether to append to log or replace
	pub log_file_append: bool,
	/// Size of the log in bytes to rotate over (optional)
	pub log_max_size: Option<u64>,
	/// Number of the log files to rotate over (optional)
	pub log_max_files: Option<u32>,
	/// Whether the tui is running (optional); when true, stdout logging is
	/// suppressed in favor of the TUI
	pub tui_running: Option<bool>,
}

impl Default for LoggingConfig {
	/// Defaults: warnings on stdout, info-level records appended to
	/// `grin.log`, rotating across 32 files of 16 MB each.
	fn default() -> LoggingConfig {
		LoggingConfig {
			log_to_stdout: true,
			stdout_log_level: LogLevel::Warning,
			log_to_file: true,
			file_log_level: LogLevel::Info,
			log_file_path: "grin.log".to_string(),
			log_file_append: true,
			log_max_size: Some(16 * 1024 * 1024), // 16 megabytes default
			log_max_files: Some(DEFAULT_ROTATE_LOG_FILES),
			tui_running: None,
		}
	}
}
//! Zeroing String

use std::ops::Deref;
use zeroize::Zeroize;