diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 9e638ec9df..c186ffc7b7 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,19 +1,9 @@ [advisories] ignore = [ - # All data structures using borsh ser/de have been reviewed for ZST's and we have found no reason for concern. - # TODO: Remove this line after borsh-rs has been upgraded to >=0.12.0 - # https://github.com/near/borsh-rs/pull/146 - "RUSTSEC-2023-0033", - - # Repo flagged as unmaintained but our clap dependency uses it - # TODO: Remove this if clap is upgraded to >=3.0.0 - "RUSTSEC-2021-0139", # We are not using a special allocator and will not suffer this issue "RUSTSEC-2021-0145", + # We are not using RSA + "RUSTSEC-2023-0071" - # PGP should be upgraded to 0.10.1 which removes the "unmaintained" dependency but we can't do this as pgp and snow - # specify different version dependencies for curve25519-dalek that are currently unresolvable. - # TODO: Check and see if pgp and snow can be resolved and if so, upgrade them and remove this ignore - "RUSTSEC-2023-0028", ] \ No newline at end of file diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000000..235745ca22 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,5 @@ +[target.aarch64-unknown-linux-gnu] +linker = "aarch64-linux-gnu-gcc" + +[target.riscv64gc-unknown-linux-gnu] +linker = "riscv64-linux-gnu-gcc" diff --git a/.config/nextest.toml b/.config/nextest.toml index b305587ac3..01af9351fd 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -3,3 +3,15 @@ slow-timeout = { period = "60s", terminate-after=2} [profile.ci.junit] # this can be some other profile, too path = "junit.xml" + +[profile.intellij] +retries = 0 +slow-timeout = { period = "30s" } +failure-output = "immediate-final" +fail-fast = false + +[profile.intellij.junit] # this can be some other profile, too +path = "junit.xml" + + + diff --git a/.github/workflows/base_node_binaries.json b/.github/workflows/base_node_binaries.json index 983bb5db96..c636da8df7 100644 --- a/.github/workflows/base_node_binaries.json +++ b/.github/workflows/base_node_binaries.json @@ -2,12 +2,9 @@ { "name": "linux-x86_64", "runs-on": "ubuntu-20.04", - "rust": "nightly-2023-06-04", + "rust": "nightly-2024-02-04", "target": "x86_64-unknown-linux-gnu", - "cross": false, - "target_cpu": "x86-64", - "target_bins": "--bin minotari_node --bin minotari_console_wallet --bin minotari_merge_mining_proxy --bin minotari_miner", - "features": "default, safe" + "cross": false }, { "name": "linux-arm64", @@ -15,31 +12,31 @@ "rust": "stable", "target": "aarch64-unknown-linux-gnu", "cross": true, - "target_cpu": "generic", - "features": "safe", - "target_bins": "--bin minotari_node --bin minotari_console_wallet --bin minotari_merge_mining_proxy --bin minotari_miner", "flags": "--workspace --exclude minotari_mining_helper_ffi --exclude tari_integration_tests" }, + { + "name": "linux-riscv64", + "runs-on": "ubuntu-latest", + "rust": "stable", + "target": "riscv64gc-unknown-linux-gnu", + "cross": true, + "flags": "--workspace --exclude minotari_mining_helper_ffi --exclude tari_integration_tests", + "build_enabled": true, + "best-effort": true + }, { "name": "macos-x86_64", "runs-on": "macos-11", "rust": "stable", "target": "x86_64-apple-darwin", - "cross": false, - "target_cpu": "x86-64", - "target_bins": "--bin minotari_node --bin minotari_console_wallet --bin minotari_merge_mining_proxy --bin minotari_miner", - "features": "default, safe" + "cross": false }, { "name": "macos-arm64", - "runs-on": 
"macos-12", + "runs-on": "macos-14", "rust": "stable", "target": "aarch64-apple-darwin", - "cross": false, - "target_cpu": "generic", - "target_bins": "--bin minotari_node --bin minotari_console_wallet --bin minotari_merge_mining_proxy --bin minotari_miner", - "features": "default, safe", - "build_enabled": true + "cross": false }, { "name": "windows-x64", @@ -47,9 +44,7 @@ "rust": "stable", "target": "x86_64-pc-windows-msvc", "cross": false, - "target_cpu": "x86-64", "features": "safe", - "target_bins": "--bin minotari_node --bin minotari_console_wallet --bin minotari_merge_mining_proxy --bin minotari_miner", "flags": "--workspace --exclude tari_libtor" }, { @@ -58,9 +53,9 @@ "rust": "stable", "target": "aarch64-pc-windows-msvc", "cross": false, - "target_cpu": "generic", "features": "safe", - "target_bins": "--bin minotari_node --bin minotari_console_wallet --bin minotari_merge_mining_proxy --bin minotari_miner", + "target_bins": "minotari_node, minotari_console_wallet, minotari_merge_mining_proxy, minotari_miner", + "flags": "--workspace --exclude tari_libtor", "build_enabled": false } ] diff --git a/.github/workflows/base_node_binaries.yml b/.github/workflows/base_node_binaries.yml index e3a52fd72d..6e54255573 100644 --- a/.github/workflows/base_node_binaries.yml +++ b/.github/workflows/base_node_binaries.yml @@ -6,7 +6,8 @@ name: Build Matrix of Binaries tags: - 'v[0-9]+.[0-9]+.[0-9]*' branches: - - build-* + - 'build-all-*' + - 'build-bins-*' schedule: - cron: "05 00 * * *" workflow_dispatch: @@ -18,21 +19,27 @@ name: Build Matrix of Binaries env: TBN_FILENAME: "tari_suite" - TBN_BUNDLEID_BASE: "com.tarilabs.pkg" - toolchain: nightly-2023-06-04 + TBN_BUNDLE_ID_BASE: "com.tarilabs" + TBN_SIG_FN: "sha256-unsigned.txt" + ## Must be a JSon string + TBN_FILES: '["minotari_node","minotari_console_wallet","minotari_miner","minotari_merge_mining_proxy"]' + TBN_FEATURES: "default, safe" + TBN_LIBRARIES: "minotari_mining_helper_ffi" + TARI_NETWORK_DIR: testnet + toolchain: nightly-2024-02-04 matrix-json-file: ".github/workflows/base_node_binaries.json" CARGO_HTTP_MULTIPLEXING: false CARGO_UNSTABLE_SPARSE_REGISTRY: true CARGO: cargo - # CARGO_OPTIONS: "--verbose" CARGO_OPTIONS: "--release" - # Needed for S3 as a default upload location - TARI_NETWORK_DIR: testnet - S3_DEST_OVERRIDE: "" + CARGO_CACHE: true concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true + # https://docs.github.com/en/actions/examples/using-concurrency-expressions-and-a-test-matrix + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: ${{ !startsWith(github.ref, 'refs/tags/v') || github.ref != 'refs/heads/development' || github.ref != 'refs/heads/nextnet' || github.ref != 'refs/heads/stagenet' }} + +permissions: {} jobs: matrix-prep: @@ -53,9 +60,9 @@ jobs: # # build only single target image # matrix_selection=$( jq -c '.[] | select( ."name" == "windows-x64" )' ${{ env.matrix-json-file }} ) - # matrix_selection=$( jq -c '.[] | select( ."name" == "macos-arm64" )' ${{ env.matrix-json-file }} ) + # matrix_selection=$( jq -c '.[] | select( ."name" | contains("linux") )' ${{ env.matrix-json-file }} ) # - # buid select target images - build_enabled + # build select target images - build_enabled matrix_selection=$( jq -c '.[] | select( ."build_enabled" != false )' ${{ env.matrix-json-file }} ) # # Setup the json build matrix @@ -84,10 +91,13 @@ jobs: builds: name: Building ${{ matrix.builds.name }} on ${{ 
matrix.builds.runs-on }} needs: matrix-prep + continue-on-error: ${{ matrix.builds.best-effort || false }} + outputs: + TARI_NETWORK_DIR: ${{ steps.set-tari-network.outputs.TARI_NETWORK_DIR }} + TARI_VERSION: ${{ steps.set-tari-vars.outputs.TARI_VERSION }} strategy: fail-fast: false matrix: ${{ fromJson(needs.matrix-prep.outputs.matrix) }} - runs-on: ${{ matrix.builds.runs-on }} steps: @@ -97,21 +107,65 @@ jobs: submodules: recursive - name: Declare TestNet for tags + id: set-tari-network + # Don't forget to comment out the below if, when force testing with GHA_NETWORK if: ${{ startsWith(github.ref, 'refs/tags/v') }} + env: + GHA_NETWORK: ${{ github.ref_name }} + # GHA_NETWORK: "v1.0.0-rc.4" shell: bash run: | - source buildtools/multinet_envs.sh ${{ github.ref_name }} + source buildtools/multinet_envs.sh ${{ env.GHA_NETWORK }} echo ${TARI_NETWORK} + echo ${TARI_TARGET_NETWORK} echo ${TARI_NETWORK_DIR} echo "TARI_NETWORK=${TARI_NETWORK}" >> $GITHUB_ENV + echo "TARI_TARGET_NETWORK=${TARI_TARGET_NETWORK}" >> $GITHUB_ENV echo "TARI_NETWORK_DIR=${TARI_NETWORK_DIR}" >> $GITHUB_ENV + echo "TARI_NETWORK_DIR=${TARI_NETWORK_DIR}" >> $GITHUB_OUTPUT - name: Declare Global Variables 4 GHA ${{ github.event_name }} - id: vars + id: set-tari-vars shell: bash run: | echo "VBRANCH=${{ github.ref_name }}" >> $GITHUB_ENV echo "VSHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV + TARI_VERSION=$(awk -F ' = ' '$1 ~ /version/ { gsub(/["]/, "", $2); printf("%s",$2) }' "$GITHUB_WORKSPACE/applications/minotari_node/Cargo.toml") + echo "TARI_VERSION=${TARI_VERSION}" >> $GITHUB_ENV + echo "TARI_VERSION=${TARI_VERSION}" >> $GITHUB_OUTPUT + if [[ "${{ matrix.builds.features }}" == "" ]]; then + echo "BUILD_FEATURES=${{ env.TBN_FEATURES }}" >> $GITHUB_ENV + else + echo "BUILD_FEATURES=${{ matrix.builds.features }}" >> $GITHUB_ENV + fi + TARGET_BINS="" + if [[ "${{ matrix.builds.target_bins }}" == "" ]]; then + ARRAY_BINS=( $(echo ${TBN_FILES} | jq --raw-output '.[]' | awk '{ print $1 }') ) + else + ARRAY_BINS=( $(echo "${{ matrix.builds.target_bins }}" | tr ', ' '\n') ) + fi + for BIN_FILE in "${ARRAY_BINS[@]}"; do + echo "Adding ${BIN_FILE} to Builds" + TARGET_BINS+="--bin ${BIN_FILE} " + done + echo "TARGET_BINS=${TARGET_BINS}" >> $GITHUB_ENV + TARGET_LIBS="" + if [[ "${{ matrix.builds.target_libs }}" == "" ]]; then + ARRAY_LIBS=( $(echo ${TBN_LIBRARIES} | tr ', ' '\n') ) + else + ARRAY_LIBS=( $(echo "${{ matrix.builds.target_libs }}" | tr ', ' '\n') ) + fi + for LIB_FILE in "${ARRAY_LIBS[@]}"; do + echo "Adding ${LIB_FILE} to library Builds" + TARGET_LIBS+="--package ${LIB_FILE} " + done + echo "TARGET_LIBS=${TARGET_LIBS}" >> $GITHUB_ENV + TARI_BUILD_ISA_CPU=${{ matrix.builds.target }} + # Strip unknown part + TARI_BUILD_ISA_CPU=${TARI_BUILD_ISA_CPU//-unknown-linux-gnu} + # Strip gc used by rust + TARI_BUILD_ISA_CPU=${TARI_BUILD_ISA_CPU//gc} + echo "TARI_BUILD_ISA_CPU=${TARI_BUILD_ISA_CPU}" >> $GITHUB_ENV - name: Scheduled Destination Folder Override if: ${{ github.event_name == 'schedule' && github.event.schedule == '05 00 * * *' }} @@ -132,12 +186,13 @@ jobs: sudo apt-get update sudo bash scripts/install_ubuntu_dependencies.sh - - name: Install Linux dependencies - Ubuntu - cross-compile arm64 on x86-64 - if: ${{ startsWith(runner.os,'Linux') && ( ! matrix.builds.cross ) && matrix.builds.name == 'linux-arm64' }} + - name: Install Linux dependencies - Ubuntu - cross-compiled ${{ env.TARI_BUILD_ISA_CPU }} on x86-64 + if: ${{ startsWith(runner.os,'Linux') && ( ! 
matrix.builds.cross ) && matrix.builds.name != 'linux-x86_64' }} run: | sudo apt-get update - sudo bash scripts/install_ubuntu_dependencies-arm64.sh + sudo bash scripts/install_ubuntu_dependencies-cross_compile.sh ${{ env.TARI_BUILD_ISA_CPU }} rustup target add ${{ matrix.builds.target }} + echo "PKG_CONFIG_SYSROOT_DIR=/usr/${{ env.TARI_BUILD_ISA_CPU }}-linux-gnu/" >> $GITHUB_ENV - name: Install macOS dependencies if: startsWith(runner.os,'macOS') @@ -176,6 +231,16 @@ jobs: echo "PLATFORM_SPECIFIC_DIR=osx" >> $GITHUB_ENV echo "LIB_EXT=.dylib" >> $GITHUB_ENV + # Hardcoded sdk for MacOSX on ARM64 + - name: Set environment variables - macOS - ARM64 (pin/sdk) + # Debug + if: ${{ false }} + # if: ${{ startsWith(runner.os,'macOS') && matrix.builds.name == 'macos-arm64' }} + run: | + xcrun --show-sdk-path + ls -alhtR "/Library/Developer/CommandLineTools/SDKs/" + echo "RANDOMX_RS_CMAKE_OSX_SYSROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX12.1.sdk" >> $GITHUB_ENV + - name: Set environment variables - Ubuntu if: startsWith(runner.os,'Linux') shell: bash @@ -202,7 +267,7 @@ jobs: echo "C:\Strawberry\perl\bin" >> $GITHUB_PATH - name: Cache cargo files and outputs - if: ${{ ( ! startsWith(github.ref, 'refs/tags/v') ) && ( ! matrix.builds.cross ) }} + if: ${{ ( ! startsWith(github.ref, 'refs/tags/v') ) && ( ! matrix.builds.cross ) && ( env.CARGO_CACHE ) }} uses: Swatinem/rust-cache@v2 with: key: ${{ matrix.builds.target }} @@ -235,38 +300,94 @@ jobs: run: | ${{ env.CARGO }} build ${{ env.CARGO_OPTIONS }} \ --target ${{ matrix.builds.target }} \ - --features "${{ matrix.builds.features }}" \ - ${{ matrix.builds.target_bins }} \ + --features "${{ env.BUILD_FEATURES }}" \ + ${{ env.TARGET_BINS }} \ + ${{ matrix.builds.flags }} --locked + + - name: Build release libraries + shell: bash + run: | + ${{ env.CARGO }} build ${{ env.CARGO_OPTIONS }} \ + --target ${{ matrix.builds.target }} \ + --lib ${{ env.TARGET_LIBS }} \ ${{ matrix.builds.flags }} --locked - name: Copy binaries to folder for archiving shell: bash run: | + set -xo pipefail mkdir -p "$GITHUB_WORKSPACE${TBN_DIST}" cd "$GITHUB_WORKSPACE${TBN_DIST}" - VERSION=$(awk -F ' = ' '$1 ~ /version/ { gsub(/["]/, "", $2); printf("%s",$2) }' "$GITHUB_WORKSPACE/applications/minotari_node/Cargo.toml") - echo "VERSION=${VERSION}" >> $GITHUB_ENV - echo "VSHA_SHORT=${VSHA_SHORT}" >> $GITHUB_ENV - BINFILE="${TBN_FILENAME}-${VERSION}-${VSHA_SHORT}-${{ matrix.builds.name }}${TBN_EXT}" + BINFILE="${TBN_FILENAME}-${TARI_VERSION}-${VSHA_SHORT}-${{ matrix.builds.name }}${TBN_EXT}" echo "BINFILE=${BINFILE}" >> $GITHUB_ENV echo "Copying files for ${BINFILE} to $(pwd)" echo "MTS_SOURCE=$(pwd)" >> $GITHUB_ENV - ls -la "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/" - FILES=( - "minotari_node" - "minotari_console_wallet" - "minotari_miner" - "minotari_merge_mining_proxy" - ) - for FILE in "${FILES[@]}"; do - if [ -f "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/${FILE}${TBN_EXT}" ]; then - cp -v "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/${FILE}${TBN_EXT}" . + ls -alht "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/" + ARRAY_FILES=( $(echo ${TBN_FILES} | jq --raw-output '.[]' | awk '{ print $1 }') ) + for FILE in "${ARRAY_FILES[@]}"; do + echo "checking for file - ${FILE}${TBN_EXT}" + if [ -f "${GITHUB_WORKSPACE}/target/${{ matrix.builds.target }}/release/${FILE}${TBN_EXT}" ]; then + cp -vf "${GITHUB_WORKSPACE}/target/${{ matrix.builds.target }}/release/${FILE}${TBN_EXT}" . 
fi done - if [ -f "$GITHUB_WORKSPACE/applications/minotari_node/${PLATFORM_SPECIFIC_DIR}/runtime/start_tor${SHELL_EXT}" ]; then - cp -v "$GITHUB_WORKSPACE/applications/minotari_node/${PLATFORM_SPECIFIC_DIR}/runtime/start_tor${SHELL_EXT}" . + if [[ "${{ matrix.builds.target_libs }}" == "" ]]; then + ARRAY_LIBS=( $(echo ${TBN_LIBRARIES} | tr ', ' '\n') ) + else + ARRAY_LIBS=( $(echo "${{ matrix.builds.target_libs }}" | tr ', ' '\n') ) fi - ls -la ${{ env.MTS_SOURCE }} + for FILE in "${ARRAY_LIBS[@]}"; do + echo "checking for file - ${FILE}${TBN_EXT}" + # Check on Nix for libs + if [ -f "${GITHUB_WORKSPACE}/target/${{ matrix.builds.target }}/release/lib${FILE}${LIB_EXT}" ]; then + cp -vf "${GITHUB_WORKSPACE}/target/${{ matrix.builds.target }}/release/lib${FILE}${LIB_EXT}" . + fi + # Check on Windows libs + if [ -f "${GITHUB_WORKSPACE}/target/${{ matrix.builds.target }}/release/${FILE}${LIB_EXT}" ]; then + cp -vf "${GITHUB_WORKSPACE}/target/${{ matrix.builds.target }}/release/${FILE}${LIB_EXT}" . + fi + done + if [ -f "${GITHUB_WORKSPACE}/applications/minotari_node/${PLATFORM_SPECIFIC_DIR}/runtime/start_tor${SHELL_EXT}" ]; then + cp -vf "${GITHUB_WORKSPACE}/applications/minotari_node/${PLATFORM_SPECIFIC_DIR}/runtime/start_tor${SHELL_EXT}" . + fi + ls -alhtR ${{ env.MTS_SOURCE }} + + - name: Build minotari_node metrics release binary for linux-x86_64 + if: ${{ startsWith(runner.os,'Linux') && ( ! matrix.builds.cross ) && matrix.builds.name == 'linux-x86_64' }} + shell: bash + run: | + ${{ env.CARGO }} build ${{ env.CARGO_OPTIONS }} \ + --target ${{ matrix.builds.target }} \ + --features "${{ env.BUILD_FEATURES }}, metrics" \ + --bin minotari_node \ + ${{ matrix.builds.flags }} --locked + cp -vf "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/minotari_node" "${{ env.MTS_SOURCE }}/minotari_node-metrics" + + - name: Build targeted miners + # if: ${{ ( startsWith(github.ref, 'refs/tags/v') ) && ( matrix.builds.miner_cpu_targets != '' ) }} + if: ${{ matrix.builds.miner_cpu_targets != '' }} + shell: bash + run: | + ARRAY_TARGETS=( $(echo "${{ matrix.builds.miner_cpu_targets }}" | tr ', ' '\n') ) + for CPU_TARGET in "${ARRAY_TARGETS[@]}"; do + echo "Target CPU ${CPU_TARGET} for miner" + export RUSTFLAGS="-C target-cpu=${CPU_TARGET}" + ${{ env.CARGO }} build ${{ env.CARGO_OPTIONS }} \ + --target ${{ matrix.builds.target }} \ + --features "${{ env.BUILD_FEATURES }}" \ + --bin minotari_miner \ + ${{ matrix.builds.flags }} --locked + cp -vf "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/minotari_miner" "${{ env.MTS_SOURCE }}/minotari_miner-${CPU_TARGET}" + done + + - name: Pre/unsigned OSX Artifact upload for Archive + # Debug + if: ${{ false }} + # if: startsWith(runner.os,'macOS') + continue-on-error: true + uses: actions/upload-artifact@v4 + with: + name: ${{ env.TBN_FILENAME }}_unsigned-archive-${{ matrix.builds.name }} + path: "${{ env.MTS_SOURCE }}/*" - name: Build the macOS pkg if: startsWith(runner.os,'macOS') @@ -291,97 +412,112 @@ jobs: security import application.p12 -k build.keychain -P $MACOS_APPLICATION_PASS -T /usr/bin/codesign security import installer.p12 -k build.keychain -P $MACOS_INSTALLER_PASS -T /usr/bin/pkgbuild security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k $MACOS_KEYCHAIN_PASS build.keychain + if [[ "${{ matrix.builds.name }}" == "macos-arm64" ]]; then + echo "Add codesign extra args for ${{ matrix.builds.name }}" + OSX_CODESIGN_EXTRAS="--entitlements ${GITHUB_WORKSPACE}/applications/minotari_node/osx-pkg/entitlements.xml" + fi 
cd buildtools export target_release="target/${{ matrix.builds.target }}/release" + mkdir -p "${{ runner.temp }}/osxpkg" + export tarball_parent="${{ runner.temp }}/osxpkg" + export tarball_source="${{ env.TARI_NETWORK_DIR }}" ./create_osx_install_zip.sh unused nozip - FILES=( - "minotari_node" - "minotari_console_wallet" - "minotari_miner" - "minotari_merge_mining_proxy" + ARRAY_FILES=( $(echo ${TBN_FILES} | jq --raw-output '.[]' | awk '{ print $1 }') ) + find "${GITHUB_WORKSPACE}/${target_release}" \ + -name "randomx-*" -type f -perm -+x \ + -exec cp -vf {} "${{ runner.temp }}/osxpkg/${{ env.TARI_NETWORK_DIR }}/runtime/" \; + FILES_DIAG_UTILS=( \ + $(find "${GITHUB_WORKSPACE}/${target_release}" \ + -name "randomx-*" -type f -perm -+x \ + -exec sh -c 'echo "$(basename "{}")"' \; \ + ) \ ) - for FILE in "${FILES[@]}"; do - codesign --options runtime --force --verify --verbose --timestamp --sign "Developer ID Application: $MACOS_APPLICATION_ID" "/tmp/tari_testnet/runtime/$FILE" - codesign --verify --deep --display --verbose=4 "/tmp/tari_testnet/runtime/$FILE" - cp -vf "/tmp/tari_testnet/runtime/$FILE" "$GITHUB_WORKSPACE${{ env.TBN_DIST }}" + ARRAY_FILES+=(${FILES_DIAG_UTILS[@]}) + for FILE in "${ARRAY_FILES[@]}"; do + codesign --options runtime --force --verify --verbose --timestamp ${OSX_CODESIGN_EXTRAS} \ + --prefix "${{ env.TBN_BUNDLE_ID_BASE }}.${{ env.TBN_FILENAME }}." \ + --sign "Developer ID Application: $MACOS_APPLICATION_ID" \ + "${{ runner.temp }}/osxpkg/${{ env.TARI_NETWORK_DIR }}/runtime/$FILE" + codesign --verify --deep --display --verbose=4 \ + "${{ runner.temp }}/osxpkg/${{ env.TARI_NETWORK_DIR }}/runtime/$FILE" + cp -vf "${{ runner.temp }}/osxpkg/${{ env.TARI_NETWORK_DIR }}/runtime/$FILE" \ + "${{ env.MTS_SOURCE }}" done distDirPKG=$(mktemp -d -t ${{ env.TBN_FILENAME }}) echo "${distDirPKG}" echo "distDirPKG=${distDirPKG}" >> $GITHUB_ENV TBN_Temp=${{ env.TBN_FILENAME }} - TBN_BUNDLEID_VALID_NAME=$(echo "${TBN_Temp//_/-}") + TBN_BUNDLE_ID_VALID_NAME=$(echo "${TBN_Temp//_/-}") # Strip apple-darwin TBN_ARCH=$(echo "${${{ matrix.builds.target }}//-apple-darwin/}") - pkgbuild --root /tmp/tari_testnet \ - --identifier "${{ env.TBN_BUNDLEID_BASE }}.$TBN_BUNDLEID_VALID_NAME" \ - --version "$VERSION" \ + pkgbuild --root "${{ runner.temp }}/osxpkg/${{ env.TARI_NETWORK_DIR }}" \ + --identifier "${{ env.TBN_BUNDLE_ID_BASE }}.pkg.${TBN_BUNDLE_ID_VALID_NAME}" \ + --version "${TARI_VERSION}" \ --install-location "/tmp/tari" \ - --scripts "/tmp/tari_testnet/scripts" \ - --sign "Developer ID Installer: $MACOS_INSTALLER_ID" \ - "${distDirPKG}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg" + --scripts "${{ runner.temp }}/osxpkg/${{ env.TARI_NETWORK_DIR }}/scripts" \ + --sign "Developer ID Installer: ${MACOS_INSTALLER_ID}" \ + "${distDirPKG}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg" echo -e "Submitting to Apple...\n\n" xcrun altool --notarize-app \ - --primary-bundle-id "${{ env.TBN_BUNDLEID_BASE }}.$TBN_BUNDLEID_VALID_NAME" \ - --username "$MACOS_NOTARIZE_USERNAME" --password "$MACOS_NOTARIZE_PASSWORD" \ - --asc-provider "$MACOS_ASC_PROVIDER" \ - --file "${distDirPKG}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg" &> notarisation.result + --primary-bundle-id "${{ env.TBN_BUNDLE_ID_BASE }}.pkg.${TBN_BUNDLE_ID_VALID_NAME}" \ + --username "${MACOS_NOTARIZE_USERNAME}" --password "${MACOS_NOTARIZE_PASSWORD}" \ + --asc-provider "${MACOS_ASC_PROVIDER}" \ + --file "${distDirPKG}/${{ env.TBN_FILENAME }}-${{ 
matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg" &> notarisation.result requestUUID=`grep RequestUUID notarisation.result | cut -d" " -f 3` - echo $requestUUID - if [[ $requestUUID == "" ]]; then + echo ${requestUUID} + if [[ ${requestUUID} == "" ]]; then echo "could not upload for notarization" exit 1 else - echo "Notarization RequestUUID: $requestUUID" + echo "Notarization RequestUUID: ${requestUUID}" fi echo -e "\n\nChecking result of notarisation..." request_status="in progress" - while [[ "$request_status" == "in progress" ]]; do + while [[ "${request_status}" == "in progress" ]]; do echo -n "waiting... " sleep 10 - request_status=$(xcrun altool --notarization-info $requestUUID --username "$MACOS_NOTARIZE_USERNAME" --password "$MACOS_NOTARIZE_PASSWORD" 2>&1) - echo "$request_status" - request_status=$(echo "$request_status" | awk -F ': ' '/Status:/ { print $2; }' ) - echo "$request_status" + request_status=$(xcrun altool --notarization-info ${requestUUID} --username "${MACOS_NOTARIZE_USERNAME}" --password "${MACOS_NOTARIZE_PASSWORD}" 2>&1) + echo "${request_status}" + request_status=$(echo "${request_status}" | awk -F ': ' '/Status:/ { print $2; }' ) + echo "${request_status}" done - echo "$request_status" - if [[ $request_status != "success" ]]; then - echo "## could not notarize - $request_status - ${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg" + echo "${request_status}" + if [[ ${request_status} != "success" ]]; then + echo "## could not notarize - ${request_status} - ${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg" exit 1 else - echo -e "\nStapling package...${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg\n" - xcrun stapler staple -v "${distDirPKG}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg" + echo -e "\nStapling package...${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg\n" + xcrun stapler staple -v "${distDirPKG}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg" fi cd ${distDirPKG} - ls -la echo "Compute pkg shasum" - ${SHARUN} "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg" \ - >> "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg.sha256" - cat "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg.sha256" + ${SHARUN} "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg" \ + >> "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg.sha256" + cat "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg.sha256" echo "Checksum verification for pkg is " - ${SHARUN} --check "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg.sha256" - cp -v *.pkg* ${{ env.MTS_SOURCE }} + ${SHARUN} --check "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg.sha256" - name: Artifact upload for macOS pkg if: startsWith(runner.os,'macOS') continue-on-error: true uses: actions/upload-artifact@v4 with: - name: ${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}.pkg - path: "${{ env.distDirPKG }}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.VERSION }}*.pkg*" + name: ${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg + path: "${{ env.distDirPKG }}/${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}*.pkg*" - name: Build the Windows installer shell: cmd if: 
startsWith(runner.os,'Windows') run: | cd buildtools - "%programfiles(x86)%\Inno Setup 6\iscc.exe" "/DMyAppVersion=${{ env.VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer" "/DMinotariSuite=${{ env.TBN_FILENAME }}" "/DTariSuitePath=${{ github.workspace }}${{ env.TBN_DIST }}" "windows_inno_installer.iss" + "%programfiles(x86)%\Inno Setup 6\iscc.exe" "/DMyAppVersion=${{ env.TARI_VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer" "/DMinotariSuite=${{ env.TBN_FILENAME }}" "/DTariSuitePath=${{ github.workspace }}${{ env.TBN_DIST }}" "windows_inno_installer.iss" cd Output echo "Compute archive shasum" - ${{ env.SHARUN }} "${{ env.TBN_FILENAME }}-${{ env.VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe" >> "${{ env.TBN_FILENAME }}-${{ env.VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe.sha256" + ${{ env.SHARUN }} "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe" >> "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe.sha256" echo "Show the shasum" - cat "${{ env.TBN_FILENAME }}-${{ env.VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe.sha256" - echo "Checkum verification archive is " - ${{ env.SHARUN }} --check "${{ env.TBN_FILENAME }}-${{ env.VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe.sha256" + cat "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe.sha256" + echo "Checksum verification archive is " + ${{ env.SHARUN }} --check "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}-${{ env.VSHA_SHORT }}-${{ matrix.builds.name }}-installer.exe.sha256" - name: Artifact upload for Windows installer uses: actions/upload-artifact@v4 @@ -394,7 +530,7 @@ jobs: shell: bash run: | echo "Archive ${{ env.BINFILE }} too ${{ env.BINFILE }}.zip" - cd "$GITHUB_WORKSPACE${{ env.TBN_DIST }}" + cd "${{ env.MTS_SOURCE }}" echo "Compute files shasum" ${SHARUN} * >> "${{ env.BINFILE }}.sha256" echo "Show the shasum" @@ -406,7 +542,7 @@ jobs: ${SHARUN} "${{ env.BINFILE }}.zip" >> "${{ env.BINFILE }}.zip.sha256" echo "Show the shasum" cat "${{ env.BINFILE }}.zip.sha256" - echo "Checkum verification archive is " + echo "Checksum verification archive is " ${SHARUN} --check "${{ env.BINFILE }}.zip.sha256" - name: Artifact upload for Archive @@ -415,35 +551,15 @@ jobs: name: ${{ env.TBN_FILENAME }}_archive-${{ matrix.builds.name }} path: "${{ github.workspace }}${{ env.TBN_DIST }}/${{ env.BINFILE }}.zip*" - - name: Prep Miner for upload - shell: bash - run: | - cd "${{ github.workspace }}${{ env.TBN_DIST }}" - cp -v "minotari_miner${{ env.TBN_EXT}}" \ - "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}" - echo "Compute miner shasum" - ${SHARUN} "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}" \ - >> "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}.sha256" - echo "Show the shasum" - cat "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}.sha256" - echo "Checksum verification for miner is " - ${SHARUN} --check "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}.sha256" - - - name: Artifact upload for Miner - uses: actions/upload-artifact@v4 - with: - name: minotari_miner-${{ matrix.builds.name }} - path: "${{ github.workspace }}${{ env.TBN_DIST }}/minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}*" - - name: Prep diag-utils archive for upload continue-on-error: true 
shell: bash run: | - mkdir "${{ github.workspace }}${{ env.TBN_DIST }}/diag-utils" - cd "${{ github.workspace }}${{ env.TBN_DIST }}/diag-utils" + mkdir -p "${{ env.MTS_SOURCE }}-diag-utils" + cd "${{ env.MTS_SOURCE }}-diag-utils" # Find RandomX built tools for testing find "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/" \ - -name "randomx-*${{ env.TBN_EXT}}" -type f -perm -+x -exec cp -v {} . \; + -name "randomx-*${{ env.TBN_EXT}}" -type f -perm -+x -exec cp -vf {} . \; echo "Compute diag utils shasum" ${SHARUN} * \ >> "${{ env.TBN_FILENAME }}_archive-diag-utils-${{ matrix.builds.name }}.sha256" @@ -463,78 +579,92 @@ jobs: uses: actions/upload-artifact@v4 with: name: ${{ env.TBN_FILENAME }}_archive-diag-utils-${{ matrix.builds.name }} - path: "${{ github.workspace }}${{ env.TBN_DIST }}/diag-utils/*.zip*" - - - name: Artifact cleanup for diag-utils - continue-on-error: true - shell: bash - run: | - rm -vRf "${{ github.workspace }}${{ env.TBN_DIST }}/diag-utils/" - - - name: Artifact Windows Installer for S3 - if: startsWith(runner.os,'Windows') - continue-on-error: true - shell: bash - run: | - if [ -d "${{ github.workspace }}/buildtools/Output/" ]; then - echo "Coping Windows installer ..." - cp -v "${{ github.workspace }}/buildtools/Output/"* \ - "${{ github.workspace }}${{ env.TBN_DIST }}" - fi - - - name: Sync dist to S3 - Bash - continue-on-error: true # Don't break if s3 upload fails - if: ${{ env.AWS_SECRET_ACCESS_KEY != '' && matrix.builds.runs-on != 'self-hosted' }} - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - DEST_DIR: "${{ env.S3_DEST_OVERRIDE }}${{ env.PLATFORM_SPECIFIC_DIR }}/${{ env.TARI_NETWORK_DIR }}/" - S3CMD: "cp" - S3OPTIONS: '--recursive --exclude "*" --include "*.zip*" --include "*.pkg*" --include "*installer.exe*"' - shell: bash - run: | - echo "Starting upload ... 
${{ env.MTS_SOURCE }}" - ls -al ${{ env.MTS_SOURCE }} - - aws --version - - aws s3 ${{ env.S3CMD }} --region ${{ secrets.AWS_REGION }} \ - "${{ env.MTS_SOURCE }}" \ - s3://${{ secrets.AWS_S3_BUCKET }}/${{ env.DEST_DIR }} \ - ${{ env.S3OPTIONS }} - - if [[ "${{ github.ref }}" =~ refs\/tags\/v* ]]; then - echo "Copy tags to latest s3" - aws s3 ${{ env.S3CMD }} --region ${{ secrets.AWS_REGION }} \ - "${{ env.MTS_SOURCE }}" \ - s3://${{ secrets.AWS_S3_BUCKET }}/current/${{ env.DEST_DIR }} \ - ${{ env.S3OPTIONS }} - - aws s3 rm --region ${{ secrets.AWS_REGION }} \ - s3://${{ secrets.AWS_S3_BUCKET }}/latest/${{ env.DEST_DIR }} \ - --recursive --include "*" - - aws s3 ${{ env.S3CMD }} --region ${{ secrets.AWS_REGION }} \ - "${{ env.MTS_SOURCE }}" \ - s3://${{ secrets.AWS_S3_BUCKET }}/latest/${{ env.DEST_DIR }} \ - ${{ env.S3OPTIONS }} - fi + path: "${{ github.workspace }}${{ env.TBN_DIST }}-diag-utils/*.zip*" create-release: + if: ${{ startsWith(github.ref, 'refs/tags/v') }} runs-on: ubuntu-latest needs: builds - if: ${{ startsWith(github.ref, 'refs/tags/v') }} + env: + TARI_NETWORK_DIR: ${{ needs.builds.outputs.TARI_NETWORK_DIR }} + TARI_VERSION: ${{ needs.builds.outputs.TARI_VERSION }} + steps: - name: Download binaries uses: actions/download-artifact@v4 + with: + path: ${{ env.TBN_FILENAME }} + pattern: "${{ env.TBN_FILENAME }}*" + merge-multiple: true + + - name: Verify checksums and Prep Uploads + shell: bash + working-directory: ${{ env.TBN_FILENAME }} + run: | + # set -xo pipefail + sudo apt-get update + sudo apt-get --no-install-recommends --assume-yes install dos2unix + ls -alhtR + if [ -f "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" ] ; then + rm -fv "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + fi + # Merge all sha256 files into one + find . -name "*.sha256" -type f -print | xargs cat >> "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + dos2unix --quiet "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + cat "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + sha256sum --ignore-missing --check "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + ls -alhtR - name: Create release uses: ncipollo/release-action@v1 with: - artifacts: "tari_*/**/*" + artifacts: "${{ env.TBN_FILENAME }}*/**/*" token: ${{ secrets.GITHUB_TOKEN }} prerelease: true draft: true allowUpdates: true updateOnlyUnreleased: true replacesArtifacts: true + + - name: Sync assets to S3 + continue-on-error: true + if: ${{ env.AWS_SECRET_ACCESS_KEY != '' && matrix.builds.runs-on != 'self-hosted' }} + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + S3CMD: "cp" + S3OPTIONS: '--recursive --exclude "*" --include "*.sha256*" --include "*.zip*" --include "*.pkg*" --include "*installer.exe*"' + shell: bash + working-directory: ${{ env.TBN_FILENAME }} + run: | + echo "Upload processing ..." 
+          ls -alhtR
+          echo "Clean up"
+          # Bash's 'if [ -f ]' test does not work as expected with wildcards
+          # if [ -f ${{ env.TBN_FILENAME }}*diag-utils* ] ; then
+          if ls ${{ env.TBN_FILENAME }}*diag-utils* > /dev/null 2>&1 ; then
+            rm -fv ${{ env.TBN_FILENAME }}*diag-utils*
+          fi
+          echo "Folder setup"
+          if ls ${{ env.TBN_FILENAME }}*linux* > /dev/null 2>&1 ; then
+            mkdir -p "linux/${{ env.TARI_NETWORK_DIR }}/"
+            mv -v ${{ env.TBN_FILENAME }}*linux* "linux/${{ env.TARI_NETWORK_DIR }}/"
+          fi
+          if ls ${{ env.TBN_FILENAME }}*macos* > /dev/null 2>&1 ; then
+            mkdir -p "osx/${{ env.TARI_NETWORK_DIR }}/"
+            mv -v ${{ env.TBN_FILENAME }}*macos* "osx/${{ env.TARI_NETWORK_DIR }}/"
+          fi
+          if ls ${{ env.TBN_FILENAME }}*windows* > /dev/null 2>&1 ; then
+            mkdir -p "windows/${{ env.TARI_NETWORK_DIR }}/"
+            mv -v ${{ env.TBN_FILENAME }}*windows* "windows/${{ env.TARI_NETWORK_DIR }}/"
+          fi
+          ls -alhtR
+          aws --version
+          echo "ls current"
+          aws s3 ls --region ${{ secrets.AWS_REGION }} \
+            s3://${{ secrets.AWS_S3_BUCKET }}/current/
+          echo "Upload current"
+          aws s3 ${{ env.S3CMD }} --region ${{ secrets.AWS_REGION }} \
+            . \
+            s3://${{ secrets.AWS_S3_BUCKET }}/current/ \
+            ${{ env.S3OPTIONS }}
diff --git a/.github/workflows/build_dockers.yml b/.github/workflows/build_dockers.yml
index 04340dc8c1..001881331b 100644
--- a/.github/workflows/build_dockers.yml
+++ b/.github/workflows/build_dockers.yml
@@ -8,7 +8,8 @@ name: Build docker images
     tags:
       - 'v[0-9]+.[0-9]+.[0-9]*'
     branches:
-      - 'build_dockers*'
+      - 'build-all-*'
+      - 'build-dockers-*'
   schedule:
     - cron: '05 00 * * *'
   workflow_dispatch:
@@ -47,11 +48,14 @@ name: Build docker images
         - xmrig

 env:
-  toolchain_default: nightly-2023-06-04
+  toolchain_default: nightly-2024-02-04

 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
+  # https://docs.github.com/en/actions/examples/using-concurrency-expressions-and-a-test-matrix
+  group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}'
+  cancel-in-progress: ${{ !startsWith(github.ref, 'refs/tags/v') && github.ref != 'refs/heads/development' && github.ref != 'refs/heads/nextnet' && github.ref != 'refs/heads/stagenet' }}
+
+permissions: {}

 jobs:
   builds_envs_setup:
@@ -107,6 +111,8 @@ jobs:

   builds_run:
     needs: builds_envs_setup
+    permissions:
+      packages: write
     uses: ./.github/workflows/build_dockers_workflow.yml
     secrets: inherit
     with:
diff --git a/.github/workflows/build_dockers_workflow.yml b/.github/workflows/build_dockers_workflow.yml
index f656b24b66..20a109b320 100644
--- a/.github/workflows/build_dockers_workflow.yml
+++ b/.github/workflows/build_dockers_workflow.yml
@@ -14,7 +14,7 @@ name: Build docker images - workflow_call/on-demand
       toolchain:
         type: string
         description: 'Rust toolchain'
-        default: nightly-2023-06-04
+        default: nightly-2024-02-04
       arch:
         type: string
         default: x86-64
@@ -40,6 +40,8 @@ env:
   LAUNCHPAD_REPO: tari-project/tari-launchpad
   LAUNCHPAD_BRANCH: main

+permissions: {}
+
 jobs:
   envs_setup:
     runs-on: ubuntu-latest
@@ -97,6 +99,9 @@ jobs:
       fail-fast: false
       matrix: ${{ fromJson(needs.envs_setup.outputs.matrix) }}

+    permissions:
+      packages: write
+
     runs-on: ubuntu-latest

     steps:
@@ -119,6 +124,8 @@ jobs:
           source tari/buildtools/multinet_envs.sh ${{github.ref_name}}
           echo ${TARI_NETWORK}
           echo "TARI_NETWORK=${TARI_NETWORK}" >> $GITHUB_ENV
+          echo ${TARI_TARGET_NETWORK}
+          echo "TARI_TARGET_NETWORK=${TARI_TARGET_NETWORK}" >> $GITHUB_ENV

       - name: environment setup
         shell: bash
@@ -225,6 +232,7 @@ jobs:
             APP_NAME=${{ matrix.builds.app_name }}
            APP_EXEC=${{ matrix.builds.app_exec }}
             TARI_NETWORK=${{ env.TARI_NETWORK }}
+            TARI_TARGET_NETWORK=${{ env.TARI_TARGET_NETWORK }}
             ${{ env.DOCKER_SUBTAG }}
           tags: |
             ${{ steps.meta.outputs.tags }}
diff --git a/.github/workflows/build_libffis.yml b/.github/workflows/build_libffis.yml
index e2bdef9cb8..edbcfcc8b5 100644
--- a/.github/workflows/build_libffis.yml
+++ b/.github/workflows/build_libffis.yml
@@ -6,6 +6,7 @@ name: Build ffi libraries
     tags:
       - 'v[0-9]+.[0-9]+.[0-9]*'
     branches:
+      - 'build-all-*'
       - 'build-ffis-*'
   schedule:
     - cron: '05 00 * * *'
@@ -24,8 +25,11 @@ env:
   matrix-json-file: '.github/workflows/build_libffis.json'

 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
+  # https://docs.github.com/en/actions/examples/using-concurrency-expressions-and-a-test-matrix
+  group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}'
+  cancel-in-progress: ${{ !startsWith(github.ref, 'refs/tags/v') && github.ref != 'refs/heads/development' && github.ref != 'refs/heads/nextnet' && github.ref != 'refs/heads/stagenet' }}
+
+permissions: {}

 jobs:
   matrix-prep:
@@ -97,13 +101,19 @@ jobs:
         uses: actions/checkout@v4

       - name: Declare TestNet for tags
+        # When force testing with GHA_NETWORK, remember to comment out the 'if' below
         if: ${{ startsWith(github.ref, 'refs/tags/v') }}
+        env:
+          GHA_NETWORK: ${{ github.ref_name }}
+          # GHA_NETWORK: "v1.0.0-rc.4"
         shell: bash
         run: |
-          source buildtools/multinet_envs.sh ${{ github.ref_name }}
+          source buildtools/multinet_envs.sh ${{ env.GHA_NETWORK }}
           echo ${TARI_NETWORK}
+          echo ${TARI_TARGET_NETWORK}
           echo ${TARI_NETWORK_CHANGELOG}
           echo "TARI_NETWORK=${TARI_NETWORK}" >> $GITHUB_ENV
+          echo "TARI_TARGET_NETWORK=${TARI_TARGET_NETWORK}" >> $GITHUB_ENV
           echo "TARI_NETWORK_CHANGELOG=${TARI_NETWORK_CHANGELOG}" >> $GITHUB_ENV

       - name: Declare Android/iOS envs
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c881a4febe..166f8d432c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,7 +11,7 @@ name: CI
   merge_group:

 env:
-  toolchain: nightly-2023-06-04
+  toolchain: nightly-2024-02-04
   CARGO_HTTP_MULTIPLEXING: false
   CARGO_TERM_COLOR: always
   CARGO_UNSTABLE_SPARSE_REGISTRY: true
@@ -45,7 +45,7 @@ jobs:
       #  Without restore keys, we lose the ability to get partial matches on caches, and end
       #  up with too many cache misses.
       # Use a "small" suffix to use the build caches where possible, but build caches won't use this
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: |
             ~/.cargo/registry/index
@@ -85,7 +85,7 @@ jobs:
       # Rust-cache disables a key feature of actions/cache: restoreKeys.
       #  Without restore keys, we lose the ability to get partial matches on caches, and end
       #  up with too many cache misses.
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: |
             ~/.cargo/registry/index
@@ -116,7 +116,7 @@ jobs:
       #  Without restore keys, we lose the ability to get partial matches on caches, and end
       #  up with too many cache misses.
      # This job runs on self-hosted, so use local-cache instead.
- uses: maxnowack/local-cache@v1 + uses: maxnowack/local-cache@v2 with: path: | ~/.cargo/registry/index @@ -167,6 +167,17 @@ jobs: permissions: checks: write pull-requests: write + strategy: + matrix: + tari_target_network: [ + { target: "testnet", network: "esmeralda" }, + { target: "nextnet", network: "nextnet" }, + { target: "mainnet", network: "stagenet" }, + ] + env: + TARI_TARGET_NETWORK: ${{ matrix.tari_target_network.target }} + TARI_NETWORK: ${{ matrix.tari_target_network.network }} + RUST_LOG: debug steps: - name: checkout uses: actions/checkout@v4 @@ -184,7 +195,7 @@ jobs: # Without restore keys, we lose the ability to get partial matches on caches, and end # up with too many cache misses. # This job runs on self-hosted, so use local-cache instead. - uses: maxnowack/local-cache@v1 + uses: maxnowack/local-cache@v2 with: path: | ~/.cargo/registry/index @@ -192,21 +203,22 @@ jobs: ~/.cargo/registry/CACHEDIR.TAG ~/.cargo/git target - key: tari-${{ runner.os }}-${{ runner.cpu-model }}-${{ env.toolchain }}-nightly-${{ hashFiles('**/Cargo.lock') }} + key: tari-${{ runner.os }}-${{ runner.cpu-model }}-${{ env.toolchain }}-nightly-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.tari_target_network.target }} restore-keys: | + tari-${{ runner.os }}-${{ runner.cpu-model }}-${{ env.toolchain }}-nightly-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.tari_target_network.network }} tari-${{ runner.os }}-${{ runner.cpu-model }}-${{ env.toolchain }}-nightly-${{ hashFiles('**/Cargo.lock') }} tari-${{ runner.os }}-${{ runner.cpu-model }}-${{ env.toolchain }}-nightly - name: Install cargo-nextest run: cargo install cargo-nextest --locked --force - name: cargo test compile - run: cargo test --no-run --locked --all-features --release + run: cargo test -vv --no-run --locked --all-features --release - name: cargo test run: cargo nextest run --all-features --release -E "not package(tari_integration_tests)" --profile ci - name: upload artifact uses: actions/upload-artifact@v4 # upload test results as artifact if: always() with: - name: test-results + name: test-results-${{ matrix.tari_target_network.target }}.${{ matrix.tari_target_network.network }} path: ${{ github.workspace }}/target/nextest/ci/junit.xml # Allows other workflows to know the PR number @@ -226,7 +238,7 @@ jobs: name: pr_num path: ./pr_num.txt -# needed for test results + # needed for test results event_file: name: "Upload Event File for Test Results" runs-on: ubuntu-latest diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 27f598bab4..39ad2e4b91 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -11,7 +11,7 @@ name: Source Coverage - ci-coverage-* env: - toolchain: nightly-2023-06-04 + toolchain: nightly-2024-02-04 concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index ab96e48048..57ff669092 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -27,7 +27,7 @@ name: Integration tests type: string env: - toolchain: nightly-2023-06-04 + toolchain: nightly-2024-02-04 concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -78,7 +78,7 @@ jobs: # Without restore keys, we lose the ability to get partial matches on caches, and end # up with too many cache misses. # This job runs on self-hosted, so use local-cache instead. 
- uses: maxnowack/local-cache@v1 + uses: maxnowack/local-cache@v2 with: path: | ~/.cargo/registry/index @@ -159,7 +159,7 @@ jobs: if: ${{ env.CI_FFI == 'true' }} # Don't use rust-cache. # This job runs on self-hosted, so use local-cache instead. - uses: maxnowack/local-cache@v1 + uses: maxnowack/local-cache@v2 with: path: | ~/.cargo/registry/index diff --git a/.gitignore b/.gitignore index 06dae84105..08666063a4 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,7 @@ pie/ integration_tests/cucumber-output-junit.xml integration_tests/log/ + +# Ignore MinoTari Ledger Wallet +app_nanosplus.json +app_nanos.json diff --git a/.license.ignore b/.license.ignore index a0d334a362..ecb685b180 100644 --- a/.license.ignore +++ b/.license.ignore @@ -1,5 +1,6 @@ ./applications/minotari_node/assets/tari_banner.rs ./applications/minotari_node/assets/tari_logo.rs +./applications/minotari_node/osx-pkg/entitlements.xml ./base_layer/contacts/src/schema.rs ./base_layer/key_manager/src/schema.rs ./base_layer/p2p/src/dns/roots/tls.rs diff --git a/CODEOWNERS b/CODEOWNERS index ed8f8520e0..ea022ec823 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,14 @@ -@stringhandler @CjS77 @sdbondi \ No newline at end of file +# CI/CD-related files require a review by the devops team +.github/**/* @tari-project/devops +scripts/**/* @tari-project/devops +CODEOWNERS @tari-project/devops + +# Consensus code requires approvals by lead maintainers +base_layer/core/src/consensus/**/* @tari-project/lead-maintainers +base_layer/core/src/**/* @tari-project/tari-core-developers +base_layer/key_manager/src/**/* @tari-project/tari-core-developers +base_layer/wallet/src/**/* @tari-project/tari-core-developers + +# meta management requires approvals by lead maintainers +meta/crates.io/* @tari-project/lead-maintainers +meta/gpg_keys/* @tari-project/lead-maintainers diff --git a/Cargo.lock b/Cargo.lock index f0c7372cff..1e68eb558b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -52,17 +52,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ahash" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" -dependencies = [ - "getrandom 0.2.10", - "once_cell", - "version_check", -] - [[package]] name = "aho-corasick" version = "1.1.2" @@ -95,9 +84,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", @@ -115,30 +104,30 @@ checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] 
[[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -321,9 +310,9 @@ dependencies = [ [[package]] name = "base58-monero" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d079cdf47e1ca75554200bb2f30bff5a5af16964cac4a566b18de9a5d48db2b" +checksum = "978e81a45367d2409ecd33369a45dda2e9a3ca516153ec194de1fbda4b9fb79d" dependencies = [ "thiserror", ] @@ -892,11 +881,12 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "config" -version = "0.13.3" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" +checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" dependencies = [ "async-trait", + "convert_case 0.6.0", "json5", "lazy_static", "nom", @@ -905,7 +895,7 @@ dependencies = [ "rust-ini", "serde", "serde_json", - "toml 0.5.11", + "toml 0.8.8", "yaml-rust", ] @@ -964,12 +954,41 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -1170,23 +1189,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "crossterm" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2102ea4f781910f8a5b98dd061f4c2023f479ce7bb1236330099ceb5a93cf17" -dependencies = [ - "bitflags 1.3.2", - "crossterm_winapi 0.9.1", - "futures-core", - "libc", - "mio 0.8.9", - "parking_lot 0.12.1", - "signal-hook", - "signal-hook-mio", - "winapi", -] - [[package]] name = "crossterm" version = "0.25.0" @@ -1195,8 +1197,9 @@ checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" dependencies = [ "bitflags 1.3.2", "crossterm_winapi 0.9.1", + "futures-core", "libc", - "mio 0.8.9", + "mio 0.8.11", "parking_lot 0.12.1", "signal-hook", "signal-hook-mio", @@ -1234,7 +1237,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", 
"subtle", "zeroize", ] @@ -1246,7 +1249,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "typenum", ] @@ -1300,14 +1303,14 @@ dependencies = [ "globwalk", "humantime 2.1.0", "inventory", - "itertools 0.12.0", + "itertools 0.12.1", "junit-report", "lazy-regex", "linked-hash-map", "once_cell", "pin-project 1.1.3", "regex", - "sealed 0.5.0", + "sealed", "serde", "serde_json", "smart-default", @@ -1321,7 +1324,7 @@ checksum = "01091e28d1f566c8b31b67948399d2efd6c0a8f6228a9785519ed7b73f7f0aef" dependencies = [ "cucumber-expressions", "inflections", - "itertools 0.12.0", + "itertools 0.12.1", "proc-macro2", "quote", "regex", @@ -1343,19 +1346,6 @@ dependencies = [ "regex-syntax 0.7.5", ] -[[package]] -name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - [[package]] name = "curve25519-dalek" version = "4.1.1" @@ -1520,7 +1510,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", @@ -1658,9 +1648,12 @@ dependencies = [ [[package]] name = "dlv-list" -version = "0.3.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] [[package]] name = "doc-comment" @@ -1704,7 +1697,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 4.1.1", + "curve25519-dalek", "ed25519", "serde", "sha2 0.10.8", @@ -1732,7 +1725,7 @@ dependencies = [ "hkdf", "pem-rfc7468", "pkcs8", - "rand_core 0.6.4", + "rand_core", "sec1", "subtle", "zeroize", @@ -1866,7 +1859,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2092,17 +2085,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.10" @@ -2111,7 +2093,7 @@ checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -2162,15 +2144,15 @@ dependencies = [ [[package]] name = "globset" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" dependencies = [ "aho-corasick", "bstr", - "fnv", "log", - 
"regex", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -2191,15 +2173,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core 0.6.4", + "rand_core", "subtle", ] [[package]] name = "h2" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes 1.5.0", "fnv", @@ -2207,7 +2189,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.1.0", "slab", "tokio", "tokio-util 0.7.10", @@ -2225,9 +2207,12 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash", -] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" [[package]] name = "hashbrown" @@ -2314,6 +2299,24 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hidapi" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "798154e4b6570af74899d71155fb0072d5b17e6aa12f39c8ef22c60fb8ec99e7" +dependencies = [ + "cc", + "libc", + "pkg-config", + "winapi", +] + [[package]] name = "hkdf" version = "0.12.3" @@ -2500,17 +2503,16 @@ dependencies = [ [[package]] name = "ignore" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbe7873dab538a9a44ad79ede1faf5f30d49f9a5c883ddbab48bce81b64b7492" +checksum = "b46810df39e66e925525d6e38ce1e7f6e1d208f72dc39757880fcb66e2c58af1" dependencies = [ + "crossbeam-deque", "globset", - "lazy_static", "log", "memchr", - "regex", + "regex-automata 0.4.3", "same-file", - "thread_local", "walkdir", "winapi-util", ] @@ -2610,9 +2612,9 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "inventory" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0508c56cfe9bfd5dfeb0c22ab9a6abfda2f27bdca422132e494266351ed8d83c" +checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767" [[package]] name = "ipnet" @@ -2651,9 +2653,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -2758,6 +2760,51 @@ dependencies = [ "spin 0.5.2", ] +[[package]] +name = "ledger-apdu" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe435806c197dfeaa5efcded5e623c4b8230fd28fdf1e91e7a86e40ef2acbf90" +dependencies = [ + "arrayref", + "no-std-compat", 
+ "snafu", +] + +[[package]] +name = "ledger-transport" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1117f2143d92c157197785bf57711d7b02f2cfa101e162f8ca7900fb7f976321" +dependencies = [ + "async-trait", + "ledger-apdu", +] + +[[package]] +name = "ledger-transport" +version = "0.10.0" +source = "git+https://github.com/Zondax/ledger-rs?rev=20e2a20#20e2a2076d799d449ff6f07eb0128548b358d9bc" +dependencies = [ + "async-trait", + "ledger-apdu", +] + +[[package]] +name = "ledger-transport-hid" +version = "0.10.0" +source = "git+https://github.com/Zondax/ledger-rs?rev=20e2a20#20e2a2076d799d449ff6f07eb0128548b358d9bc" +dependencies = [ + "byteorder", + "cfg-if", + "hex", + "hidapi", + "ledger-transport 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "log", + "thiserror", +] + [[package]] name = "libc" version = "0.2.149" @@ -2766,9 +2813,9 @@ checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libgit2-sys" -version = "0.16.1+1.7.1" +version = "0.16.2+1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c" +checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" dependencies = [ "cc", "libc", @@ -2902,8 +2949,9 @@ checksum = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" [[package]] name = "log4rs" -version = "1.2.0" -source = "git+https://github.com/tari-project/log4rs.git#e1051fd3a1bec9c55d055f60176a96cf11e58505" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0816135ae15bd0391cf284eab37e6e3ee0a6ee63d2ceeb659862bd8d0a984ca6" dependencies = [ "anyhow", "arc-swap", @@ -2914,11 +2962,13 @@ dependencies = [ "libc", "log", "log-mdc", + "once_cell", "parking_lot 0.12.1", + "rand", "serde", "serde-value", "serde_json", - "serde_yaml 0.8.26", + "serde_yaml", "thiserror", "thread-id", "typemap-ors", @@ -2988,7 +3038,7 @@ checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ "byteorder", "keccak", - "rand_core 0.6.4", + "rand_core", "zeroize", ] @@ -3046,7 +3096,7 @@ dependencies = [ [[package]] name = "minotari_app_grpc" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "argon2", "base64 0.13.1", @@ -3062,6 +3112,7 @@ dependencies = [ "tari_comms", "tari_core", "tari_crypto", + "tari_features", "tari_script", "tari_utilities", "thiserror", @@ -3073,7 +3124,7 @@ dependencies = [ [[package]] name = "minotari_app_utilities" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "clap 3.2.25", "dialoguer", @@ -3095,7 +3146,7 @@ dependencies = [ [[package]] name = "minotari_chat_ffi" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "cbindgen", "chrono", @@ -3120,7 +3171,7 @@ dependencies = [ [[package]] name = "minotari_console_wallet" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "blake2", "chrono", @@ -3130,6 +3181,7 @@ dependencies = [ "crossterm 0.25.0", "digest 0.10.7", "futures 0.3.29", + "ledger-transport-hid", "log", "log4rs", "minotari_app_grpc", @@ -3154,7 +3206,7 @@ dependencies = [ "tari_core", "tari_crypto", "tari_features", - "tari_hash_domains", + "tari_hashing", "tari_key_manager", "tari_libtor", "tari_p2p", @@ -3174,7 +3226,7 @@ dependencies = [ [[package]] name = "minotari_merge_mining_proxy" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", 
"bincode", @@ -3213,7 +3265,7 @@ dependencies = [ [[package]] name = "minotari_miner" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "base64 0.13.1", "borsh", @@ -3249,23 +3301,27 @@ dependencies = [ [[package]] name = "minotari_mining_helper_ffi" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "borsh", + "cbindgen", "hex", "libc", "rand", "tari_common", + "tari_common_types", "tari_comms", "tari_core", "tari_crypto", + "tari_features", "tari_utilities", "thiserror", + "tokio", ] [[package]] name = "minotari_node" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "async-trait", @@ -3275,7 +3331,7 @@ dependencies = [ "clap 3.2.25", "config", "console-subscriber", - "crossterm 0.23.2", + "crossterm 0.25.0", "derive_more", "either", "futures 0.3.29", @@ -3297,6 +3353,7 @@ dependencies = [ "tari_core", "tari_crypto", "tari_features", + "tari_key_manager", "tari_libtor", "tari_metrics", "tari_p2p", @@ -3320,7 +3377,7 @@ dependencies = [ [[package]] name = "minotari_wallet" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "argon2", "async-trait", @@ -3370,7 +3427,7 @@ dependencies = [ [[package]] name = "minotari_wallet_ffi" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "borsh", "cbindgen", @@ -3394,6 +3451,7 @@ dependencies = [ "tari_contacts", "tari_core", "tari_crypto", + "tari_features", "tari_key_manager", "tari_p2p", "tari_script", @@ -3433,13 +3491,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -3454,19 +3512,19 @@ dependencies = [ [[package]] name = "monero" -version = "0.18.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a8965a7510c5d9389e2086e406c292d6bbecac099eef195be55a2d2043448b9" +checksum = "1b205707fd34b01a547f2fe77e687b40fed05966fb82e955b86ac55cd8ee31b5" dependencies = [ - "base58-monero 1.0.0", - "curve25519-dalek 3.2.0", + "base58-monero 2.0.0", + "curve25519-dalek", "fixed-hash", "hex", - "hex-literal", - "sealed 0.4.0", + "hex-literal 0.4.1", + "sealed", "serde", "thiserror", - "tiny-keccak 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak", ] [[package]] @@ -3564,6 +3622,12 @@ dependencies = [ "memoffset 0.6.5", ] +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -3809,12 +3873,12 @@ dependencies = [ [[package]] name = "ordered-multimap" -version = "0.4.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" dependencies = [ "dlv-list", - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -3928,7 +3992,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", 
"subtle", ] @@ -4081,7 +4145,7 @@ dependencies = [ "chrono", "cipher 0.4.4", "crc24", - "curve25519-dalek 4.1.1", + "curve25519-dalek", "derive_builder", "des", "digest 0.10.7", @@ -4505,7 +4569,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -4515,16 +4579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -4533,7 +4588,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom", ] [[package]] @@ -4603,7 +4658,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom", "redox_syscall 0.2.16", "thiserror", ] @@ -4728,7 +4783,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", - "getrandom 0.2.10", + "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -4746,13 +4801,14 @@ dependencies = [ [[package]] name = "ron" -version = "0.7.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64 0.21.5", + "bitflags 2.4.1", "serde", + "serde_derive", ] [[package]] @@ -4778,7 +4834,7 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core 0.6.4", + "rand_core", "signature", "spki", "subtle", @@ -4787,9 +4843,9 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" dependencies = [ "cfg-if", "ordered-multimap", @@ -4972,18 +5028,6 @@ dependencies = [ "untrusted 0.9.0", ] -[[package]] -name = "sealed" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b5e421024b5e5edfbaa8e60ecf90bda9dbffc602dbb230e6028763f85f0c68c" -dependencies = [ - "heck 0.3.3", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "sealed" version = "0.5.0" @@ -5112,18 +5156,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap 1.9.3", - "ryu", - "serde", - "yaml-rust", -] - [[package]] name = "serde_yaml" version = "0.9.27" @@ -5221,7 +5253,7 @@ checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" dependencies = [ "libc", "mio 0.7.14", - "mio 0.8.9", + "mio 
0.8.11", "signal-hook", ] @@ -5241,7 +5273,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -5300,15 +5332,15 @@ dependencies = [ [[package]] name = "snow" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +checksum = "2e87c18a6608909007e75a60e04d03eda77b601c94de1c74d9a9dc2c04ab789a" dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.1.1", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "rustc_version", "sha2 0.10.8", "subtle", @@ -5527,7 +5559,7 @@ checksum = "78bfa6ec52465e2425fd43ce5bbbe0f0b623964f7c63feb6b10980e816c654ea" dependencies = [ "proc-macro2", "quote", - "sealed 0.5.0", + "sealed", "syn 2.0.38", ] @@ -5569,13 +5601,23 @@ dependencies = [ "curve25519-dalek-derive", "fiat-crypto 0.1.20", "platforms", - "rand_core 0.6.4", + "rand_core", "rustc_version", "serde", "subtle", "zeroize", ] +[[package]] +name = "tari-tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baa5d0f04bac67c31c1e0c2ffbc7f0e3aee2707405804ec5e022b4d550be259c" +dependencies = [ + "borsh", + "crunchy", +] + [[package]] name = "tari_bulletproofs_plus" version = "0.3.2" @@ -5591,7 +5633,7 @@ dependencies = [ "lazy_static", "merlin", "rand", - "rand_core 0.6.4", + "rand_core", "serde", "sha3", "tari-curve25519-dalek", @@ -5601,7 +5643,7 @@ dependencies = [ [[package]] name = "tari_chat_client" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "async-trait", @@ -5627,7 +5669,7 @@ dependencies = [ [[package]] name = "tari_common" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "blake2", @@ -5641,7 +5683,7 @@ dependencies = [ "prost-build", "serde", "serde_json", - "serde_yaml 0.9.27", + "serde_yaml", "sha2 0.10.8", "structopt", "tari_crypto", @@ -5654,7 +5696,7 @@ dependencies = [ [[package]] name = "tari_common_sqlite" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "diesel", "diesel_migrations", @@ -5668,7 +5710,7 @@ dependencies = [ [[package]] name = "tari_common_types" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "base64 0.21.5", "blake2", @@ -5680,16 +5722,17 @@ dependencies = [ "primitive-types", "rand", "serde", + "strum", + "strum_macros", "tari_common", "tari_crypto", "tari_utilities", "thiserror", - "tokio", ] [[package]] name = "tari_comms" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "async-trait", @@ -5738,7 +5781,7 @@ dependencies = [ [[package]] name = "tari_comms_dht" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "bitflags 2.4.1", @@ -5782,7 +5825,7 @@ dependencies = [ [[package]] name = "tari_comms_rpc_macros" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "futures 0.3.29", "proc-macro2", @@ -5797,7 +5840,7 @@ dependencies = [ [[package]] name = "tari_contacts" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "chrono", "diesel", @@ -5830,7 +5873,7 @@ dependencies = [ [[package]] name = "tari_core" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "async-trait", "bincode", @@ -5850,6 +5893,8 @@ dependencies = [ "futures 0.3.29", "hex", "integer-encoding", + 
"ledger-transport 0.10.0 (git+https://github.com/Zondax/ledger-rs?rev=20e2a20)", + "ledger-transport-hid", "libsqlite3-sys", "lmdb-zero", "log", @@ -5873,6 +5918,7 @@ dependencies = [ "strum", "strum_macros", "tari-curve25519-dalek", + "tari-tiny-keccak", "tari_common", "tari_common_sqlite", "tari_common_types", @@ -5880,7 +5926,8 @@ dependencies = [ "tari_comms_dht", "tari_comms_rpc_macros", "tari_crypto", - "tari_hash_domains", + "tari_features", + "tari_hashing", "tari_key_manager", "tari_metrics", "tari_mmr", @@ -5893,7 +5940,6 @@ dependencies = [ "tari_utilities", "tempfile", "thiserror", - "tiny-keccak 2.0.2 (git+https://github.com/tari-project/tiny-keccak?rev=bcddc65530d8646de7282cd8d18d891dc434b643)", "tokio", "toml 0.5.11", "tracing", @@ -5912,7 +5958,7 @@ dependencies = [ "log", "once_cell", "rand_chacha", - "rand_core 0.6.4", + "rand_core", "serde", "sha3", "snafu", @@ -5924,12 +5970,15 @@ dependencies = [ [[package]] name = "tari_features" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" [[package]] -name = "tari_hash_domains" -version = "0.1.0" +name = "tari_hashing" +version = "1.0.0-pre.11a" dependencies = [ + "blake2", + "borsh", + "digest 0.10.7", "tari_crypto", ] @@ -5983,7 +6032,7 @@ dependencies = [ [[package]] name = "tari_key_manager" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "argon2", "async-trait", @@ -6018,7 +6067,7 @@ dependencies = [ [[package]] name = "tari_libtor" -version = "0.24.0" +version = "1.0.0-pre.11a" dependencies = [ "derivative", "libtor", @@ -6027,14 +6076,13 @@ dependencies = [ "rand", "tari_common", "tari_p2p", - "tari_shutdown", "tempfile", "tor-hash-passwd", ] [[package]] name = "tari_metrics" -version = "0.1.0" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "futures 0.3.29", @@ -6049,7 +6097,7 @@ dependencies = [ [[package]] name = "tari_mmr" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "bincode", "blake2", @@ -6068,7 +6116,7 @@ dependencies = [ [[package]] name = "tari_p2p" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "clap 3.2.25", @@ -6104,7 +6152,7 @@ dependencies = [ [[package]] name = "tari_script" -version = "0.12.0" +version = "1.0.0-pre.11a" dependencies = [ "blake2", "borsh", @@ -6121,7 +6169,7 @@ dependencies = [ [[package]] name = "tari_service_framework" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "anyhow", "async-trait", @@ -6138,7 +6186,7 @@ dependencies = [ [[package]] name = "tari_shutdown" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "futures 0.3.29", "tokio", @@ -6146,7 +6194,7 @@ dependencies = [ [[package]] name = "tari_storage" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "bincode", "lmdb-zero", @@ -6159,7 +6207,7 @@ dependencies = [ [[package]] name = "tari_test_utils" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" dependencies = [ "futures 0.3.29", "futures-test", @@ -6319,15 +6367,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "git+https://github.com/tari-project/tiny-keccak?rev=bcddc65530d8646de7282cd8d18d891dc434b643#bcddc65530d8646de7282cd8d18d891dc434b643" -dependencies = [ - "borsh", - "crunchy", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -6355,14 +6394,14 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes 1.5.0", "libc", - "mio 0.8.9", + "mio 0.8.11", "num_cpus", "pin-project-lite", "signal-hook-registry", @@ -6384,9 +6423,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", @@ -6476,6 +6515,18 @@ dependencies = [ "toml_edit 0.19.15", ] +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", +] + [[package]] name = "toml_datetime" version = "0.6.5" @@ -6509,6 +6560,19 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.8.3" @@ -6592,7 +6656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b83cd43a176c0c19d5db4401283e8f5c296b9c6c7fa29029de15cc445f26e12" dependencies = [ "hex", - "hex-literal", + "hex-literal 0.3.4", "rand", "sha1 0.6.0", "thiserror", @@ -6943,7 +7007,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" dependencies = [ - "getrandom 0.2.10", + "getrandom", ] [[package]] @@ -7032,12 +7096,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -7200,6 +7258,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -7230,6 +7297,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.5", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -7242,6 +7324,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -7254,6 +7342,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -7266,6 +7360,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -7278,6 +7378,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -7290,6 +7396,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -7302,6 +7414,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -7314,6 +7432,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" version = "0.5.18" @@ -7348,8 +7472,8 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.1.1", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "serde", "zeroize", ] diff --git a/Cargo.toml b/Cargo.toml index 2ee6313ef9..705ad4cfd0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ members = [ "applications/minotari_merge_mining_proxy", "applications/minotari_miner", "integration_tests", - "hash_domains", + "hashing", ] # Add here until we move to edition=2021 diff --git a/README.md 
b/README.md index ac3d27d6a2..7dbaa35891 100644
--- a/README.md
+++ b/README.md
@@ -20,11 +20,11 @@ Want to contribute? Start by reading the [Contributing Guide](Contributing.md) a
 ### Versions
 The recommended running versions of each network are:
-| Network | Version |
-|-----------|-------------|
-| Stagenet | --- |
-| Nextnet | 1.0.0-rc.2 |
-| Esmeralda | 1.0.0-pre.5 |
+| Network | Version |
+|-----------|----------------|
+| Stagenet | 1.0.0-alpha.0a |
+| Nextnet | 1.0.0-rc.6a |
+| Esmeralda | 1.0.0-pre.11a |
 For more detail about versioning, see [Release Ideology](https://github.com/tari-project/tari/blob/development/docs/src/branching_releases.md).
diff --git a/applications/minotari_app_grpc/Cargo.toml b/applications/minotari_app_grpc/Cargo.toml
index 1c8c75ecc1..851a68394c 100644
--- a/applications/minotari_app_grpc/Cargo.toml
+++ b/applications/minotari_app_grpc/Cargo.toml
@@ -4,7 +4,7 @@ authors = ["The Tari Development Community"]
 description = "This crate is to provide a single source for all cross application grpc files and conversions to and from tari::core"
 repository = "https://github.com/tari-project/tari"
 license = "BSD-3-Clause"
-version = "1.0.0-pre.5"
+version = "1.0.0-pre.11a"
 edition = "2018"

 [dependencies]
@@ -24,13 +24,14 @@ prost = "0.11.9"
 prost-types = "0.11.9"
 rand = "0.8"
 rcgen = "0.11.3"
-subtle = { version = "2.5.0", features = ["core_hint_black_box"] }
+subtle = "2.5.0"
 thiserror = "1"
-tokio = { version = "1.23", features = ["fs"] }
+tokio = { version = "1.36", features = ["fs"] }
 tonic = { version = "0.8.3", features = ["tls"]}
 zeroize = "1"

 [build-dependencies]
+tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a" }
 tonic-build = "0.8.4"

 [package.metadata.cargo-machete]
diff --git a/applications/minotari_app_grpc/build.rs b/applications/minotari_app_grpc/build.rs
index c059857769..f8084fb744 100644
--- a/applications/minotari_app_grpc/build.rs
+++ b/applications/minotari_app_grpc/build.rs
@@ -1,7 +1,10 @@
 // Copyright 2022 The Tari Project
 // SPDX-License-Identifier: BSD-3-Clause

+use tari_features::resolver::build_features;
+
 fn main() -> Result<(), Box<dyn std::error::Error>> {
+    build_features();
     tonic_build::configure().build_client(true).build_server(true).compile(
         &[
             "proto/base_node.proto",
diff --git a/applications/minotari_app_grpc/proto/base_node.proto b/applications/minotari_app_grpc/proto/base_node.proto
index df0ef382ee..84b7f07ada 100644
--- a/applications/minotari_app_grpc/proto/base_node.proto
+++ b/applications/minotari_app_grpc/proto/base_node.proto
@@ -57,6 +57,10 @@ service BaseNode {
   rpc GetNewBlockTemplate(NewBlockTemplateRequest) returns (NewBlockTemplateResponse);
   // Construct a new block from a provided template
   rpc GetNewBlock(NewBlockTemplate) returns (GetNewBlockResult);
+  // Construct a new block from a provided template and the supplied coinbases
+  rpc GetNewBlockWithCoinbases(GetNewBlockWithCoinbasesRequest) returns (GetNewBlockResult);
+  // Construct a new block template that includes the supplied coinbases
+  rpc GetNewBlockTemplateWithCoinbases(GetNewBlockTemplateWithCoinbasesRequest) returns (GetNewBlockResult);
   // Construct a new block and header blob from a provided template
   rpc GetNewBlockBlob(NewBlockTemplate) returns (GetNewBlockBlobResult);
   // Submit a new block for propagation
@@ -182,6 +186,28 @@ message NewBlockTemplateRequest{
   uint64 max_weight = 2;
 }

+/// Request type for the GetNewBlockTemplateWithCoinbases RPC
+message GetNewBlockTemplateWithCoinbasesRequest{
+  PowAlgo algo = 1;
+  // This field should be moved to optional once the optional keyword is standard
+  uint64 max_weight = 2;
+  repeated NewBlockCoinbase coinbases = 3;
+}
+
+/// Request type for the GetNewBlockWithCoinbases RPC
+message GetNewBlockWithCoinbasesRequest{
+  NewBlockTemplate new_template = 1;
+  repeated NewBlockCoinbase coinbases = 2;
+}
+
+message NewBlockCoinbase{
+  string address = 1;
+  uint64 value = 2;
+  bool stealth_payment = 3;
+  bool revealed_value_proof = 4;
+  bytes coinbase_extra = 5;
+}
+
 // Network difficulty response
 message NetworkDifficultyResponse {
   uint64 difficulty = 1;
@@ -310,9 +336,9 @@ enum Sorting {
 message MetaData {
   // The current chain height, or the block number of the longest valid chain, or `None` if there is no chain
-  uint64 height_of_longest_chain = 1;
+  uint64 best_block_height = 1;
   // The block hash of the current tip of the longest valid chain, or `None` for an empty chain
-  bytes best_block = 2;
+  bytes best_block_hash = 2;
   // This is the min height this node can provide complete blocks for. A 0 here means this node is archival and can provide complete blocks for every height.
   uint64 pruned_height = 6;
   // The current geometric mean of the pow of the chain tip, or `None` if there is no chain
@@ -347,6 +373,8 @@ message GetNewBlockResult{
   // This is the completed block
   Block block = 2;
   bytes merge_mining_hash =3;
+  bytes tari_unique_id = 4;
+  MinerData miner_data = 5;
 }

 // This is the message that is returned for a miner after it asks for a new block.
@@ -359,6 +387,7 @@ message GetNewBlockBlobResult{
   bytes block_body = 3;
   bytes merge_mining_hash =4;
   bytes utxo_mr = 5;
+  bytes tari_unique_id = 6;
 }

 // This is mining data for the miner asking for a new block
diff --git a/applications/minotari_app_grpc/proto/types.proto b/applications/minotari_app_grpc/proto/types.proto
index 474e2bc3ab..c652dbd6bf 100644
--- a/applications/minotari_app_grpc/proto/types.proto
+++ b/applications/minotari_app_grpc/proto/types.proto
@@ -119,7 +119,7 @@ message ConsensusConstants {
   uint64 median_timestamp_count = 9;
   uint64 emission_initial = 10;
   repeated uint64 emission_decay = 11;
-  uint64 emission_tail = 12;
+  uint64 emission_tail = 12 [deprecated=true];
   uint64 min_sha3x_pow_difficulty = 13;
   uint64 block_weight_inputs = 14;
   uint64 block_weight_outputs = 15;
@@ -141,4 +141,6 @@ message ConsensusConstants {
   uint64 validator_node_registration_min_lock_height = 32;
   uint64 validator_node_registration_shuffle_interval_epoch = 33;
   repeated PermittedRangeProofs permitted_range_proof_types = 34;
+  uint64 inflation_bips = 35;
+  uint64 tail_epoch_length = 36;
 }
diff --git a/applications/minotari_app_grpc/src/conversions/chain_metadata.rs b/applications/minotari_app_grpc/src/conversions/chain_metadata.rs
index 3a5c3e7e69..5baa628e50 100644
--- a/applications/minotari_app_grpc/src/conversions/chain_metadata.rs
+++ b/applications/minotari_app_grpc/src/conversions/chain_metadata.rs
@@ -29,8 +29,8 @@ impl From<ChainMetadata> for grpc::MetaData {
         let mut diff = [0u8; 32];
         meta.accumulated_difficulty().to_big_endian(&mut diff);
         Self {
-            height_of_longest_chain: meta.height_of_longest_chain(),
-            best_block: meta.best_block().to_vec(),
+            best_block_height: meta.best_block_height(),
+            best_block_hash: meta.best_block_hash().to_vec(),
             pruned_height: meta.pruned_height(),
             accumulated_difficulty: diff.to_vec(),
         }
diff --git a/applications/minotari_app_grpc/src/conversions/consensus_constants.rs b/applications/minotari_app_grpc/src/conversions/consensus_constants.rs
index 80fac7416e..74818c8cb0 100644
--- a/applications/minotari_app_grpc/src/conversions/consensus_constants.rs
+++ b/applications/minotari_app_grpc/src/conversions/consensus_constants.rs
@@ -29,7 +29,7 @@ use crate::tari_rpc as grpc;
 impl From<ConsensusConstants> for grpc::ConsensusConstants {
     #[allow(clippy::too_many_lines)]
     fn from(cc: ConsensusConstants) -> Self {
-        let (emission_initial, emission_decay, emission_tail) = cc.emission_amounts();
+        let (emission_initial, emission_decay, inflation_bips, tail_epoch_length) = cc.emission_amounts();
         let weight_params = cc.transaction_weight_params().params();
         let input_version_range = cc.input_version_range().clone().into_inner();
         let input_version_range = grpc::Range {
@@ -100,6 +100,7 @@
         let proof_of_work = HashMap::from_iter([(0u32, randomx_pow), (1u32, sha3x_pow)]);

+        #[allow(deprecated)]
         Self {
             coinbase_min_maturity: cc.coinbase_min_maturity(),
             blockchain_version: cc.blockchain_version().into(),
@@ -110,7 +111,9 @@
             median_timestamp_count: u64::try_from(cc.median_timestamp_count()).unwrap_or(0),
             emission_initial: emission_initial.into(),
             emission_decay: emission_decay.to_vec(),
-            emission_tail: emission_tail.into(),
+            emission_tail: 0,
+            inflation_bips,
+            tail_epoch_length,
             min_sha3x_pow_difficulty: cc.min_pow_difficulty(PowAlgorithm::Sha3x).into(),
             block_weight_inputs: weight_params.input_weight,
             block_weight_outputs: weight_params.output_weight,
diff --git a/applications/minotari_app_grpc/src/conversions/mod.rs b/applications/minotari_app_grpc/src/conversions/mod.rs
index b716c253db..efd583fcc3 100644
--- a/applications/minotari_app_grpc/src/conversions/mod.rs
+++ b/applications/minotari_app_grpc/src/conversions/mod.rs
@@ -43,25 +43,6 @@ mod unblinded_output;

 use prost_types::Timestamp;

-pub use self::{
-    aggregate_body::*,
-    base_node_state::*,
-    block::*,
-    block_header::*,
-    chain_metadata::*,
-    com_and_pub_signature::*,
-    consensus_constants::*,
-    historical_block::*,
-    new_block_template::*,
-    output_features::*,
-    peer::*,
-    proof_of_work::*,
-    signature::*,
-    transaction::*,
-    transaction_input::*,
-    transaction_kernel::*,
-    transaction_output::*,
-};
 use crate::{tari_rpc as grpc, tari_rpc::BlockGroupRequest};

 /// Utility function that converts a `chrono::NaiveDateTime` to a `prost::Timestamp`
diff --git a/applications/minotari_app_utilities/Cargo.toml b/applications/minotari_app_utilities/Cargo.toml
index 0c32287b37..615e2a02e4 100644
--- a/applications/minotari_app_utilities/Cargo.toml
+++ b/applications/minotari_app_utilities/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "minotari_app_utilities"
-version = "1.0.0-pre.5"
+version = "1.0.0-pre.11a"
 authors = ["The Tari Development Community"]
 edition = "2018"
 license = "BSD-3-Clause"
@@ -9,7 +9,6 @@ license = "BSD-3-Clause"
 tari_common = { path = "../../common" }
 tari_common_types = { path = "../../base_layer/common_types" }
 tari_comms = { path = "../../comms/core" }
-tari_features = { path = "../../common/tari_features" }
 tari_utilities = { version = "0.7" }
 minotari_app_grpc = { path = "../minotari_app_grpc", optional = true }
@@ -18,7 +17,7 @@ futures = { version = "^0.3.16", default-features = false, features = ["alloc"]
 json5 = "0.4"
 log = { version = "0.4.8", features = ["std"] }
 rand = "0.8"
-tokio = { version = "1.23", features = ["signal"] }
+tokio = { version = "1.36", features = ["signal"] }
 serde = "1.0.126"
 thiserror = "^1.0.26"
 dialoguer = { version = "0.10" }
@@ -27,7 +26,7 @@ tonic = "0.8.3"

 [build-dependencies]
 tari_common = { path = "../../common", features = ["build", "static-application-info"] }
-tari_features = { path = "../../common/tari_features" }
path = "../../common/tari_features" } +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a" } [features] miner_input = ["minotari_app_grpc"] diff --git a/applications/minotari_app_utilities/src/common_cli_args.rs b/applications/minotari_app_utilities/src/common_cli_args.rs index 78a0f2e366..57fd52d402 100644 --- a/applications/minotari_app_utilities/src/common_cli_args.rs +++ b/applications/minotari_app_utilities/src/common_cli_args.rs @@ -105,7 +105,7 @@ impl CommonCliArgs { } impl ConfigOverrideProvider for CommonCliArgs { - fn get_config_property_overrides(&self, _default_network: Network) -> Vec<(String, String)> { + fn get_config_property_overrides(&self, _network: &mut Network) -> Vec<(String, String)> { let mut overrides = self.config_property_overrides.clone(); overrides.push(( "common.base_path".to_string(), diff --git a/applications/minotari_app_utilities/src/lib.rs b/applications/minotari_app_utilities/src/lib.rs index 1033f05499..0859c10104 100644 --- a/applications/minotari_app_utilities/src/lib.rs +++ b/applications/minotari_app_utilities/src/lib.rs @@ -22,7 +22,6 @@ pub mod common_cli_args; pub mod identity_management; -pub mod network_check; #[cfg(feature = "miner_input")] pub mod parse_miner_input; pub mod utilities; diff --git a/applications/minotari_console_wallet/Cargo.toml b/applications/minotari_console_wallet/Cargo.toml index ea5e2190c1..832f661ffd 100644 --- a/applications/minotari_console_wallet/Cargo.toml +++ b/applications/minotari_console_wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "minotari_console_wallet" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" @@ -21,36 +21,38 @@ tari_script = { path = "../../infrastructure/tari_script" } tari_shutdown = { path = "../../infrastructure/shutdown" } tari_utilities = { version = "0.7" } minotari_wallet = { path = "../../base_layer/wallet", features = [ - "bundled_sqlite", + "bundled_sqlite", ] } -tari_hash_domains = { path = "../../hash_domains" } +tari_hashing = { path = "../../hashing" } # Uncomment for tokio tracing via tokio-console (needs "tracing" featurs) console-subscriber = "0.1.8" -#tokio = { version = "1.20", features = ["signal", "tracing"] } +#tokio = { version = "1.36", features = ["signal", "tracing"] } # Uncomment for normal use (non tokio-console tracing) -tokio = { version = "1.23", features = ["signal"] } +tokio = { version = "1.36", features = ["signal"] } +blake2 = "0.10" chrono = { version = "0.4.19", default-features = false } clap = { version = "3.2", features = ["derive", "env"] } -config = "0.13.0" +config = "0.14.0" crossterm = { version = "0.25.0" } digest = "0.10" futures = { version = "^0.3.16", default-features = false, features = [ - "alloc", -] } -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = [ - "config_parsing", - "threshold_filter", - "yaml_format", - "console_appender", - "rolling_file_appender", - "compound_policy", - "size_trigger", - "fixed_window_roller", - "delete_roller", + "alloc", ] } +ledger-transport-hid = { git = "https://github.com/Zondax/ledger-rs", rev = "20e2a20", optional = true } log = { version = "0.4.8", features = ["std"] } +log4rs = { version = "1.3.0", default_features = false, features = [ + "config_parsing", + "threshold_filter", + "yaml_format", + "console_appender", + "rolling_file_appender", + "compound_policy", + "size_trigger", + "fixed_window_roller", + "delete_roller", +] } 
qrcode = { version = "0.12" } rand = "0.8" regex = "1.5.4" @@ -66,10 +68,9 @@ thiserror = "1.0.26" tonic = "0.8.3" unicode-segmentation = "1.6.0" unicode-width = "0.1" +url = "2.3.1" zeroize = "1" zxcvbn = "2" -url = "2.3.1" -blake2 = "0.10" [dependencies.tari_core] path = "../../base_layer/core" @@ -82,12 +83,13 @@ default-features = false features = ["crossterm"] [build-dependencies] -tari_features = { path = "../../common/tari_features" } +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a" } [features] default = ["libtor"] -libtor = ["tari_libtor"] grpc = [] +ledger = ["ledger-transport-hid"] +libtor = ["tari_libtor"] [package.metadata.cargo-machete] # We need to specify extra features for log4rs even though it is not used directly in this crate diff --git a/applications/minotari_console_wallet/src/automation/commands.rs b/applications/minotari_console_wallet/src/automation/commands.rs index 3949863419..aa520caee2 100644 --- a/applications/minotari_console_wallet/src/automation/commands.rs +++ b/applications/minotari_console_wallet/src/automation/commands.rs @@ -38,7 +38,10 @@ use minotari_app_grpc::tls::certs::{generate_self_signed_certs, print_warning, w use minotari_wallet::{ connectivity_service::WalletConnectivityInterface, output_manager_service::{handle::OutputManagerHandle, UtxoSelectionCriteria}, - transaction_service::handle::{TransactionEvent, TransactionServiceHandle}, + transaction_service::{ + handle::{TransactionEvent, TransactionServiceHandle}, + storage::models::WalletTransaction, + }, TransactionStage, WalletConfig, WalletSqlite, @@ -90,6 +93,8 @@ pub enum WalletCommand { DiscoverPeer, Whois, ExportUtxos, + ExportTx, + ImportTx, ExportSpentUtxos, CountUtxos, SetBaseNode, @@ -310,7 +315,9 @@ async fn set_base_node_peer( ) -> Result<(CommsPublicKey, Multiaddr), CommandError> { println!("Setting base node peer..."); println!("{}::{}", public_key, address); - wallet.set_base_node_peer(public_key.clone(), address.clone()).await?; + wallet + .set_base_node_peer(public_key.clone(), Some(address.clone())) + .await?; Ok((public_key, address)) } @@ -344,7 +351,7 @@ pub async fn discover_peer( pub async fn make_it_rain( wallet_transaction_service: TransactionServiceHandle, fee_per_gram: u64, - transactions_per_second: u32, + transactions_per_second: f64, duration: Duration, start_amount: MicroMinotari, increase_amount: MicroMinotari, @@ -353,6 +360,13 @@ pub async fn make_it_rain( transaction_type: MakeItRainTransactionType, message: String, ) -> Result<(), CommandError> { + // Limit the transactions per second to a reasonable range + // Notes: + // - The 'transactions_per_second' is best effort and not guaranteed. + // - If a slower rate is requested as what is achievable, transactions will be delayed to match the rate. + // - If a faster rate is requested as what is achievable, the maximum rate will be that of the integrated system. + // - The default value of 25/s may not be achievable. 
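+    // - Illustrative pacing example (hypothetical rate, not part of this change): at a requested
+    //   rate of 4.0 tx/s, transaction i is scheduled at target_ms = i * (1000.0 / 4.0), so
+    //   transaction 10 targets 2500 ms; if it comes up earlier than that, the send loop below
+    //   sleeps the difference, capped at 120 s.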
+    let transactions_per_second = transactions_per_second.abs().max(0.01).min(250.0);
     // We are spawning this command in parallel, thus not collecting transaction IDs
     tokio::task::spawn(async move {
         // Wait until specified test start time
@@ -373,7 +387,7 @@
         );
         sleep(Duration::from_millis(delay_ms)).await;

-        let num_txs = (f64::from(transactions_per_second) * duration.as_secs() as f64) as usize;
+        let num_txs = (transactions_per_second * duration.as_secs() as f64) as usize;
         let started_at = Utc::now();

         struct TransactionSendStats {
@@ -404,10 +418,19 @@
             // Manage transaction submission rate
             let actual_ms = (Utc::now() - started_at).num_milliseconds();
-            let target_ms = (i as f64 / f64::from(transactions_per_second) / 1000.0) as i64;
+            let target_ms = (i as f64 * (1000.0 / transactions_per_second)) as i64;
+            trace!(
+                target: LOG_TARGET,
+                "make-it-rain {}: target {:?} ms vs. actual {:?} ms", i, target_ms, actual_ms
+            );
             if target_ms - actual_ms > 0 {
                 // Maximum delay between Txs set to 120 s
-                sleep(Duration::from_millis((target_ms - actual_ms).min(120_000i64) as u64)).await;
+                let delay_ms = Duration::from_millis((target_ms - actual_ms).min(120_000i64) as u64);
+                trace!(
+                    target: LOG_TARGET,
+                    "make-it-rain {}: delaying for {:?}", i, delay_ms
+                );
+                sleep(delay_ms).await;
             }
             let delayed_for = Instant::now();
             let sender_clone = sender.clone();
@@ -798,6 +821,34 @@ pub async fn command_runner(
             },
             Err(e) => eprintln!("ExportUtxos error! {}", e),
         },
+        ExportTx(args) => match transaction_service.get_any_transaction(args.tx_id.into()).await {
+            Ok(Some(tx)) => {
+                if let Some(file) = args.output_file {
+                    if let Err(e) = write_tx_to_csv_file(tx, file) {
+                        eprintln!("ExportTx error! {}", e);
+                    }
+                } else {
+                    println!("Tx: {:?}", tx);
+                }
+            },
+            Ok(None) => {
+                eprintln!("ExportTx error! No tx found")
+            },
+            Err(e) => eprintln!("ExportTx error! {}", e),
+        },
+        ImportTx(args) => {
+            match load_tx_from_csv_file(args.input_file) {
+                Ok(txs) => {
+                    for tx in txs {
+                        match transaction_service.import_transaction(tx).await {
+                            Ok(id) => println!("imported tx: {}", id),
+                            Err(e) => eprintln!("Could not import tx {}", e),
+                        };
+                    }
+                },
+                Err(e) => eprintln!("ImportTx error! {}", e),
{}", e), + }; + }, ExportSpentUtxos(args) => match output_service.get_spent_outputs().await { Ok(utxos) => { let utxos: Vec<(WalletOutput, Commitment)> = @@ -1078,6 +1129,29 @@ fn write_utxos_to_csv_file(utxos: Vec<(WalletOutput, Commitment)>, file_path: Pa } Ok(()) } + +fn write_tx_to_csv_file(tx: WalletTransaction, file_path: PathBuf) -> Result<(), CommandError> { + let file = File::create(file_path).map_err(|e| CommandError::CSVFile(e.to_string()))?; + let mut csv_file = LineWriter::new(file); + let tx_string = serde_json::to_string(&tx).map_err(|e| CommandError::CSVFile(e.to_string()))?; + writeln!(csv_file, "{}", tx_string).map_err(|e| CommandError::CSVFile(e.to_string()))?; + + Ok(()) +} + +fn load_tx_from_csv_file(file_path: PathBuf) -> Result, CommandError> { + let file_contents = fs::read_to_string(file_path).map_err(|e| CommandError::CSVFile(e.to_string()))?; + let mut results = Vec::new(); + for line in file_contents.lines() { + if let Ok(tx) = serde_json::from_str(line) { + results.push(tx); + } else { + return Err(CommandError::CSVFile("Could not read json file".to_string())); + } + } + Ok(results) +} + #[allow(dead_code)] fn write_json_file, T: Serialize>(path: P, data: &T) -> Result<(), CommandError> { fs::create_dir_all(path.as_ref().parent().unwrap()).map_err(|e| CommandError::JsonFile(e.to_string()))?; @@ -1106,7 +1180,7 @@ async fn get_tip_height(wallet: &WalletSqlite) -> Option { .await .ok() .and_then(|t| t.metadata) - .map(|m| m.height_of_longest_chain), + .map(|m| m.best_block_height), None => None, } } diff --git a/applications/minotari_console_wallet/src/cli.rs b/applications/minotari_console_wallet/src/cli.rs index 08afa45b92..496347181a 100644 --- a/applications/minotari_console_wallet/src/cli.rs +++ b/applications/minotari_console_wallet/src/cli.rs @@ -91,9 +91,9 @@ pub struct Cli { } impl ConfigOverrideProvider for Cli { - fn get_config_property_overrides(&self, default_network: Network) -> Vec<(String, String)> { - let mut overrides = self.common.get_config_property_overrides(default_network); - let network = self.common.network.unwrap_or(default_network); + fn get_config_property_overrides(&self, network: &mut Network) -> Vec<(String, String)> { + let mut overrides = self.common.get_config_property_overrides(network); + *network = self.common.network.unwrap_or(*network); overrides.push(("wallet.network".to_string(), network.to_string())); overrides.push(("wallet.override_from".to_string(), network.to_string())); overrides.push(("p2p.seeds.override_from".to_string(), network.to_string())); @@ -123,6 +123,8 @@ pub enum CliCommands { DiscoverPeer(DiscoverPeerArgs), Whois(WhoisArgs), ExportUtxos(ExportUtxosArgs), + ExportTx(ExportTxArgs), + ImportTx(ImportTxArgs), ExportSpentUtxos(ExportUtxosArgs), CountUtxos, SetBaseNode(SetBaseNodeArgs), @@ -161,8 +163,8 @@ pub struct MakeItRainArgs { pub destination: TariAddress, #[clap(short, long, alias="amount", default_value_t = tari_amount::T)] pub start_amount: MicroMinotari, - #[clap(short, long, alias = "tps", default_value_t = 25)] - pub transactions_per_second: u32, + #[clap(short, long, alias = "tps", default_value_t = 25.0)] + pub transactions_per_second: f64, #[clap(short, long, parse(try_from_str = parse_duration), default_value="60")] pub duration: Duration, #[clap(long, default_value_t=tari_amount::T)] @@ -241,6 +243,19 @@ pub struct ExportUtxosArgs { pub output_file: Option, } +#[derive(Debug, Args, Clone)] +pub struct ExportTxArgs { + pub tx_id: u64, + #[clap(short, long)] + pub output_file: Option, +} + 
+#[derive(Debug, Args, Clone)]
+pub struct ImportTxArgs {
+    #[clap(short, long)]
+    pub input_file: PathBuf,
+}
+
 #[derive(Debug, Args, Clone)]
 pub struct SetBaseNodeArgs {
     pub public_key: UniPublicKey,
diff --git a/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs
index abbbd20ee3..f74f793664 100644
--- a/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs
+++ b/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs
@@ -115,7 +115,7 @@ use tonic::{Request, Response, Status};

 use crate::{
     grpc::{convert_to_transaction_event, TransactionWrapper},
-    notifier::{CANCELLED, CONFIRMATION, MINED, NEW_BLOCK_MINED, QUEUED, RECEIVED, SENT},
+    notifier::{CANCELLED, CONFIRMATION, MINED, QUEUED, RECEIVED, SENT},
 };

 const LOG_TARGET: &str = "wallet::ui::grpc";
@@ -165,7 +165,7 @@ impl WalletGrpcServer {
             .wallet
             .db
             .get_chain_metadata()?
-            .map(|m| m.height_of_longest_chain())
+            .map(|m| m.best_block_height())
             .unwrap_or_default();
         Ok(self.rules.consensus_constants(height))
     }
@@ -250,7 +250,7 @@ impl wallet_server::Wallet for WalletGrpcServer {
         println!("{}::{}", public_key, net_address);
         let mut wallet = self.wallet.clone();
         wallet
-            .set_base_node_peer(public_key.clone(), net_address.clone())
+            .set_base_node_peer(public_key.clone(), Some(net_address.clone()))
             .await
             .map_err(|e| Status::internal(format!("{:?}", e)))?;

@@ -264,10 +264,7 @@ impl wallet_server::Wallet for WalletGrpcServer {
             Err(e) => return Err(Status::not_found(format!("GetBalance error! {}", e))),
         };
         Ok(Response::new(GetBalanceResponse {
-            available_balance: balance
-                .available_balance
-                .saturating_sub(balance.time_locked_balance.unwrap_or_default())
-                .0,
+            available_balance: balance.available_balance.0,
             pending_incoming_balance: balance.pending_incoming_balance.0,
             pending_outgoing_balance: balance.pending_outgoing_balance.0,
             timelocked_balance: balance.time_locked_balance.unwrap_or_default().0,
@@ -687,19 +684,6 @@ impl wallet_server::Wallet for WalletGrpcServer {
                     Ok(msg) => {
                         use minotari_wallet::transaction_service::handle::TransactionEvent::*;
                         match (*msg).clone() {
-                            NewBlockMined(tx_id) => {
-                                match transaction_service.get_any_transaction(tx_id).await {
-                                    Ok(found_transaction) => {
-                                        if let Some(WalletTransaction::PendingOutbound(tx)) = found_transaction {
-                                            let transaction_event = convert_to_transaction_event(NEW_BLOCK_MINED.to_string(),
-                                                TransactionWrapper::Outbound(Box::new(tx)));
-                                            send_transaction_event(transaction_event, &mut sender).await;
-                                        }
-
-                                    },
-                                    Err(e) => error!(target: LOG_TARGET, "Transaction service error: {}", e),
-                                }
-                            },
                             ReceivedFinalizedTransaction(tx_id) => handle_completed_tx(tx_id, RECEIVED, &mut transaction_service, &mut sender).await,
                             TransactionMinedUnconfirmed{tx_id, num_confirmations: _, is_valid: _} | DetectedTransactionUnconfirmed{tx_id, num_confirmations: _, is_valid: _}=> handle_completed_tx(tx_id, CONFIRMATION, &mut transaction_service, &mut sender).await,
                             TransactionMined{tx_id, is_valid: _} | DetectedTransactionConfirmed{tx_id, is_valid: _} => handle_completed_tx(tx_id, MINED, &mut transaction_service, &mut sender).await,
@@ -752,26 +736,31 @@
     ) -> Result<Response<Self::GetCompletedTransactionsStream>, Status> {
         debug!(
             target: LOG_TARGET,
-            "Incoming GRPC request for GetAllCompletedTransactions"
+            "GetAllCompletedTransactions: Incoming GRPC request"
         );
         let mut transaction_service = self.get_transaction_service();
         let transactions = transaction_service
            .get_completed_transactions()
             .await
             .map_err(|err| Status::not_found(format!("No completed transactions found: {:?}", err)))?;
+        debug!(
+            target: LOG_TARGET,
+            "GetAllCompletedTransactions: Found {} completed transactions",
+            transactions.len()
+        );

         let (mut sender, receiver) = mpsc::channel(transactions.len());
         task::spawn(async move {
-            for (_, txn) in transactions {
+            for (i, (_, txn)) in transactions.iter().enumerate() {
                 let response = GetCompletedTransactionsResponse {
                     transaction: Some(TransactionInfo {
                         tx_id: txn.tx_id.into(),
                         source_address: txn.source_address.to_bytes().to_vec(),
                         dest_address: txn.destination_address.to_bytes().to_vec(),
-                        status: TransactionStatus::from(txn.status) as i32,
+                        status: TransactionStatus::from(txn.status.clone()) as i32,
                         amount: txn.amount.into(),
                         is_cancelled: txn.cancelled.is_some(),
-                        direction: TransactionDirection::from(txn.direction) as i32,
+                        direction: TransactionDirection::from(txn.direction.clone()) as i32,
                         fee: txn.fee.into(),
                         timestamp: txn.timestamp.timestamp() as u64,
                         excess_sig: txn
@@ -780,11 +769,19 @@ impl wallet_server::Wallet for WalletGrpcServer {
                             .unwrap_or(&Signature::default())
                             .get_signature()
                             .to_vec(),
-                        message: txn.message,
+                        message: txn.message.clone(),
                     }),
                 };
                 match sender.send(Ok(response)).await {
-                    Ok(_) => (),
+                    Ok(_) => {
+                        debug!(
+                            target: LOG_TARGET,
+                            "GetAllCompletedTransactions: Sent transaction TxId: {} ({} of {})",
+                            txn.tx_id,
+                            i + 1,
+                            transactions.len()
+                        );
+                    },
                     Err(err) => {
                         warn!(target: LOG_TARGET, "Error sending transaction via GRPC: {}", err);
                         match sender.send(Err(Status::unknown("Error sending data"))).await {
diff --git a/applications/minotari_console_wallet/src/init/mod.rs b/applications/minotari_console_wallet/src/init/mod.rs
index d5faff3b2a..e2cd0f9b1e 100644
--- a/applications/minotari_console_wallet/src/init/mod.rs
+++ b/applications/minotari_console_wallet/src/init/mod.rs
@@ -22,8 +22,10 @@

 #![allow(dead_code, unused)]

-use std::{fs, path::PathBuf, str::FromStr, sync::Arc};
+use std::{fs, io, path::PathBuf, str::FromStr, sync::Arc, time::Instant};

+#[cfg(feature = "ledger")]
+use ledger_transport_hid::{hidapi::HidApi, TransportNativeHID};
 use log::*;
 use minotari_app_utilities::identity_management::setup_node_identity;
 use minotari_wallet::{
@@ -33,7 +35,7 @@ use minotari_wallet::{
         database::{WalletBackend, WalletDatabase},
         sqlite_utilities::initialize_sqlite_database_backends,
     },
-    wallet::{derive_comms_secret_key, read_or_create_master_seed},
+    wallet::{derive_comms_secret_key, read_or_create_master_seed, read_or_create_wallet_type},
     Wallet,
     WalletConfig,
     WalletSqlite,
@@ -48,6 +50,7 @@ use tari_common::{
     },
     exit_codes::{ExitCode, ExitError},
 };
+use tari_common_types::wallet_types::WalletType;
 use tari_comms::{
     multiaddr::Multiaddr,
     peer_manager::{Peer, PeerFeatures, PeerQuery},
@@ -248,6 +251,7 @@ pub async fn change_password(
         None,
         shutdown_signal,
         non_interactive_mode,
+        None,
     )
     .await?;

@@ -379,6 +383,7 @@ pub async fn init_wallet(
     recovery_seed: Option<CipherSeed>,
     shutdown_signal: ShutdownSignal,
     non_interactive_mode: bool,
+    wallet_type: Option<WalletType>,
 ) -> Result<WalletSqlite, ExitError> {
     fs::create_dir_all(
         config
@@ -414,6 +419,7 @@
     };

     let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
+    let wallet_type = read_or_create_wallet_type(wallet_type, &wallet_db);

     let node_identity = match config.wallet.identity_file.as_ref() {
         Some(identity_file) => {
@@ -442,6 +448,8 @@
        .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating consensus manager. {}", e)))?;
     let factories = CryptoFactories::default();

+    let now = Instant::now();
+
     let mut wallet = Wallet::start(
         wallet_config,
         config.peer_seeds.clone(),
@@ -457,18 +465,18 @@
         key_manager_backend,
         shutdown_signal,
         master_seed,
+        wallet_type.unwrap(),
     )
     .await
     .map_err(|e| match e {
         WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
         e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
     })?;
-    if let Some(hs) = wallet.comms.hidden_service() {
-        wallet
-            .db
-            .set_tor_identity(hs.tor_identity().clone())
-            .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
-    }
+
+    error!(
+        target: LOG_TARGET,
+        "Wallet started in {}ms", now.elapsed().as_millis()
+    );

     if let Some(file_name) = seed_words_file_name {
         let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
@@ -570,7 +578,7 @@ pub async fn start_wallet(
         .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;

     wallet
-        .set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
+        .set_base_node_peer(base_node.public_key.clone(), Some(net_address.address().clone()))
         .await
         .map_err(|e| {
             ExitError::new(
@@ -803,6 +811,60 @@ pub(crate) fn boot_with_password(

     Ok((boot_mode, password))
 }

+pub fn prompt_wallet_type(
+    boot_mode: WalletBoot,
+    wallet_config: &WalletConfig,
+    non_interactive: bool,
+) -> Option<WalletType> {
+    if non_interactive {
+        return Some(WalletType::Software);
+    }
+
+    if wallet_config.wallet_type.is_some() {
+        return wallet_config.wallet_type;
+    }
+
+    match boot_mode {
+        WalletBoot::New => {
+            #[cfg(not(feature = "ledger"))]
+            return Some(WalletType::Software);
+
+            #[cfg(feature = "ledger")]
+            {
+                if prompt("\r\nWould you like to use a connected hardware wallet? (Supported types: Ledger)") {
+                    print!("Scanning for connected Ledger hardware device... ");
+                    let err = "No connected device was found. Please make sure the device is plugged in before
+                    continuing.";
+                    match TransportNativeHID::new(&HidApi::new().expect(err)) {
+                        Ok(_) => {
+                            println!("Device found.");
+                            let account = prompt_ledger_account().expect("An account value");
+                            Some(WalletType::Ledger(account))
+                        },
+                        Err(e) => panic!("{}", e),
+                    }
+                } else {
+                    Some(WalletType::Software)
+                }
+            }
+        },
+        _ => None,
+    }
+}
+
+pub fn prompt_ledger_account() -> Option<usize> {
+    let question =
Simple, easily remembered numbers in the range 1-9 are suggested."; + println!("{}", question); + let mut input = "".to_string(); + io::stdin().read_line(&mut input).unwrap(); + let input = input.trim(); + match input.parse() { + Ok(num) => Some(num), + Err(_e) => Some(1), + } +} + #[cfg(test)] mod test { use tari_utilities::SafePassword; diff --git a/applications/minotari_console_wallet/src/lib.rs b/applications/minotari_console_wallet/src/lib.rs index 96525d44d1..95cb8c32da 100644 --- a/applications/minotari_console_wallet/src/lib.rs +++ b/applications/minotari_console_wallet/src/lib.rs @@ -48,7 +48,7 @@ pub use cli::{ }; use init::{change_password, get_base_node_peer_config, init_wallet, start_wallet, tari_splash_screen, WalletBoot}; use log::*; -use minotari_app_utilities::{common_cli_args::CommonCliArgs, consts, network_check::set_network_if_choice_valid}; +use minotari_app_utilities::{common_cli_args::CommonCliArgs, consts}; use minotari_wallet::transaction_service::config::TransactionRoutingMechanism; use recovery::{get_seed_from_seed_words, prompt_private_key_from_seed_words}; use tari_common::{ @@ -64,7 +64,7 @@ use tokio::runtime::Runtime; use wallet_modes::{command_mode, grpc_mode, recovery_mode, script_mode, tui_mode, WalletMode}; pub use crate::config::ApplicationConfig; -use crate::init::{boot_with_password, confirm_direct_only_send, confirm_seed_words, wallet_mode}; +use crate::init::{boot_with_password, confirm_direct_only_send, confirm_seed_words, prompt_wallet_type, wallet_mode}; pub const LOG_TARGET: &str = "wallet::console_wallet::main"; @@ -117,8 +117,6 @@ pub fn run_wallet_with_cli( consts::APP_VERSION ); - set_network_if_choice_valid(config.wallet.network)?; - let password = get_password(config, &cli); if password.is_none() { @@ -130,6 +128,9 @@ let recovery_seed = get_recovery_seed(boot_mode, &cli)?; + // This is deactivated at the moment as full support is not yet complete + let wallet_type = prompt_wallet_type(boot_mode, &config.wallet, cli.non_interactive_mode); + // get command line password if provided let seed_words_file_name = cli.seed_words_file_name.clone(); @@ -151,7 +152,7 @@ if config.wallet.use_libtor && config.wallet.p2p.transport.is_tor() { let tor = Tor::initialize()?; tor.update_comms_transport(&mut config.wallet.p2p.transport)?; - runtime.spawn(tor.run(shutdown.to_signal())); + tor.run_background(); debug!( target: LOG_TARGET, "Updated Tor comms transport: {:?}", config.wallet.p2p.transport @@ -169,6 +170,7 @@ recovery_seed, shutdown_signal, cli.non_interactive_mode, + wallet_type, ))?; if !cli.non_interactive_mode && diff --git a/applications/minotari_console_wallet/src/notifier/mod.rs b/applications/minotari_console_wallet/src/notifier/mod.rs index 03da9ba32d..5f160970f0 100644 --- a/applications/minotari_console_wallet/src/notifier/mod.rs +++ b/applications/minotari_console_wallet/src/notifier/mod.rs @@ -46,7 +46,6 @@ pub const QUEUED: &str = "queued"; pub const CONFIRMATION: &str = "confirmation"; pub const MINED: &str = "mined"; pub const CANCELLED: &str = "cancelled"; -pub const NEW_BLOCK_MINED: &str = "new_block_mined"; #[derive(Clone)] // FIXME diff --git a/applications/minotari_console_wallet/src/ui/components/balance.rs b/applications/minotari_console_wallet/src/ui/components/balance.rs index 8dc1812f24..f5c8858727 100644 --- a/applications/minotari_console_wallet/src/ui/components/balance.rs +++ b/applications/minotari_console_wallet/src/ui/components/balance.rs @@ 
-53,7 +53,7 @@ impl<B: Backend> Component<B> for Balance { let available_balance = Spans::from(vec![ Span::styled("Available:", Style::default().fg(Color::Magenta)), Span::raw(" "), - Span::raw(format!("{}", balance.available_balance.saturating_sub(time_locked))), + Span::raw(format!("{}", balance.available_balance)), Span::raw(format!(" (Time Locked: {})", time_locked)), ]); let incoming_balance = Spans::from(vec![ diff --git a/applications/minotari_console_wallet/src/ui/components/base_node.rs b/applications/minotari_console_wallet/src/ui/components/base_node.rs index c7a00acdf1..561e877606 100644 --- a/applications/minotari_console_wallet/src/ui/components/base_node.rs +++ b/applications/minotari_console_wallet/src/ui/components/base_node.rs @@ -68,7 +68,7 @@ impl<B: Backend> Component<B> for BaseNode { OnlineStatus::Online => { let base_node_state = app_state.get_base_node_state(); if let Some(ref metadata) = base_node_state.chain_metadata { - let tip = metadata.height_of_longest_chain(); + let tip = metadata.best_block_height(); let synced = base_node_state.is_synced.unwrap_or_default(); let (tip_color, sync_text) = if synced { diff --git a/applications/minotari_console_wallet/src/ui/components/burn_tab.rs b/applications/minotari_console_wallet/src/ui/components/burn_tab.rs index e41a4a7e7a..e90d97c87b 100644 --- a/applications/minotari_console_wallet/src/ui/components/burn_tab.rs +++ b/applications/minotari_console_wallet/src/ui/components/burn_tab.rs @@ -250,7 +250,7 @@ impl BurnTab { let mut column0_items = Vec::new(); let mut column1_items = Vec::new(); - for item in windowed_view.iter() { + for item in windowed_view { column0_items.push(ListItem::new(Span::raw(item.reciprocal_claim_public_key.clone()))); column1_items.push(ListItem::new(Span::raw(item.burned_at.to_string().clone()))); } diff --git a/applications/minotari_console_wallet/src/ui/components/contacts_tab.rs b/applications/minotari_console_wallet/src/ui/components/contacts_tab.rs index e68c8a1670..d6a783506c 100644 --- a/applications/minotari_console_wallet/src/ui/components/contacts_tab.rs +++ b/applications/minotari_console_wallet/src/ui/components/contacts_tab.rs @@ -91,7 +91,7 @@ impl ContactsTab { let mut column2_items = Vec::new(); let mut column3_items = Vec::new(); let mut column4_items = Vec::new(); - for c in windowed_view.iter() { + for c in windowed_view { column0_items.push(ListItem::new(Span::raw(c.alias.clone()))); column1_items.push(ListItem::new(Span::raw(c.address.clone()))); column2_items.push(ListItem::new(Span::raw(display_compressed_string( diff --git a/applications/minotari_console_wallet/src/ui/components/network_tab.rs b/applications/minotari_console_wallet/src/ui/components/network_tab.rs index e6f438004f..35cb233f91 100644 --- a/applications/minotari_console_wallet/src/ui/components/network_tab.rs +++ b/applications/minotari_console_wallet/src/ui/components/network_tab.rs @@ -190,7 +190,7 @@ impl NetworkTab { let mut column0_items = Vec::with_capacity(peers.len()); let mut column1_items = Vec::with_capacity(peers.len()); let mut column2_items = Vec::with_capacity(peers.len()); - for p in peers.iter() { + for p in peers { column0_items.push(ListItem::new(Span::raw(p.node_id.to_string()))); column1_items.push(ListItem::new(Span::raw(p.public_key.to_string()))); column2_items.push(ListItem::new(Span::raw(p.user_agent.clone()))); @@ -434,11 +434,7 @@ impl<B: Backend> Component<B> for NetworkTab { // set the currently selected base node as a custom base node let base_node = app_state.get_selected_base_node(); let public_key = 
base_node.public_key.to_hex(); - let address = base_node - .addresses - .best() - .map(|a| a.to_string()) - .unwrap_or_else(|| "".to_string()); + let address = base_node.addresses.best().map(|a| a.to_string()).unwrap_or_default(); match Handle::current().block_on(app_state.set_custom_base_node(public_key, address)) { Ok(peer) => { diff --git a/applications/minotari_console_wallet/src/ui/components/register_template_tab.rs b/applications/minotari_console_wallet/src/ui/components/register_template_tab.rs index edc91d5892..1370fc1764 100644 --- a/applications/minotari_console_wallet/src/ui/components/register_template_tab.rs +++ b/applications/minotari_console_wallet/src/ui/components/register_template_tab.rs @@ -11,7 +11,7 @@ use regex::Regex; use reqwest::StatusCode; use tari_core::transactions::{tari_amount::MicroMinotari, transaction_components::TemplateType}; use tari_crypto::hashing::DomainSeparation; -use tari_hash_domains::TariEngineHashDomain; +use tari_hashing::TariEngineHashDomain; use tari_utilities::hex::Hex; use tokio::{ runtime::{Handle, Runtime}, diff --git a/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs b/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs index 50fa7ff213..2f02f523c9 100644 --- a/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs +++ b/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs @@ -102,7 +102,7 @@ impl TransactionsTab { let mut column2_items = Vec::new(); let mut column3_items = Vec::new(); - for t in windowed_view.iter() { + for t in windowed_view { let text_color = text_colors .get(&t.cancelled.is_some()) .unwrap_or(&Color::Reset) @@ -198,17 +198,14 @@ .collect(); let base_node_state = app_state.get_base_node_state(); - let chain_height = base_node_state - .chain_metadata - .as_ref() - .map(|cm| cm.height_of_longest_chain()); + let chain_height = base_node_state.chain_metadata.as_ref().map(|cm| cm.best_block_height()); let mut column0_items = Vec::new(); let mut column1_items = Vec::new(); let mut column2_items = Vec::new(); let mut column3_items = Vec::new(); - for t in windowed_view.iter() { + for t in windowed_view { let cancelled = t.cancelled.is_some(); let text_color = text_colors.get(&cancelled).unwrap_or(&Color::Reset).to_owned(); if t.direction == TransactionDirection::Outbound { @@ -598,6 +595,7 @@ impl<B: Backend> Component<B> for TransactionsTab { error!(target: LOG_TARGET, "Error rebroadcasting transactions: {}", e); } }, + 'a' => app_state.toggle_abandoned_coinbase_filter(), '\n' => match self.selected_tx_list { SelectedTransactionList::None => {}, SelectedTransactionList::PendingTxs => { diff --git a/applications/minotari_console_wallet/src/ui/mod.rs b/applications/minotari_console_wallet/src/ui/mod.rs index 93f14443a5..5413c7ee22 100644 --- a/applications/minotari_console_wallet/src/ui/mod.rs +++ b/applications/minotari_console_wallet/src/ui/mod.rs @@ -108,6 +108,7 @@ fn crossterm_loop(mut app: App<CrosstermBackend<Stdout>>) -> Result<(), ExitError> { error!(target: LOG_TARGET, "Error drawing interface. 
{}", e); ExitCode::InterfaceError })?; + #[allow(clippy::blocks_in_conditions)] match events.next().map_err(|e| { error!(target: LOG_TARGET, "Error reading input event: {}", e); ExitCode::InterfaceError diff --git a/applications/minotari_console_wallet/src/ui/state/app_state.rs b/applications/minotari_console_wallet/src/ui/state/app_state.rs index cc2a3b5196..efc0ff3145 100644 --- a/applications/minotari_console_wallet/src/ui/state/app_state.rs +++ b/applications/minotari_console_wallet/src/ui/state/app_state.rs @@ -96,6 +96,7 @@ pub struct AppState { inner: Arc<RwLock<AppStateInner>>, cached_data: AppStateData, cache_update_cooldown: Option<Instant>, + completed_tx_filter: TransactionFilter, config: AppStateConfig, wallet_config: WalletConfig, wallet_connectivity: WalletConnectivityHandle, @@ -121,10 +122,11 @@ impl AppState { cached_data, cache_update_cooldown: None, config: AppStateConfig::default(), + completed_tx_filter: TransactionFilter::AbandonedCoinbases, wallet_connectivity, balance_enquiry_debouncer: BalanceEnquiryDebouncer::new( inner, - Duration::from_secs(5), + wallet_config.balance_enquiry_cooldown_period, output_manager_service, ), wallet_config, @@ -185,6 +187,13 @@ Ok(()) } + pub fn toggle_abandoned_coinbase_filter(&mut self) { + self.completed_tx_filter = match self.completed_tx_filter { + TransactionFilter::AbandonedCoinbases => TransactionFilter::None, + TransactionFilter::None => TransactionFilter::AbandonedCoinbases, + }; + } + pub async fn update_cache(&mut self) { let update = match self.cache_update_cooldown { Some(last_update) => last_update.elapsed() > self.config.cache_update_cooldown, @@ -556,7 +565,15 @@ } pub fn get_completed_txs(&self) -> Vec<&CompletedTransactionInfo> { - self.cached_data.completed_txs.iter().collect() + if self.completed_tx_filter == TransactionFilter::AbandonedCoinbases { + self.cached_data + .completed_txs + .iter() + .filter(|tx| !matches!(tx.status, TransactionStatus::CoinbaseNotInBlockChain)) + .collect() + } else { + self.cached_data.completed_txs.iter().collect() + } } pub fn get_confirmations(&self, tx_id: TxId) -> Option<&u64> { @@ -896,12 +913,12 @@ }); self.data.contacts = ui_contacts; + self.refresh_network_id().await?; self.updated = true; Ok(()) } pub async fn refresh_burnt_proofs_state(&mut self) -> Result<(), UiError> { - // let db_burnt_proofs = self.wallet.db.get_burnt_proofs()?; let db_burnt_proofs = self.wallet.db.fetch_burnt_proofs()?; let mut ui_proofs: Vec<UiBurntProof> = vec![]; @@ -921,7 +938,43 @@ Ok(()) } + pub async fn refresh_network_id(&mut self) -> Result<(), UiError> { + let wallet_id = WalletIdentity::new(self.wallet.comms.node_identity(), self.wallet.network.as_network()); + let eid = wallet_id.address.to_emoji_string(); + let qr_link = format!( + "tari://{}/transactions/send?tariAddress={}", + wallet_id.network, + wallet_id.address.to_hex() + ); + let code = QrCode::new(qr_link).unwrap(); + let image = code + .render::<unicode::Dense1x2>() + .dark_color(unicode::Dense1x2::Dark) + .light_color(unicode::Dense1x2::Light) + .build() + .lines() + .skip(1) + .fold("".to_string(), |acc, l| format!("{}{}\n", acc, l)); + let identity = MyIdentity { + tari_address: wallet_id.address.to_hex(), + network_address: wallet_id + .node_identity + .public_addresses() + .iter() + .map(|a| a.to_string()) + .collect::<Vec<String>>() + .join(", "), + emoji_id: eid, + qr_code: image, + node_id: wallet_id.node_identity.node_id().to_string(), + }; + self.data.my_identity = identity; + self.updated = true; + Ok(()) + } + pub async 
fn refresh_connected_peers_state(&mut self) -> Result<(), UiError> { + self.refresh_network_id().await?; let connections = self.wallet.comms.connectivity().get_active_connections().await?; let peer_manager = self.wallet.comms.peer_manager(); let mut peers = Vec::with_capacity(connections.len()); @@ -997,7 +1050,7 @@ impl AppStateInner { self.wallet .set_base_node_peer( peer.public_key.clone(), - peer.addresses.best().ok_or(UiError::NoAddress)?.address().clone(), + Some(peer.addresses.best().ok_or(UiError::NoAddress)?.address().clone()), ) .await?; @@ -1022,7 +1075,7 @@ impl AppStateInner { self.wallet .set_base_node_peer( peer.public_key.clone(), - peer.addresses.best().ok_or(UiError::NoAddress)?.address().clone(), + Some(peer.addresses.best().ok_or(UiError::NoAddress)?.address().clone()), ) .await?; @@ -1060,7 +1113,7 @@ impl AppStateInner { self.wallet .set_base_node_peer( previous.public_key.clone(), - previous.addresses.best().ok_or(UiError::NoAddress)?.address().clone(), + Some(previous.addresses.best().ok_or(UiError::NoAddress)?.address().clone()), ) .await?; @@ -1334,3 +1387,9 @@ impl Default for AppStateConfig { } } } + +#[derive(Clone, PartialEq)] +pub enum TransactionFilter { + None, + AbandonedCoinbases, +} diff --git a/applications/minotari_console_wallet/src/ui/state/debouncer.rs b/applications/minotari_console_wallet/src/ui/state/debouncer.rs index 7f3a87ab01..a3b177aba4 100644 --- a/applications/minotari_console_wallet/src/ui/state/debouncer.rs +++ b/applications/minotari_console_wallet/src/ui/state/debouncer.rs @@ -72,8 +72,9 @@ impl BalanceEnquiryDebouncer { if let Ok(balance) = self.output_manager_service.get_balance().await { trace!( target: LOG_TARGET, - "Initial balance: available {}, incoming {}, outgoing {}", + "Initial balance: available {}, time-locked {}, incoming {}, outgoing {}", balance.available_balance, + balance.time_locked_balance.unwrap_or(0.into()), balance.pending_incoming_balance, balance.pending_outgoing_balance ); diff --git a/applications/minotari_console_wallet/src/ui/state/tasks.rs b/applications/minotari_console_wallet/src/ui/state/tasks.rs index 7401187bb4..7019e8d377 100644 --- a/applications/minotari_console_wallet/src/ui/state/tasks.rs +++ b/applications/minotari_console_wallet/src/ui/state/tasks.rs @@ -40,10 +40,10 @@ use tari_core::{ transactions::{ tari_amount::MicroMinotari, transaction_components::{BuildInfo, OutputFeatures, TemplateType}, - TransactionHashDomain, }, }; use tari_crypto::{keys::PublicKey as PublicKeyTrait, ristretto::RistrettoPublicKey}; +use tari_hashing::TransactionHashDomain; use tari_key_manager::key_manager::KeyManager; use tari_utilities::{hex::Hex, ByteArray}; use tokio::sync::{broadcast, watch}; @@ -257,18 +257,19 @@ pub async fn send_burn_transaction_task( // burning minotari // ---------------------------------------------------------------------------- - let (burn_tx_id, original_proof) = transaction_service_handle + let (burn_tx_id, original_proof) = match transaction_service_handle .burn_tari(amount, selection_criteria, fee_per_gram, message, claim_public_key) .await - .map_err(|err| { - log::error!("failed to burn minotari: {:?}", err); - + { + Ok((burn_tx_id, original_proof)) => (burn_tx_id, original_proof), + Err(e) => { + error!(target: LOG_TARGET, "failed to burn minotari: {:?}", e); result_tx - .send(UiTransactionBurnStatus::Error(UiError::from(err).to_string())) + .send(UiTransactionBurnStatus::Error(format!("burn error: {}", e))) .unwrap(); - }) - .unwrap(); - + return; + }, + }; // 
---------------------------------------------------------------------------- // starting a feedback loop to wait for the answer from the transaction service // ---------------------------------------------------------------------------- @@ -292,14 +293,38 @@ range_proof: original_proof.range_proof.0, }; - let serialized_proof = - serde_json::to_string_pretty(&wrapped_proof).expect("failed to serialize burn proof"); + let serialized_proof = match serde_json::to_string_pretty(&wrapped_proof) { + Ok(proof) => proof, + Err(e) => { + error!(target: LOG_TARGET, "failed to serialize burn proof: {:?}", e); + result_tx + .send(UiTransactionBurnStatus::Error(format!( + "failure to create proof {:?}", + e + ))) + .unwrap(); + return; + }, + }; let proof_id = random::<u32>(); + let filepath = burn_proof_filepath.unwrap_or_else(|| PathBuf::from(format!("{}.json", proof_id))); - std::fs::write(filepath, serialized_proof.as_bytes()).expect("failed to save burn proof"); + match std::fs::write(filepath, serialized_proof.as_bytes()) { + Ok(()) => {}, + Err(e) => { + error!(target: LOG_TARGET, "failed to write burn proof: {:?}", e); + result_tx + .send(UiTransactionBurnStatus::Error(format!( + "failure to write proof {:?}", + e + ))) + .unwrap(); + return; + }, + }; let result = db.create_burnt_proof( proof_id, diff --git a/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs b/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs index da5a085e23..768d9c3b17 100644 --- a/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs +++ b/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs @@ -74,13 +74,6 @@ impl WalletEventMonitor { let mut base_node_changed = wallet_connectivity.get_current_base_node_watcher(); let mut base_node_events = self.app_state_inner.read().await.get_base_node_event_stream(); - // let mut software_update_notif = self - // .app_state_inner - // .read() - // .await - // .get_software_updater() - // .new_update_notifier() - // .clone(); let mut contacts_liveness_events = self.app_state_inner.read().await.get_contacts_liveness_event_stream(); diff --git a/applications/minotari_console_wallet/src/ui/widgets/list_state.rs b/applications/minotari_console_wallet/src/ui/widgets/list_state.rs index ba93429534..812f7fb658 100644 --- a/applications/minotari_console_wallet/src/ui/widgets/list_state.rs +++ b/applications/minotari_console_wallet/src/ui/widgets/list_state.rs @@ -191,7 +191,7 @@ mod test { #[test] fn test_list_offset_update() { - let slist = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let slist = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let mut list_state = WindowedListState::new(); list_state.set_num_items(slist.len()); let height = 4; diff --git a/applications/minotari_console_wallet/src/wallet_modes.rs b/applications/minotari_console_wallet/src/wallet_modes.rs index a518eff7f5..70fe202652 100644 --- a/applications/minotari_console_wallet/src/wallet_modes.rs +++ b/applications/minotari_console_wallet/src/wallet_modes.rs @@ -475,6 +475,7 @@ async fn run_grpc( #[cfg(test)] mod test { + use std::path::Path; use crate::{cli::CliCommands, wallet_modes::parse_command_file}; @@ -499,6 +500,10 @@ --start-time now --message Stressing_it_a_bit...!_(from_Feeling-a-bit-Generous) \ 5c4f2a4b3f3f84e047333218a84fd24f581a9d7e4f23b78e3714e9d174427d615e + export-tx 123456789 --output-file pie.txt + + import-tx --input-file pie_this_message.txt + # End of script file " .to_string(); @@ 
-511,6 +516,8 @@ mod test { let mut make_it_rain = false; let mut coin_split = false; let mut discover_peer = false; + let mut export_tx = false; + let mut import_tx = false; let mut whois = false; for command in commands { match command { @@ -524,6 +531,16 @@ mod test { CliCommands::DiscoverPeer(_) => discover_peer = true, CliCommands::Whois(_) => whois = true, CliCommands::ExportUtxos(_) => {}, + CliCommands::ExportTx(args) => { + if args.tx_id == 123456789 && args.output_file == Some("pie.txt".into()) { + export_tx = true + } + }, + CliCommands::ImportTx(args) => { + if args.input_file == Path::new("pie_this_message.txt") { + import_tx = true + } + }, CliCommands::ExportSpentUtxos(_) => {}, CliCommands::CountUtxos => {}, CliCommands::SetBaseNode(_) => {}, @@ -537,6 +554,16 @@ mod test { CliCommands::CreateTlsCerts => {}, } } - assert!(get_balance && send_tari && burn_tari && make_it_rain && coin_split && discover_peer && whois); + assert!( + get_balance && + send_tari && + burn_tari && + make_it_rain && + coin_split && + discover_peer && + whois && + export_tx && + import_tx + ); } } diff --git a/applications/minotari_ledger_wallet/Cargo.lock b/applications/minotari_ledger_wallet/Cargo.lock new file mode 100644 index 0000000000..17fa7e0271 --- /dev/null +++ b/applications/minotari_ledger_wallet/Cargo.lock @@ -0,0 +1,467 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "borsh" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e6cb63579996213e822f6d828b0a47e1d23b1e8708f52d18a6b1af5670dd207" +dependencies = [ + "cfg_aliases", +] + +[[package]] +name = "cc" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + +[[package]] +name = "color_quant" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" + +[[package]] +name = "cpufeatures" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +dependencies = [ + "libc", +] + +[[package]] +name = "critical-section" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "embedded-alloc" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8931e47e33c5d3194fbcf9cc82df0919193bd2fa40008f388eb1d28fd9c9ea6b" +dependencies = [ + "critical-section", + "linked_list_allocator", +] + +[[package]] +name = "fiat-crypto" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "gif" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" +dependencies = [ + "color_quant", + "weezl", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "include_gif" +version = "0.1.0" +source = "git+https://github.com/LedgerHQ/sdk_include_gif#699d28c6157518c4493899e2eeaa8af08346e5e7" +dependencies = [ + "gif", + "syn 1.0.109", +] + +[[package]] +name = "keccak" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "libc" +version = "0.2.147" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" + +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" + +[[package]] +name = "log" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" + +[[package]] +name = 
"minotari_ledger_wallet" +version = "0.52.0-pre.0" +dependencies = [ + "blake2", + "borsh", + "critical-section", + "digest", + "embedded-alloc", + "nanos_sdk", + "nanos_ui", + "tari_crypto", +] + +[[package]] +name = "nanos_sdk" +version = "0.2.1" +source = "git+https://github.com/LedgerHQ/ledger-nanos-sdk.git#4d9bfc6183d94cee6edb239c39286be3825cc179" +dependencies = [ + "cc", + "num-traits", + "rand_core", +] + +[[package]] +name = "nanos_ui" +version = "0.2.0" +source = "git+https://github.com/LedgerHQ/ledger-nanos-ui.git?rev=6a7c4a3eb41ee0b09c8fd4dcc5be4f3a1f5d7b45#6a7c4a3eb41ee0b09c8fd4dcc5be4f3a1f5d7b45" +dependencies = [ + "include_gif", + "nanos_sdk", +] + +[[package]] +name = "num-traits" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +dependencies = [ + "atomic-polyfill", + "critical-section", +] + +[[package]] +name = "platforms" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "semver" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "snafu" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" +dependencies = [ + "doc-comment", + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tari-curve25519-dalek" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b8e2644aae57a832e475ebc31199ab1114ebd7fe4d2621e67e89bdd9c8ac38" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "fiat-crypto", + "platforms", + "rand_core", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "tari_crypto" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc09581fc1a9709e54be25e0a50437dc405370b3f5795ee65dc913f4f7e726e5" +dependencies = [ + "blake2", + "digest", + "log", + "once_cell", + "rand_chacha", + "rand_core", + "sha3", + "snafu", + "tari-curve25519-dalek", + "tari_utilities", + "zeroize", +] + +[[package]] +name = "tari_utilities" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "367d17d09cf48e4cf45222fd48536e206f8ef3aaa5eed449c7df38d2ab4586c6" +dependencies = [ + "generic-array", + "snafu", + "zeroize", +] + +[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "unicode-ident" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "weezl" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" + +[[package]] +name = "zeroize" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] diff --git a/applications/minotari_ledger_wallet/Cargo.toml b/applications/minotari_ledger_wallet/Cargo.toml new file mode 100644 index 0000000000..ff78777fae --- /dev/null +++ b/applications/minotari_ledger_wallet/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "minotari_ledger_wallet" +version = "0.52.0-pre.0" +authors = ["The Tari Development 
Community"] +license = "BSD-3-Clause" +edition = "2021" + + +[dependencies] +# lock to rev as soon as this is fixed: https://github.com/rust-lang/rust/issues/98666 +nanos_sdk = { git = "https://github.com/LedgerHQ/ledger-nanos-sdk.git" } +nanos_ui = { git = "https://github.com/LedgerHQ/ledger-nanos-ui.git", rev = "6a7c4a3eb41ee0b09c8fd4dcc5be4f3a1f5d7b45" } + +tari_crypto = { version = "0.18", default-features = false } + +embedded-alloc = "0.5.0" +critical-section = { version = "1.1.1" } +digest = { version = "0.10", default-features = false } +borsh = { version = "1.0", default-features = false } +blake2 = { version = "0.10", default-features = false } + +[profile.release] +opt-level = 's' +lto = "fat" # same as `true` +panic = "abort" + +[package.metadata.nanos] +name = "MinoTari Wallet" +curve = ["secp256k1", "ed25519"] +flags = "0" +icon = "key_16x16.gif" +icon_small = "key_14x14.gif" +path = ["44'/1022'","m/5261654'","m/44'"] +api_level = "1" + +[workspace] diff --git a/applications/minotari_ledger_wallet/README.md b/applications/minotari_ledger_wallet/README.md new file mode 100644 index 0000000000..3d03356929 --- /dev/null +++ b/applications/minotari_ledger_wallet/README.md @@ -0,0 +1,117 @@ +# Instructions + +## Setup + +Ledger applications do not build with the standard library, so we need to install `rust-src`. This can be done with: +``` +rustup component add rust-src --toolchain nightly +``` + +For loading a BOLOS application onto a Ledger device, Ledger provides a dedicated command, +[Cargo Ledger](https://github.com/LedgerHQ/cargo-ledger). Install it with: +``` +cargo install --git https://github.com/LedgerHQ/cargo-ledger +``` + +As per the [Cargo Ledger setup instructions](https://github.com/LedgerHQ/cargo-ledger#setup) run the following to add +new build targets for the current rust toolchain: + +``` +cargo ledger setup +``` + +Next, we need to install [LedgerCTL](https://github.com/LedgerHQ/ledgerctl), the supporting Python library from Ledger for controlling Ledger devices: +``` +pip3 install --upgrade protobuf setuptools ecdsa +pip3 install git+https://github.com/LedgerHQ/ledgerctl +``` + +Lastly, install the ARM GCC toolchain `arm-none-eabi-gcc` for your OS (https://developer.arm.com/downloads/-/gnu-rm). +For macOS, we can use brew: +``` +brew install armmbed/formulae/arm-none-eabi-gcc +``` + +## Device configuration + +See https://github.com/LedgerHQ/ledgerctl#device-configuration + +Install a custom certificate on the device to help with development. Start the device in recovery mode (varies per device): +- Nano S Plus: Hold the left button while turning on, and follow on screen instructions +- Nano S: Hold the right button while turning on + +Once in recovery mode, run the following, where `<CA_NAME>` is simply the name of the CA. It can be anything: + +``` +ledgerctl install-ca <CA_NAME> +``` + +## Runtime + +Open a terminal in the subfolder `./applications/minotari_ledger_wallet` + +_**Note:** Windows users should start an "x64 Native Tools Command Prompt for VS 2019" to have the build tools available and then start a python shell within that terminal to have the Python libraries available._ + +### Build `ledger` + +To build, run + +``` +cargo ledger build {TARGET} -- "-Zbuild-std=std,alloc" +``` + +where TARGET = nanosplus, nanos, etc. + +### Build and install `ledger` + +This must be run from a shell where the Ledger Python tools are available (`pip3 --version` should work). To build and load, run + +``` +cargo ledger build {TARGET} --load -- "-Zbuild-std=std,alloc" +``` +where TARGET = nanosplus, nanos, etc.
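Before debugging load failures, it can help to confirm from the host side that the device is reachable over HID at all. The console wallet's own detection (see `prompt_wallet_type` earlier in this diff) reduces to two calls from the `ledger_transport_hid` crate; the standalone sketch below mirrors those same calls and is only an illustration for a scratch binary, not part of this PR:

```rust
// Minimal HID probe, mirroring the calls prompt_wallet_type makes when the
// `ledger` feature is enabled. Returns true if a Ledger device is attached.
use ledger_transport_hid::{hidapi::HidApi, TransportNativeHID};

fn ledger_is_connected() -> bool {
    match HidApi::new() {
        // TransportNativeHID::new only succeeds when a Ledger device answers.
        Ok(api) => TransportNativeHID::new(&api).is_ok(),
        Err(_) => false,
    }
}
```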
+ +**Errors** + +If the auto-load does not work ("ledgerwallet.client.CommException: Exception : Invalid status 6512 (Unknown reason)"), +try to do a manual installation. + +### Manual installation + +- First delete the application if it was already installed + +``` +ledgerctl delete "MinoTari Wallet" +``` + +- Install with + +``` +ledgerctl install app_nanosplus.json +``` +**Note:** In some cases the `cargo ledger build` action will invalidate `app_nanosplus.json` by setting the first line +to `"apiLevel": "0",` - ensure it is set to `"apiLevel": "1",` + +### Running the ledger application + +Start the `MinoTari Wallet` application on the Ledger by navigating to the app and pressing both buttons. You should +see `MinoTari Wallet` displayed on the screen. Now your device is ready to be used with the console wallet. + +_**Note:** To manually exit the application, press both buttons on the Ledger._ + +**Errors** + +- If the `MinoTari Wallet` application on the Ledger is not started when trying to access it with a desktop + application, you should see the following error on the desktop: + + `Error: Ledger application not started` + +- If the wrong application is started on the Ledger, you should see the following error on the desktop: + + `Error: Processing error 'Ledger application is not the MinoTari Wallet application: expected ...'` + +- If the `MinoTari Wallet` application has an incorrect version, you should see the following error on the desktop: + + `Error: Processing error 'MinoTari Wallet application version mismatch: expected ...'` diff --git a/applications/minotari_ledger_wallet/app_nanosplus.json b/applications/minotari_ledger_wallet/app_nanosplus.json new file mode 100644 index 0000000000..e1545ac66b --- /dev/null +++ b/applications/minotari_ledger_wallet/app_nanosplus.json @@ -0,0 +1,21 @@ +{ + "apiLevel": "1", + "binary": "target/nanosplus/release/app.hex", + "dataSize": 0, + "derivationPath": { + "curves": [ + "secp256k1", + "ed25519" + ], + "paths": [ + "44'/1022'", + "m/5261654'", + "m/44'" + ] + }, + "flags": "0", + "icon": "key_14x14.gif", + "name": "MinoTari Wallet", + "targetId": "0x33100004", + "version": "0.52.0-pre.0" +} \ No newline at end of file diff --git a/applications/minotari_ledger_wallet/key_14x14.gif b/applications/minotari_ledger_wallet/key_14x14.gif new file mode 100644 index 0000000000..2bab27bade Binary files /dev/null and b/applications/minotari_ledger_wallet/key_14x14.gif differ diff --git a/applications/minotari_ledger_wallet/key_16x16.gif b/applications/minotari_ledger_wallet/key_16x16.gif new file mode 100644 index 0000000000..55e198b16c Binary files /dev/null and b/applications/minotari_ledger_wallet/key_16x16.gif differ diff --git a/applications/minotari_ledger_wallet/rust-toolchain.toml b/applications/minotari_ledger_wallet/rust-toolchain.toml new file mode 100644 index 0000000000..84247f8a05 --- /dev/null +++ b/applications/minotari_ledger_wallet/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "nightly-2024-02-04" \ No newline at end of file diff --git a/applications/minotari_ledger_wallet/rustfmt.toml b/applications/minotari_ledger_wallet/rustfmt.toml new file mode 100644 index 0000000000..13868eb0c1 --- /dev/null +++ b/applications/minotari_ledger_wallet/rustfmt.toml @@ -0,0 +1,27 @@ +binop_separator = "Back" +use_small_heuristics = "default" +comment_width = 120 +edition = "2018" +format_code_in_doc_comments = true +format_strings = true +group_imports = "StdExternalCrate" +hard_tabs = false +imports_layout = "HorizontalVertical" +imports_granularity = "Crate" +match_block_trailing_comma = true +max_width = 120 +newline_style = "Auto" +normalize_comments = true +overflow_delimited_expr = true +reorder_imports = true +reorder_modules = true +reorder_impl_items = true +space_after_colon = true +space_before_colon = false +struct_lit_single_line = true +use_field_init_shorthand = true +use_try_shorthand = true +unstable_features = true +where_single_line = true +wrap_comments = true +ignore = [] diff --git a/applications/minotari_ledger_wallet/src/hashing.rs b/applications/minotari_ledger_wallet/src/hashing.rs new file mode 100644 index 0000000000..9528a300cd --- /dev/null +++ b/applications/minotari_ledger_wallet/src/hashing.rs @@ -0,0 +1,75 @@ +// Copyright 2022 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +//! # MinoTari Ledger Wallet - Hashing + +use core::marker::PhantomData; + +use blake2::Blake2b; +use borsh::{ + maybestd::io::{Result as BorshResult, Write}, + BorshSerialize, +}; +use digest::{consts::U64, Digest}; +use tari_crypto::hashing::DomainSeparation; + +/// Domain separated consensus hasher +pub struct DomainSeparatedConsensusHasher<M>(PhantomData<M>); + +impl<M: DomainSeparation> DomainSeparatedConsensusHasher<M> { + /// Create a new hasher with the given label + pub fn new(label: &'static str) -> ConsensusHasher<Blake2b<U64>> { + let mut digest = Blake2b::<U64>::new(); + M::add_domain_separation_tag(&mut digest, label); + ConsensusHasher::from_digest(digest) + } +} + +/// Consensus hasher +#[derive(Clone)] +pub struct ConsensusHasher<D> { + writer: WriteHashWrapper<D>, } + +impl<D: Digest> ConsensusHasher<D> { + fn from_digest(digest: D) -> Self { + Self { + writer: WriteHashWrapper(digest), + } + } } + +impl<D> ConsensusHasher<D> where D: Digest<OutputSize = U64> { + /// Finalize the hasher and return the hash + pub fn finalize(self) -> [u8; 64] { + self.writer.0.finalize().into() + } + + /// Update the hasher with the given data + pub fn update_consensus_encode<T: BorshSerialize>(&mut self, data: &T) { + BorshSerialize::serialize(data, &mut self.writer) + .expect("Incorrect implementation of BorshSerialize encountered. Implementations MUST be infallible."); + } + + /// Update the hasher with the given data and return the updated hasher for chaining + pub fn chain<T: BorshSerialize>(mut self, data: &T) -> Self { + self.update_consensus_encode(data); + self + } } + +#[derive(Clone)] +struct WriteHashWrapper<D>(D); + +impl<D: Digest> Write for WriteHashWrapper<D> { + fn write(&mut self, buf: &[u8]) -> BorshResult<usize> { + self.0.update(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> BorshResult<()> { + Ok(()) + } }
diff --git a/applications/minotari_ledger_wallet/src/main.rs b/applications/minotari_ledger_wallet/src/main.rs new file mode 100644 index 0000000000..48e2d0e8a9 --- /dev/null +++ b/applications/minotari_ledger_wallet/src/main.rs @@ -0,0 +1,189 @@ +// Copyright 2022 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +//! # MinoTari Ledger Wallet + +#![no_std] +#![no_main] +#![feature(alloc_error_handler)] + +extern crate alloc; +use core::{cmp::min, mem::MaybeUninit}; + +use critical_section::RawRestoreState; +use nanos_sdk::{ + buttons::ButtonEvent, + io, + io::{ApduHeader, Reply, StatusWords, SyscallError}, +}; +use nanos_ui::ui; +use tari_crypto::{ristretto::RistrettoSecretKey, tari_utilities::ByteArray}; + +use crate::{ + alloc::string::ToString, + utils::{byte_to_hex, get_raw_key, u64_to_string}, +}; + +static MINOTARI_LEDGER_ID: u32 = 535348; +static MINOTARI_ACCOUNT_ID: u32 = 7041; + +pub mod hashing; +pub mod utils; + +nanos_sdk::set_panic!(nanos_sdk::exiting_panic); + +/// Allocator heap size +const HEAP_SIZE: usize = 1024 * 26; + +/// Statically allocated heap memory +static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE]; + +/// Bind global allocator +#[global_allocator] +static HEAP: embedded_alloc::Heap = embedded_alloc::Heap::empty(); + +/// Error handler for allocation +#[alloc_error_handler] +fn alloc_error(_: core::alloc::Layout) -> ! { + ui::SingleMessage::new("allocation error!").show_and_wait(); + nanos_sdk::exit_app(250) +} + +/// Initialise allocator +pub fn init() { + unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) } +} + +struct MyCriticalSection; +critical_section::set_impl!(MyCriticalSection); + +unsafe impl critical_section::Impl for MyCriticalSection { + unsafe fn acquire() -> RawRestoreState { + // nothing, it's all good, don't worry bout it + } + + unsafe fn release(_token: RawRestoreState) { + // nothing, it's all good, don't worry bout it + } } + +/// App Version parameters +const NAME: &str = env!("CARGO_PKG_NAME"); +const VERSION: &str = env!("CARGO_PKG_VERSION"); + +enum Instruction { + GetVersion, + GetPrivateKey, + BadInstruction(u8), + Exit, } + +impl From<io::ApduHeader> for Instruction { + fn from(header: io::ApduHeader) -> Instruction { + match header.ins { + 0x01 => Self::GetVersion, + 0x02 => Self::GetPrivateKey, + 0x03 => Self::Exit, + other => Self::BadInstruction(other), + } + } } + +#[no_mangle] +extern "C" fn sample_main() { + let mut comm = io::Comm::new(); + init(); + let messages = alloc::vec!["MinoTari Wallet", "keep the app open..", "[exit = both buttons]"]; + let mut index = 0; + ui::SingleMessage::new(messages[index]).show(); + loop { + let event = comm.next_event::<ApduHeader>(); + match event { + io::Event::Button(ButtonEvent::BothButtonsRelease) => nanos_sdk::exit_app(0), + io::Event::Button(ButtonEvent::RightButtonRelease) => { + index = min(index + 1, messages.len() - 1); + ui::SingleMessage::new(messages[index]).show() + }, + io::Event::Button(ButtonEvent::LeftButtonRelease) => { + if index > 0 { + index -= 1; + } + ui::SingleMessage::new(messages[index]).show() + }, + io::Event::Button(_) => {}, + io::Event::Command(apdu_header) => match handle_apdu(&mut comm, apdu_header.into()) { + Ok(()) => comm.reply_ok(), + Err(e) => comm.reply(e), + }, + io::Event::Ticker => {}, + } + } } + +// Perform ledger instructions +fn handle_apdu(comm: &mut io::Comm, instruction: Instruction) -> Result<(), Reply> { + if comm.rx == 0 { + return Err(io::StatusWords::NothingReceived.into()); + } + + match instruction { + Instruction::GetVersion => { + ui::SingleMessage::new("GetVersion...").show(); + let name_bytes = NAME.as_bytes(); + let version_bytes = VERSION.as_bytes(); + comm.append(&[1]); // Format + comm.append(&[name_bytes.len() as u8]); + comm.append(name_bytes); + comm.append(&[version_bytes.len() as u8]); + comm.append(version_bytes); + 
comm.append(&[0]); // No flags + ui::SingleMessage::new("GetVersion... Done").show(); + comm.reply_ok(); + }, + Instruction::GetPrivateKey => { + // first 5 bytes are instruction details + let offset = 5; + let mut address_index_bytes = [0u8; 8]; + address_index_bytes.clone_from_slice(comm.get(offset, offset + 8)); + let address_index = crate::u64_to_string(u64::from_le_bytes(address_index_bytes)); + + let mut msg = "GetPrivateKey... ".to_string(); + msg.push_str(&address_index); + ui::SingleMessage::new(&msg).show(); + + let mut bip32_path = "m/44'/".to_string(); + bip32_path.push_str(&MINOTARI_LEDGER_ID.to_string()); + bip32_path.push_str(&"'/"); + bip32_path.push_str(&MINOTARI_ACCOUNT_ID.to_string()); + bip32_path.push_str(&"'/0/"); + bip32_path.push_str(&address_index); + let path: [u32; 5] = nanos_sdk::ecc::make_bip32_path(bip32_path.as_bytes()); + + let raw_key = get_raw_key(&path)?; + + let k = match RistrettoSecretKey::from_bytes(&raw_key) { + Ok(val) => val, + Err(_) => { + ui::SingleMessage::new("Err: key conversion").show(); + return Err(SyscallError::InvalidParameter.into()); + }, + }; + comm.append(&[1]); // version + comm.append(k.as_bytes()); + comm.reply_ok(); + }, + Instruction::BadInstruction(val) => { + let mut error = "BadInstruction... ! (".to_string(); + error.push_str(&crate::byte_to_hex(val)); + error.push_str(&")"); + ui::SingleMessage::new(&error).show(); + return Err(StatusWords::BadIns.into()); + }, + Instruction::Exit => { + ui::SingleMessage::new("Exit...").show(); + comm.reply_ok(); + nanos_sdk::exit_app(0) + }, + } + Ok(()) +} diff --git a/applications/minotari_ledger_wallet/src/utils.rs b/applications/minotari_ledger_wallet/src/utils.rs new file mode 100644 index 0000000000..e7c609f46a --- /dev/null +++ b/applications/minotari_ledger_wallet/src/utils.rs @@ -0,0 +1,123 @@ +// Copyright 2022 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +//! 
# MinoTari Ledger Wallet - Utils + +use nanos_sdk::{ + ecc::{bip32_derive, CurvesId, CxError, Secret}, + io::SyscallError, +}; +use nanos_ui::ui; +use tari_crypto::hash_domain; + +use crate::{ + alloc::string::{String, ToString}, + hashing::DomainSeparatedConsensusHasher, +}; + +hash_domain!(LedgerHashDomain, "com.tari.genesis_tools.applications.mp_ldeger", 0); + +/// Convert a u64 to a string without using the standard library +pub fn u64_to_string(number: u64) -> String { + let mut buffer = [0u8; 20]; // 20 bytes covers the maximum number of decimal digits in a 64-bit integer + let mut pos = 0; + + if number == 0 { + buffer[pos] = b'0'; + pos += 1; + } else { + let mut num = number; + + let mut digits = [0u8; 20]; + let mut num_digits = 0; + + while num > 0 { + digits[num_digits] = b'0' + (num % 10) as u8; + num /= 10; + num_digits += 1; + } + + while num_digits > 0 { + num_digits -= 1; + buffer[pos] = digits[num_digits]; + pos += 1; + } + } + + String::from_utf8_lossy(&buffer[..pos]).to_string() } + +/// Convert a single byte to a hex string +pub fn byte_to_hex(byte: u8) -> String { + const HEX_CHARS: [u8; 16] = *b"0123456789abcdef"; + let hex = [HEX_CHARS[(byte >> 4) as usize], HEX_CHARS[(byte & 0x0F) as usize]]; + String::from_utf8_lossy(&hex).to_string() } + +// Convert CxError to a string for display +fn cx_error_to_string(e: CxError) -> String { + let err = match e { + CxError::Carry => "Carry", + CxError::Locked => "Locked", + CxError::Unlocked => "Unlocked", + CxError::NotLocked => "NotLocked", + CxError::NotUnlocked => "NotUnlocked", + CxError::InternalError => "InternalError", + CxError::InvalidParameterSize => "InvalidParameterSize", + CxError::InvalidParameterValue => "InvalidParameterValue", + CxError::InvalidParameter => "InvalidParameter", + CxError::NotInvertible => "NotInvertible", + CxError::Overflow => "Overflow", + CxError::MemoryFull => "MemoryFull", + CxError::NoResidue => "NoResidue", + CxError::PointAtInfinity => "PointAtInfinity", + CxError::InvalidPoint => "InvalidPoint", + CxError::InvalidCurve => "InvalidCurve", + CxError::GenericError => "GenericError", + }; + err.to_string() } + +// Get a raw 64 byte key hash from the BIP32 path. +// - The wrapper function for the syscall `os_perso_derive_node_bip32`, `bip32_derive`, requires a 96 byte buffer when +// called with `CurvesId::Ed25519` as it checks the consistency of the curve choice and key length in order to prevent +// the underlying syscall from panicking. +// - The syscall `os_perso_derive_node_bip32` returns 96 bytes as: +// private key: 64 bytes +// chain: 32 bytes +// Example: +// d8a57c1be0c52e9643485e77aac56d72fa6c4eb831466c2abd2d320c82d3d14929811c598c13d431bad433e037dbd97265492cea42bc2e3aad15440210a20a2d0000000000000000000000000000000000000000000000000000000000000000 +// - This function applies domain separated hashing to the 64 byte private key of the returned buffer to get 64 +// uniformly distributed random bytes. +fn get_raw_key_hash(path: &[u32]) -> Result<[u8; 64], String> { + let mut key = Secret::<96>::new(); + let raw_key_64 = match bip32_derive(CurvesId::Ed25519, path, key.as_mut()) { + Ok(_) => { + let binding = &key.as_ref()[..64]; + let raw_key_64: [u8; 64] = match binding.try_into() { + Ok(v) => v, + Err(_) => return Err("Err: get_raw_key".to_string()), + }; + raw_key_64 + }, + Err(e) => return Err(cx_error_to_string(e)), + }; + + Ok(DomainSeparatedConsensusHasher::<LedgerHashDomain>::new("raw_key") + .chain(&raw_key_64) + .finalize()) } + +/// Get a raw 64 byte key hash from the BIP32 path. 
In case of an error, display an interactive message on the device. +pub fn get_raw_key(path: &[u32]) -> Result<[u8; 64], SyscallError> { + match get_raw_key_hash(&path) { + Ok(val) => Ok(val), + Err(e) => { + let mut msg = "".to_string(); + msg.push_str("Err: raw key >>..."); + ui::SingleMessage::new(&msg).show_and_wait(); + ui::SingleMessage::new(&e).show(); + Err(SyscallError::InvalidParameter.into()) + }, + } +} diff --git a/applications/minotari_merge_mining_proxy/Cargo.toml b/applications/minotari_merge_mining_proxy/Cargo.toml index e9bb9b6882..8440579378 100644 --- a/applications/minotari_merge_mining_proxy/Cargo.toml +++ b/applications/minotari_merge_mining_proxy/Cargo.toml @@ -4,46 +4,46 @@ authors = ["The Tari Development Community"] description = "The Tari merge mining proxy for xmrig" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [features] default = [] [dependencies] +minotari_app_grpc = { path = "../minotari_app_grpc" } +minotari_app_utilities = { path = "../minotari_app_utilities", features = ["miner_input"] } +minotari_node_grpc_client = { path = "../../clients/rust/base_node_grpc_client" } +minotari_wallet_grpc_client = { path = "../../clients/rust/wallet_grpc_client" } tari_common = { path = "../../common" } tari_common_types = { path = "../../base_layer/common_types" } tari_comms = { path = "../../comms/core" } tari_core = { path = "../../base_layer/core", default-features = false, features = ["transactions"] } -minotari_app_utilities = { path = "../minotari_app_utilities", features = ["miner_input"] } -tari_utilities = { version = "0.7" } -minotari_node_grpc_client = { path = "../../clients/rust/base_node_grpc_client" } -minotari_wallet_grpc_client = { path = "../../clients/rust/wallet_grpc_client" } -minotari_app_grpc = { path = "../minotari_app_grpc" } tari_key_manager = { path = "../../base_layer/key_manager", features = ["key_manager_service"] } +tari_utilities = { version = "0.7" } anyhow = "1.0.53" -crossterm = { version = "0.25.0" } bincode = "1.3.1" borsh = "1.2" bytes = "1.1" -chrono = { version = "0.4.6", default-features = false } +chrono = { version = "0.4.19", default-features = false } clap = { version = "3.2", features = ["derive", "env"] } -config = { version = "0.13.0" } -futures = "0.3.5" +config = { version = "0.14.0" } +crossterm = { version = "0.25.0" } +futures = { version = "^0.3.16", features = ["async-await"] } hex = "0.4.2" hyper = "0.14.12" jsonrpc = "0.12.0" log = { version = "0.4.8", features = ["std"] } -monero = { version = "0.18" } +monero = { version = "0.20.0" } reqwest = { version = "0.11.4", features = ["json"] } -serde = { version = "1.0.106", features = ["derive"] } +serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.57" thiserror = "1.0.26" -tokio = { version = "1.23", features = ["macros"] } +tokio = { version = "1.36", features = ["macros"] } tonic = "0.8.3" tracing = "0.1" url = "2.1.1" [build-dependencies] -tari_features = { path = "../../common/tari_features"} +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a"} diff --git a/applications/minotari_merge_mining_proxy/src/block_template_data.rs b/applications/minotari_merge_mining_proxy/src/block_template_data.rs index d88598dda4..e0c2ecc52d 100644 --- a/applications/minotari_merge_mining_proxy/src/block_template_data.rs +++ b/applications/minotari_merge_mining_proxy/src/block_template_data.rs @@ -22,7 +22,7 @@ //! 
diff --git a/applications/minotari_merge_mining_proxy/src/block_template_data.rs b/applications/minotari_merge_mining_proxy/src/block_template_data.rs
index d88598dda4..e0c2ecc52d 100644
--- a/applications/minotari_merge_mining_proxy/src/block_template_data.rs
+++ b/applications/minotari_merge_mining_proxy/src/block_template_data.rs
@@ -22,7 +22,7 @@ //! Provides methods for building template data and storing them with timestamps.

-use std::{collections::HashMap, sync::Arc};
+use std::{collections::HashMap, convert::TryFrom, sync::Arc};

 #[cfg(not(test))]
 use chrono::Duration;
@@ -33,28 +33,56 @@ use tari_core::proof_of_work::monero_rx::FixedByteArray;
 use tokio::sync::RwLock;
 use tracing::trace;

-use crate::error::MmProxyError;
+use crate::{
+    block_template_protocol::{FinalBlockTemplateData, NewBlockTemplateData},
+    error::MmProxyError,
+};

 const LOG_TARGET: &str = "minotari_mm_proxy::xmrig";

-/// Structure for holding hashmap of hashes -> [BlockTemplateRepositoryItem]
+/// Structure for holding hashmap of hashes -> [BlockRepositoryItem] and [TemplateRepositoryItem].
 #[derive(Debug, Clone)]
 pub struct BlockTemplateRepository {
-    blocks: Arc<RwLock<HashMap<Vec<u8>, BlockTemplateRepositoryItem>>>,
+    blocks: Arc<RwLock<HashMap<Vec<u8>, BlockRepositoryItem>>>,
+    templates: Arc<RwLock<HashMap<Vec<u8>, TemplateRepositoryItem>>>,
 }

-/// Structure holding [BlockTemplateData] along with a timestamp.
+/// Structure holding [NewBlockTemplate] along with a timestamp.
 #[derive(Debug, Clone)]
-pub struct BlockTemplateRepositoryItem {
-    pub data: BlockTemplateData,
+pub struct TemplateRepositoryItem {
+    pub new_block_template: NewBlockTemplateData,
+    pub template_with_coinbase: grpc::NewBlockTemplate,
     datetime: DateTime<Utc>,
 }

-impl BlockTemplateRepositoryItem {
+impl TemplateRepositoryItem {
     /// Create new [Self] with current time in UTC.
-    pub fn new(block_template: BlockTemplateData) -> Self {
+    pub fn new(new_block_template: NewBlockTemplateData, template_with_coinbase: grpc::NewBlockTemplate) -> Self {
         Self {
-            data: block_template,
+            new_block_template,
+            template_with_coinbase,
+            datetime: Utc::now(),
+        }
+    }
+
+    /// Get the timestamp of creation.
+    pub fn datetime(&self) -> DateTime<Utc> {
+        self.datetime
+    }
+}
+
+/// Structure holding [FinalBlockTemplateData] along with a timestamp.
+#[derive(Debug, Clone)]
+pub struct BlockRepositoryItem {
+    pub data: FinalBlockTemplateData,
+    datetime: DateTime<Utc>,
+}
+
+impl BlockRepositoryItem {
+    /// Create new [Self] with current time in UTC.
+    pub fn new(final_block: FinalBlockTemplateData) -> Self {
+        Self {
+            data: final_block,
             datetime: Utc::now(),
         }
     }
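Review note: both repository items above follow the same pattern, a value stored together with its creation time so a periodic sweep can evict entries older than the 20-minute window. A single-map sketch of that pattern, not part of this diff (`TimedStore` is hypothetical; the real repository additionally wraps each map in `Arc<RwLock<...>>` for shared async access):

    use std::collections::HashMap;

    use chrono::{DateTime, Duration, Utc};

    struct TimedStore<V> {
        items: HashMap<Vec<u8>, (V, DateTime<Utc>)>,
    }

    impl<V: Clone> TimedStore<V> {
        fn new() -> Self {
            Self { items: HashMap::new() }
        }

        /// Keep the first value stored under a key, mirroring `save_*_if_key_unique`.
        fn save_if_key_unique(&mut self, key: Vec<u8>, value: V) {
            self.items.entry(key).or_insert_with(|| (value, Utc::now()));
        }

        fn get(&self, key: &[u8]) -> Option<V> {
            self.items.get(key).map(|(v, _)| v.clone())
        }

        /// Evict everything older than `max_age`, mirroring `remove_outdated`.
        fn remove_outdated(&mut self, max_age: Duration) {
            let threshold = Utc::now() - max_age;
            self.items.retain(|_, (_, created)| *created >= threshold);
        }
    }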
@@ -69,53 +97,119 @@ impl BlockTemplateRepository {
     pub fn new() -> Self {
         Self {
             blocks: Arc::new(RwLock::new(HashMap::new())),
+            templates: Arc::new(RwLock::new(HashMap::new())),
         }
     }

     /// Return [BlockTemplateData] with the associated hash. None if the hash is not stored.
-    pub async fn get<T: AsRef<[u8]>>(&self, hash: T) -> Option<BlockTemplateData> {
-        trace!(
-            target: LOG_TARGET,
-            "Retrieving blocktemplate with merge mining hash: {:?}",
-            hex::encode(hash.as_ref())
-        );
+    pub async fn get_final_template<T: AsRef<[u8]>>(&self, merge_mining_hash: T) -> Option<FinalBlockTemplateData> {
         let b = self.blocks.read().await;
-        b.get(hash.as_ref()).map(|item| item.data.clone())
+        b.get(merge_mining_hash.as_ref()).map(|item| {
+            trace!(
+                target: LOG_TARGET,
+                "Retrieving block template at height #{} with merge mining hash: {:?}",
+                item.data.clone().template.new_block_template.header.unwrap_or_default().height,
+                hex::encode(merge_mining_hash.as_ref())
+            );
+            item.data.clone()
+        })
     }

-    /// Store [BlockTemplateData] at the hash value.
-    pub async fn save(&self, hash: Vec<u8>, block_template: BlockTemplateData) {
-        trace!(
-            target: LOG_TARGET,
-            "Saving blocktemplate with merge mining hash: {:?}",
-            hex::encode(&hash)
-        );
+    /// Return the [NewBlockTemplateData] and template with coinbase stored for the given best block hash.
+    /// None if the hash is not stored.
+    pub async fn get_new_template<T: AsRef<[u8]>>(
+        &self,
+        best_block_hash: T,
+    ) -> Option<(NewBlockTemplateData, grpc::NewBlockTemplate)> {
+        let b = self.templates.read().await;
+        b.get(best_block_hash.as_ref())
+            .map(|item| (item.new_block_template.clone(), item.template_with_coinbase.clone()))
+    }
+
+    /// Store [FinalBlockTemplateData] at the hash value if the key does not exist.
+    pub async fn save_final_block_template_if_key_unique(
+        &self,
+        merge_mining_hash: Vec<u8>,
+        block_template: FinalBlockTemplateData,
+    ) {
         let mut b = self.blocks.write().await;
-        let repository_item = BlockTemplateRepositoryItem::new(block_template);
-        b.insert(hash, repository_item);
+        b.entry(merge_mining_hash.clone()).or_insert_with(|| {
+            trace!(
+                target: LOG_TARGET,
+                "Saving final block template with merge mining hash: {:?}",
+                hex::encode(&merge_mining_hash)
+            );
+            BlockRepositoryItem::new(block_template)
+        });
+    }
+
+    /// Store [NewBlockTemplate] at the hash value if the key does not exist.
+    pub async fn save_new_block_template_if_key_unique(
+        &self,
+        best_block_hash: Vec<u8>,
+        new_block_template: NewBlockTemplateData,
+        template_with_coinbase: grpc::NewBlockTemplate,
+    ) {
+        let mut b = self.templates.write().await;
+        b.entry(best_block_hash.clone()).or_insert_with(|| {
+            trace!(
+                target: LOG_TARGET,
+                "Saving new block template for best block hash: {:?}",
+                hex::encode(&best_block_hash)
+            );
+            TemplateRepositoryItem::new(new_block_template, template_with_coinbase)
+        });
+    }
+
+    /// Check whether the repository contains a block template whose previous block hash is `current_best_block_hash`.
+    pub async fn blocks_contains(&self, current_best_block_hash: FixedHash) -> Option<FinalBlockTemplateData> {
+        let b = self.blocks.read().await;
+        b.values()
+            .find(|item| {
+                let header = item.data.template.new_block_template.header.clone().unwrap_or_default();
+                FixedHash::try_from(header.prev_hash).unwrap_or(FixedHash::default()) == current_best_block_hash
+            })
+            .map(|val| val.data.clone())
     }

     /// Remove any data that is older than 20 minutes.
     pub async fn remove_outdated(&self) {
-        trace!(target: LOG_TARGET, "Removing outdated blocktemplates");
+        trace!(target: LOG_TARGET, "Removing outdated final block templates");
         let mut b = self.blocks.write().await;
         #[cfg(test)]
         let threshold = Utc::now();
         #[cfg(not(test))]
         let threshold = Utc::now() - Duration::minutes(20);
         *b = b.drain().filter(|(_, i)| i.datetime() >= threshold).collect();
+        trace!(target: LOG_TARGET, "Removing outdated new block templates");
+        let mut b = self.templates.write().await;
+        #[cfg(test)]
+        let threshold = Utc::now();
+        #[cfg(not(test))]
+        let threshold = Utc::now() - Duration::minutes(20);
+        *b = b.drain().filter(|(_, i)| i.datetime() >= threshold).collect();
     }

-    /// Remove a particular hash and return the associated [BlockTemplateRepositoryItem] if any.
-    pub async fn remove<T: AsRef<[u8]>>(&self, hash: T) -> Option<BlockTemplateRepositoryItem> {
+    /// Remove a particular final block template for the given hash and return the associated [BlockRepositoryItem] if any.
+    pub async fn remove_final_block_template<T: AsRef<[u8]>>(&self, hash: T) -> Option<BlockRepositoryItem> {
         trace!(
             target: LOG_TARGET,
-            "Blocktemplate removed with merge mining hash {:?}",
+            "Final block template removed with merge mining hash {:?}",
             hex::encode(hash.as_ref())
         );
         let mut b = self.blocks.write().await;
         b.remove(hash.as_ref())
     }
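Review note: every accessor in the impl above holds the `tokio` `RwLock` guard only long enough to clone a value in or out, and never across an `.await`, which is what keeps the repository responsive under concurrent template requests. The same discipline in isolation, not part of this diff (`SharedMap` and its `String` payload are placeholders):

    use std::{collections::HashMap, sync::Arc};

    use tokio::sync::RwLock;

    #[derive(Clone)]
    struct SharedMap {
        inner: Arc<RwLock<HashMap<Vec<u8>, String>>>,
    }

    impl SharedMap {
        async fn get(&self, key: &[u8]) -> Option<String> {
            let map = self.inner.read().await; // many concurrent readers are fine
            map.get(key).cloned() // clone out, then the guard drops here
        }

        async fn remove(&self, key: &[u8]) -> Option<String> {
            let mut map = self.inner.write().await; // exclusive access for mutation
            map.remove(key)
        }
    }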
+
+    /// Remove a particular new block template for the given hash and return the associated [TemplateRepositoryItem] if any.
+    pub async fn remove_new_block_template<T: AsRef<[u8]>>(&self, hash: T) -> Option<TemplateRepositoryItem> {
+        trace!(
+            target: LOG_TARGET,
+            "New block template removed with best block hash {:?}",
+            hex::encode(hash.as_ref())
+        );
+        let mut b = self.templates.write().await;
+        b.remove(hash.as_ref())
+    }
 }

 /// Setup values for the new block.
@@ -126,8 +220,9 @@ pub struct BlockTemplateData {
     pub tari_miner_data: grpc::MinerData,
     pub monero_difficulty: u64,
     pub tari_difficulty: u64,
-    pub tari_hash: FixedHash,
+    pub tari_merge_mining_hash: FixedHash,
     pub aux_chain_hashes: Vec<monero::Hash>,
+    pub new_block_template: grpc::NewBlockTemplate,
 }

 impl BlockTemplateData {}

@@ -140,8 +235,9 @@ pub struct BlockTemplateDataBuilder {
     tari_miner_data: Option<grpc::MinerData>,
     monero_difficulty: Option<u64>,
     tari_difficulty: Option<u64>,
-    tari_hash: Option<FixedHash>,
+    tari_merge_mining_hash: Option<FixedHash>,
     aux_chain_hashes: Vec<monero::Hash>,
+    new_block_template: Option<grpc::NewBlockTemplate>,
 }

 impl BlockTemplateDataBuilder {
@@ -174,8 +270,8 @@ impl BlockTemplateDataBuilder {
         self
     }

-    pub fn tari_hash(mut self, hash: FixedHash) -> Self {
-        self.tari_hash = Some(hash);
+    pub fn tari_merge_mining_hash(mut self, hash: FixedHash) -> Self {
+        self.tari_merge_mining_hash = Some(hash);
         self
     }

@@ -184,6 +280,11 @@ impl BlockTemplateDataBuilder {
         self
     }

+    pub fn new_block_template(mut self, template: grpc::NewBlockTemplate) -> Self {
+        self.new_block_template = Some(template);
+        self
+    }
+
     /// Build a new [BlockTemplateData]; all the values have to be set.
     ///
     /// # Errors
@@ -205,12 +306,15 @@ impl BlockTemplateDataBuilder {
         let tari_difficulty = self
             .tari_difficulty
             .ok_or_else(|| MmProxyError::MissingDataError("tari_difficulty not provided".to_string()))?;
-        let tari_hash = self
-            .tari_hash
+        let tari_merge_mining_hash = self
+            .tari_merge_mining_hash
             .ok_or_else(|| MmProxyError::MissingDataError("tari_hash not provided".to_string()))?;
         if self.aux_chain_hashes.is_empty() {
             return Err(MmProxyError::MissingDataError("aux chain hashes are empty".to_string()));
         };
+        let new_block_template = self
+            .new_block_template
+            .ok_or_else(|| MmProxyError::MissingDataError("new_block_template not provided".to_string()))?;

         Ok(BlockTemplateData {
             monero_seed,
@@ -218,8 +322,9 @@
             tari_miner_data,
             monero_difficulty,
             tari_difficulty,
-            tari_hash,
+            tari_merge_mining_hash,
             aux_chain_hashes: self.aux_chain_hashes,
+            new_block_template,
         })
     }
 }

@@ -230,12 +335,14 @@
 pub mod test {
     use tari_core::{
         blocks::{Block, BlockHeader},
+        proof_of_work::Difficulty,
         transactions::aggregated_body::AggregateBody,
     };
+    use tari_utilities::ByteArray;

     use super::*;

-    fn create_block_template_data() -> BlockTemplateData {
+    fn create_block_template_data() -> FinalBlockTemplateData {
         let header = BlockHeader::new(100);
         let body = AggregateBody::empty();
         let block = Block::new(header, body);
@@ -246,15 +353,25 @@
             total_fees: 100,
             algo: Some(grpc::PowAlgo { pow_algo: 0 }),
         };
+        let new_block_template = grpc::NewBlockTemplate::default();
         let btdb = BlockTemplateDataBuilder::new()
             .monero_seed(FixedByteArray::new())
             .tari_block(block.try_into().unwrap())
             .tari_miner_data(miner_data)
             .monero_difficulty(123456)
             .tari_difficulty(12345)
-            .tari_hash(hash)
-            .aux_hashes(vec![monero::Hash::from_slice(hash.as_slice())]);
-        btdb.build().unwrap()
+            .tari_merge_mining_hash(hash)
+            .aux_hashes(vec![monero::Hash::from_slice(hash.as_slice())])
+            .new_block_template(new_block_template);
+        let block_template_data = btdb.build().unwrap();
+        FinalBlockTemplateData {
+            template: block_template_data,
+            target_difficulty:
Difficulty::from_u64(12345).unwrap(), + blockhashing_blob: "no blockhashing_blob data".to_string(), + blocktemplate_blob: "no blocktemplate_blob data".to_string(), + aux_chain_hashes: vec![monero::Hash::from_slice(hash.as_slice())], + aux_chain_mr: hash.to_vec(), + } } #[tokio::test] @@ -264,19 +381,21 @@ pub mod test { let hash2 = vec![2; 32]; let hash3 = vec![3; 32]; let block_template = create_block_template_data(); - btr.save(hash1.clone(), block_template.clone()).await; - btr.save(hash2.clone(), block_template).await; - assert!(btr.get(hash1.clone()).await.is_some()); - assert!(btr.get(hash2.clone()).await.is_some()); - assert!(btr.get(hash3.clone()).await.is_none()); - assert!(btr.remove(hash1.clone()).await.is_some()); - assert!(btr.get(hash1.clone()).await.is_none()); - assert!(btr.get(hash2.clone()).await.is_some()); - assert!(btr.get(hash3.clone()).await.is_none()); + btr.save_final_block_template_if_key_unique(hash1.clone(), block_template.clone()) + .await; + btr.save_final_block_template_if_key_unique(hash2.clone(), block_template) + .await; + assert!(btr.get_final_template(hash1.clone()).await.is_some()); + assert!(btr.get_final_template(hash2.clone()).await.is_some()); + assert!(btr.get_final_template(hash3.clone()).await.is_none()); + assert!(btr.remove_final_block_template(hash1.clone()).await.is_some()); + assert!(btr.get_final_template(hash1.clone()).await.is_none()); + assert!(btr.get_final_template(hash2.clone()).await.is_some()); + assert!(btr.get_final_template(hash3.clone()).await.is_none()); btr.remove_outdated().await; - assert!(btr.get(hash1).await.is_none()); - assert!(btr.get(hash2).await.is_none()); - assert!(btr.get(hash3).await.is_none()); + assert!(btr.get_final_template(hash1).await.is_none()); + assert!(btr.get_final_template(hash2).await.is_none()); + assert!(btr.get_final_template(hash3).await.is_none()); } #[test] @@ -323,10 +442,13 @@ pub mod test { #[test] pub fn ok_block_template_data_builder() { let build = create_block_template_data(); - assert!(build.monero_seed.is_empty()); - assert_eq!(build.tari_block.header.unwrap().version, 100); - assert_eq!(build.tari_miner_data.target_difficulty, 600000); - assert_eq!(build.monero_difficulty, 123456); - assert_eq!(build.tari_difficulty, 12345); + assert!(build.template.monero_seed.is_empty()); + assert_eq!(build.template.tari_block.header.unwrap().version, 100); + assert_eq!(build.template.tari_miner_data.target_difficulty, 600000); + assert_eq!(build.template.monero_difficulty, 123456); + assert_eq!(build.template.tari_difficulty, 12345); + assert_eq!(build.blockhashing_blob, "no blockhashing_blob data".to_string()); + assert_eq!(build.blocktemplate_blob, "no blocktemplate_blob data".to_string()); + assert_eq!(build.target_difficulty, Difficulty::from_u64(12345).unwrap()); } } diff --git a/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs b/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs index 1c44b5bc9e..08db7354ac 100644 --- a/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs +++ b/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs @@ -36,9 +36,10 @@ use tari_core::{ transaction_components::{TransactionKernel, TransactionOutput}, }, }; +use tari_utilities::{hex::Hex, ByteArray}; use crate::{ - block_template_data::{BlockTemplateData, BlockTemplateDataBuilder}, + block_template_data::{BlockTemplateData, BlockTemplateDataBuilder, BlockTemplateRepository}, common::merge_mining, config::MergeMiningProxyConfig, 
error::MmProxyError, @@ -73,53 +74,177 @@ impl<'a> BlockTemplateProtocol<'a> { } } +#[allow(clippy::too_many_lines)] impl BlockTemplateProtocol<'_> { /// Create [FinalBlockTemplateData] with [MoneroMiningData]. pub async fn get_next_block_template( mut self, monero_mining_data: MoneroMiningData, + block_templates: &BlockTemplateRepository, ) -> Result { + let best_block_hash = self.get_current_best_block_hash().await?; + let existing_block_template = block_templates.blocks_contains(best_block_hash).await; + + let mut final_block_template = existing_block_template; + let mut loop_count = 0; loop { - let new_template = self.get_new_block_template().await?; - let (coinbase_output, coinbase_kernel) = self.get_coinbase(&new_template).await?; + if loop_count >= 10 { + warn!(target: LOG_TARGET, "Failed to get block template after {} retries", loop_count); + return Err(MmProxyError::FailedToGetBlockTemplate(format!( + "Retried {} times", + loop_count + ))); + } + if loop_count == 1 && final_block_template.is_some() { + final_block_template = None; + } + if loop_count > 0 { + tokio::time::sleep(std::time::Duration::from_millis(loop_count * 250)).await; + } + loop_count += 1; + let (final_template_data, block_height) = if let Some(data) = final_block_template.clone() { + let height = data + .template + .tari_block + .header + .as_ref() + .map(|h| h.height) + .unwrap_or_default(); + debug!( + target: LOG_TARGET, + "Used existing block template and block for height: #{} (try {}), block hash: `{}`", + height, + loop_count, + match data.template.tari_block.header.as_ref() { + Some(h) => h.hash.to_hex(), + None => "None".to_string(), + } + ); + (data, height) + } else { + let (new_template, block_template_with_coinbase, height) = match block_templates + .get_new_template(best_block_hash) + .await + { + None => { + let new_template = match self.get_new_block_template().await { + Ok(val) => val, + Err(err) => { + error!(target: LOG_TARGET, "grpc get_new_block_template ({})", err.to_string()); + return Err(err); + }, + }; + let height = new_template + .template + .header + .as_ref() + .map(|h| h.height) + .unwrap_or_default(); + debug!(target: LOG_TARGET, "Requested new block template at height: #{} (try {})", height, loop_count); + let (coinbase_output, coinbase_kernel) = self.get_coinbase(&new_template).await?; + + let template_with_coinbase = merge_mining::add_coinbase( + &coinbase_output, + &coinbase_kernel, + new_template.template.clone(), + )?; + debug!(target: LOG_TARGET, "Added coinbase to new block template (try {})", loop_count); - let template_height = new_template.template.header.as_ref().map(|h| h.height).unwrap_or(0); - if !self.check_expected_tip(template_height).await? 
{ + block_templates + .save_new_block_template_if_key_unique( + best_block_hash.to_vec(), + new_template.clone(), + template_with_coinbase.clone(), + ) + .await; + + (new_template, template_with_coinbase, height) + }, + Some((new_template, template_with_coinbase)) => { + let height = new_template + .template + .header + .as_ref() + .map(|h| h.height) + .unwrap_or_default(); + debug!(target: LOG_TARGET, "Used existing new block template at height: #{} (try {})", height, loop_count); + (new_template, template_with_coinbase, height) + }, + }; + + let block = match self.get_new_block(block_template_with_coinbase).await { + Ok(b) => { + debug!( + target: LOG_TARGET, + "Requested new block at height: #{} (try {}), block hash: `{}`", + height, loop_count, + { + let block_header = b.block.as_ref().map(|b| b.header.as_ref()).unwrap_or_default(); + block_header.map(|h| h.hash.clone()).unwrap_or_default().to_hex() + }, + ); + b + }, + Err(MmProxyError::FailedPreconditionBlockLostRetry) => { + debug!( + target: LOG_TARGET, + "Chain tip has progressed past template height {}. Fetching a new block template (try {}).", + height, loop_count + ); + continue; + }, + Err(err) => { + error!(target: LOG_TARGET, "grpc get_new_block ({})", err.to_string()); + return Err(err); + }, + }; + + ( + add_monero_data(block, monero_mining_data.clone(), new_template)?, + height, + ) + }; + + block_templates + .save_final_block_template_if_key_unique( + // `aux_chain_mr` is used as the key because it is stored in the ExtraData field in the Monero + // block + final_template_data.aux_chain_mr.clone(), + final_template_data.clone(), + ) + .await; + block_templates + .remove_new_block_template(best_block_hash.to_vec()) + .await; + + if !self.check_expected_tip(block_height).await? { debug!( target: LOG_TARGET, - "Chain tip has progressed past template height {}. Fetching a new block template.", template_height + "Chain tip has progressed past template height {}. Fetching a new block template (try {}).", block_height, loop_count ); continue; } - - debug!(target: LOG_TARGET, "Added coinbase to new block template"); - let block_template_with_coinbase = - merge_mining::add_coinbase(&coinbase_output, &coinbase_kernel, new_template.template.clone())?; - info!( - target: LOG_TARGET, - "Received new block template from Minotari base node for height #{}", - new_template - .template + info!(target: LOG_TARGET, + "Block template for height: #{} (try {}), block hash: `{}`, {}", + final_template_data + .template.new_block_template .header .as_ref() .map(|h| h.height) - .unwrap_or_default(), - ); - let block = match self.get_new_block(block_template_with_coinbase).await { - Ok(b) => b, - Err(MmProxyError::FailedPreconditionBlockLostRetry) => { - debug!( - target: LOG_TARGET, - "Chain tip has progressed past template height {}. 
Fetching a new block template.", - template_height - ); - continue; + .unwrap_or_default(), loop_count, + match final_template_data.template.tari_block.header.as_ref() { + Some(h) => h.hash.to_hex(), + None => "None".to_string(), }, - Err(err) => return Err(err), - }; - - let final_block = self.add_monero_data(block, monero_mining_data, new_template)?; - return Ok(final_block); + match final_template_data.template.tari_block.body.as_ref() { + Some(b) => format!( + "inputs: `{}`, outputs: `{}`, kernels: `{}`", + b.inputs.len(), b.outputs.len(), b.kernels.len() + ), + None => "inputs: `0`, outputs: `0`, kernels: `0`".to_string(), + } + ); + return Ok(final_template_data); } } @@ -179,7 +304,7 @@ impl BlockTemplateProtocol<'_> { .get_tip_info(grpc::Empty {}) .await? .into_inner(); - let tip_height = tip.metadata.as_ref().map(|m| m.height_of_longest_chain).unwrap_or(0); + let tip_height = tip.metadata.as_ref().map(|m| m.best_block_height).unwrap_or(0); if height <= tip_height { warn!( @@ -218,81 +343,99 @@ impl BlockTemplateProtocol<'_> { Ok((coinbase_output, coinbase_kernel)) } - /// Build the [FinalBlockTemplateData] from [template](NewBlockTemplateData) and with - /// [tari](grpc::GetNewBlockResult) and [monero data](MoneroMiningData). - fn add_monero_data( - &self, - tari_block: grpc::GetNewBlockResult, - monero_mining_data: MoneroMiningData, - template_data: NewBlockTemplateData, - ) -> Result { - debug!(target: LOG_TARGET, "New block received from Minotari: {:?}", tari_block); - - let tari_difficulty = template_data.miner_data.target_difficulty; - let block_template_data = BlockTemplateDataBuilder::new() - .tari_block( - tari_block - .block - .ok_or(MmProxyError::GrpcResponseMissingField("block"))?, - ) - .tari_miner_data(template_data.miner_data) - .monero_seed(monero_mining_data.seed_hash) - .monero_difficulty(monero_mining_data.difficulty) - .tari_difficulty(tari_difficulty) - .tari_hash( - FixedHash::try_from(tari_block.merge_mining_hash.clone()) - .map_err(|e| MmProxyError::MissingDataError(e.to_string()))?, - ) - .aux_hashes(vec![monero::Hash::from_slice(&tari_block.merge_mining_hash)]) - .build()?; - - // Deserialize the block template blob - debug!(target: LOG_TARGET, "Deserializing Blocktemplate Blob into Monero Block",); - let mut monero_block = monero_rx::deserialize_monero_block_from_hex(&monero_mining_data.blocktemplate_blob)?; - - debug!(target: LOG_TARGET, "Insert Merged Mining Tag",); - // Add the Tari merge mining tag to the retrieved block template - // We need to send the MR al all aux chains, but a single chain, aka minotari only, means we only need the tari - // hash - let aux_chain_mr = tari_block.merge_mining_hash.clone(); - monero_rx::insert_merge_mining_tag_and_aux_chain_merkle_root_into_block( - &mut monero_block, - &aux_chain_mr, - 1, - 0, - )?; - - debug!(target: LOG_TARGET, "Creating blockhashing blob from blocktemplate blob",); - // Must be done after the tag is inserted since it will affect the hash of the miner tx - let blockhashing_blob = monero_rx::create_blockhashing_blob_from_block(&monero_block)?; - let blocktemplate_blob = monero_rx::serialize_monero_block_to_hex(&monero_block)?; - - let monero_difficulty = monero_mining_data.difficulty; - let mining_difficulty = cmp::min(monero_difficulty, tari_difficulty); - info!( - target: LOG_TARGET, - "Difficulties: Minotari ({}), Monero({}), Selected({})", - tari_difficulty, - monero_mining_data.difficulty, - mining_difficulty - ); - let merge_mining_hash = 
FixedHash::try_from(tari_block.merge_mining_hash.clone())
-            .map_err(|e| MmProxyError::MissingDataError(e.to_string()))?;
-        Ok(FinalBlockTemplateData {
-            template: block_template_data,
-            target_difficulty: Difficulty::from_u64(mining_difficulty)?,
-            blockhashing_blob,
-            blocktemplate_blob,
-            merge_mining_hash,
-            aux_chain_hashes: vec![monero::Hash::from_slice(&tari_block.merge_mining_hash)],
-            aux_chain_mr: tari_block.merge_mining_hash,
-        })
+    async fn get_current_best_block_hash(&self) -> Result<FixedHash, MmProxyError> {
+        let tip = self
+            .base_node_client
+            .clone()
+            .get_tip_info(grpc::Empty {})
+            .await?
+            .into_inner();
+        let best_block_hash = tip
+            .metadata
+            .as_ref()
+            .map(|m| m.best_block_hash.clone())
+            .unwrap_or(Vec::default());
+        FixedHash::try_from(best_block_hash).map_err(|e| MmProxyError::ConversionError(e.to_string()))
+    }
 }

+/// This is an interim solution to calculate the merkle root for the aux chains when multiple aux chains will be
+/// merge mined with Monero. It needs to be replaced with a more general solution in the future.
+pub fn calculate_aux_chain_merkle_root(hashes: Vec<monero::Hash>) -> Result<(monero::Hash, u32), MmProxyError> {
+    if hashes.is_empty() {
+        Err(MmProxyError::MissingDataError(
+            "No aux chain hashes provided".to_string(),
+        ))
+    } else if hashes.len() == 1 {
+        Ok((hashes[0], 0))
+    } else {
+        unimplemented!("Multiple aux chains for Monero is not supported yet, only Tari.");
+    }
+}
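Review note: `calculate_aux_chain_merkle_root` intentionally only handles the single-aux-chain (Tari-only) case, where the root is the lone leaf at position 0. If several aux chains are ever merge mined, a pairwise Merkle fold over the aux chain hashes would be needed. A generic sketch, not part of this diff (Monero's actual merge-mining tree also fixes leaf order by chain id, so this is illustrative only):

    /// Fold a non-empty layer of 32-byte leaves up to a single root,
    /// duplicating the last leaf whenever a layer has odd length.
    fn pairwise_merkle_root(
        mut layer: Vec<[u8; 32]>,
        hash_pair: impl Fn(&[u8; 32], &[u8; 32]) -> [u8; 32],
    ) -> Option<[u8; 32]> {
        if layer.is_empty() {
            return None;
        }
        while layer.len() > 1 {
            if layer.len() % 2 == 1 {
                let last = *layer.last().expect("layer is non-empty");
                layer.push(last);
            }
            layer = layer.chunks(2).map(|p| hash_pair(&p[0], &p[1])).collect();
        }
        layer.pop()
    }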
+
+/// Build the [FinalBlockTemplateData] from [template](NewBlockTemplateData) and with
+/// [tari](grpc::GetNewBlockResult) and [monero data](MoneroMiningData).
+fn add_monero_data(
+    tari_block_result: grpc::GetNewBlockResult,
+    monero_mining_data: MoneroMiningData,
+    template_data: NewBlockTemplateData,
+) -> Result<FinalBlockTemplateData, MmProxyError> {
+    let merge_mining_hash = FixedHash::try_from(tari_block_result.merge_mining_hash.clone())
+        .map_err(|e| MmProxyError::ConversionError(e.to_string()))?;
+
+    let aux_chain_hashes = vec![monero::Hash::from_slice(merge_mining_hash.as_slice())];
+    let tari_difficulty = template_data.miner_data.target_difficulty;
+    let block_template_data = BlockTemplateDataBuilder::new()
+        .tari_block(
+            tari_block_result
+                .block
+                .ok_or(MmProxyError::GrpcResponseMissingField("block"))?,
+        )
+        .tari_miner_data(template_data.miner_data)
+        .monero_seed(monero_mining_data.seed_hash)
+        .monero_difficulty(monero_mining_data.difficulty)
+        .tari_difficulty(tari_difficulty)
+        .tari_merge_mining_hash(merge_mining_hash)
+        .aux_hashes(aux_chain_hashes.clone())
+        .new_block_template(template_data.template)
+        .build()?;
+
+    // Deserialize the block template blob
+    debug!(target: LOG_TARGET, "Deserialize Monero block template blob into Monero block",);
+    let mut monero_block = monero_rx::deserialize_monero_block_from_hex(&monero_mining_data.blocktemplate_blob)?;
+
+    debug!(target: LOG_TARGET, "Insert aux chain merkle root (merge_mining_hash) into Monero block");
+    let aux_chain_mr = calculate_aux_chain_merkle_root(aux_chain_hashes.clone())?.0;
+    monero_rx::insert_aux_chain_mr_and_info_into_block(&mut monero_block, aux_chain_mr.to_bytes(), 1, 0)?;
+
+    debug!(target: LOG_TARGET, "Create blockhashing blob from blocktemplate blob",);
+    // Must be done after the aux_chain_mr is inserted since it will affect the hash of the miner tx
+    let blockhashing_blob = monero_rx::create_blockhashing_blob_from_block(&monero_block)?;
+    let blocktemplate_blob = monero_rx::serialize_monero_block_to_hex(&monero_block)?;
+
+    let monero_difficulty = monero_mining_data.difficulty;
+    let mining_difficulty = cmp::min(monero_difficulty, tari_difficulty);
+    info!(
+        target: LOG_TARGET,
+        "Difficulties: Minotari ({}), Monero({}), Selected({})",
+        tari_difficulty,
+        monero_mining_data.difficulty,
+        mining_difficulty
+    );
+
+    Ok(FinalBlockTemplateData {
+        template: block_template_data,
+        target_difficulty: Difficulty::from_u64(mining_difficulty)?,
+        blockhashing_blob,
+        blocktemplate_blob,
+        aux_chain_hashes,
+        aux_chain_mr: aux_chain_mr.to_bytes().to_vec(),
+    })
+}
+
 /// Private convenience container struct for new template data
-#[allow(dead_code)]
-struct NewBlockTemplateData {
+#[derive(Debug, Clone)]
+pub struct NewBlockTemplateData {
     pub template: grpc::NewBlockTemplate,
     pub miner_data: grpc::MinerData,
     pub initial_sync_achieved: bool,
@@ -305,17 +448,18 @@ impl NewBlockTemplateData {
 }

 /// Final outputs required for merge mining
+#[derive(Debug, Clone)]
 pub struct FinalBlockTemplateData {
     pub template: BlockTemplateData,
     pub target_difficulty: Difficulty,
     pub blockhashing_blob: String,
     pub blocktemplate_blob: String,
-    pub merge_mining_hash: FixedHash,
     pub aux_chain_hashes: Vec<monero::Hash>,
     pub aux_chain_mr: Vec<u8>,
 }

 /// Container struct for monero mining data inputs obtained from monerod
+#[derive(Clone)]
 pub struct MoneroMiningData {
     pub seed_hash: FixedByteArray,
     pub blocktemplate_blob: String,
diff --git a/applications/minotari_merge_mining_proxy/src/cli.rs b/applications/minotari_merge_mining_proxy/src/cli.rs
index f2cf7faa04..14e42769cd 100644
--- a/applications/minotari_merge_mining_proxy/src/cli.rs
+++ b/applications/minotari_merge_mining_proxy/src/cli.rs
@@ -35,9 +35,9 @@ pub struct Cli {
 }

 impl ConfigOverrideProvider for Cli {
-    fn get_config_property_overrides(&self, default_network: Network) -> Vec<(String, String)> {
-        let mut overrides = self.common.get_config_property_overrides(default_network);
-        let network = self.common.network.unwrap_or(default_network);
+    fn get_config_property_overrides(&self, network: &mut Network) -> Vec<(String, String)> {
+        let mut overrides = self.common.get_config_property_overrides(network);
+        *network = self.common.network.unwrap_or(*network);
         overrides.push(("merge_mining_proxy.override_from".to_string(), network.to_string()));
         overrides.push(("merge_mining_proxy.network".to_string(), network.to_string()));
         overrides
diff --git a/applications/minotari_merge_mining_proxy/src/common/json_rpc.rs b/applications/minotari_merge_mining_proxy/src/common/json_rpc.rs
index 8895d9b372..ebaa405412 100644
--- a/applications/minotari_merge_mining_proxy/src/common/json_rpc.rs
+++ b/applications/minotari_merge_mining_proxy/src/common/json_rpc.rs
@@ -128,13 +128,13 @@ pub mod test {

     #[test]
     pub fn test_error_response() {
-        let req_id = Some(12);
+        let req_id = 12;
         let err_code = 200;
         let err_message = "error message";
-        let err_data = Some(json::json!({"test key":"test value"}));
-        let response = error_response(req_id, err_code, err_message, err_data.clone());
-        assert_eq!(response["id"], req_id.unwrap());
-        assert_eq!(response["error"]["data"], err_data.unwrap());
+        let err_data = json::json!({"test key":"test value"});
+        let response = error_response(Some(req_id), err_code, err_message, Some(err_data.clone()));
+        assert_eq!(response["id"], req_id);
+        assert_eq!(response["error"]["data"], err_data);
         assert_eq!(response["error"]["code"], err_code);
         assert_eq!(response["error"]["message"], err_message);
         let response = error_response(None, err_code, err_message, None);
diff --git a/applications/minotari_merge_mining_proxy/src/error.rs b/applications/minotari_merge_mining_proxy/src/error.rs
index
27b81a8ca2..fe58c43f3a 100644 --- a/applications/minotari_merge_mining_proxy/src/error.rs +++ b/applications/minotari_merge_mining_proxy/src/error.rs @@ -111,6 +111,10 @@ pub enum MmProxyError { ParseInputError(#[from] ParseInputError), #[error("Base node not responding to gRPC requests: {0}")] BaseNodeNotResponding(String), + #[error("Unexpected missing data: {0}")] + UnexpectedMissingData(String), + #[error("Failed to get block template: {0}")] + FailedToGetBlockTemplate(String), } impl From for MmProxyError { diff --git a/applications/minotari_merge_mining_proxy/src/proxy.rs b/applications/minotari_merge_mining_proxy/src/proxy.rs index 9476e5bcee..6e55ca1c36 100644 --- a/applications/minotari_merge_mining_proxy/src/proxy.rs +++ b/applications/minotari_merge_mining_proxy/src/proxy.rs @@ -195,7 +195,7 @@ impl InnerService { .get_ref() .metadata .as_ref() - .map(|meta| meta.height_of_longest_chain) + .map(|meta| meta.best_block_height) .ok_or(MmProxyError::GrpcResponseMissingField("base node metadata"))?; if result.get_ref().initial_sync_achieved != self.initial_sync_achieved.load(Ordering::SeqCst) { self.initial_sync_achieved @@ -226,7 +226,6 @@ impl InnerService { let (parts, mut json_resp) = monerod_resp.into_parts(); debug!(target: LOG_TARGET, "handle_submit_block: submit request #{}", request); - debug!(target: LOG_TARGET, "Params received: #{:?}", request["params"]); let params = match request["params"].as_array() { Some(v) => v, None => { @@ -242,8 +241,6 @@ impl InnerService { }, }; - let gen_hash = *self.consensus_manager.get_genesis_block().hash(); - for param in params.iter().filter_map(|p| p.as_str()) { let monero_block = monero_rx::deserialize_monero_block_from_hex(param)?; debug!(target: LOG_TARGET, "Monero block: {}", monero_block); @@ -257,7 +254,7 @@ impl InnerService { hex::encode(hash) ); - let mut block_data = match self.block_templates.get(&hash).await { + let mut block_data = match self.block_templates.get_final_template(&hash).await { Some(d) => d, None => { info!( @@ -270,36 +267,53 @@ impl InnerService { }; let monero_data = monero_rx::construct_monero_data( monero_block, - block_data.monero_seed.clone(), + block_data.template.monero_seed.clone(), block_data.aux_chain_hashes.clone(), - block_data.tari_hash, + block_data.template.tari_merge_mining_hash, )?; debug!(target: LOG_TARGET, "Monero PoW Data: {:?}", monero_data); - let header_mut = block_data.tari_block.header.as_mut().unwrap(); - let height = header_mut.height; - BorshSerialize::serialize(&monero_data, &mut header_mut.pow.as_mut().unwrap().pow_data) + let tari_header_mut = block_data + .template + .tari_block + .header + .as_mut() + .ok_or(MmProxyError::UnexpectedMissingData("tari_block.header".to_string()))?; + let pow_mut = tari_header_mut + .pow + .as_mut() + .ok_or(MmProxyError::UnexpectedMissingData("tari_block.header.pow".to_string()))?; + BorshSerialize::serialize(&monero_data, &mut pow_mut.pow_data) .map_err(|err| MmProxyError::ConversionError(err.to_string()))?; - let tari_header = header_mut.clone().try_into().map_err(MmProxyError::ConversionError)?; + let tari_header = tari_header_mut + .clone() + .try_into() + .map_err(MmProxyError::ConversionError)?; let mut base_node_client = self.base_node_client.clone(); let start = Instant::now(); let achieved_target = if self.config.check_tari_difficulty_before_submit { trace!(target: LOG_TARGET, "Starting calculate achieved Tari difficultly"); - let diff = randomx_difficulty(&tari_header, &self.randomx_factory, &gen_hash, &self.consensus_manager)?; + let 
diff = randomx_difficulty( + &tari_header, + &self.randomx_factory, + self.consensus_manager.get_genesis_block().hash(), + &self.consensus_manager, + )?; trace!( target: LOG_TARGET, "Finished calculate achieved Tari difficultly - achieved {} vs. target {}", - diff.as_u64(), - block_data.tari_difficulty + diff, + block_data.template.tari_difficulty ); diff.as_u64() } else { - block_data.tari_difficulty + block_data.template.tari_difficulty }; - if achieved_target >= block_data.tari_difficulty { - match base_node_client.submit_block(block_data.tari_block).await { + let height = tari_header_mut.height; + if achieved_target >= block_data.template.tari_difficulty { + match base_node_client.submit_block(block_data.template.tari_block).await { Ok(resp) => { if self.config.submit_to_origin { json_resp = json_rpc::success_response( @@ -327,7 +341,7 @@ impl InnerService { json_resp ); } - self.block_templates.remove(&hash).await; + self.block_templates.remove_final_block_template(&hash).await; }, Err(err) => { debug!( @@ -404,7 +418,6 @@ impl InnerService { let mut grpc_client = self.base_node_client.clone(); // Add merge mining tag on blocktemplate request - debug!(target: LOG_TARGET, "Requested new block template from Minotari base node"); if !self.initial_sync_achieved.load(Ordering::SeqCst) { let grpc::TipInfoResponse { initial_sync_achieved, @@ -416,7 +429,7 @@ impl InnerService { self.initial_sync_achieved.store(true, Ordering::SeqCst); let msg = format!( "Initial base node sync achieved. Ready to mine at height #{}", - metadata.as_ref().map(|h| h.height_of_longest_chain).unwrap_or_default(), + metadata.as_ref().map(|h| h.best_block_height).unwrap_or_default(), ); debug!(target: LOG_TARGET, "{}", msg); println!("{}", msg); @@ -424,7 +437,7 @@ impl InnerService { } else { let msg = format!( "Initial base node sync not achieved, current height at #{} ... 
(waiting = {})", - metadata.as_ref().map(|h| h.height_of_longest_chain).unwrap_or_default(), + metadata.as_ref().map(|h| h.best_block_height).unwrap_or_default(), self.config.wait_for_initial_sync_at_startup, ); debug!(target: LOG_TARGET, "{}", msg); @@ -455,12 +468,15 @@ impl InnerService { difficulty, }; - let final_block_template_data = new_block_protocol.get_next_block_template(monero_mining_data).await?; + let final_block_template_data = new_block_protocol + .get_next_block_template(monero_mining_data, &self.block_templates) + .await?; - monerod_resp["result"]["blocktemplate_blob"] = final_block_template_data.blocktemplate_blob.into(); - monerod_resp["result"]["blockhashing_blob"] = final_block_template_data.blockhashing_blob.into(); + monerod_resp["result"]["blocktemplate_blob"] = final_block_template_data.blocktemplate_blob.clone().into(); + monerod_resp["result"]["blockhashing_blob"] = final_block_template_data.blockhashing_blob.clone().into(); monerod_resp["result"]["difficulty"] = final_block_template_data.target_difficulty.as_u64().into(); + let tari_difficulty = final_block_template_data.template.tari_difficulty; let tari_height = final_block_template_data .template .tari_block @@ -468,9 +484,9 @@ impl InnerService { .as_ref() .map(|h| h.height) .unwrap_or(0); + let aux_chain_mr = hex::encode(final_block_template_data.aux_chain_mr.clone()); let block_reward = final_block_template_data.template.tari_miner_data.reward; let total_fees = final_block_template_data.template.tari_miner_data.total_fees; - let mining_hash = final_block_template_data.merge_mining_hash; let monerod_resp = add_aux_data( monerod_resp, json!({ "base_difficulty": final_block_template_data.template.monero_difficulty }), @@ -479,21 +495,14 @@ impl InnerService { monerod_resp, json!({ "id": TARI_CHAIN_ID, - "difficulty": final_block_template_data.template.tari_difficulty, + "difficulty": tari_difficulty, "height": tari_height, - // The merge mining hash, before the final block hash can be calculated - "mining_hash": mining_hash.to_hex(), + // The aux chain merkle root, before the final block hash can be calculated + "mining_hash": aux_chain_mr, "miner_reward": block_reward + total_fees, }), ); - self.block_templates - .save( - final_block_template_data.aux_chain_mr, - final_block_template_data.template, - ) - .await; - debug!(target: LOG_TARGET, "Returning template result: {}", monerod_resp); Ok(proxy::into_response(parts, &monerod_resp)) } @@ -590,7 +599,7 @@ impl InnerService { let tip_header = client .get_header_by_hash(grpc::GetHeaderByHashRequest { - hash: chain_metadata.best_block, + hash: chain_metadata.best_block_hash, }) .await?; diff --git a/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs b/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs index 676e96bf6a..ec076c94b6 100644 --- a/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs +++ b/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs @@ -79,8 +79,8 @@ pub async fn start_merge_miner(cli: Cli) -> Result<(), anyhow::Error> { if let MmProxyError::BaseNodeNotResponding(_) = e { error!(target: LOG_TARGET, "{}", e.to_string()); println!(); - let msg = "Are the base node's gRPC mining methods denied in its 'config.toml'? Please ensure these \ - methods are commented out:\n 'grpc_server_deny_methods': \"get_new_block_template\", \ + let msg = "Are the base node's gRPC mining methods allowed in its 'config.toml'? 
Please ensure these \ + methods are enabled in:\n 'grpc_server_allow_methods': \"get_new_block_template\", \ \"get_tip_info\", \"get_new_block\", \"submit_block\""; println!("{}", msg); println!(); diff --git a/applications/minotari_miner/Cargo.toml b/applications/minotari_miner/Cargo.toml index 6dacb121ad..9006d51cb1 100644 --- a/applications/minotari_miner/Cargo.toml +++ b/applications/minotari_miner/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari miner implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] @@ -28,20 +28,20 @@ derivative = "2.2.0" futures = "0.3" hex = "0.4.2" log = { version = "0.4", features = ["std"] } -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } +log4rs = { version = "1.3.0", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } native-tls = "0.2" num_cpus = "1.13" rand = "0.8" serde = { version = "1.0", default_features = false, features = ["derive"] } serde_json = "1.0.57" thiserror = "1.0" -tokio = { version = "1.23", default_features = false, features = ["rt-multi-thread"] } +tokio = { version = "1.36", default_features = false, features = ["rt-multi-thread"] } tonic = { version = "0.8.3", features = ["tls", "tls-roots" ] } [dev-dependencies] prost-types = "0.11.9" chrono = { version = "0.4.19", default-features = false } -config = "0.13.0" +config = "0.14.0" [package.metadata.cargo-machete] ignored = [ diff --git a/applications/minotari_miner/src/cli.rs b/applications/minotari_miner/src/cli.rs index d1c976eff1..1c5f339acc 100644 --- a/applications/minotari_miner/src/cli.rs +++ b/applications/minotari_miner/src/cli.rs @@ -42,9 +42,9 @@ pub struct Cli { } impl ConfigOverrideProvider for Cli { - fn get_config_property_overrides(&self, default_network: Network) -> Vec<(String, String)> { - let mut overrides = self.common.get_config_property_overrides(default_network); - let network = self.common.network.unwrap_or(default_network); + fn get_config_property_overrides(&self, network: &mut Network) -> Vec<(String, String)> { + let mut overrides = self.common.get_config_property_overrides(network); + *network = self.common.network.unwrap_or(*network); overrides.push(("miner.network".to_string(), network.to_string())); overrides } diff --git a/applications/minotari_miner/src/miner.rs b/applications/minotari_miner/src/miner.rs index a21b2ce35d..ba42c85cb3 100644 --- a/applications/minotari_miner/src/miner.rs +++ b/applications/minotari_miner/src/miner.rs @@ -222,7 +222,7 @@ pub fn mining_task( // If we are mining in share mode, this share might not be a block, so we need to keep mining till we get a // new job if share_mode { - waker.clone().wake(); + waker.wake_by_ref(); } else { waker.wake(); trace!(target: LOG_TARGET, "Mining thread {} stopped", miner); @@ -240,7 +240,7 @@ pub fn mining_task( height: hasher.height(), target_difficulty, }); - waker.clone().wake(); + waker.wake_by_ref(); trace!(target: LOG_TARGET, "Reporting from {} result {:?}", miner, res); if let Err(TrySendError::Disconnected(_)) = res { info!(target: LOG_TARGET, "Mining thread {} 
disconnected", miner); diff --git a/applications/minotari_miner/src/run_miner.rs b/applications/minotari_miner/src/run_miner.rs index abe74eadca..e3e41cd4e2 100644 --- a/applications/minotari_miner/src/run_miner.rs +++ b/applications/minotari_miner/src/run_miner.rs @@ -29,14 +29,11 @@ use minotari_app_grpc::{ tari_rpc::{base_node_client::BaseNodeClient, TransactionOutput as GrpcTransactionOutput}, tls::protocol_string, }; -use minotari_app_utilities::{ - network_check::set_network_if_choice_valid, - parse_miner_input::{ - base_node_socket_address, - verify_base_node_grpc_mining_responses, - wallet_payment_address, - BaseNodeGrpcClient, - }, +use minotari_app_utilities::parse_miner_input::{ + base_node_socket_address, + verify_base_node_grpc_mining_responses, + wallet_payment_address, + BaseNodeGrpcClient, }; use tari_common::{ exit_codes::{ExitCode, ExitError}, @@ -76,8 +73,6 @@ pub async fn start_miner(cli: Cli) -> Result<(), ExitError> { let mut config = MinerConfig::load_from(&cfg).expect("Failed to load config"); config.set_base_path(cli.common.get_base_path()); - set_network_if_choice_valid(config.network)?; - debug!(target: LOG_TARGET_FILE, "{:?}", config); let key_manager = create_memory_db_key_manager(); let wallet_payment_address = wallet_payment_address(config.wallet_payment_address.clone(), config.network) @@ -137,9 +132,9 @@ pub async fn start_miner(cli: Cli) -> Result<(), ExitError> { if let MinerError::BaseNodeNotResponding(_) = e { error!(target: LOG_TARGET, "{}", e.to_string()); println!(); - let msg = "Could not connect to the base node. \nAre the base node's gRPC mining methods denied in \ - its 'config.toml'? Please ensure these methods are commented out:\n \ - 'grpc_server_deny_methods': \"get_new_block_template\", \"get_tip_info\", \ + let msg = "Could not connect to the base node. \nAre the base node's gRPC mining methods allowed in \ + its 'config.toml'? Please ensure these methods are enabled in:\n \ + 'grpc_server_allow_methods': \"get_new_block_template\", \"get_tip_info\", \ \"get_new_block\", \"submit_block\""; println!("{}", msg); println!(); @@ -417,7 +412,7 @@ async fn validate_tip( .get_tip_info(minotari_app_grpc::tari_rpc::Empty {}) .await? 
.into_inner(); - let longest_height = tip.clone().metadata.unwrap().height_of_longest_chain; + let longest_height = tip.clone().metadata.unwrap().best_block_height; if let Some(height) = mine_until_height { if longest_height >= height { return Err(MinerError::MineUntilHeightReached(height)); diff --git a/applications/minotari_miner/src/stratum/controller.rs b/applications/minotari_miner/src/stratum/controller.rs index 73a0e18c19..e569253b5e 100644 --- a/applications/minotari_miner/src/stratum/controller.rs +++ b/applications/minotari_miner/src/stratum/controller.rs @@ -236,10 +236,10 @@ impl Controller { } fn handle_error(&mut self, error: types::rpc_error::RpcError) { - if vec![-1, 24].contains(&error.code) { + if [-1, 24].contains(&error.code) { // unauthorized let _result = self.send_login(); - } else if vec![21, 20, 22, 23, 25].contains(&error.code) { + } else if [21, 20, 22, 23, 25].contains(&error.code) { // problem with template let _result = self.send_message_get_job_template(); } else { diff --git a/applications/minotari_node/Cargo.toml b/applications/minotari_node/Cargo.toml index ed6331dd0a..76b66e9d5c 100644 --- a/applications/minotari_node/Cargo.toml +++ b/applications/minotari_node/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari full base node implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] @@ -22,6 +22,9 @@ tari_storage = {path="../../infrastructure/storage"} tari_service_framework = { path = "../../base_layer/service_framework" } tari_shutdown = { path = "../../infrastructure/shutdown" } tari_utilities = { version = "0.7" } +tari_key_manager = { path = "../../base_layer/key_manager", features = [ + "key_manager_service", +], version = "1.0.0-pre.11a" } anyhow = "1.0.53" async-trait = "0.1.52" @@ -30,35 +33,35 @@ borsh = "1.2" chrono = { version = "0.4.19", default-features = false } clap = { version = "3.2", features = ["derive", "env"] } console-subscriber = "0.1.8" -config = { version = "0.13.0" } -crossterm = { version = "0.23.1", features = ["event-stream"] } +config = { version = "0.14.0" } +crossterm = { version = "0.25.0", features = ["event-stream"] } derive_more = "0.99.17" either = "1.6.1" futures = { version = "^0.3.16", default-features = false, features = ["alloc"] } qrcode = { version = "0.12" } log = { version = "0.4.8", features = ["std"] } log-mdc = "0.1.0" -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } +log4rs = { version = "1.3.0", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } nom = "7.1" rustyline = "9.0" rustyline-derive = "0.5" serde = "1.0.136" strum = { version = "0.22", features = ["derive"] } thiserror = "^1.0.26" -tokio = { version = "1.23", features = ["signal"] } +tokio = { version = "1.36", features = ["signal"] } tonic = { version = "0.8.3", features = ["tls", "tls-roots" ] } # Metrics tari_metrics = { path = "../../infrastructure/metrics", optional = true, features = ["server"] } [features] -default = ["metrics", "libtor"] +default = ["libtor"] metrics = ["tari_metrics", "tari_comms/metrics"] safe = [] libtor = 
["tari_libtor"] [build-dependencies] -tari_features = { path = "../../common/tari_features"} +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a"} [package.metadata.cargo-machete] ignored = [ diff --git a/applications/minotari_node/osx-pkg/entitlements.xml b/applications/minotari_node/osx-pkg/entitlements.xml new file mode 100644 index 0000000000..a013b59909 --- /dev/null +++ b/applications/minotari_node/osx-pkg/entitlements.xml @@ -0,0 +1,8 @@ + + + + + com.apple.security.cs.allow-jit + + + diff --git a/applications/minotari_node/src/bootstrap.rs b/applications/minotari_node/src/bootstrap.rs index c2d0ce7753..dc76aa290f 100644 --- a/applications/minotari_node/src/bootstrap.rs +++ b/applications/minotari_node/src/bootstrap.rs @@ -28,7 +28,14 @@ use tari_common::{ configuration::bootstrap::ApplicationType, exit_codes::{ExitCode, ExitError}, }; -use tari_comms::{peer_manager::Peer, protocol::rpc::RpcServer, NodeIdentity, UnspawnedCommsNode}; +use tari_comms::{ + multiaddr::{Error as MultiaddrError, Multiaddr}, + peer_manager::Peer, + protocol::rpc::RpcServer, + tor::TorIdentity, + NodeIdentity, + UnspawnedCommsNode, +}; use tari_comms_dht::Dht; use tari_core::{ base_node, @@ -79,6 +86,7 @@ pub struct BaseNodeBootstrapper<'a, B> { impl BaseNodeBootstrapper<'_, B> where B: BlockchainBackend + 'static { + #[allow(clippy::too_many_lines)] pub async fn bootstrap(self) -> Result { let mut base_node_config = self.app_config.base_node.clone(); let mut p2p_config = self.app_config.base_node.p2p.clone(); @@ -164,10 +172,37 @@ where B: BlockchainBackend + 'static let comms = comms.add_protocol_extension(mempool_protocol); let comms = Self::setup_rpc_services(comms, &handles, self.db.into(), &p2p_config); - let comms = initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone()) - .await - .map_err(|e| e.to_exit_error())?; + let comms = if p2p_config.transport.transport_type == TransportType::Tor { + let tor_id_path = base_node_config.tor_identity_file.clone(); + let node_id_path = base_node_config.identity_file.clone(); + let node_id = comms.node_identity(); + let after_comms = move |identity: TorIdentity| { + let address_string = format!("/onion3/{}:{}", identity.service_id, identity.onion_port); + if let Err(e) = identity_management::save_as_json(&tor_id_path, &identity) { + error!(target: LOG_TARGET, "Failed to save tor identity{:?}", e); + } + trace!(target: LOG_TARGET, "resave the tor identity {:?}", identity); + let result: Result = address_string.parse(); + if result.is_err() { + error!(target: LOG_TARGET, "Failed to parse tor identity as multiaddr{:?}", result); + return; + } + let address = result.unwrap(); + if !node_id.public_addresses().contains(&address) { + node_id.add_public_address(address); + } + if let Err(e) = identity_management::save_as_json(&node_id_path, &*node_id) { + error!(target: LOG_TARGET, "Failed to save node identity identity{:?}", e); + } + }; + initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await + } else { + let after_comms = |_identity| {}; + initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await + }; + + let comms = comms.map_err(|e| e.to_exit_error())?; // Save final node identity after comms has initialized. This is required because the public_address can be // changed by comms during initialization when using tor. 
match p2p_config.transport.transport_type { @@ -177,10 +212,6 @@ where B: BlockchainBackend + 'static .map_err(|e| ExitError::new(ExitCode::IdentityError, e))?; }, }; - if let Some(hs) = comms.hidden_service() { - identity_management::save_as_json(&base_node_config.tor_identity_file, hs.tor_identity()) - .map_err(|e| ExitError::new(ExitCode::IdentityError, e))?; - } handles.register(comms); diff --git a/applications/minotari_node/src/cli.rs b/applications/minotari_node/src/cli.rs index f6de3e7c9f..f4eb4da533 100644 --- a/applications/minotari_node/src/cli.rs +++ b/applications/minotari_node/src/cli.rs @@ -49,12 +49,14 @@ pub struct Cli { pub grpc_enabled: bool, #[clap(long, env = "MINOTARI_NODE_ENABLE_MINING", alias = "enable-mining")] pub mining_enabled: bool, + #[clap(long, env = "MINOTARI_NODE_SECOND_LAYER_GRPC_ENABLED", alias = "enable-second-layer")] + pub second_layer_grpc_enabled: bool, } impl ConfigOverrideProvider for Cli { - fn get_config_property_overrides(&self, default_network: Network) -> Vec<(String, String)> { - let mut overrides = self.common.get_config_property_overrides(default_network); - let network = self.common.network.unwrap_or(default_network); + fn get_config_property_overrides(&self, network: &mut Network) -> Vec<(String, String)> { + let mut overrides = self.common.get_config_property_overrides(network); + *network = self.common.network.unwrap_or(*network); overrides.push(("base_node.network".to_string(), network.to_string())); overrides.push(("base_node.override_from".to_string(), network.to_string())); overrides.push(("p2p.seeds.override_from".to_string(), network.to_string())); @@ -67,6 +69,10 @@ impl ConfigOverrideProvider for Cli { overrides.push(("base_node.grpc_enabled".to_string(), "true".to_string())); overrides.push(("base_node.mining_enabled".to_string(), "true".to_string())); } + if self.second_layer_grpc_enabled { + overrides.push(("base_node.grpc_enabled".to_string(), "true".to_string())); + overrides.push(("base_node.second_layer_grpc_enabled".to_string(), "true".to_string())); + } overrides } } diff --git a/applications/minotari_node/src/commands/cli_loop.rs b/applications/minotari_node/src/commands/cli_loop.rs index 323a90dfa8..a3cd47b7f5 100644 --- a/applications/minotari_node/src/commands/cli_loop.rs +++ b/applications/minotari_node/src/commands/cli_loop.rs @@ -98,6 +98,8 @@ impl CliLoop { KeyEvent { code: KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, + kind: _, + state: _, } => { return true; }, diff --git a/applications/minotari_node/src/commands/command/check_db.rs b/applications/minotari_node/src/commands/command/check_db.rs index b6e59ac4fe..8858bb39bd 100644 --- a/applications/minotari_node/src/commands/command/check_db.rs +++ b/applications/minotari_node/src/commands/command/check_db.rs @@ -43,12 +43,12 @@ impl CommandContext { /// Function to process the check-db command pub async fn check_db(&mut self) -> Result<(), Error> { let meta = self.node_service.get_metadata().await?; - let mut height = meta.height_of_longest_chain(); + let mut height = meta.best_block_height(); let mut missing_blocks = Vec::new(); let mut missing_headers = Vec::new(); print!("Searching for height: "); // We need to check every header, but not every block. 
- let horizon_height = meta.horizon_block_height(height); + let horizon_height = meta.pruned_height_at_given_chain_tip(height); while height > 0 { print!("{}", height); io::stdout().flush().await?; diff --git a/applications/minotari_node/src/commands/command/get_block.rs b/applications/minotari_node/src/commands/command/get_block.rs index 5b516fb8ed..8cacba16f3 100644 --- a/applications/minotari_node/src/commands/command/get_block.rs +++ b/applications/minotari_node/src/commands/command/get_block.rs @@ -62,8 +62,6 @@ impl HandleCommand for CommandContext { enum ArgsError { #[error("Block not found at height {height}")] NotFoundAt { height: u64 }, - #[error("Block not found")] - NotFound, #[error("Serializing/Deserializing error: `{0}`")] MessageFormatError(String), } @@ -101,20 +99,32 @@ impl CommandContext { } pub async fn get_block_by_hash(&self, hash: HashOutput, format: Format) -> Result<(), Error> { - let block = self - .blockchain_db - .fetch_block_by_hash(hash, false) - .await? - .ok_or(ArgsError::NotFound)?; - match format { - Format::Text => println!("{}", block), - Format::Json => println!( - "{}", - block - .to_json() - .map_err(|e| ArgsError::MessageFormatError(format!("{}", e)))? - ), - } + let block = self.blockchain_db.fetch_block_by_hash(hash, false).await?; + match block { + Some(block) => match format { + Format::Text => println!("{}", block), + Format::Json => println!( + "{}", + block + .to_json() + .map_err(|e| ArgsError::MessageFormatError(format!("{}", e)))? + ), + }, + None => { + let block = self.blockchain_db.fetch_orphan(hash).await?; + println!("Found in orphan database"); + match format { + Format::Text => println!("{}", block), + Format::Json => println!( + "{}", + block + .to_json() + .map_err(|e| ArgsError::MessageFormatError(format!("{}", e)))? 
+ ), + } + }, + }; + Ok(()) } } diff --git a/applications/minotari_node/src/commands/command/get_network_stats.rs b/applications/minotari_node/src/commands/command/get_network_stats.rs index 73432c9378..19c86a52e4 100644 --- a/applications/minotari_node/src/commands/command/get_network_stats.rs +++ b/applications/minotari_node/src/commands/command/get_network_stats.rs @@ -25,6 +25,7 @@ use async_trait::async_trait; use clap::Parser; use super::{CommandContext, HandleCommand}; +#[cfg(feature = "metrics")] use crate::table::Table; /// Displays network stats diff --git a/applications/minotari_node/src/commands/command/list_connections.rs b/applications/minotari_node/src/commands/command/list_connections.rs index 61b162a13c..a4c9d5343d 100644 --- a/applications/minotari_node/src/commands/command/list_connections.rs +++ b/applications/minotari_node/src/commands/command/list_connections.rs @@ -64,7 +64,7 @@ impl CommandContext { let chain_height = peer .get_metadata(1) .and_then(|v| bincode::deserialize::(v).ok()) - .map(|metadata| format!("height: {}", metadata.metadata.height_of_longest_chain())); + .map(|metadata| format!("height: {}", metadata.metadata.best_block_height())); let ua = peer.user_agent; let rpc_sessions = self diff --git a/applications/minotari_node/src/commands/command/list_peers.rs b/applications/minotari_node/src/commands/command/list_peers.rs index 9199e32fb1..ab487b7ee7 100644 --- a/applications/minotari_node/src/commands/command/list_peers.rs +++ b/applications/minotari_node/src/commands/command/list_peers.rs @@ -86,7 +86,7 @@ impl CommandContext { .get_metadata(1) .and_then(|v| bincode::deserialize::(v).ok()) { - s.push(format!("chain height: {}", metadata.metadata.height_of_longest_chain())); + s.push(format!("chain height: {}", metadata.metadata.best_block_height())); } if let Some(last_seen) = peer.addresses.last_seen() { diff --git a/applications/minotari_node/src/commands/command/list_validator_nodes.rs b/applications/minotari_node/src/commands/command/list_validator_nodes.rs index 83eba16049..71b1f5017d 100644 --- a/applications/minotari_node/src/commands/command/list_validator_nodes.rs +++ b/applications/minotari_node/src/commands/command/list_validator_nodes.rs @@ -62,13 +62,11 @@ impl CommandContext { /// Function to process the list-connections command pub async fn list_validator_nodes(&mut self, args: Args) -> Result<(), Error> { let metadata = self.blockchain_db.get_chain_metadata().await?; - let constants = self - .consensus_rules - .consensus_constants(metadata.height_of_longest_chain()); + let constants = self.consensus_rules.consensus_constants(metadata.best_block_height()); let height = args .epoch .map(|epoch| constants.epoch_to_block_height(epoch)) - .unwrap_or_else(|| metadata.height_of_longest_chain()); + .unwrap_or_else(|| metadata.best_block_height()); let current_epoch = constants.block_height_to_epoch(height); let next_epoch = VnEpoch(current_epoch.as_u64() + 1); let next_epoch_height = constants.epoch_to_block_height(next_epoch); diff --git a/applications/minotari_node/src/commands/command/period_stats.rs b/applications/minotari_node/src/commands/command/period_stats.rs index 2870989a5b..edd749d699 100644 --- a/applications/minotari_node/src/commands/command/period_stats.rs +++ b/applications/minotari_node/src/commands/command/period_stats.rs @@ -58,7 +58,7 @@ impl CommandContext { ) -> Result<(), Error> { let meta = self.node_service.get_metadata().await?; - let mut height = meta.height_of_longest_chain(); + let mut height = 
meta.best_block_height(); // Currently gets the stats for: tx count, hash rate estimation, target difficulty, solvetime. let mut results: Vec<(usize, f64, u64, u64, usize)> = Vec::new(); diff --git a/applications/minotari_node/src/commands/command/status.rs b/applications/minotari_node/src/commands/command/status.rs index e63e256b7b..f4d101a104 100644 --- a/applications/minotari_node/src/commands/command/status.rs +++ b/applications/minotari_node/src/commands/command/status.rs @@ -64,7 +64,7 @@ impl CommandContext { status_line.add_field("State", self.state_machine_info.borrow().state_info.short_desc()); let metadata = self.node_service.get_metadata().await?; - let height = metadata.height_of_longest_chain(); + let height = metadata.best_block_height(); let last_header = self .node_service .get_header(height) @@ -76,16 +76,10 @@ impl CommandContext { ); status_line.add_field( "Tip", - format!( - "{} ({})", - metadata.height_of_longest_chain(), - last_block_time.to_rfc2822() - ), + format!("{} ({})", metadata.best_block_height(), last_block_time.to_rfc2822()), ); - let constants = self - .consensus_rules - .consensus_constants(metadata.height_of_longest_chain()); + let constants = self.consensus_rules.consensus_constants(metadata.best_block_height()); let fut = self.mempool_service.get_mempool_stats(); if let Ok(mempool_stats) = time::timeout(Duration::from_secs(5), fut).await? { status_line.add_field( diff --git a/applications/minotari_node/src/config.rs b/applications/minotari_node/src/config.rs index 494dbf868b..cb9435fc97 100644 --- a/applications/minotari_node/src/config.rs +++ b/applications/minotari_node/src/config.rs @@ -89,13 +89,15 @@ pub struct BaseNodeConfig { /// GRPC address of base node pub grpc_address: Option, /// GRPC server config - which methods are active and which not - pub grpc_server_deny_methods: Vec, + pub grpc_server_allow_methods: Vec, /// GRPC authentication mode pub grpc_authentication: GrpcAuthentication, /// GRPC tls enabled pub grpc_tls_enabled: bool, /// Enable mining on the base node, overriding other settings regarding mining pub mining_enabled: bool, + /// Enable second layer specific grpc methods. + pub second_layer_grpc_enabled: bool, /// A path to the file that stores the base node identity and secret key pub identity_file: PathBuf, /// Spin up and use a built-in Tor instance. 
This only works on macos/linux - requires that the wallet was built @@ -154,19 +156,11 @@ impl Default for BaseNodeConfig { network: Network::default(), grpc_enabled: true, grpc_address: None, - grpc_server_deny_methods: vec![ - // These gRPC server methods share sensitive information, thus disabled by default - GrpcMethod::GetVersion, - GrpcMethod::CheckForUpdates, - GrpcMethod::GetSyncInfo, - GrpcMethod::GetSyncProgress, - GrpcMethod::GetTipInfo, - GrpcMethod::Identify, - GrpcMethod::GetNetworkStatus, - ], + grpc_server_allow_methods: vec![GrpcMethod::GetVersion], grpc_authentication: GrpcAuthentication::default(), grpc_tls_enabled: false, mining_enabled: false, + second_layer_grpc_enabled: false, identity_file: PathBuf::from("config/base_node_id.json"), use_libtor: true, tor_identity_file: PathBuf::from("config/base_node_tor_id.json"), @@ -241,6 +235,8 @@ pub enum GrpcMethod { GetNetworkDifficulty, GetNewBlockTemplate, GetNewBlock, + GetNewBlockWithCoinbases, + GetNewBlockTemplateWithCoinbases, GetNewBlockBlob, SubmitBlock, SubmitBlockBlob, @@ -280,14 +276,14 @@ mod tests { #[derive(Clone, Serialize, Deserialize, Debug)] #[allow(clippy::struct_excessive_bools)] struct TestInnerConfig { - deny_methods: Vec, + allow_methods: Vec, } #[test] fn it_deserializes_enums() { let config_str = r#" name = "blockchain champion" - inner_config.deny_methods = [ + inner_config.allow_methods = [ "list_headers", "get_constants", # "get_blocks" @@ -298,10 +294,10 @@ mod tests { let config = toml::from_str::(config_str).unwrap(); // Enums in the config - assert!(config.inner_config.deny_methods.contains(&GrpcMethod::ListHeaders)); - assert!(config.inner_config.deny_methods.contains(&GrpcMethod::GetConstants)); - assert!(!config.inner_config.deny_methods.contains(&GrpcMethod::GetBlocks)); // commented out in the config - assert!(config.inner_config.deny_methods.contains(&GrpcMethod::Identify)); - assert!(!config.inner_config.deny_methods.contains(&GrpcMethod::GetShardKey)); // commented out in the config + assert!(config.inner_config.allow_methods.contains(&GrpcMethod::ListHeaders)); + assert!(config.inner_config.allow_methods.contains(&GrpcMethod::GetConstants)); + assert!(!config.inner_config.allow_methods.contains(&GrpcMethod::GetBlocks)); // commented out in the config + assert!(config.inner_config.allow_methods.contains(&GrpcMethod::Identify)); + assert!(!config.inner_config.allow_methods.contains(&GrpcMethod::GetShardKey)); // commented out in the config } } diff --git a/applications/minotari_node/src/grpc/base_node_grpc_server.rs b/applications/minotari_node/src/grpc/base_node_grpc_server.rs index f331237a31..f6e0281d4f 100644 --- a/applications/minotari_node/src/grpc/base_node_grpc_server.rs +++ b/applications/minotari_node/src/grpc/base_node_grpc_server.rs @@ -34,7 +34,10 @@ use minotari_app_grpc::{ tari_rpc::{CalcType, Sorting}, }; use minotari_app_utilities::consts; -use tari_common_types::types::{Commitment, FixedHash, PublicKey, Signature}; +use tari_common_types::{ + tari_address::TariAddress, + types::{Commitment, FixedHash, PublicKey, Signature}, +}; use tari_comms::{Bytes, CommsNode}; use tari_core::{ base_node::{ @@ -49,8 +52,25 @@ use tari_core::{ iterators::NonOverlappingIntegerPairIter, mempool::{service::LocalMempoolService, TxStorageResponse}, proof_of_work::PowAlgorithm, - transactions::transaction_components::Transaction, + transactions::{ + generate_coinbase_with_wallet_output, + key_manager::{ + create_memory_db_key_manager, + TariKeyId, + TransactionKeyManagerBranch, + 
TransactionKeyManagerInterface, + TxoStage, + }, + transaction_components::{ + KernelBuilder, + RangeProofType, + Transaction, + TransactionKernel, + TransactionKernelVersion, + }, + }, }; +use tari_key_manager::key_manager_service::KeyManagerInterface; use tari_p2p::{auto_update::SoftwareUpdaterHandle, services::liveness::LivenessHandle}; use tari_utilities::{hex::Hex, message_format::MessageFormat, ByteArray}; use tokio::task; @@ -120,18 +140,36 @@ impl BaseNodeGrpcServer { } fn is_method_enabled(&self, grpc_method: GrpcMethod) -> bool { - let mining_method = vec![ + let mining_method = [ + GrpcMethod::GetVersion, GrpcMethod::GetNewBlockTemplate, + GrpcMethod::GetNewBlockWithCoinbases, + GrpcMethod::GetNewBlockTemplateWithCoinbases, GrpcMethod::GetNewBlock, GrpcMethod::GetNewBlockBlob, GrpcMethod::SubmitBlock, GrpcMethod::SubmitBlockBlob, GrpcMethod::GetTipInfo, ]; + + let second_layer_methods = [ + GrpcMethod::GetVersion, + GrpcMethod::GetConstants, + GrpcMethod::GetMempoolTransactions, + GrpcMethod::GetTipInfo, + GrpcMethod::GetActiveValidatorNodes, + GrpcMethod::GetShardKey, + GrpcMethod::GetTemplateRegistrations, + GrpcMethod::GetHeaderByHash, + GrpcMethod::GetSideChainUtxos, + ]; if self.config.mining_enabled && mining_method.contains(&grpc_method) { return true; } - !self.config.grpc_server_deny_methods.contains(&grpc_method) + if self.config.second_layer_grpc_enabled && second_layer_methods.contains(&grpc_method) { + return true; + } + self.config.grpc_server_allow_methods.contains(&grpc_method) } } @@ -379,7 +417,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Status::internal(err.to_string()), )); }, - Ok(data) => data.height_of_longest_chain(), + Ok(data) => data.best_block_height(), }; let sorting: Sorting = request.sorting(); @@ -585,7 +623,6 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { .try_into() .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, ), - initial_sync_achieved: status_watch.borrow().bootstrapped, }; @@ -609,9 +646,446 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Status::invalid_argument(format!("Malformed block template provided: {}", s)), ) })?; + let algo = block_template.header.pow.pow_algo; + + let mut handler = self.node_service.clone(); + + let new_block = match handler.get_new_block(block_template).await { + Ok(b) => b, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. })) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::invalid_argument(message), + )); + }, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::CannotCalculateNonTipMmr(msg))) => { + let status = Status::with_details( + tonic::Code::FailedPrecondition, + msg, + Bytes::from_static(b"CannotCalculateNonTipMmr"), + ); + return Err(obscure_error_if_true(report_error_flag, status)); + }, + Err(e) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::internal(e.to_string()), + )) + }, + }; + let fees = new_block.body.get_total_fee().map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Invalid fees in block".to_string()), + ) + })?; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? 
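(Aside: the deny-to-allow inversion above is the heart of the config change. A gRPC method is now served only if the mining feature implies it, the second-layer feature implies it, or the operator listed it in `grpc_server_allow_methods`; everything else is denied by default. A condensed, self-contained sketch of that rule, using a simplified enum and config rather than the real types:

    #[derive(PartialEq)]
    enum GrpcMethod {
        GetVersion,
        GetTipInfo,
        SubmitBlock,
        GetShardKey,
        GetBlocks,
    }

    struct GateConfig {
        mining_enabled: bool,
        second_layer_grpc_enabled: bool,
        grpc_server_allow_methods: Vec<GrpcMethod>,
    }

    // Feature-implied sets are additive; the explicit allow list is the
    // fallback; anything not covered is denied by default.
    fn is_method_enabled(config: &GateConfig, method: &GrpcMethod) -> bool {
        let mining = [GrpcMethod::GetVersion, GrpcMethod::GetTipInfo, GrpcMethod::SubmitBlock];
        let second_layer = [GrpcMethod::GetVersion, GrpcMethod::GetTipInfo, GrpcMethod::GetShardKey];
        (config.mining_enabled && mining.contains(method))
            || (config.second_layer_grpc_enabled && second_layer.contains(method))
            || config.grpc_server_allow_methods.contains(method)
    }

    fn main() {
        let config = GateConfig {
            mining_enabled: true,
            second_layer_grpc_enabled: false,
            grpc_server_allow_methods: vec![GrpcMethod::GetVersion],
        };
        assert!(is_method_enabled(&config, &GrpcMethod::SubmitBlock)); // implied by mining
        assert!(!is_method_enabled(&config, &GrpcMethod::GetShardKey)); // second layer is off
        assert!(!is_method_enabled(&config, &GrpcMethod::GetBlocks)); // never allowed here
    }

The diff resumes below.)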
+ .hash() + .to_vec(); + // construct response + let block_hash = new_block.hash().to_vec(); + let mining_hash = match new_block.header.pow.pow_algo { + PowAlgorithm::Sha3x => new_block.header.mining_hash().to_vec(), + PowAlgorithm::RandomX => new_block.header.merge_mining_hash().to_vec(), + }; + let block: Option = Some( + new_block + .try_into() + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, + ); + let new_template = handler.get_new_block_template(algo, 0).await.map_err(|e| { + warn!( + target: LOG_TARGET, + "Could not get new block template: {}", + e.to_string() + ); + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) + })?; + + let pow = algo as i32; + + let miner_data = tari_rpc::MinerData { + reward: new_template.reward.into(), + target_difficulty: new_template.target_difficulty.as_u64(), + total_fees: fees.as_u64(), + algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), + }; + + let response = tari_rpc::GetNewBlockResult { + block_hash, + block, + merge_mining_hash: mining_hash, + tari_unique_id: gen_hash, + miner_data: Some(miner_data), + }; + debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); + Ok(Response::new(response)) + } + + #[allow(clippy::too_many_lines)] + async fn get_new_block_template_with_coinbases( + &self, + request: Request, + ) -> Result, Status> { + if !self.is_method_enabled(GrpcMethod::GetNewBlockTemplateWithCoinbases) { + return Err(Status::permission_denied( + "`GetNewBlockTemplateWithCoinbases` method not made available", + )); + } + debug!(target: LOG_TARGET, "Incoming GRPC request for get new block template with coinbases"); + let report_error_flag = self.report_error_flag(); + let request = request.into_inner(); + let algo = request + .algo + .map(|algo| u64::try_from(algo.pow_algo)) + .ok_or_else(|| obscure_error_if_true(report_error_flag, Status::invalid_argument("PoW algo not provided")))? + .map_err(|e| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument(format!("Invalid PoW algo '{}'", e)), + ) + })?; + + let algo = PowAlgorithm::try_from(algo).map_err(|e| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument(format!("Invalid PoW algo '{}'", e)), + ) + })?; + + let mut handler = self.node_service.clone(); + + let mut new_template = handler + .get_new_block_template(algo, request.max_weight) + .await + .map_err(|e| { + warn!( + target: LOG_TARGET, + "Could not get new block template: {}", + e.to_string() + ); + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) + })?; + + let pow = algo as i32; + + let miner_data = tari_rpc::MinerData { + reward: new_template.reward.into(), + target_difficulty: new_template.target_difficulty.as_u64(), + total_fees: new_template.total_fees.into(), + algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), + }; + + let mut coinbases: Vec = request.coinbases; + + // Validate the coinbase amounts + let reward = self + .consensus_rules + .calculate_coinbase_and_fees(new_template.header.height, new_template.body.kernels()) + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::internal("Could not calculate the amount of fees in the block".to_string()), + ) + })?
+ .as_u64(); + let mut total_shares = 0u64; + for coinbase in &coinbases { + total_shares += coinbase.value; + } + let mut remainder = reward - ((reward / total_shares) * total_shares); + for coinbase in &mut coinbases { + coinbase.value *= reward / total_shares; + if remainder > 0 { + coinbase.value += 1; + remainder -= 1; + } + } + + let key_manager = create_memory_db_key_manager(); + let height = new_template.header.height; + // The script key is not used in the Diffie-Hellman protocol, so we assign default. + let script_key_id = TariKeyId::default(); + + let mut total_excess = Commitment::default(); + let mut total_nonce = PublicKey::default(); + let mut private_keys = Vec::new(); + let mut kernel_message = [0; 32]; + let mut last_kernel = Default::default(); + for coinbase in coinbases { + let address = TariAddress::from_hex(&coinbase.address) + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + let range_proof_type = if coinbase.revealed_value_proof { + RangeProofType::RevealedValue + } else { + RangeProofType::BulletProofPlus + }; + let (_, coinbase_output, coinbase_kernel, wallet_output) = generate_coinbase_with_wallet_output( + 0.into(), + coinbase.value.into(), + height, + &coinbase.coinbase_extra, + &key_manager, + &script_key_id, + &address, + coinbase.stealth_payment, + self.consensus_rules.consensus_constants(height), + range_proof_type, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + new_template.body.add_output(coinbase_output); + let (new_private_nonce, pub_nonce) = key_manager + .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key()) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + total_nonce = &total_nonce + &pub_nonce; + total_excess = &total_excess + &coinbase_kernel.excess; + private_keys.push((wallet_output.spending_key_id, new_private_nonce)); + kernel_message = TransactionKernel::build_kernel_signature_message( + &TransactionKernelVersion::get_current_version(), + coinbase_kernel.fee, + coinbase_kernel.lock_height, + &coinbase_kernel.features, + &None, + ); + last_kernel = coinbase_kernel; + } + let mut kernel_signature = Signature::default(); + for (spending_key_id, nonce) in private_keys { + kernel_signature = &kernel_signature + + &key_manager + .get_partial_txo_kernel_signature( + &spending_key_id, + &nonce, + &total_nonce, + total_excess.as_public_key(), + &TransactionKernelVersion::get_current_version(), + &kernel_message, + &last_kernel.features, + TxoStage::Output, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + } + let kernel_new = KernelBuilder::new() + .with_fee(0.into()) + .with_features(last_kernel.features) + .with_lock_height(last_kernel.lock_height) + .with_excess(&total_excess) + .with_signature(kernel_signature) + .build() + .unwrap(); + + new_template.body.add_kernel(kernel_new); + new_template.body.sort(); + + let new_block = match handler.get_new_block(new_template).await { + Ok(b) => b, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, ..
})) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::invalid_argument(message), + )); + }, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::CannotCalculateNonTipMmr(msg))) => { + let status = Status::with_details( + tonic::Code::FailedPrecondition, + msg, + Bytes::from_static(b"CannotCalculateNonTipMmr"), + ); + return Err(obscure_error_if_true(report_error_flag, status)); + }, + Err(e) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::internal(e.to_string()), + )) + }, + }; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? + .hash() + .to_vec(); + // construct response + let block_hash = new_block.hash().to_vec(); + let mining_hash = match new_block.header.pow.pow_algo { + PowAlgorithm::Sha3x => new_block.header.mining_hash().to_vec(), + PowAlgorithm::RandomX => new_block.header.merge_mining_hash().to_vec(), + }; + let block: Option = Some( + new_block + .try_into() + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, + ); + + let response = tari_rpc::GetNewBlockResult { + block_hash, + block, + merge_mining_hash: mining_hash, + tari_unique_id: gen_hash, + miner_data: Some(miner_data), + }; + debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); + Ok(Response::new(response)) + } + + #[allow(clippy::too_many_lines)] + async fn get_new_block_with_coinbases( + &self, + request: Request, + ) -> Result, Status> { + if !self.is_method_enabled(GrpcMethod::GetNewBlockWithCoinbases) { + return Err(Status::permission_denied( + "`GetNewBlockWithCoinbases` method not made available", + )); + } + let report_error_flag = self.report_error_flag(); + let request = request.into_inner(); + debug!(target: LOG_TARGET, "Incoming GRPC request for get new block with coinbases"); + let mut block_template: NewBlockTemplate = request + .new_template + .ok_or(obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Malformed block template provided".to_string()), + ))? + .try_into() + .map_err(|s| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument(format!("Malformed block template provided: {}", s)), + ) + })?; + let coinbases: Vec = request.coinbases; let mut handler = self.node_service.clone(); + // Validate the coinbase amounts + let reward = self + .consensus_rules + .calculate_coinbase_and_fees(block_template.header.height, block_template.body.kernels()) + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::internal("Could not calculate the amount of fees in the block".to_string()), + ) + })?; + let mut amount = 0u64; + for coinbase in &coinbases { + amount += coinbase.value; + } + + if amount != reward.as_u64() { + return Err(obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Malformed coinbase amounts".to_string()), + )); + } + let key_manager = create_memory_db_key_manager(); + let height = block_template.header.height; + // The script key is not used in the Diffie-Hellman protocol, so we assign default.
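(Aside: the arithmetic that distinguishes the two new endpoints is easy to miss. `get_new_block_template_with_coinbases` treats the submitted values as relative shares, splits the reward by integer division, and hands the truncation remainder back, at most one extra unit per coinbase, while `get_new_block_with_coinbases`, this method, requires the caller to supply amounts that already sum exactly to the reward. A standalone sketch of the split with a worked case, assuming `u64` share counts:

    // Standalone version of the proportional split in
    // get_new_block_template_with_coinbases: `shares` holds relative share
    // counts on input and atomic reward amounts on output.
    fn split_reward(reward: u64, shares: &mut [u64]) {
        let total: u64 = shares.iter().sum();
        let per_share = reward / total;
        // Integer division truncates; hand the leftover units back, one
        // extra unit per coinbase, mirroring the hunk above.
        let mut remainder = reward - per_share * total;
        for share in shares.iter_mut() {
            *share *= per_share;
            if remainder > 0 {
                *share += 1;
                remainder -= 1;
            }
        }
    }

    fn main() {
        let mut shares = [3, 4];
        split_reward(100, &mut shares);
        // 100 / 7 = 14 per share with remainder 2: 3*14+1 = 43 and 4*14+1 = 57.
        assert_eq!(shares, [43, 57]);
        assert_eq!(shares.iter().sum::<u64>(), 100);
    }

The diff resumes below.)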
+ let script_key_id = TariKeyId::default(); + + let mut total_excess = Commitment::default(); + let mut total_nonce = PublicKey::default(); + let mut private_keys = Vec::new(); + let mut kernel_message = [0; 32]; + let mut last_kernel = Default::default(); + for coinbase in coinbases { + let address = TariAddress::from_hex(&coinbase.address) + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + let range_proof_type = if coinbase.revealed_value_proof { + RangeProofType::RevealedValue + } else { + RangeProofType::BulletProofPlus + }; + let (_, coinbase_output, coinbase_kernel, wallet_output) = generate_coinbase_with_wallet_output( + 0.into(), + coinbase.value.into(), + height, + &coinbase.coinbase_extra, + &key_manager, + &script_key_id, + &address, + coinbase.stealth_payment, + self.consensus_rules.consensus_constants(height), + range_proof_type, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + block_template.body.add_output(coinbase_output); + let (new_private_nonce, pub_nonce) = key_manager + .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key()) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + total_nonce = &total_nonce + &pub_nonce; + total_excess = &total_excess + &coinbase_kernel.excess; + private_keys.push((wallet_output.spending_key_id, new_private_nonce)); + kernel_message = TransactionKernel::build_kernel_signature_message( + &TransactionKernelVersion::get_current_version(), + coinbase_kernel.fee, + coinbase_kernel.lock_height, + &coinbase_kernel.features, + &None, + ); + last_kernel = coinbase_kernel; + } + let mut kernel_signature = Signature::default(); + for (spending_key_id, nonce) in private_keys { + kernel_signature = &kernel_signature + + &key_manager + .get_partial_txo_kernel_signature( + &spending_key_id, + &nonce, + &total_nonce, + total_excess.as_public_key(), + &TransactionKernelVersion::get_current_version(), + &kernel_message, + &last_kernel.features, + TxoStage::Output, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + } + let kernel_new = KernelBuilder::new() + .with_fee(0.into()) + .with_features(last_kernel.features) + .with_lock_height(last_kernel.lock_height) + .with_excess(&total_excess) + .with_signature(kernel_signature) + .build() + .unwrap(); + + block_template.body.add_kernel(kernel_new); + block_template.body.sort(); + let new_block = match handler.get_new_block(block_template).await { Ok(b) => b, Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. })) => { @@ -635,6 +1109,30 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { )) }, }; + let fees = new_block.body.get_total_fee().map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Invalid fees in block".to_string()), + ) + })?; + let algo = new_block.header.pow.pow_algo; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? 
+ .hash() + .to_vec(); // construct response let block_hash = new_block.hash().to_vec(); let mining_hash = match new_block.header.pow.pow_algo { @@ -647,10 +1145,30 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, ); + let new_template = handler.get_new_block_template(algo, 0).await.map_err(|e| { + warn!( + target: LOG_TARGET, + "Could not get new block template: {}", + e.to_string() + ); + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) + })?; + + let pow = algo as i32; + + let miner_data = tari_rpc::MinerData { + reward: new_template.reward.into(), + target_difficulty: new_template.target_difficulty.as_u64(), + total_fees: fees.as_u64(), + algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), + }; + let response = tari_rpc::GetNewBlockResult { block_hash, block, merge_mining_hash: mining_hash, + tari_unique_id: gen_hash, + miner_data: Some(miner_data), }; debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); Ok(Response::new(response)) @@ -704,6 +1222,23 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { PowAlgorithm::Sha3x => new_block.header.mining_hash().to_vec(), PowAlgorithm::RandomX => new_block.header.merge_mining_hash().to_vec(), }; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? + .hash() + .to_vec(); let (header, block_body) = new_block.into_header_body(); let mut header_bytes = Vec::new(); @@ -718,6 +1253,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { block_body: block_body_bytes, merge_mining_hash: mining_hash, utxo_mr: header.output_mr.to_vec(), + tari_unique_id: gen_hash, }; debug!(target: LOG_TARGET, "Sending GetNewBlockBlob response to client"); Ok(Response::new(response)) @@ -1852,22 +2388,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { }; for template_registration in template_registrations { - let registration = match template_registration.registration_data.try_into() { - Ok(t) => t, - Err(e) => { - warn!( - target: LOG_TARGET, - "Error sending converting template registration for GRPC: {}", e - ); - let _ignore = tx - .send(Err(obscure_error_if_true( - report_error_flag, - Status::internal(format!("Error converting template_registration: {}", e)), - ))) - .await; - return; - }, - }; + let registration = template_registration.registration_data.into(); let resp = tari_rpc::GetTemplateRegistrationResponse { utxo_hash: template_registration.output_hash.to_vec(), diff --git a/applications/minotari_node/src/grpc/blocks.rs b/applications/minotari_node/src/grpc/blocks.rs index 76167b77d5..e8448e7a64 100644 --- a/applications/minotari_node/src/grpc/blocks.rs +++ b/applications/minotari_node/src/grpc/blocks.rs @@ -55,7 +55,7 @@ pub async fn block_heights( .get_metadata() .await .map_err(|e| Status::internal(e.to_string()))?; - let tip = metadata.height_of_longest_chain(); + let tip = metadata.best_block_height(); // Avoid overflow let height_from_tip = cmp::min(tip, from_tip); let start = cmp::max(tip - height_from_tip, 0); diff --git a/applications/minotari_node/src/grpc/hash_rate.rs b/applications/minotari_node/src/grpc/hash_rate.rs index bc68aac913..e7c5b26abc 100644 --- 
a/applications/minotari_node/src/grpc/hash_rate.rs +++ b/applications/minotari_node/src/grpc/hash_rate.rs @@ -138,26 +138,26 @@ mod test { // Checks that the moving average hash rate at every block is correct // We use larger sample data than the SHA window size (12 periods) to check bounds - // We assumed a constant target block time of 300 secs (the SHA3 target time for Esmeralda) + // We assumed a constant target block time of 240 secs (the SHA3 target time for Esmeralda) // These expected hash rate values where calculated in a spreadsheet #[test] fn correct_moving_average_calculation() { let mut hash_rate_ma = create_hash_rate_ma(PowAlgorithm::Sha3x); - assert_hash_rate(&mut hash_rate_ma, 0, 100_000, 333); - assert_hash_rate(&mut hash_rate_ma, 1, 120_100, 366); - assert_hash_rate(&mut hash_rate_ma, 2, 110_090, 366); - assert_hash_rate(&mut hash_rate_ma, 3, 121_090, 375); - assert_hash_rate(&mut hash_rate_ma, 4, 150_000, 400); - assert_hash_rate(&mut hash_rate_ma, 5, 155_000, 419); - assert_hash_rate(&mut hash_rate_ma, 6, 159_999, 435); - assert_hash_rate(&mut hash_rate_ma, 7, 160_010, 448); - assert_hash_rate(&mut hash_rate_ma, 8, 159_990, 457); - assert_hash_rate(&mut hash_rate_ma, 9, 140_000, 458); - assert_hash_rate(&mut hash_rate_ma, 10, 137_230, 458); - assert_hash_rate(&mut hash_rate_ma, 11, 130_000, 456); - assert_hash_rate(&mut hash_rate_ma, 12, 120_000, 461); - assert_hash_rate(&mut hash_rate_ma, 13, 140_000, 467); + assert_hash_rate(&mut hash_rate_ma, 0, 100_000, 416); + assert_hash_rate(&mut hash_rate_ma, 1, 120_100, 458); + assert_hash_rate(&mut hash_rate_ma, 2, 110_090, 458); + assert_hash_rate(&mut hash_rate_ma, 3, 121_090, 469); + assert_hash_rate(&mut hash_rate_ma, 4, 150_000, 500); + assert_hash_rate(&mut hash_rate_ma, 5, 155_000, 524); + assert_hash_rate(&mut hash_rate_ma, 6, 159_999, 544); + assert_hash_rate(&mut hash_rate_ma, 7, 160_010, 560); + assert_hash_rate(&mut hash_rate_ma, 8, 159_990, 571); + assert_hash_rate(&mut hash_rate_ma, 9, 140_000, 572); + assert_hash_rate(&mut hash_rate_ma, 10, 137_230, 572); + assert_hash_rate(&mut hash_rate_ma, 11, 130_000, 570); + assert_hash_rate(&mut hash_rate_ma, 12, 120_000, 577); + assert_hash_rate(&mut hash_rate_ma, 13, 140_000, 584); } // Our moving average windows are very small (12 and 15 depending on PoW algorithm) diff --git a/applications/minotari_node/src/lib.rs b/applications/minotari_node/src/lib.rs index 780f97ad19..d6b8b0596d 100644 --- a/applications/minotari_node/src/lib.rs +++ b/applications/minotari_node/src/lib.rs @@ -43,7 +43,7 @@ use commands::{cli_loop::CliLoop, command::CommandContext}; use futures::FutureExt; use log::*; use minotari_app_grpc::{authentication::ServerAuthenticationInterceptor, tls::identity::read_identity}; -use minotari_app_utilities::{common_cli_args::CommonCliArgs, network_check::set_network_if_choice_valid}; +use minotari_app_utilities::common_cli_args::CommonCliArgs; use tari_common::{ configuration::bootstrap::{grpc_default_port, ApplicationType}, exit_codes::{ExitCode, ExitError}, @@ -55,10 +55,9 @@ use tokio::task; use tonic::transport::{Identity, Server, ServerTlsConfig}; use crate::cli::Cli; -pub use crate::{ - config::{ApplicationConfig, BaseNodeConfig, DatabaseType}, - metrics::MetricsConfig, -}; +pub use crate::config::{ApplicationConfig, BaseNodeConfig, DatabaseType}; +#[cfg(feature = "metrics")] +pub use crate::metrics::MetricsConfig; const LOG_TARGET: &str = "minotari::base_node::app"; @@ -89,6 +88,7 @@ pub async fn run_base_node( profile_with_tokio_console: false, 
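(Aside on the hash-rate test above: the updated expectations are not arbitrary. The estimator's first sample works out to difficulty divided by the target block time, so moving Esmeralda's SHA3x target from 300 s to 240 s lifts a difficulty of 100_000 from ~333 H/s to ~416 H/s, exactly the first rows of the old and new tables. A hypothetical reduction of just that first sample; the real estimator keeps a windowed moving average over several blocks:

    // Hypothetical simplification of the estimator's first sample; the real
    // code in hash_rate.rs smooths over a 12-period window.
    fn first_sample_hash_rate(difficulty: u64, target_block_time_secs: u64) -> u64 {
        difficulty / target_block_time_secs
    }

    fn main() {
        assert_eq!(first_sample_hash_rate(100_000, 300), 333); // old 300 s target
        assert_eq!(first_sample_hash_rate(100_000, 240), 416); // new 240 s target
    }

The diff resumes below.)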
grpc_enabled: false, mining_enabled: false, + second_layer_grpc_enabled: false, }; run_base_node_with_cli(node_identity, config, cli, shutdown).await @@ -101,8 +101,6 @@ pub async fn run_base_node_with_cli( cli: Cli, shutdown: Shutdown, ) -> Result<(), ExitError> { - set_network_if_choice_valid(config.network())?; - #[cfg(feature = "metrics")] { metrics::install( diff --git a/applications/minotari_node/src/main.rs b/applications/minotari_node/src/main.rs index e02f2ac9aa..60fd7b1889 100644 --- a/applications/minotari_node/src/main.rs +++ b/applications/minotari_node/src/main.rs @@ -152,7 +152,7 @@ fn main_inner() -> Result<(), ExitError> { if config.base_node.use_libtor && config.base_node.p2p.transport.is_tor() { let tor = Tor::initialize()?; tor.update_comms_transport(&mut config.base_node.p2p.transport)?; - runtime.spawn(tor.run(shutdown.to_signal())); + tor.run_background(); debug!( target: LOG_TARGET, "Updated Tor comms transport: {:?}", config.base_node.p2p.transport diff --git a/applications/minotari_node/src/recovery.rs b/applications/minotari_node/src/recovery.rs index e47feab973..a346e83865 100644 --- a/applications/minotari_node/src/recovery.rs +++ b/applications/minotari_node/src/recovery.rs @@ -152,7 +152,7 @@ async fn do_recovery( let max_height = source_database .get_chain_metadata() .map_err(|e| anyhow!("Could not get max chain height: {}", e))? - .height_of_longest_chain(); + .best_block_height(); // we start at height 1 let mut counter = 1; print!("Starting recovery at height: "); diff --git a/base_layer/chat_ffi/Cargo.toml b/base_layer/chat_ffi/Cargo.toml index 73e6b54c01..ae33c64815 100644 --- a/base_layer/chat_ffi/Cargo.toml +++ b/base_layer/chat_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "minotari_chat_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency chat C FFI bindings" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] @@ -19,7 +19,7 @@ tari_utilities = { version = "0.7" } libc = "0.2.65" libsqlite3-sys = { version = "0.25.1", features = ["bundled"], optional = true } log = "0.4.6" -log4rs = { git = "https://github.com/tari-project/log4rs.git", features = ["console_appender", "file_appender", "yaml_format"] } +log4rs = { version = "1.3.0", features = ["console_appender", "file_appender", "yaml_format"] } thiserror = "1.0.26" tokio = "1.23" diff --git a/base_layer/chat_ffi/chat.h b/base_layer/chat_ffi/chat.h index 507493ec9d..654edf1c04 100644 --- a/base_layer/chat_ffi/chat.h +++ b/base_layer/chat_ffi/chat.h @@ -283,12 +283,12 @@ struct ChatByteVector *read_confirmation_message_id(struct Confirmation *confirm * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_longlong` - A uint representation of time since epoch. 
May return -1 on error + * `c_ulonglong` - A uint representation of time since epoch * * # Safety * The ```confirmation``` When done with the Confirmation it should be destroyed */ -long long read_confirmation_timestamp(struct Confirmation *confirmation, int *error_out); +unsigned long long read_confirmation_timestamp(struct Confirmation *confirmation, int *error_out); /** * Frees memory for a Confirmation diff --git a/base_layer/chat_ffi/src/confirmation.rs b/base_layer/chat_ffi/src/confirmation.rs index 56d1f7003f..d3ef37f127 100644 --- a/base_layer/chat_ffi/src/confirmation.rs +++ b/base_layer/chat_ffi/src/confirmation.rs @@ -20,9 +20,9 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, os::raw::c_longlong, ptr}; +use std::{convert::TryFrom, ptr}; -use libc::{c_int, c_uint}; +use libc::{c_int, c_uint, c_ulonglong}; use tari_chat_client::ChatClient as ChatClientTrait; use tari_contacts::contacts_service::types::{Confirmation, Message}; @@ -112,7 +112,7 @@ pub unsafe extern "C" fn read_confirmation_message_id( /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_longlong` - A uint representation of time since epoch. May return -1 on error +/// `c_ulonglong` - A uint representation of time since epoch /// /// # Safety /// The ```confirmation``` When done with the Confirmation it should be destroyed @@ -120,17 +120,17 @@ pub unsafe extern "C" fn read_confirmation_timestamp( confirmation: *mut Confirmation, error_out: *mut c_int, -) -> c_longlong { +) -> c_ulonglong { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if confirmation.is_null() { error = LibChatError::from(InterfaceError::NullError("client".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); - return -1; + return 0; } - (*confirmation).timestamp as c_longlong + (*confirmation).timestamp as c_ulonglong } /// Frees memory for a Confirmation diff --git a/base_layer/chat_ffi/src/conversationalists.rs b/base_layer/chat_ffi/src/conversationalists.rs index 6b149980dd..239d5a0b75 100644 --- a/base_layer/chat_ffi/src/conversationalists.rs +++ b/base_layer/chat_ffi/src/conversationalists.rs @@ -129,14 +129,15 @@ pub unsafe extern "C" fn conversationalists_vector_get_at( let conversationalists = &(*conversationalists); - let len = conversationalists.0.len() - 1; - if position as usize > len { + let len = conversationalists.0.len(); + let position = position as usize; + if conversationalists.0.is_empty() || position > len - 1 { error = LibChatError::from(InterfaceError::PositionInvalidError).code; ptr::swap(error_out, &mut error as *mut c_int); return ptr::null_mut(); } - Box::into_raw(Box::new(conversationalists.0[position as usize].clone())) + Box::into_raw(Box::new(conversationalists.0[position].clone())) } /// Frees memory for ConversationalistsVector diff --git a/base_layer/chat_ffi/src/message.rs b/base_layer/chat_ffi/src/message.rs index caa4ea0f8f..c4b6f654bf 100644 --- a/base_layer/chat_ffi/src/message.rs +++ b/base_layer/chat_ffi/src/message.rs @@ -162,15 +162,16 @@ pub unsafe extern "C" fn chat_metadata_get_at( let message = &(*message); - let len = message.metadata.len() - 1; - if position as usize > len { + let len = message.metadata.len(); + let position = position as usize; + if message.metadata.is_empty() || position > len - 1 { error =
LibChatError::from(InterfaceError::PositionInvalidError).code; ptr::swap(error_out, &mut error as *mut c_int); return ptr::null_mut(); } let message_metadata_vec = &(*(message).metadata); - let message_metadata = Box::new(message_metadata_vec[position as usize].clone()); + let message_metadata = Box::new(message_metadata_vec[position].clone()); Box::into_raw(message_metadata) } @@ -198,7 +199,9 @@ pub unsafe extern "C" fn chat_message_metadata_len(message: *mut Message, error_ } let message = &(*message); - message.metadata.len() as c_longlong + #[allow(clippy::cast_possible_wrap)] + let res = message.metadata.len() as i64; + res } /// Returns a pointer to a ChatByteVector representing the data of the Message @@ -289,7 +292,7 @@ pub unsafe extern "C" fn read_chat_message_direction(message: *mut Message, erro return -1; } - c_int::try_from((*message).direction.as_byte()).unwrap_or(-1) + c_int::from((*message).direction.as_byte()) } /// Returns a c_ulonglong representation of the stored at timestamp as seconds since epoch diff --git a/base_layer/chat_ffi/src/message_metadata.rs b/base_layer/chat_ffi/src/message_metadata.rs index 68e4586c76..a3e06492c6 100644 --- a/base_layer/chat_ffi/src/message_metadata.rs +++ b/base_layer/chat_ffi/src/message_metadata.rs @@ -120,7 +120,7 @@ pub unsafe extern "C" fn read_chat_metadata_type(msg_metadata: *mut MessageMetad } let md = &(*msg_metadata); - c_int::try_from(md.metadata_type.as_byte()).unwrap_or(-1) + c_int::from(md.metadata_type.as_byte()) } /// Returns a ptr to a ByteVector diff --git a/base_layer/chat_ffi/src/messages.rs b/base_layer/chat_ffi/src/messages.rs index fcca7d85d5..88fe872179 100644 --- a/base_layer/chat_ffi/src/messages.rs +++ b/base_layer/chat_ffi/src/messages.rs @@ -147,15 +147,16 @@ pub unsafe extern "C" fn message_vector_get_at( } let messages = &(*messages); + let position = position as usize; + let len = messages.0.len(); - let len = messages.0.len() - 1; - if position as usize > len { + if messages.0.is_empty() || position > len - 1 { error = LibChatError::from(InterfaceError::PositionInvalidError).code; ptr::swap(error_out, &mut error as *mut c_int); return ptr::null_mut(); } - Box::into_raw(Box::new(messages.0[position as usize].clone())) + Box::into_raw(Box::new(messages.0[position].clone())) } /// Frees memory for MessagesVector diff --git a/base_layer/common_types/Cargo.toml b/base_layer/common_types/Cargo.toml index 5f0838e2e3..6149733266 100644 --- a/base_layer/common_types/Cargo.toml +++ b/base_layer/common_types/Cargo.toml @@ -3,13 +3,13 @@ name = "tari_common_types" authors = ["The Tari Development Community"] description = "Tari cryptocurrency common types" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] tari_crypto = { version = "0.20" } tari_utilities = { version = "0.7" } -tari_common = { path = "../../common" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } chacha20poly1305 = "0.10.1" @@ -19,8 +19,12 @@ newtype-ops = "0.1" once_cell = "1.8.0" rand = "0.8" serde = { version = "1.0.106", features = ["derive"] } +strum = "0.22" +strum_macros = "0.22" thiserror = "1.0.29" -tokio = { version = "1.23", features = ["time", "sync"] } base64 = "0.21.0" blake2 = "0.10" primitive-types = { version = "0.12", features = ["serde"] } + +[package.metadata.cargo-machete] +ignored = ["strum", "strum_macros"] # this is so we can run cargo machete without getting false positives about macro dependencies diff --git a/base_layer/common_types/README.md
b/base_layer/common_types/README.md new file mode 100644 index 0000000000..fad3f416c8 --- /dev/null +++ b/base_layer/common_types/README.md @@ -0,0 +1,5 @@ +# tari_common_types + +Implementation of common types throughout the Tari code base + +This crate is part of the [Tari Cryptocurrency](https://tari.com) project. diff --git a/base_layer/common_types/src/chain_metadata.rs b/base_layer/common_types/src/chain_metadata.rs index 6c0d444a89..c9aa217d02 100644 --- a/base_layer/common_types/src/chain_metadata.rs +++ b/base_layer/common_types/src/chain_metadata.rs @@ -25,79 +25,72 @@ use std::fmt::{Display, Error, Formatter}; use primitive_types::U256; use serde::{Deserialize, Serialize}; -use crate::types::{BlockHash, FixedHash}; +use crate::types::BlockHash; #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] pub struct ChainMetadata { - /// The current chain height, or the block number of the longest valid chain, or `None` if there is no chain - height_of_longest_chain: u64, + /// The current chain height, or the block number of the longest valid chain + best_block_height: u64, /// The block hash of the current tip of the longest valid chain - best_block: BlockHash, + best_block_hash: BlockHash, /// The configured number of blocks back from the tip that this database tracks. A value of 0 indicates that /// pruning mode is disabled and the node will keep full blocks from the time it was set. If pruning horizon /// was previously enabled, previously pruned blocks will remain pruned. If set from initial sync, full blocks /// are preserved from genesis (i.e. the database is in full archival mode). pruning_horizon: u64, /// The height of the pruning horizon. This indicates from what height a full block can be provided - /// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be - /// provided. Archival nodes wil always have an `pruned_height` of zero. + /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be + /// provided. Archival nodes will always have a `pruned_height` of zero.
pruned_height: u64, /// The total accumulated proof of work of the longest chain accumulated_difficulty: U256, /// Timestamp of the tip block in the longest valid chain timestamp: u64, } +#[derive(Debug, thiserror::Error)] +pub enum ChainMetaDataError { + #[error("Pruning Height is higher than the Best Block height")] + PruningHeightAboveBestBlock, + #[error("The total accumulated difficulty is zero")] + AccumulatedDifficultyZero, +} impl ChainMetadata { pub fn new( - height: u64, - hash: BlockHash, + best_block_height: u64, + best_block_hash: BlockHash, pruning_horizon: u64, pruned_height: u64, accumulated_difficulty: U256, timestamp: u64, - ) -> ChainMetadata { - ChainMetadata { - height_of_longest_chain: height, - best_block: hash, + ) -> Result { + let chain_meta_data = ChainMetadata { + best_block_height, + best_block_hash, pruning_horizon, pruned_height, accumulated_difficulty, timestamp, - } - } - - pub fn empty() -> ChainMetadata { - ChainMetadata { - height_of_longest_chain: 0, - best_block: FixedHash::zero(), - pruning_horizon: 0, - pruned_height: 0, - accumulated_difficulty: 0.into(), - timestamp: 0, - } + }; + if chain_meta_data.accumulated_difficulty == 0.into() { + return Err(ChainMetaDataError::AccumulatedDifficultyZero); + }; + if chain_meta_data.pruned_height > chain_meta_data.best_block_height { + return Err(ChainMetaDataError::PruningHeightAboveBestBlock); + }; + Ok(chain_meta_data) } /// The block height at the pruning horizon, given the chain height of the network. Typically database backends /// cannot provide any block data earlier than this point. /// Zero is returned if the blockchain still hasn't reached the pruning horizon. - pub fn horizon_block_height(&self, chain_tip: u64) -> u64 { + pub fn pruned_height_at_given_chain_tip(&self, chain_tip: u64) -> u64 { match self.pruning_horizon { 0 => 0, - horizon => chain_tip.saturating_sub(horizon), + pruning_horizon => chain_tip.saturating_sub(pruning_horizon), } } - /// Set the pruning horizon to indicate that the chain is in archival mode (i.e. a pruning horizon of zero) - pub fn archival_mode(&mut self) { - self.pruning_horizon = 0; - } - - /// Set the pruning horizon - pub fn set_pruning_horizon(&mut self, pruning_horizon: u64) { - self.pruning_horizon = pruning_horizon; - } - /// The configured number of blocks back from the tip that this database tracks. A value of 0 indicates that /// pruning mode is disabled and the node will keep full blocks from the time it was set. If pruning horizon /// was previously enabled, previously pruned blocks will remain pruned. If set from initial sync, full blocks @@ -117,13 +110,13 @@ impl ChainMetadata { } /// Returns the height of longest chain. - pub fn height_of_longest_chain(&self) -> u64 { - self.height_of_longest_chain + pub fn best_block_height(&self) -> u64 { + self.best_block_height } /// The height of the pruning horizon. This indicates from what height a full block can be provided - /// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be - /// provided. Archival nodes wil always have an `pruned_height` of zero. + /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be + /// provided. Archival nodes will always have a `pruned_height` of zero.
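(Aside: renaming `horizon_block_height` to `pruned_height_at_given_chain_tip` also makes the semantics easier to restate. With a pruning horizon of `h` blocks, full blocks are only guaranteed above `tip - h`, clamped to zero while the chain is still shorter than the horizon. A free-function restatement that matches the tests further down:

    // Free-function restatement of ChainMetadata::pruned_height_at_given_chain_tip.
    fn pruned_height_at_given_chain_tip(pruning_horizon: u64, chain_tip: u64) -> u64 {
        match pruning_horizon {
            // Archival node: full blocks are available all the way back to genesis.
            0 => 0,
            horizon => chain_tip.saturating_sub(horizon),
        }
    }

    fn main() {
        assert_eq!(pruned_height_at_given_chain_tip(2880, 2880), 0); // tip still inside the horizon
        assert_eq!(pruned_height_at_given_chain_tip(2880, 2881), 1); // first height that may be pruned
        assert_eq!(pruned_height_at_given_chain_tip(0, 1_000_000), 0); // archival
    }

The diff resumes below.)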
pub fn pruned_height(&self) -> u64 { self.pruned_height } @@ -132,8 +125,8 @@ impl ChainMetadata { self.accumulated_difficulty } - pub fn best_block(&self) -> &BlockHash { - &self.best_block + pub fn best_block_hash(&self) -> &BlockHash { + &self.best_block_hash } pub fn timestamp(&self) -> u64 { @@ -143,14 +136,11 @@ impl ChainMetadata { impl Display for ChainMetadata { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - let height = self.height_of_longest_chain; - let best_block = self.best_block; - let accumulated_difficulty = self.accumulated_difficulty; - writeln!(f, "Height of longest chain: {}", height)?; - writeln!(f, "Total accumulated difficulty: {}", accumulated_difficulty)?; - writeln!(f, "Best block: {}", best_block)?; + writeln!(f, "Best block height: {}", self.best_block_height)?; + writeln!(f, "Total accumulated difficulty: {}", self.accumulated_difficulty)?; + writeln!(f, "Best block hash: {}", self.best_block_hash)?; writeln!(f, "Pruning horizon: {}", self.pruning_horizon)?; - writeln!(f, "Effective pruned height: {}", self.pruned_height)?; + writeln!(f, "Pruned height: {}", self.pruned_height)?; Ok(()) } } @@ -161,33 +151,53 @@ mod test { #[test] fn horizon_block_on_default() { - let metadata = ChainMetadata::empty(); - assert_eq!(metadata.horizon_block_height(0), 0); + let metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); } #[test] fn pruned_mode() { - let mut metadata = ChainMetadata::empty(); + let mut metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; assert!(!metadata.is_pruned_node()); assert!(metadata.is_archival_node()); - metadata.set_pruning_horizon(2880); + metadata.pruning_horizon = 2880; assert!(metadata.is_pruned_node()); assert!(!metadata.is_archival_node()); - assert_eq!(metadata.horizon_block_height(0), 0); - assert_eq!(metadata.horizon_block_height(100), 0); - assert_eq!(metadata.horizon_block_height(2880), 0); - assert_eq!(metadata.horizon_block_height(2881), 1); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(100), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2880), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2881), 1); } #[test] fn archival_node() { - let mut metadata = ChainMetadata::empty(); - metadata.archival_mode(); + let metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; // Chain is still empty - assert_eq!(metadata.horizon_block_height(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); // When pruning horizon is zero, the horizon block is always 0, the genesis block - assert_eq!(metadata.horizon_block_height(0), 0); - assert_eq!(metadata.horizon_block_height(100), 0); - assert_eq!(metadata.horizon_block_height(2881), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(100), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2881), 0); } } diff --git a/base_layer/common_types/src/lib.rs b/base_layer/common_types/src/lib.rs index 
9f97d9077f..f1cccdc01b 100644 --- a/base_layer/common_types/src/lib.rs +++ b/base_layer/common_types/src/lib.rs @@ -32,4 +32,4 @@ pub mod tari_address; pub mod transaction; mod tx_id; pub mod types; -pub mod waiting_requests; +pub mod wallet_types; diff --git a/base_layer/common_types/src/tx_id.rs b/base_layer/common_types/src/tx_id.rs index 476de9e27d..c59d693b8e 100644 --- a/base_layer/common_types/src/tx_id.rs +++ b/base_layer/common_types/src/tx_id.rs @@ -66,7 +66,7 @@ impl Hash for TxId { impl PartialEq for TxId { fn eq(&self, other: &Self) -> bool { - self.0.eq(&other.0) + self.0 == other.0 } } diff --git a/base_layer/common_types/src/types/bullet_rangeproofs.rs b/base_layer/common_types/src/types/bullet_rangeproofs.rs index 178e8dd6f7..52b5b27769 100644 --- a/base_layer/common_types/src/types/bullet_rangeproofs.rs +++ b/base_layer/common_types/src/types/bullet_rangeproofs.rs @@ -48,6 +48,11 @@ impl BulletRangeProof { .expect("This should be 32 bytes for a Blake 256 hash") .into() } + + /// Get the range proof as a vector reference, which is useful to satisfy the verification API without cloning + pub fn as_vec(&self) -> &Vec { + &self.0 + } } impl ByteArray for BulletRangeProof { diff --git a/base_layer/common_types/src/wallet_types.rs b/base_layer/common_types/src/wallet_types.rs new file mode 100644 index 0000000000..052df957ea --- /dev/null +++ b/base_layer/common_types/src/wallet_types.rs @@ -0,0 +1,44 @@ +// Copyright 2023 The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
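(Aside on the `as_vec` accessor added to `BulletRangeProof` above: its only job is to let a verification API borrow the proof bytes instead of cloning them. A sketch of the pattern with a stand-in newtype and a hypothetical verifier signature, neither taken from the source:

    // Stand-in newtype mirroring the pattern (not the real BulletRangeProof).
    struct RangeProofBytes(Vec<u8>);

    impl RangeProofBytes {
        // Borrow the underlying vector so an API wanting &Vec<u8> is
        // satisfied without cloning the proof bytes.
        fn as_vec(&self) -> &Vec<u8> {
            &self.0
        }
    }

    // Hypothetical verifier of the shape the accessor is meant to feed.
    fn verify(proof_bytes: &Vec<u8>) -> bool {
        !proof_bytes.is_empty()
    }

    fn main() {
        let proof = RangeProofBytes(vec![0x01, 0x02]);
        assert!(verify(proof.as_vec())); // no clone needed
    }

The diff resumes below.)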
+ +use std::{ + fmt, + fmt::{Display, Formatter}, +}; + +use serde::{Deserialize, Serialize}; +use strum_macros::EnumString; + +#[derive(Debug, EnumString, Clone, Copy, Serialize, Deserialize)] +pub enum WalletType { + Software, + Ledger(usize), +} + +impl Display for WalletType { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + WalletType::Software => write!(f, "Software"), + WalletType::Ledger(account) => write!(f, "Ledger({account})"), + } + } +} diff --git a/base_layer/contacts/Cargo.toml b/base_layer/contacts/Cargo.toml index 57a0711513..c53edcf774 100644 --- a/base_layer/contacts/Cargo.toml +++ b/base_layer/contacts/Cargo.toml @@ -3,19 +3,19 @@ name = "tari_contacts" authors = ["The Tari Development Community"] description = "Tari contacts library" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] -tari_common = { path = "../../common" } -tari_common_sqlite = { path = "../../common_sqlite" } -tari_common_types = { path = "../../base_layer/common_types" } -tari_comms = { path = "../../comms/core" } -tari_comms_dht = { path = "../../comms/dht" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } +tari_common_sqlite = { path = "../../common_sqlite", version = "1.0.0-pre.11a" } +tari_common_types = { path = "../../base_layer/common_types", version = "1.0.0-pre.11a" } +tari_comms = { path = "../../comms/core", version = "1.0.0-pre.11a" } +tari_comms_dht = { path = "../../comms/dht", version = "1.0.0-pre.11a" } tari_crypto = { version = "0.20" } -tari_p2p = { path = "../p2p", features = ["auto-update"] } -tari_service_framework = { path = "../service_framework" } -tari_shutdown = { path = "../../infrastructure/shutdown" } +tari_p2p = { path = "../p2p", features = ["auto-update"], version = "1.0.0-pre.11a" } +tari_service_framework = { path = "../service_framework", version = "1.0.0-pre.11a" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } tari_utilities = { version = "0.7" } chrono = { version = "0.4.19", default-features = false, features = ["serde"] } @@ -30,7 +30,7 @@ rand = "0.8" serde = "1.0.136" serde_json = "1.0.79" thiserror = "1.0.26" -tokio = { version = "1.23", features = ["sync", "macros"] } +tokio = { version = "1.36", features = ["sync", "macros"] } tower = "0.4" uuid = { version = "1.3", features = ["v4"] } @@ -40,7 +40,7 @@ tari_test_utils = { path = "../../infrastructure/test_utils" } tempfile = "3.1.0" [build-dependencies] -tari_common = { path = "../../common" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } [package.metadata.cargo-machete] ignored = ["prost"] # this is so we can run cargo machete without getting false positive about macro dependancies diff --git a/base_layer/contacts/src/chat_client/Cargo.toml b/base_layer/contacts/src/chat_client/Cargo.toml index 8a391a6fe9..d92cbe1422 100644 --- a/base_layer/contacts/src/chat_client/Cargo.toml +++ b/base_layer/contacts/src/chat_client/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_chat_client" authors = ["The Tari Development Community"] description = "Tari cucumber chat client" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" @@ -22,7 +22,7 @@ tari_storage = { path = "../../../../infrastructure/storage" } anyhow = "1.0.41" async-trait = "0.1.52" -config = { version = "0.13.0" } +config = { version = "0.14.0" } diesel = { version = "2.0.3", features = ["sqlite", "r2d2", "serde_json", "chrono", "64-column-tables"] } 
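(Aside: the `EnumString` derive on the new `WalletType`, together with the cargo-machete ignore for `strum`, presumably exists so a wallet type can be parsed straight from a config string. Under that assumption, parsing behaves as below; this is a simplified copy with `PartialEq` added only for the asserts, relying on strum filling tuple fields with their `Default` values:

    use std::str::FromStr;

    use strum_macros::EnumString;

    // Simplified copy of the new enum; PartialEq is not on the original.
    #[derive(Debug, PartialEq, EnumString)]
    enum WalletType {
        Software,
        Ledger(usize),
    }

    fn main() {
        assert_eq!(WalletType::from_str("Software").unwrap(), WalletType::Software);
        // strum fills tuple fields with Default::default() when matching by
        // name, so a bare "Ledger" parses as account 0.
        assert_eq!(WalletType::from_str("Ledger").unwrap(), WalletType::Ledger(0));
    }

The diff resumes below.)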
lmdb-zero = "0.4.4" log = "0.4.17" diff --git a/base_layer/contacts/src/chat_client/src/networking.rs b/base_layer/contacts/src/chat_client/src/networking.rs index fa84a20e9f..0dc3a0f124 100644 --- a/base_layer/contacts/src/chat_client/src/networking.rs +++ b/base_layer/contacts/src/chat_client/src/networking.rs @@ -22,14 +22,14 @@ use std::{str::FromStr, sync::Arc, time::Duration}; -use log::trace; +use log::{error, trace}; use minotari_app_utilities::{identity_management, identity_management::load_from_json}; // Re-exports pub use tari_comms::{ - multiaddr::Multiaddr, + multiaddr::{Error as MultiaddrError, Multiaddr}, peer_manager::{NodeIdentity, PeerFeatures}, }; -use tari_comms::{peer_manager::Peer, CommsNode, UnspawnedCommsNode}; +use tari_comms::{peer_manager::Peer, tor::TorIdentity, CommsNode, UnspawnedCommsNode}; use tari_contacts::contacts_service::{handle::ContactsServiceHandle, ContactsServiceInitializer}; use tari_p2p::{ comms_connector::pubsub_connector, @@ -109,10 +109,30 @@ pub async fn start( for peer in seed_peers { peer_manager.add_peer(peer).await?; } - - let comms = spawn_comms_using_transport(comms, p2p_config.transport.clone()).await?; - - // Save final node identity after comms has initialized. This is required because the public_address can be + let comms = if p2p_config.transport.transport_type == TransportType::Tor { + let path = config.chat_client.tor_identity_file.clone(); + let node_id = comms.node_identity(); + let after_comms = move |identity: TorIdentity| { + let address_string = format!("/onion3/{}:{}", identity.service_id, identity.onion_port); + if let Err(e) = identity_management::save_as_json(&path, &identity) { + error!(target: LOG_TARGET, "Failed to save tor identity: {:?}", e); + } + let result: Result = address_string.parse(); + if result.is_err() { + error!(target: LOG_TARGET, "Failed to parse tor identity as multiaddr: {:?}", result); + return; + } + let address = result.unwrap(); + trace!(target: LOG_TARGET, "resave the chat tor identity {:?}", identity); + if !node_id.public_addresses().contains(&address) { + node_id.add_public_address(address); + } + }; + spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await? + } else { + let after_comms = |_identity| {}; + spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await? + }; // changed by comms during initialization when using tor. match p2p_config.transport.transport_type { TransportType::Tcp => {}, // Do not overwrite TCP public_address in the base_node_id!
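(Aside: the new `after_comms` closure above does three things once Tor is up: persist the `TorIdentity` to JSON, render it as an `/onion3/<service_id>:<port>` multiaddr, and add that address to the node identity if it is missing. Because the callback returns `()`, save and parse failures are logged and skipped rather than propagated. A sketch of the address formatting with stand-in types; the port value is illustrative:

    // Stand-in for tari_comms::tor::TorIdentity; only the fields used here.
    struct TorIdentity {
        service_id: String,
        onion_port: u16,
    }

    // Same format string as the closure above; the real code then parses
    // this into a Multiaddr and registers it on the node identity if absent.
    fn onion_multiaddr_string(identity: &TorIdentity) -> String {
        format!("/onion3/{}:{}", identity.service_id, identity.onion_port)
    }

    fn main() {
        let identity = TorIdentity {
            service_id: "mychatservice".into(),
            onion_port: 18188, // illustrative port, not from the source
        };
        assert_eq!(onion_multiaddr_string(&identity), "/onion3/mychatservice:18188");
    }

The diff resumes below.)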
@@ -121,10 +141,7 @@ pub async fn start( trace!(target: LOG_TARGET, "save chat identity file"); }, }; - if let Some(hs) = comms.hidden_service() { - identity_management::save_as_json(&config.chat_client.tor_identity_file, hs.tor_identity())?; - trace!(target: LOG_TARGET, "resave the chat tor identity {:?}", hs.tor_identity()); - } + handles.register(comms); let comms = handles.expect_handle::<CommsNode>(); diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index abe499530f..bce5fd777f 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [features] base_node = [ ] base_node_proto = [] benches = ["base_node"] +ledger = [ + "ledger-transport", + "ledger-transport-hid" +] metrics = ["tari_metrics"] [dependencies] -tari_common = { path = "../../common" } -tari_common_types = { path = "../../base_layer/common_types" } -tari_comms = { path = "../../comms/core" } -tari_comms_dht = { path = "../../comms/dht" } -tari_comms_rpc_macros = { path = "../../comms/rpc_macros" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } +tari_common_types = { path = "../../base_layer/common_types", version = "1.0.0-pre.11a" } +tari_comms = { path = "../../comms/core", version = "1.0.0-pre.11a" } +tari_comms_dht = { path = "../../comms/dht", version = "1.0.0-pre.11a" } +tari_comms_rpc_macros = { path = "../../comms/rpc_macros", version = "1.0.0-pre.11a" } tari_crypto = { version = "0.20", features = ["borsh"] } -tari_metrics = { path = "../../infrastructure/metrics", optional = true } -tari_mmr = { path = "../../base_layer/mmr", optional = true } -tari_p2p = { path = "../../base_layer/p2p" } -tari_script = { path = "../../infrastructure/tari_script" } -tari_service_framework = { path = "../service_framework" } -tari_shutdown = { path = "../../infrastructure/shutdown" } -tari_storage = { path = "../../infrastructure/storage" } -tari_test_utils = { path = "../../infrastructure/test_utils" } +tari_metrics = { path = "../../infrastructure/metrics", optional = true, version = "1.0.0-pre.11a" } +tari_mmr = { path = "../../base_layer/mmr", optional = true, version = "1.0.0-pre.11a" } +tari_p2p = { path = "../../base_layer/p2p", version = "1.0.0-pre.11a" } +tari_script = { path = "../../infrastructure/tari_script", version = "1.0.0-pre.11a" } +tari_service_framework = { path = "../service_framework", version = "1.0.0-pre.11a" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } +tari_storage = { path = "../../infrastructure/storage", version = "1.0.0-pre.11a" } +tari_test_utils = { path = "../../infrastructure/test_utils", version = "1.0.0-pre.11a" } tari_utilities = { version = "0.7", features = ["borsh"] } tari_key_manager = { path = "../key_manager", features = [ "key_manager_service", -] } +], version = "1.0.0-pre.11a" } tari_common_sqlite = { path = "../../common_sqlite" } -tari_hash_domains = { path = "../../hash_domains" } +tari_hashing = { path = "../../hashing" } async-trait = { version = "0.1.50" } bincode = "1.1.4" @@ -62,10 +66,12 @@ fs2 = "0.4.0" futures = { version = "^0.3.16", features = ["async-await"] } hex = "0.4.2" integer-encoding = "3.0" +ledger-transport = { git = "https://github.com/Zondax/ledger-rs", rev = "20e2a20", optional = true } +ledger-transport-hid = { git =
"https://github.com/Zondax/ledger-rs", rev = "20e2a20", optional = true } lmdb-zero = "0.4.4" log = "0.4" log-mdc = "0.1.0" -monero = { version = "0.18", features = ["serde-crate"], optional = true } +monero = { version = "0.20.0", features = ["serde-crate"], optional = true } newtype-ops = "0.1.4" num-traits = "0.2.15" num-derive = "0.3.3" @@ -82,11 +88,11 @@ sha2 = "0.10" strum = "0.22" strum_macros = "0.22" thiserror = "1.0.26" -tokio = { version = "1.23", features = ["time", "sync", "macros"] } +tokio = { version = "1.36", features = ["time", "sync", "macros"] } tracing = "0.1.26" zeroize = "1" primitive-types = { version = "0.12", features = ["serde"] } -tiny-keccak = { git = "https://github.com/tari-project/tiny-keccak", rev = "bcddc65530d8646de7282cd8d18d891dc434b643", features = [ +tiny-keccak = { package = "tari-tiny-keccak", version = "2.0.2", features = [ "keccak", ] } @@ -97,14 +103,15 @@ tari_test_utils = { path = "../../infrastructure/test_utils" } curve25519-dalek = { package = "tari-curve25519-dalek", version = "4.0.3" } # SQLite required for the integration tests libsqlite3-sys = { version = "0.25.1", features = ["bundled"] } -config = { version = "0.13.0" } +config = { version = "0.14.0" } env_logger = "0.7.0" tempfile = "3.1.0" toml = { version = "0.5" } quickcheck = "1.0" [build-dependencies] -tari_common = { path = "../../common", features = ["build"] } +tari_common = { path = "../../common", features = ["build"], version = "1.0.0-pre.11a" } +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a" } [[bench]] name = "mempool" diff --git a/base_layer/core/build.rs b/base_layer/core/build.rs index ca7f1901fc..02802ec35d 100644 --- a/base_layer/core/build.rs +++ b/base_layer/core/build.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use tari_features::resolver::build_features; + fn main() { + build_features(); tari_common::build::ProtobufCompiler::new() .include_paths(&["src/proto"]) .proto_paths(&[ diff --git a/base_layer/core/src/base_node/chain_metadata_service/service.rs b/base_layer/core/src/base_node/chain_metadata_service/service.rs index 1253c167e4..3b0e88f082 100644 --- a/base_layer/core/src/base_node/chain_metadata_service/service.rs +++ b/base_layer/core/src/base_node/chain_metadata_service/service.rs @@ -202,7 +202,7 @@ impl ChainMetadataService { target: LOG_TARGET, "Received chain metadata from NodeId '{}' #{}, Acc_diff {}", event.node_id, - chain_metadata.height_of_longest_chain(), + chain_metadata.best_block_height(), chain_metadata.accumulated_difficulty(), ); @@ -257,8 +257,8 @@ mod test { let mut bytes = [0u8; 32]; diff.to_big_endian(&mut bytes); proto::ChainMetadata { - height_of_longest_chain: 1, - best_block: vec![ + best_block_height: 1, + best_block_hash: vec![ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ], @@ -293,7 +293,7 @@ mod test { let (mut service, liveness_mock_state, mut base_node_receiver, _) = setup(); let mut proto_chain_metadata = create_sample_proto_chain_metadata(); - proto_chain_metadata.height_of_longest_chain = 123; + proto_chain_metadata.best_block_height = 123; let chain_metadata = proto_chain_metadata.clone().try_into().unwrap(); task::spawn(async move { @@ -311,7 +311,7 @@ mod test { unpack_enum!(LivenessRequest::SetMetadataEntry(metadata_key, data) = last_call); assert_eq!(metadata_key, MetadataKey::ChainMetadata); let chain_metadata = proto::ChainMetadata::decode(data.as_slice()).unwrap(); - assert_eq!(chain_metadata.height_of_longest_chain, 123); + assert_eq!(chain_metadata.best_block_height, 123); } #[tokio::test] async fn handle_liveness_event_ok() { @@ -333,8 +333,8 @@ mod test { let metadata = events_rx.recv().await.unwrap().peer_metadata().unwrap(); assert_eq!(*metadata.node_id(), node_id); assert_eq!( - metadata.claimed_chain_metadata().height_of_longest_chain(), - proto_chain_metadata.height_of_longest_chain + metadata.claimed_chain_metadata().best_block_height(), + proto_chain_metadata.best_block_height ); } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 4acbbff490..727df42046 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -292,33 +292,38 @@ where B: BlockchainBackend + 'static let prev_hash = header.prev_hash; let height = header.height; + let block = header.into_builder().with_transactions(transactions).build(); + let block_hash = block.hash(); let block_template = NewBlockTemplate::from_block( - header.into_builder().with_transactions(transactions).build(), + block, self.get_target_difficulty_for_next_block(request.algo, constants, prev_hash) .await?, self.consensus_manager.get_block_reward_at(height), )?; - debug!(target: LOG_TARGET, "New template block: {}", block_template); - debug!( - target: LOG_TARGET, - "New block template requested at height {}, weight: {}", + debug!(target: LOG_TARGET, + "New block template requested and prepared at height: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}", block_template.header.height, + block_template.target_difficulty, + block_hash.to_hex(), block_template .body .calculate_weight(constants.transaction_weight_params()) - 
.map_err(|e| CommsInterfaceError::InternalError(e.to_string()))? + .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?, + block_template.body.to_counts_string() ); - trace!(target: LOG_TARGET, "{}", block_template); + Ok(NodeCommsResponse::NewBlockTemplate(block_template)) }, NodeCommsRequest::GetNewBlock(block_template) => { - debug!(target: LOG_TARGET, "Prepared block: {}", block_template); + let height = block_template.header.height; + let target_difficulty = block_template.target_difficulty; let block = self.blockchain_db.prepare_new_block(block_template).await?; let constants = self.consensus_manager.consensus_constants(block.header.height); - debug!( - target: LOG_TARGET, - "Prepared new block from template (hash: {}, weight: {}, {})", + debug!(target: LOG_TARGET, + "Prepared block: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}", + height, + target_difficulty, block.hash().to_hex(), block .body @@ -339,6 +344,7 @@ where B: BlockchainBackend + 'static "A peer has requested a block with hash {}", block_hex ); + #[allow(clippy::blocks_in_conditions)] let maybe_block = match self .blockchain_db .fetch_block_by_hash(hash, true) @@ -551,15 +557,19 @@ where B: BlockchainBackend + 'static ); return Ok(true); } - if self.blockchain_db.bad_block_exists(block).await? { + let block_exist = self.blockchain_db.bad_block_exists(block).await?; + if block_exist.0 { debug!( target: LOG_TARGET, - "Block with hash `{}` already validated as a bad block", - block.to_hex() + "Block with hash `{}` already validated as a bad block due to {}", + block.to_hex(), block_exist.1 ); return Err(CommsInterfaceError::ChainStorageError( ChainStorageError::ValidationError { - source: ValidationError::BadBlockFound { hash: block.to_hex() }, + source: ValidationError::BadBlockFound { + hash: block.to_hex(), + reason: block_exist.1, + }, }, )); } @@ -602,17 +612,18 @@ where B: BlockchainBackend + 'static // We check the current tip and orphan status of the block because we cannot guarantee that mempool state is // correct and the mmr root calculation is only valid if the block is building on the tip. let current_meta = self.blockchain_db.get_chain_metadata().await?; - if header.prev_hash != *current_meta.best_block() { + if header.prev_hash != *current_meta.best_block_hash() { debug!( target: LOG_TARGET, "Orphaned block #{}: ({}), current tip is: #{} ({}). 
We need to fetch the complete block from peer: \ ({})", header.height, block_hash.to_hex(), - current_meta.height_of_longest_chain(), - current_meta.best_block().to_hex(), + current_meta.best_block_height(), + current_meta.best_block_hash().to_hex(), source_peer, ); + #[allow(clippy::cast_possible_wrap)] #[cfg(feature = "metrics")] metrics::compact_block_tx_misses(header.height).set(excess_sigs.len() as i64); let block = self.request_full_block_from_peer(source_peer, block_hash).await?; @@ -623,6 +634,7 @@ let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?; let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect(); + #[allow(clippy::cast_possible_wrap)] #[cfg(feature = "metrics")] metrics::compact_block_tx_misses(header.height).set(missing_excess_sigs.len() as i64); diff --git a/base_layer/core/src/base_node/proto/chain_metadata.proto b/base_layer/core/src/base_node/proto/chain_metadata.proto index 8f77ffac0c..bdd6061b53 100644 --- a/base_layer/core/src/base_node/proto/chain_metadata.proto +++ b/base_layer/core/src/base_node/proto/chain_metadata.proto @@ -9,14 +9,14 @@ package tari.base_node; message ChainMetadata { // The current chain height, or the block number of the longest valid chain, or `None` if there is no chain - uint64 height_of_longest_chain = 1; + uint64 best_block_height = 1; // The block hash of the current tip of the longest valid chain, or `None` for an empty chain - bytes best_block = 2; + bytes best_block_hash = 2; // The current geometric mean of the pow of the chain tip, or `None` if there is no chain bytes accumulated_difficulty = 5; // The effective height of the pruning horizon. This indicates from what height // a full block can be provided (exclusive). - // If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be provided. + // If `pruned_height` is equal to the `best_block_height` no blocks can be provided. // Archival nodes will always have a `pruned_height` of zero.
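// Worked example: with best_block_height = 1000 and pruned_height = 900, this node can serve full
// blocks only for heights above 900; an archival node (pruned_height = 0) can serve all of them,
// and a node whose pruned_height equals its best_block_height can serve none.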
uint64 pruned_height = 6; // Timestamp of the last block in the chain, or `None` if there is no chain diff --git a/base_layer/core/src/base_node/proto/chain_metadata.rs b/base_layer/core/src/base_node/proto/chain_metadata.rs index 702141d288..2a876fbe63 100644 --- a/base_layer/core/src/base_node/proto/chain_metadata.rs +++ b/base_layer/core/src/base_node/proto/chain_metadata.rs @@ -41,29 +41,30 @@ impl TryFrom<proto::ChainMetadata> for ChainMetadata { } let accumulated_difficulty = U256::from_big_endian(&metadata.accumulated_difficulty); - let height_of_longest_chain = metadata.height_of_longest_chain; + let best_block_height = metadata.best_block_height; let pruning_horizon = if metadata.pruned_height == 0 { metadata.pruned_height } else { - height_of_longest_chain.saturating_sub(metadata.pruned_height) + best_block_height.saturating_sub(metadata.pruned_height) }; - if metadata.best_block.is_empty() { + if metadata.best_block_hash.is_empty() { return Err("Best block is missing".to_string()); } let hash: FixedHash = metadata - .best_block + .best_block_hash .try_into() .map_err(|e| format!("Malformed best block: {}", e))?; - Ok(ChainMetadata::new( - height_of_longest_chain, + ChainMetadata::new( + best_block_height, hash, pruning_horizon, metadata.pruned_height, accumulated_difficulty, metadata.timestamp, - )) + ) + .map_err(|e| e.to_string()) } } @@ -74,8 +75,8 @@ impl From<ChainMetadata> for proto::ChainMetadata { .accumulated_difficulty() .to_big_endian(&mut accumulated_difficulty); Self { - height_of_longest_chain: metadata.height_of_longest_chain(), - best_block: metadata.best_block().to_vec(), + best_block_height: metadata.best_block_height(), + best_block_hash: metadata.best_block_hash().to_vec(), pruned_height: metadata.pruned_height(), accumulated_difficulty: accumulated_difficulty.to_vec(), timestamp: metadata.timestamp(), @@ -84,7 +85,7 @@ impl From<ChainMetadata> for proto::ChainMetadata { } impl proto::ChainMetadata { - pub fn height_of_longest_chain(&self) -> u64 { - self.height_of_longest_chain + pub fn best_block_height(&self) -> u64 { + self.best_block_height } } diff --git a/base_layer/core/src/base_node/proto/rpc.proto b/base_layer/core/src/base_node/proto/rpc.proto index bbc6aea8b3..00532116b0 100644 --- a/base_layer/core/src/base_node/proto/rpc.proto +++ b/base_layer/core/src/base_node/proto/rpc.proto @@ -57,16 +57,20 @@ message SyncKernelsRequest { } message SyncUtxosRequest { + // Start header hash to sync UTXOs from bytes start_header_hash = 1; + // End header hash to sync UTXOs to bytes end_header_hash = 2; } -message SyncUtxosResponse { - tari.types.TransactionOutput output = 1; - bytes mined_header = 2; -} -message PrunedOutput { - bytes hash = 1; +message SyncUtxosResponse { + oneof txo { + // The unspent transaction output + tari.types.TransactionOutput output = 1; + // If the TXO is spent, the commitment bytes are returned + bytes commitment = 2; + } + bytes mined_header = 3; } message SyncUtxosByBlockRequest { diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.proto b/base_layer/core/src/base_node/proto/wallet_rpc.proto index e00dcce3c3..fb9c852ea3 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.proto +++ b/base_layer/core/src/base_node/proto/wallet_rpc.proto @@ -35,27 +35,27 @@ enum TxLocation { message TxQueryResponse { TxLocation location = 1; - bytes block_hash = 2; + bytes best_block_hash = 2; uint64 confirmations = 3; bool is_synced = 4; - uint64 height_of_longest_chain = 5; + uint64 best_block_height = 5; uint64 mined_timestamp = 6; } message TxQueryBatchResponse {
tari.types.Signature signature = 1; TxLocation location = 2; - bytes block_hash = 3; + bytes best_block_hash = 3; uint64 confirmations = 4; - uint64 block_height = 5; + uint64 best_block_height = 5; uint64 mined_timestamp = 6; } message TxQueryBatchResponses { repeated TxQueryBatchResponse responses = 1; bool is_synced = 2; - bytes tip_hash = 3; - uint64 height_of_longest_chain = 4; + bytes best_block_hash = 3; + uint64 best_block_height = 4; uint64 tip_mined_timestamp = 5; } diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.rs b/base_layer/core/src/base_node/proto/wallet_rpc.rs index 6159694a11..ba2be442e1 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.rs +++ b/base_layer/core/src/base_node/proto/wallet_rpc.rs @@ -128,10 +128,10 @@ impl From<TxSubmissionResponse> for proto::TxSubmissionResponse { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TxQueryResponse { pub location: TxLocation, - pub block_hash: Option<BlockHash>, + pub best_block_hash: Option<BlockHash>, pub confirmations: u64, pub is_synced: bool, - pub height_of_longest_chain: u64, + pub best_block_height: u64, pub mined_timestamp: Option<u64>, } @@ -139,9 +139,9 @@ pub struct TxQueryBatchResponse { pub signature: Signature, pub location: TxLocation, - pub block_hash: Option<BlockHash>, + pub best_block_hash: Option<BlockHash>, pub confirmations: u64, - pub block_height: u64, + pub best_block_height: u64, pub mined_timestamp: Option<u64>, } @@ -192,10 +192,10 @@ impl TryFrom<proto::TxQueryResponse> for TxQueryResponse { type Error = String; fn try_from(proto_response: proto::TxQueryResponse) -> Result<Self, Self::Error> { - let hash = if proto_response.block_hash.is_empty() { None } else { - Some(match BlockHash::try_from(proto_response.block_hash.clone()) { Ok(h) => h, Err(e) => { return Err(format!("Malformed block hash: {}", e)); @@ -213,10 +213,10 @@ proto::TxLocation::from_i32(proto_response.location) .ok_or_else(|| "Invalid or unrecognised `TxLocation` enum".to_string())?, )?, - block_hash: hash, + best_block_hash: hash, confirmations: proto_response.confirmations, is_synced: proto_response.is_synced, - height_of_longest_chain: proto_response.height_of_longest_chain, + best_block_height: proto_response.best_block_height, mined_timestamp, }) } @@ -226,10 +226,10 @@ impl From<TxQueryResponse> for proto::TxQueryResponse { fn from(response: TxQueryResponse) -> Self { Self { location: proto::TxLocation::from(response.location) as i32, - block_hash: response.block_hash.map(|v| v.to_vec()).unwrap_or(vec![]), + best_block_hash: response.best_block_hash.map(|v| v.to_vec()).unwrap_or_default(), confirmations: response.confirmations, is_synced: response.is_synced, - height_of_longest_chain: response.height_of_longest_chain, + best_block_height: response.best_block_height, mined_timestamp: response.mined_timestamp.unwrap_or_default(), } } @@ -239,10 +239,10 @@ impl TryFrom<proto::TxQueryBatchResponse> for TxQueryBatchResponse { type Error = String; fn try_from(proto_response: proto::TxQueryBatchResponse) -> Result<Self, Self::Error> { - let hash = if proto_response.block_hash.is_empty() { None } else { - Some(match BlockHash::try_from(proto_response.block_hash.clone()) { + Some(match BlockHash::try_from(proto_response.best_block_hash.clone()) { Ok(h) => h, Err(e) => { return Err(format!("Malformed block hash: {}", e)); @@ -263,8 +263,8 @@ impl TryFrom<proto::TxQueryBatchResponse> for TxQueryBatchResponse {
proto::TxLocation::from_i32(proto_response.location) .ok_or_else(|| "Invalid or unrecognised `TxLocation` enum".to_string())?, )?, - block_hash: hash, - block_height: proto_response.block_height, + best_block_hash: hash, + best_block_height: proto_response.best_block_height, confirmations: proto_response.confirmations, mined_timestamp, }) diff --git a/base_layer/core/src/base_node/rpc/service.rs b/base_layer/core/src/base_node/rpc/service.rs index 4a6d62032d..34b8a30d46 100644 --- a/base_layer/core/src/base_node/rpc/service.rs +++ b/base_layer/core/src/base_node/rpc/service.rs @@ -124,13 +124,13 @@ impl BaseNodeWalletRpcService { { None => (), Some(header) => { - let confirmations = chain_metadata.height_of_longest_chain().saturating_sub(header.height); + let confirmations = chain_metadata.best_block_height().saturating_sub(header.height); let response = TxQueryResponse { location: TxLocation::Mined as i32, - block_hash: block_hash.to_vec(), + best_block_hash: block_hash.to_vec(), confirmations, is_synced, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), + best_block_height: chain_metadata.best_block_height(), mined_timestamp: header.timestamp.as_u64(), }; return Ok(response); @@ -148,10 +148,10 @@ impl BaseNodeWalletRpcService { { TxStorageResponse::UnconfirmedPool => TxQueryResponse { location: TxLocation::InMempool as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, is_synced, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), + best_block_height: chain_metadata.best_block_height(), mined_timestamp: 0, }, TxStorageResponse::ReorgPool | @@ -163,10 +163,10 @@ impl BaseNodeWalletRpcService { TxStorageResponse::NotStoredFeeTooLow | TxStorageResponse::NotStoredAlreadyMined => TxQueryResponse { location: TxLocation::NotStored as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, is_synced, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), + best_block_height: chain_metadata.best_block_height(), mined_timestamp: 0, }, }; @@ -311,17 +311,17 @@ impl BaseNodeWalletService for BaseNodeWalletRpc responses.push(TxQueryBatchResponse { signature: Some(SignatureProto::from(signature)), location: response.location, - block_hash: response.block_hash, + best_block_hash: response.best_block_hash, confirmations: response.confirmations, - block_height: response.height_of_longest_chain.saturating_sub(response.confirmations), + best_block_height: response.best_block_height.saturating_sub(response.confirmations), mined_timestamp: response.mined_timestamp, }); } Ok(Response::new(TxQueryBatchResponses { responses, is_synced, - tip_hash: metadata.best_block().to_vec(), - height_of_longest_chain: metadata.height_of_longest_chain(), + best_block_hash: metadata.best_block_hash().to_vec(), + best_block_height: metadata.best_block_height(), tip_mined_timestamp: metadata.timestamp(), })) } @@ -421,8 +421,8 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)?; Ok(Response::new(UtxoQueryResponses { - best_block_height: metadata.height_of_longest_chain(), - best_block_hash: metadata.best_block().to_vec(), + best_block_height: metadata.best_block_height(), + best_block_hash: metadata.best_block_hash().to_vec(), responses: mined_info_resp .into_iter() .flatten() @@ -520,8 +520,8 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)?; Ok(Response::new(QueryDeletedResponse { - best_block_height: metadata.height_of_longest_chain(), - best_block_hash: 
metadata.best_block().to_vec(), + best_block_height: metadata.best_block_height(), + best_block_hash: metadata.best_block_hash().to_vec(), data: return_data, })) } @@ -671,7 +671,7 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)?; let stats = self .mempool() - .get_fee_per_gram_stats(count, metadata.height_of_longest_chain()) + .get_fee_per_gram_stats(count, metadata.best_block_height()) .await .rpc_status_internal_error(LOG_TARGET)?; diff --git a/base_layer/core/src/base_node/service/service.rs b/base_layer/core/src/base_node/service/service.rs index 5d6c6f78f4..5a98eee9ba 100644 --- a/base_layer/core/src/base_node/service/service.rs +++ b/base_layer/core/src/base_node/service/service.rs @@ -28,10 +28,7 @@ use std::{ use futures::{pin_mut, stream::StreamExt, Stream}; use log::*; use rand::rngs::OsRng; -use tari_common_types::{ - types::BlockHash, - waiting_requests::{generate_request_key, RequestKey, WaitingRequests}, -}; +use tari_common_types::types::BlockHash; use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId}; use tari_comms_dht::{ domain_message::OutboundDomainMessage, @@ -60,7 +57,10 @@ use crate::{ }, blocks::{Block, NewBlock}, chain_storage::{BlockchainBackend, ChainStorageError}, - common::BanPeriod, + common::{ + waiting_requests::{generate_request_key, RequestKey, WaitingRequests}, + BanPeriod, + }, proto as shared_protos, proto::base_node as proto, }; diff --git a/base_layer/core/src/base_node/service/service_request.rs b/base_layer/core/src/base_node/service/service_request.rs index dcabc7879c..f8281b06e6 100644 --- a/base_layer/core/src/base_node/service/service_request.rs +++ b/base_layer/core/src/base_node/service/service_request.rs @@ -21,9 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use serde::{Deserialize, Serialize}; -use tari_common_types::waiting_requests::RequestKey; -use crate::base_node::comms_interface::NodeCommsRequest; +use crate::{base_node::comms_interface::NodeCommsRequest, common::waiting_requests::RequestKey}; /// Request type for a received BaseNodeService request. #[derive(Debug, Serialize, Deserialize)] diff --git a/base_layer/core/src/base_node/service/service_response.rs b/base_layer/core/src/base_node/service/service_response.rs index dc1f74f86e..1a2c2ed931 100644 --- a/base_layer/core/src/base_node/service/service_response.rs +++ b/base_layer/core/src/base_node/service/service_response.rs @@ -20,9 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
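// The waiting-requests helpers are moved into core's own `common` module below. A minimal usage
// sketch of the pattern (method names assumed for illustration, not verified against the crate):
//     let key: RequestKey = generate_request_key(&mut OsRng); // random correlation key
//     waiting_requests.insert(key, reply_tx).await;           // park the reply channel
//     // ...and when the matching response arrives, look the sender up again via remove(key).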
-use tari_common_types::waiting_requests::RequestKey; - -use crate::base_node::comms_interface::NodeCommsResponse; +use crate::{base_node::comms_interface::NodeCommsResponse, common::waiting_requests::RequestKey}; /// Response type for a received BaseNodeService requests #[derive(Debug)] diff --git a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs index 5f0ef9c9f5..e44673939a 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs @@ -126,7 +126,7 @@ impl Display for SyncStatus { f, "Lagging behind {} peers (#{}, Difficulty: {})", sync_peers.len(), - network.height_of_longest_chain(), + network.best_block_height(), network.accumulated_difficulty(), ), UpToDate => f.write_str("UpToDate"), diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index ec17dcba56..0620e84bc3 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -57,26 +57,33 @@ impl HorizonStateSync { Err(err) => return err.into(), }; + let sync_peers = &mut self.sync_peers; + // Order sync peers according to accumulated difficulty + sync_peers.sort_by(|a, b| { + b.claimed_chain_metadata() + .accumulated_difficulty() + .cmp(&a.claimed_chain_metadata().accumulated_difficulty()) + }); + + // Target horizon sync height based on the last header we have synced let last_header = match shared.db.fetch_last_header().await { Ok(h) => h, Err(err) => return err.into(), }; + let target_horizon_sync_height = local_metadata.pruned_height_at_given_chain_tip(last_header.height); - let horizon_sync_height = local_metadata.horizon_block_height(last_header.height); - if local_metadata.pruned_height() >= horizon_sync_height { - info!(target: LOG_TARGET, "Horizon state was already synchronized."); + // Determine if we need to sync horizon state + if local_metadata.pruned_height() >= target_horizon_sync_height { + info!(target: LOG_TARGET, "Horizon state is already synchronized."); return StateEvent::HorizonStateSynchronized; } - - // We're already synced because we have full blocks higher than our target pruned height - if local_metadata.height_of_longest_chain() >= horizon_sync_height { + if local_metadata.best_block_height() >= target_horizon_sync_height { info!( target: LOG_TARGET, - "Tip height is higher than our pruned height. Horizon state is already synchronized." + "Our tip height is higher than our target pruned height. Horizon state is already synchronized." 
); return StateEvent::HorizonStateSynchronized; } - let sync_peers = &mut self.sync_peers; let db = shared.db.clone(); let config = shared.config.blockchain_sync_config.clone(); @@ -90,7 +97,7 @@ impl HorizonStateSync { connectivity, rules, sync_peers, - horizon_sync_height, + target_horizon_sync_height, prover, validator, ); diff --git a/base_layer/core/src/base_node/state_machine_service/states/listening.rs b/base_layer/core/src/base_node/state_machine_service/states/listening.rs index 9e030714fc..9e097d8d9b 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/listening.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/listening.rs @@ -330,8 +330,8 @@ fn determine_sync_mode( let network_tip_accum_difficulty = network.claimed_chain_metadata().accumulated_difficulty(); let local_tip_accum_difficulty = local.accumulated_difficulty(); if local_tip_accum_difficulty < network_tip_accum_difficulty { - let local_tip_height = local.height_of_longest_chain(); - let network_tip_height = network.claimed_chain_metadata().height_of_longest_chain(); + let local_tip_height = local.best_block_height(); + let network_tip_height = network.claimed_chain_metadata().best_block_height(); info!( target: LOG_TARGET, "Our local blockchain accumulated difficulty is a little behind that of the network. We're at block #{} \ @@ -350,7 +350,7 @@ fn determine_sync_mode( let pruned_mode = local.pruning_horizon() > 0; let pruning_horizon_check = network.claimed_chain_metadata().pruning_horizon() > 0 && network.claimed_chain_metadata().pruning_horizon() < local.pruning_horizon(); - let pruning_height_check = network.claimed_chain_metadata().pruned_height() > local.height_of_longest_chain(); + let pruning_height_check = network.claimed_chain_metadata().pruned_height() > local.best_block_height(); let sync_able_peer = match (pruned_mode, pruning_horizon_check, pruning_height_check) { (true, true, _) => { info!( @@ -366,7 +366,7 @@ fn determine_sync_mode( target: LOG_TARGET, "The remote peer is a pruned node, and it cannot supply the blocks we need. Remote pruned height # {}, current local tip #{}", network.claimed_chain_metadata().pruned_height(), - local.height_of_longest_chain(), + local.best_block_height(), ); false }, @@ -421,9 +421,9 @@ fn determine_sync_mode( // Equals "Our blockchain is up-to-date." 
}, - local.height_of_longest_chain(), + local.best_block_height(), local_tip_accum_difficulty, - network.claimed_chain_metadata().height_of_longest_chain(), + network.claimed_chain_metadata().best_block_height(), network_tip_accum_difficulty, ); UpToDate @@ -456,7 +456,7 @@ mod test { let archival_node = PeerChainMetadata::new( random_node_id(), - ChainMetadata::new(NETWORK_TIP_HEIGHT, block_hash, 0, 0, accumulated_difficulty, 0), + ChainMetadata::new(NETWORK_TIP_HEIGHT, block_hash, 0, 0, accumulated_difficulty, 0).unwrap(), None, ); @@ -469,7 +469,8 @@ mod test { 0, accumulated_difficulty - U256::from(1000), 0, - ), + ) + .unwrap(), None, ); diff --git a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs index 674af6eb33..ad853601c8 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs @@ -62,63 +62,63 @@ impl DecideNextSync { ); if local_metadata.pruning_horizon() > 0 { - let last_header = match shared.db.fetch_last_header().await { - Ok(h) => h, - Err(err) => return err.into(), - }; - - let horizon_sync_height = local_metadata.horizon_block_height(last_header.height); // Filter sync peers that claim to be able to provide blocks up until our pruned height - let sync_peers = self - .sync_peers + debug!(target: LOG_TARGET, "Local metadata: {}", local_metadata); + let mut sync_peers = self.sync_peers.clone(); + let sync_peers = sync_peers .drain(..) .filter(|sync_peer| { let remote_metadata = sync_peer.claimed_chain_metadata(); - remote_metadata.height_of_longest_chain() >= horizon_sync_height + debug!(target: LOG_TARGET, "Peer metadata: {}", remote_metadata); + // Must be able to provide the correct amount of full blocks past the pruned height (i.e. the + // pruning horizon), otherwise our horizon spec will not be met + remote_metadata.best_block_height().saturating_sub(remote_metadata.pruned_height()) >= + local_metadata.pruning_horizon() && + // Must have a better blockchain tip than us + remote_metadata.best_block_height() > local_metadata.best_block_height() }) .collect::<Vec<_>>(); if sync_peers.is_empty() { warn!( target: LOG_TARGET, - "Unable to find any appropriate sync peers for horizon sync" + "Unable to find any appropriate sync peers for horizon sync, trying for block sync" ); - return Continue; - } - - debug!( - target: LOG_TARGET, - "Proceeding to horizon sync with {} sync peer(s) with a best latency of {:.2?}", - sync_peers.len(), - sync_peers.first().map(|p| p.latency()).unwrap_or_default() - ); - ProceedToHorizonSync(sync_peers) - } else { - // Filter sync peers that are able to provide full blocks from our current tip - let sync_peers = self - .sync_peers - .drain(..)
- .filter(|sync_peer| { - sync_peer.claimed_chain_metadata().pruned_height() <= local_metadata.height_of_longest_chain() - }) - .collect::<Vec<_>>(); - - if sync_peers.is_empty() { - warn!( + } else { + debug!( target: LOG_TARGET, - "Unable to find any appropriate sync peers for block sync" + "Proceeding to horizon sync with {} sync peer(s) with a best latency of {:.2?}", + sync_peers.len(), + sync_peers.first().map(|p| p.latency()).unwrap_or_default() ); - return Continue; + return ProceedToHorizonSync(sync_peers); } + } + + // This is not a pruned node or horizon sync is not possible, try for block sync + + // Filter sync peers that are able to provide full blocks from our current tip + let sync_peers = self + .sync_peers + .drain(..) + .filter(|sync_peer| { + let remote_metadata = sync_peer.claimed_chain_metadata(); + remote_metadata.pruned_height() <= local_metadata.best_block_height() + }) + .collect::<Vec<_>>(); - debug!( - target: LOG_TARGET, - "Proceeding to block sync with {} sync peer(s) with a best latency of {:.2?}", - sync_peers.len(), - sync_peers.first().map(|p| p.latency()).unwrap_or_default() - ); - ProceedToBlockSync(sync_peers) + if sync_peers.is_empty() { + warn!(target: LOG_TARGET, "Unable to find any appropriate sync peers for block sync"); + return Continue; } + + debug!( + target: LOG_TARGET, + "Proceeding to block sync with {} sync peer(s) with a best latency of {:.2?}", + sync_peers.len(), + sync_peers.first().map(|p| p.latency()).unwrap_or_default() + ); + ProceedToBlockSync(sync_peers) } } diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index c06325f2af..308045ad07 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -233,7 +233,7 @@ impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> { let tip_header = self.db.fetch_last_header().await?; let local_metadata = self.db.get_chain_metadata().await?; - if tip_header.height <= local_metadata.height_of_longest_chain() { + if tip_header.height <= local_metadata.best_block_height() { debug!( target: LOG_TARGET, "Blocks already synchronized to height {}.", tip_header.height @@ -243,7 +243,7 @@ let tip_hash = tip_header.hash(); let tip_height = tip_header.height; - let best_height = local_metadata.height_of_longest_chain(); + let best_height = local_metadata.best_block_height(); let chain_header = self.db.fetch_chain_header(best_height).await?; let best_full_block_hash = chain_header.accumulated_data().hash; @@ -336,7 +336,7 @@ .db .write_transaction() .delete_orphan(header_hash) - .insert_bad_block(header_hash, current_height) + .insert_bad_block(header_hash, current_height, err.to_string()) .commit() .await { diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index 9df904a4f7..12514a63bf 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -259,13 +259,12 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { let best_header = self.db.fetch_last_chain_header().await?; let best_block_header = self .db - .fetch_chain_header(best_block_metadata.height_of_longest_chain()) +
.fetch_chain_header(best_block_metadata.best_block_height()) .await?; let best_header_height = best_header.height(); let best_block_height = best_block_header.height(); - if best_header_height < best_block_height || - best_block_height < self.local_cached_metadata.height_of_longest_chain() + if best_header_height < best_block_height || best_block_height < self.local_cached_metadata.best_block_height() { return Err(BlockHeaderSyncError::ChainStorageError( ChainStorageError::CorruptedDatabase("Inconsistent block and header data".to_string()), @@ -301,7 +300,7 @@ .height() .checked_sub(split_info.reorg_steps_back) .unwrap_or_default(), - sync_peer.claimed_chain_metadata().height_of_longest_chain(), + sync_peer.claimed_chain_metadata().best_block_height(), sync_peer, ); self.synchronize_headers(sync_peer.clone(), &mut client, *split_info, max_latency) @@ -647,6 +646,11 @@ header.hash().to_hex(), latency ); + trace!( + target: LOG_TARGET, + "{}", + header + ); if let Some(prev_header_height) = prev_height { if header.height != prev_header_height.saturating_add(1) { warn!( @@ -694,7 +698,7 @@ sync_peer.add_sample(last_sync_timer.elapsed()); self.hooks.call_on_progress_header_hooks( current_height, - sync_peer.claimed_chain_metadata().height_of_longest_chain(), + sync_peer.claimed_chain_metadata().best_block_height(), &sync_peer, ); diff --git a/base_layer/core/src/base_node/sync/header_sync/validator.rs b/base_layer/core/src/base_node/sync/header_sync/validator.rs index 3e49e0b1a2..3824d09fba 100644 --- a/base_layer/core/src/base_node/sync/header_sync/validator.rs +++ b/base_layer/core/src/base_node/sync/header_sync/validator.rs @@ -146,7 +146,7 @@ impl BlockHeaderSyncValidator { Err(e) => { let mut txn = self.db.write_transaction(); - txn.insert_bad_block(header.hash(), header.height); + txn.insert_bad_block(header.hash(), header.height, e.to_string()); txn.commit().await?; return Err(e.into()); }, diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs index 4f1a40ff89..6aff7e4510 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs @@ -30,6 +30,7 @@ use tari_comms::{ }; use tari_crypto::errors::RangeProofError; use tari_mmr::{error::MerkleMountainRangeError, sparse_merkle_tree::SMTError}; +use tari_utilities::ByteArrayError; use thiserror::Error; use tokio::task; @@ -97,6 +98,14 @@ pub enum HorizonSyncError { PeerNotFound, #[error("Sparse Merkle Tree error: {0}")] SMTError(#[from] SMTError), + #[error("ByteArrayError error: {0}")] + ByteArrayError(String), +} + +impl From<ByteArrayError> for HorizonSyncError { + fn from(e: ByteArrayError) -> Self { + HorizonSyncError::ByteArrayError(e.to_string()) + } } impl From for HorizonSyncError { @@ -142,7 +151,8 @@ impl HorizonSyncError { err @ HorizonSyncError::ConversionError(_) | err @ HorizonSyncError::MerkleMountainRangeError(_) | err @ HorizonSyncError::FixedHashSizeError(_) | - err @ HorizonSyncError::TransactionError(_) => Some(BanReason { + err @ HorizonSyncError::TransactionError(_) | + err @ HorizonSyncError::ByteArrayError(_) => Some(BanReason { reason: format!("{}", err), ban_duration: BanPeriod::Long, }), diff --git
a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs index 3a4a528841..bdc4cb019c 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs @@ -32,7 +32,7 @@ use log::*; use tari_common_types::types::{Commitment, FixedHash, RangeProofService}; use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId, protocol::rpc::RpcClient, PeerConnection}; use tari_crypto::commitment::HomomorphicCommitment; -use tari_mmr::sparse_merkle_tree::{NodeKey, ValueHash}; +use tari_mmr::sparse_merkle_tree::{DeleteResult, NodeKey, ValueHash}; use tari_utilities::{hex::Hex, ByteArray}; use tokio::task; @@ -43,6 +43,7 @@ use crate::{ hooks::Hooks, horizon_state_sync::{HorizonSyncInfo, HorizonSyncStatus}, rpc, + rpc::BaseNodeSyncRpcClient, BlockchainSyncConfig, SyncPeer, }, @@ -50,13 +51,15 @@ use crate::{ chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, common::{rolling_avg::RollingAverageTime, BanPeriod}, consensus::ConsensusManager, - proto::base_node::{SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, + proto::base_node::{sync_utxos_response::Txo, SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, transactions::transaction_components::{ transaction_output::batch_verify_range_proofs, + OutputType, TransactionKernel, TransactionOutput, }, validation::{helpers, FinalHorizonStateValidation}, + OutputSmt, PrunedKernelMmr, }; @@ -129,7 +132,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { target: LOG_TARGET, "Preparing database for horizon sync to height #{}", self.horizon_sync_height ); - let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { + let to_header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { ChainStorageError::ValueNotFound { entity: "Header", field: "height", @@ -139,7 +142,7 @@ let mut latency_increases_counter = 0; loop { - match self.sync(&header).await { + match self.sync(&to_header).await { Ok(()) => return Ok(()), Err(err @ HorizonSyncError::AllSyncPeersExceedLatency) => { // If we don't have many sync peers to select from, return the listening state and see if we can get @@ -167,7 +170,7 @@ } } - async fn sync(&mut self, header: &BlockHeader) -> Result<(), HorizonSyncError> { + async fn sync(&mut self, to_header: &BlockHeader) -> Result<(), HorizonSyncError> { let sync_peer_node_ids = self.sync_peers.iter().map(|p| p.node_id()).cloned().collect::<Vec<_>>(); info!( target: LOG_TARGET, @@ -176,7 +179,7 @@ ); let mut latency_counter = 0usize; for node_id in sync_peer_node_ids { - match self.connect_and_attempt_sync(&node_id, header).await { + match self.connect_and_attempt_sync(&node_id, to_header).await { Ok(_) => return Ok(()), // Try another peer Err(err) => { @@ -213,8 +216,27 @@ async fn connect_and_attempt_sync( &mut self, node_id: &NodeId, - header: &BlockHeader, + to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { + // Connect + let (mut client, sync_peer) = self.connect_sync_peer(node_id).await?; + + // Perform horizon sync
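// (i.e. prune our own chain if needed, sync kernels and then outputs from the peer, and finally
// validate and finalize the horizon state -- see the helper calls directly below)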
debug!(target: LOG_TARGET, "Check if pruning is needed"); + self.prune_if_needed().await?; + self.sync_kernels_and_outputs(sync_peer.clone(), &mut client, to_header) + .await?; + + // Validate and finalize horizon sync + self.finalize_horizon_sync(&sync_peer).await?; + + Ok(()) + } + + async fn connect_sync_peer( + &mut self, + node_id: &NodeId, + ) -> Result<(BaseNodeSyncRpcClient, SyncPeer), HorizonSyncError> { let peer_index = self .get_sync_peer_index(node_id) .ok_or(HorizonSyncError::PeerNotFound)?; @@ -246,14 +268,9 @@ max_latency: self.max_latency, }); } - debug!(target: LOG_TARGET, "Sync peer latency is {:.2?}", latency); - let sync_peer = self.sync_peers[peer_index].clone(); - self.begin_sync(sync_peer.clone(), &mut client, header).await?; - self.finalize_horizon_sync(&sync_peer).await?; - - Ok(()) + Ok((client, self.sync_peers[peer_index].clone())) } async fn dial_sync_peer(&self, node_id: &NodeId) -> Result<PeerConnection, HorizonSyncError> { @@ -269,30 +286,100 @@ Ok(conn) } - async fn begin_sync( + async fn sync_kernels_and_outputs( &mut self, sync_peer: SyncPeer, client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { - debug!(target: LOG_TARGET, "Initializing"); - self.initialize().await?; - + // Note: We do not need to rewind kernels if the sync fails, because each kernel is validated when it is + // inserted into the database. Furthermore, these kernels will also be removed successfully whenever we + // need to rewind the blockchain for whatever reason. debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(sync_peer.clone(), client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); - self.synchronize_outputs(sync_peer, client, to_header).await?; - Ok(()) + match self.synchronize_outputs(sync_peer, client, to_header).await { + Ok(_) => Ok(()), + Err(err) => { + // We need to clean up the outputs + let _ = self.clean_up_failed_output_sync(to_header).await; + Err(err) + }, + } } - async fn initialize(&mut self) -> Result<(), HorizonSyncError> { - let db = self.db(); - let local_metadata = db.get_chain_metadata().await?; + /// We clean up a failed output sync attempt and ignore any errors that occur during the clean up process.
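/// The clean-up walks headers backwards from `to_header` along `prev_hash` links until it reaches
/// the current tip header, pruning every output that was inserted for each block along the way;
/// commits happen in batches of 100 so the database is not locked for long.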
+ async fn clean_up_failed_output_sync(&mut self, to_header: &BlockHeader) { + let tip_header = if let Ok(header) = self.db.fetch_tip_header().await { + header + } else { + return; + }; + let db = self.db().clone(); + let mut txn = db.write_transaction(); + let mut current_header = to_header.clone(); + loop { + if let Ok(outputs) = self.db.fetch_outputs_in_block(current_header.hash()).await { + for (count, output) in (1..=outputs.len()).zip(outputs.iter()) { + // Note: We do not need to clean up the SMT as it was not saved in the database yet, however, we + // need to clean up the outputs + txn.prune_output_from_all_dbs( + output.hash(), + output.commitment.clone(), + output.features.output_type, + ); + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, + "Clean up failed sync - prune output from all dbs for header '{}': {}", + current_header.hash(), e + ); + } + if count % 100 == 0 || count == outputs.len() { + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, + "Clean up failed sync - commit prune outputs for header '{}': {}", + current_header.hash(), e + ); + } + } + } + } + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, "Clean up failed output sync - commit delete kernels for header '{}': {}", + current_header.hash(), e + ); + } + if let Ok(header) = db.fetch_header_by_block_hash(current_header.prev_hash).await { + if let Some(previous_header) = header { + current_header = previous_header; + } else { + warn!(target: LOG_TARGET, "Could not clean up failed output sync, previous_header link missing from db"); + break; + } + } else { + warn!( + target: LOG_TARGET, + "Could not clean up failed output sync, header '{}' not in db", + current_header.prev_hash.to_hex() + ); + break; + } + if &current_header.hash() == tip_header.hash() { + debug!(target: LOG_TARGET, "Finished cleaning up failed output sync"); + break; + } + } + } - let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); + async fn prune_if_needed(&mut self) -> Result<(), HorizonSyncError> { + let local_metadata = self.db.get_chain_metadata().await?; + let new_prune_height = cmp::min(local_metadata.best_block_height(), self.horizon_sync_height); if local_metadata.pruned_height() < new_prune_height { debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); - db.prune_to_height(new_prune_height).await?; + self.db.prune_to_height(new_prune_height).await?; } Ok(()) @@ -328,7 +415,7 @@ "Requesting kernels from {} to {} ({} remaining)", local_num_kernels, remote_num_kernels, - remote_num_kernels - local_num_kernels, + remote_num_kernels.saturating_sub(local_num_kernels), ); let latency = client.get_last_request_latency(); @@ -374,7 +461,7 @@ } txn.insert_kernel_via_horizon_sync(kernel, *current_header.hash(), mmr_position); - if mmr_position == current_header.header().kernel_mmr_size - 1 { + if mmr_position == current_header.header().kernel_mmr_size.saturating_sub(1) { let num_kernels = kernel_hashes.len(); debug!( target: LOG_TARGET, @@ -425,9 +512,9 @@ num_kernels, mmr_position + 1, end, - end - (mmr_position + 1) + end.saturating_sub(mmr_position + 1) ); - if mmr_position < end - 1 { + if mmr_position < end.saturating_sub(1) { current_header =
db.fetch_chain_header(current_header.height() + 1).await?; } } @@ -471,6 +558,7 @@ Ok(()) } + // Synchronize outputs, deferring deletion of spent keys from the output SMT until the final root check passes. #[allow(clippy::too_many_lines)] async fn synchronize_outputs( &mut self, @@ -479,9 +567,26 @@ to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { info!(target: LOG_TARGET, "Starting output sync from peer {}", sync_peer); + let db = self.db().clone(); + let tip_header = db.fetch_tip_header().await?; - let remote_num_outputs = to_header.output_smt_size; - self.num_outputs = remote_num_outputs; + // Estimate the number of outputs to be downloaded; this cannot be known exactly until the sync is complete. + let mut current_header = to_header.clone(); + self.num_outputs = 0; + loop { + current_header = + if let Some(previous_header) = db.fetch_header_by_block_hash(current_header.prev_hash).await? { + self.num_outputs += current_header + .output_smt_size + .saturating_sub(previous_header.output_smt_size); + previous_header + } else { + break; + }; + if &current_header.hash() == tip_header.hash() { + break; + } + } let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { current: 0, @@ -490,86 +595,136 @@ }); self.hooks.call_on_progress_horizon_hooks(info); - debug!( - target: LOG_TARGET, - "Requesting outputs from {}", - remote_num_outputs, - ); - let db = self.db().clone(); - - let end = remote_num_outputs; - let end_hash = to_header.hash(); - let start_hash = db.fetch_chain_header(1).await?; - let gen_block = db.fetch_chain_header(0).await?; - let latency = client.get_last_request_latency(); debug!( target: LOG_TARGET, - "Initiating output sync with peer `{}` (latency = {}ms)", + "Initiating output sync with peer `{}`, requesting ~{} outputs, tip_header height `{}`, \ + last_chain_header height `{}` (latency = {}ms)", sync_peer.node_id(), - latency.unwrap_or_default().as_millis() + self.num_outputs, + tip_header.height(), + db.fetch_last_chain_header().await?.height(), + latency.unwrap_or_default().as_millis(), ); + let start_chain_header = db.fetch_chain_header(tip_header.height() + 1).await?; let req = SyncUtxosRequest { - start_header_hash: start_hash.hash().to_vec(), - end_header_hash: end_hash.to_vec(), + start_header_hash: start_chain_header.hash().to_vec(), + end_header_hash: to_header.hash().to_vec(), }; - let mut output_stream = client.sync_utxos(req).await?; let mut txn = db.write_transaction(); - let mut utxo_counter = gen_block.header().output_smt_size; + let mut utxo_counter = 0u64; + let mut stxo_counter = 0u64; let timer = Instant::now(); let mut output_smt = db.fetch_tip_smt().await?; let mut last_sync_timer = Instant::now(); let mut avg_latency = RollingAverageTime::new(20); + let mut inputs_to_delete = Vec::new(); while let Some(response) = output_stream.next().await { let latency = last_sync_timer.elapsed(); avg_latency.add_sample(latency); let res: SyncUtxosResponse = response?; - utxo_counter += 1; - if utxo_counter > end { - return Err(HorizonSyncError::IncorrectResponse( - "Peer sent too many outputs".to_string(), - )); - } - let output = res - .output - .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; - let output_header = FixedHash::try_from(res.mined_header) + let
output_header_hash = FixedHash::try_from(res.mined_header) .map_err(|_| HorizonSyncError::IncorrectResponse("Peer sent no mined header".into()))?; let current_header = self .db() - .fetch_header_by_block_hash(output_header) + .fetch_header_by_block_hash(output_header_hash) .await? .ok_or_else(|| { HorizonSyncError::IncorrectResponse("Peer sent mined header we do not know of".into()) })?; - let constants = self.rules.consensus_constants(current_header.height).clone(); - let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; - trace!( - target: LOG_TARGET, - "UTXO {} received from sync peer", - output.hash(), - ); - helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; - - batch_verify_range_proofs(&self.prover, &[&output])?; - let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; - let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; - output_smt.insert(smt_key, smt_node)?; - txn.insert_output_via_horizon_sync( - output, - current_header.hash(), - current_header.height, - current_header.timestamp.as_u64(), - ); + let proto_output = res + .txo + .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; + match proto_output { + Txo::Output(output) => { + utxo_counter += 1; + // Increase the estimate number of outputs to be downloaded (for display purposes only). + if utxo_counter >= self.num_outputs { + self.num_outputs = utxo_counter + u64::from(current_header.hash() != to_header.hash()); + } - // we have checked the range proof, and we have checked that the linked to header exists. - txn.commit().await?; + let constants = self.rules.consensus_constants(current_header.height).clone(); + let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; + if !output.is_burned() { + debug!( + target: LOG_TARGET, + "UTXO `{}` received from sync peer ({} of {})", + output.hash(), + utxo_counter, + self.num_outputs, + ); + helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; + + batch_verify_range_proofs(&self.prover, &[&output])?; + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; + if let Err(e) = output_smt.insert(smt_key, smt_node) { + error!( + target: LOG_TARGET, + "Output commitment({}) already in SMT", + output.commitment.to_hex(), + ); + return Err(e.into()); + } + txn.insert_output_via_horizon_sync( + output, + current_header.hash(), + current_header.height, + current_header.timestamp.as_u64(), + ); + + // We have checked the range proof, and we have checked that the linked to header exists. + txn.commit().await?; + } + }, + Txo::Commitment(commitment_bytes) => { + stxo_counter += 1; + + let commitment = Commitment::from_canonical_bytes(commitment_bytes.as_slice())?; + match self + .db() + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .await? + { + Some(output_hash) => { + debug!( + target: LOG_TARGET, + "STXO hash `{}` received from sync peer ({})", + output_hash, + stxo_counter, + ); + let smt_key = NodeKey::try_from(commitment_bytes.as_slice())?; + match output_smt.delete(&smt_key)? 
{ + DeleteResult::Deleted(_value_hash) => {}, + DeleteResult::KeyNotFound => { + error!( + target: LOG_TARGET, + "Could not find input({}) in SMT", + commitment.to_hex(), + ); + return Err(HorizonSyncError::ChainStorageError( + ChainStorageError::UnspendableInput, + )); + }, + }; + // This will only be committed once the SMT has been verified due to rewind difficulties if + // we need to abort the sync + inputs_to_delete.push((output_hash, commitment)); + }, + None => { + return Err(HorizonSyncError::IncorrectResponse( + "Peer sent unknown commitment hash".into(), + )) + }, + } + }, + } if utxo_counter % 100 == 0 { let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { @@ -583,33 +738,63 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { sync_peer.add_sample(last_sync_timer.elapsed()); last_sync_timer = Instant::now(); } - if utxo_counter != end { - return Err(HorizonSyncError::IncorrectResponse( - "Peer did not send enough outputs".to_string(), - )); + // The SMT can only be verified after all outputs have been downloaded, due to the way we optimize fetching + // outputs from the sync peer. As an example: + // 1. Initial sync: + // - We request outputs from height 0 to 100 (the tranche) + // - The sync peer only returns outputs per block that would still be unspent at height 100 and all inputs + // per block. All outputs that were created and spent within the tranche are never returned. + // - For example, an output is created in block 50 and spent in block 70. It would be included in the SMT for + // headers from height 50 to 69, but due to the optimization, the sync peer would never know about it. + // 2. Consecutive sync: + // - We request outputs from height 101 to 200 (the tranche) + // - The sync peer only returns outputs per block that would still be unspent at height 200, as well as all + // inputs per block, but in this case, only those inputs that are not an output of the current tranche of + // outputs. Similarly, all outputs created and spent within the tranche are never returned. + // - For example, an output is created in block 110 and spent in block 180. It would be included in the SMT + // for headers from height 110 to 179, but due to the optimization, the sync peer would never know about + // it. + // 3. In both cases it would be impossible to verify the SMT per block, as we would not be able to update the + // SMT with the outputs that were created and spent within the tranche. + HorizonStateSynchronization::<B>::check_output_smt_root_hash(&mut output_smt, to_header)?; + + // Commit in chunks to avoid locking the database for too long + let inputs_to_delete_len = inputs_to_delete.len(); + for (count, (output_hash, commitment)) in (1..=inputs_to_delete_len).zip(inputs_to_delete.into_iter()) { + txn.prune_output_from_all_dbs(output_hash, commitment, OutputType::default()); + if count % 100 == 0 || count == inputs_to_delete_len { + txn.commit().await?; + } } + // This has a very low probability of failure + db.set_tip_smt(output_smt).await?; debug!( target: LOG_TARGET, - "finished syncing UTXOs: {} downloaded in {:.2?}", - end, + "Finished syncing TXOs: {} unspent and {} spent downloaded in {:.2?}", + utxo_counter, + stxo_counter, timer.elapsed() ); + Ok(()) + } + + // Helper function to check the output SMT root hash against the expected root hash. 
+ fn check_output_smt_root_hash(output_smt: &mut OutputSmt, header: &BlockHeader) -> Result<(), HorizonSyncError> { let root = FixedHash::try_from(output_smt.hash().as_slice())?; - if root != to_header.output_mr { + if root != header.output_mr { warn!( target: LOG_TARGET, - "Final target root(#{}) did not match expected (#{})", - to_header.output_mr.to_hex(), + "Target root(#{}) did not match expected (#{})", + header.output_mr.to_hex(), root.to_hex(), ); return Err(HorizonSyncError::InvalidMrRoot { mr_tree: "UTXO SMT".to_string(), - at_height: to_header.height, - expected_hex: to_header.output_mr.to_hex(), + at_height: header.height, + expected_hex: header.output_mr.to_hex(), actual_hex: root.to_hex(), }); } - db.set_tip_smt(output_smt).await?; Ok(()) } @@ -647,7 +832,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.height(), *header.hash(), header.accumulated_data().total_accumulated_difficulty, - *metadata.best_block(), + *metadata.best_block_hash(), header.timestamp(), ) .set_pruned_height(header.height()) @@ -693,7 +878,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { curr_header.height(), curr_header.header().kernel_mmr_size, prev_kernel_mmr, - curr_header.header().kernel_mmr_size - 1 + curr_header.header().kernel_mmr_size.saturating_sub(1) ); trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 5c01e67d87..81db956111 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -100,6 +100,7 @@ impl BaseNodeSyncRpcService { let token = Arc::new(peer); lock.push(Arc::downgrade(&token)); + #[allow(clippy::cast_possible_wrap)] #[cfg(feature = "metrics")] metrics::active_sync_peers().set(lock.len() as i64); Ok(token) @@ -109,6 +110,7 @@ impl BaseNodeSyncRpcService { #[tari_comms::async_trait] impl BaseNodeSyncService for BaseNodeSyncRpcService { #[instrument(level = "trace", name = "sync_rpc::sync_blocks", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_blocks( &self, request: Request, @@ -272,6 +274,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", name = "sync_rpc::sync_headers", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_headers( &self, request: Request, @@ -372,6 +375,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn get_header_by_height( &self, request: Request, @@ -388,6 +392,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "debug", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn find_chain_split( &self, request: Request, @@ -451,6 +456,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn get_chain_metadata(&self, _: Request<()>) -> Result, RpcStatus> { let chain_metadata = self .db() @@ -461,6 +467,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_kernels( &self, request: Request, @@ -587,6 +594,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_utxos(&self, 
request: Request) -> Result, RpcStatus> { let req = request.message(); let peer_node_id = request.context().peer_node_id(); diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index 8b03e476a4..d2df0bdbe8 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryInto, sync::Arc, time::Instant}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; use log::*; use tari_comms::{ @@ -28,7 +32,7 @@ use tari_comms::{ protocol::rpc::{Request, RpcStatus, RpcStatusResultExt}, utils, }; -use tari_utilities::hex::Hex; +use tari_utilities::{hex::Hex, ByteArray}; use tokio::{sync::mpsc, task}; #[cfg(feature = "metrics")] @@ -36,7 +40,8 @@ use crate::base_node::metrics; use crate::{ blocks::BlockHeader, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, - proto::base_node::{SyncUtxosRequest, SyncUtxosResponse}, + proto, + proto::base_node::{sync_utxos_response::Txo, SyncUtxosRequest, SyncUtxosResponse}, }; const LOG_TARGET: &str = "c::base_node::sync_rpc::sync_utxo_task"; @@ -70,7 +75,7 @@ where B: BlockchainBackend + 'static .fetch_header_by_block_hash(start_hash) .await .rpc_status_internal_error(LOG_TARGET)? - .ok_or_else(|| RpcStatus::not_found("Start header hash is was not found"))?; + .ok_or_else(|| RpcStatus::not_found("Start header hash was not found"))?; let end_hash = msg .end_header_hash @@ -83,7 +88,7 @@ where B: BlockchainBackend + 'static .fetch_header_by_block_hash(end_hash) .await .rpc_status_internal_error(LOG_TARGET)? - .ok_or_else(|| RpcStatus::not_found("End header hash is was not found"))?; + .ok_or_else(|| RpcStatus::not_found("End header hash was not found"))?; if start_header.height > end_header.height { return Err(RpcStatus::bad_request(&format!( "Start header height({}) cannot be greater than the end header height({})", @@ -123,78 +128,183 @@ where B: BlockchainBackend + 'static ) -> Result<(), RpcStatus> { debug!( target: LOG_TARGET, - "Starting stream task with current_header: {}, end_header: {},", + "Starting stream task with current_header: {}, end_header: {}", current_header.hash().to_hex(), end_header.hash().to_hex(), ); + + // If this is a pruned node and outputs have been requested for an initial sync, we need to discover and send + // the outputs from the genesis block that have been pruned as well + let mut pruned_genesis_block_outputs = Vec::new(); + let metadata = self + .db + .get_chain_metadata() + .await + .rpc_status_internal_error(LOG_TARGET)?; + if current_header.height == 1 && metadata.is_pruned_node() { + let genesis_block = self.db.fetch_genesis_block(); + for output in genesis_block.block().body.outputs() { + let output_hash = output.hash(); + if self + .db + .fetch_output(output_hash) + .await + .rpc_status_internal_error(LOG_TARGET)? 
+ .is_none() + { + trace!( + target: LOG_TARGET, + "Spent genesis TXO (commitment '{}') to peer", + output.commitment.to_hex() + ); + pruned_genesis_block_outputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Commitment(output.commitment.as_bytes().to_vec())), + mined_header: current_header.hash().to_vec(), + })); + } + } + } + + let start_header = current_header.clone(); loop { let timer = Instant::now(); let current_header_hash = current_header.hash(); - debug!( target: LOG_TARGET, - "current header = {} ({})", + "Streaming TXO(s) for block #{} ({})", current_header.height, current_header_hash.to_hex() ); - if tx.is_closed() { - debug!( - target: LOG_TARGET, - "Peer '{}' exited UTXO sync session early", self.peer_node_id - ); + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); break; } let outputs_with_statuses = self .db - .fetch_outputs_in_block_with_spend_state(current_header.hash(), Some(end_header.hash())) + .fetch_outputs_in_block_with_spend_state(current_header_hash, Some(end_header.hash())) .await .rpc_status_internal_error(LOG_TARGET)?; + if tx.is_closed() { + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); + break; + } + + let mut outputs = Vec::with_capacity(outputs_with_statuses.len()); + for (output, spent) in outputs_with_statuses { + if output.is_burned() { + continue; + } + if !spent { + match proto::types::TransactionOutput::try_from(output.clone()) { + Ok(tx_ouput) => { + trace!( + target: LOG_TARGET, + "Unspent TXO (commitment '{}') to peer", + output.commitment.to_hex() + ); + outputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Output(tx_ouput)), + mined_header: current_header_hash.to_vec(), + })); + }, + Err(e) => { + return Err(RpcStatus::general(&format!( + "Output '{}' RPC conversion error ({})", + output.hash().to_hex(), + e + ))) + }, + } + } + } debug!( target: LOG_TARGET, - "Streaming UTXO(s) for block #{}.", + "Adding {} outputs in response for block #{} '{}'", outputs.len(), current_header.height, + current_header_hash ); + + let inputs_in_block = self + .db + .fetch_inputs_in_block(current_header_hash) + .await + .rpc_status_internal_error(LOG_TARGET)?; if tx.is_closed() { + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); + break; + } + + let mut inputs = Vec::with_capacity(inputs_in_block.len()); + for input in inputs_in_block { + let output_from_current_tranche = if let Some(mined_info) = self + .db + .fetch_output(input.output_hash()) + .await + .rpc_status_internal_error(LOG_TARGET)? 
+ { + mined_info.mined_height >= start_header.height + } else { + false + }; + + if output_from_current_tranche { + trace!(target: LOG_TARGET, "Spent TXO (hash '{}') not sent to peer", input.output_hash().to_hex()); + } else { + let input_commitment = match self.db.fetch_output(input.output_hash()).await { + Ok(Some(o)) => o.output.commitment, + Ok(None) => { + return Err(RpcStatus::general(&format!( + "Mined info for input '{}' not found", + input.output_hash().to_hex() + ))) + }, + Err(e) => { + return Err(RpcStatus::general(&format!( + "Input '{}' not found ({})", + input.output_hash().to_hex(), + e + ))) + }, + }; + trace!(target: LOG_TARGET, "Spent TXO (commitment '{}') to peer", input_commitment.to_hex()); + inputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Commitment(input_commitment.as_bytes().to_vec())), + mined_header: current_header_hash.to_vec(), + })); + } + } + debug!( + target: LOG_TARGET, + "Adding {} inputs in response for block #{} '{}'", inputs.len(), + current_header.height, + current_header_hash + ); + + let mut txos = Vec::with_capacity(outputs.len() + inputs.len()); + txos.append(&mut outputs); + txos.append(&mut inputs); + if start_header == current_header { debug!( target: LOG_TARGET, - "Peer '{}' exited UTXO sync session early", self.peer_node_id + "Adding {} genesis block pruned inputs in response for block #{} '{}'", pruned_genesis_block_outputs.len(), + current_header.height, + current_header_hash ); - break; + txos.append(&mut pruned_genesis_block_outputs); } - - let utxos = outputs_with_statuses - .into_iter() - .filter_map(|(output, spent)| { - // We only send unspent utxos - if spent { - None - } else { - match output.try_into() { - Ok(tx_ouput) => Some(Ok(SyncUtxosResponse { - output: Some(tx_ouput), - mined_header: current_header.hash().to_vec(), - })), - Err(err) => Some(Err(err)), - } - } - }) - .collect::, String>>() - .map_err(|err| RpcStatus::bad_request(&err))? 
- .into_iter() - .map(Ok); + let txos = txos.into_iter(); // Ensure task stops if the peer prematurely stops their RPC session - let utxos_len = utxos.len(); - if utils::mpsc::send_all(tx, utxos).await.is_err() { + let txos_len = txos.len(); + if utils::mpsc::send_all(tx, txos).await.is_err() { break; } debug!( target: LOG_TARGET, - "Streamed {} utxos in {:.2?} (including stream backpressure)", - utxos_len, + "Streamed {} TXOs in {:.2?} (including stream backpressure)", + txos_len, timer.elapsed() ); @@ -217,7 +327,7 @@ where B: BlockchainBackend + 'static debug!( target: LOG_TARGET, - "UTXO sync completed to Header hash = {}", + "TXO sync completed to Header hash = {}", current_header.hash().to_hex() ); diff --git a/base_layer/core/src/base_node/sync/sync_peer.rs b/base_layer/core/src/base_node/sync/sync_peer.rs index 70d9b83df5..4594f78aa5 100644 --- a/base_layer/core/src/base_node/sync/sync_peer.rs +++ b/base_layer/core/src/base_node/sync/sync_peer.rs @@ -135,6 +135,7 @@ mod test { use super::*; mod sort_by_latency { + use tari_common_types::types::FixedHash; use tari_comms::types::{CommsPublicKey, CommsSecretKey}; use tari_crypto::keys::{PublicKey, SecretKey}; @@ -147,7 +148,12 @@ mod test { let pk = CommsPublicKey::from_secret_key(&sk); let node_id = NodeId::from_key(&pk); let latency_option = latency.map(|latency| Duration::from_millis(latency as u64)); - PeerChainMetadata::new(node_id, ChainMetadata::empty(), latency_option).into() + PeerChainMetadata::new( + node_id, + ChainMetadata::new(0, FixedHash::zero(), 0, 0, 1.into(), 0).unwrap(), + latency_option, + ) + .into() } #[test] diff --git a/base_layer/core/src/blocks/accumulated_data.rs b/base_layer/core/src/blocks/accumulated_data.rs index 7a9071f5a7..80ac3d9bb6 100644 --- a/base_layer/core/src/blocks/accumulated_data.rs +++ b/base_layer/core/src/blocks/accumulated_data.rs @@ -127,7 +127,7 @@ impl BlockHeaderAccumulatedDataBuilder<'_> { PowAlgorithm::RandomX => ( previous_accum .accumulated_randomx_difficulty - .checked_add_difficulty(achieved_target.achieved()) + .checked_add_difficulty(achieved_target.target()) .ok_or(BlockError::DifficultyOverflow)?, previous_accum.accumulated_sha3x_difficulty, ), @@ -135,7 +135,7 @@ impl BlockHeaderAccumulatedDataBuilder<'_> { previous_accum.accumulated_randomx_difficulty, previous_accum .accumulated_sha3x_difficulty - .checked_add_difficulty(achieved_target.achieved()) + .checked_add_difficulty(achieved_target.target()) .ok_or(BlockError::DifficultyOverflow)?, ), }; diff --git a/base_layer/core/src/blocks/genesis_block.rs b/base_layer/core/src/blocks/genesis_block.rs index 5dca13f990..42a18e2887 100644 --- a/base_layer/core/src/blocks/genesis_block.rs +++ b/base_layer/core/src/blocks/genesis_block.rs @@ -48,7 +48,7 @@ pub fn get_genesis_block(network: Network) -> ChainBlock { NextNet => get_nextnet_genesis_block(), Igor => get_igor_genesis_block(), Esmeralda => get_esmeralda_genesis_block(), - LocalNet => get_esmeralda_genesis_block(), + LocalNet => get_localnet_genesis_block(), } } @@ -111,7 +111,7 @@ pub fn get_stagenet_genesis_block() -> ChainBlock { if add_faucet_utxos { // NB! 
Update 'consensus_constants.rs/pub fn igor()/ConsensusConstants {faucet_value: ?}' with total value // NB: `stagenet_genesis_sanity_check` must pass - let file_contents = include_str!("faucets/esmeralda_faucet.json"); + let file_contents = include_str!("faucets/stagenet_faucet.json"); add_faucet_utxos_to_genesis_block(file_contents, &mut block); // Enable print only if you need to generate new Merkle roots, then disable it again let print_values = false; @@ -119,9 +119,9 @@ pub fn get_stagenet_genesis_block() -> ChainBlock { // Hardcode the Merkle roots once they've been computed above block.header.kernel_mr = - FixedHash::from_hex("3f4011ec1e8ddfbd66fb7331c5623b38f529a66e81233d924df85f2070b2aacb").unwrap(); + FixedHash::from_hex("a08ff15219beea81d4131465290443fb3bd99d28b8af85975dbb2c77cb4cb5a0").unwrap(); block.header.output_mr = - FixedHash::from_hex("3e40efda288a57d3319c63388dd47ffe4b682baaf6a3b58622ec94d77ad712a2").unwrap(); + FixedHash::from_hex("435f13e21be06b0d0ae9ad3869ac7c723edd933983fa2e26df843c82594b3245").unwrap(); block.header.validator_node_mr = FixedHash::from_hex("277da65c40b2cf99db86baedb903a3f0a38540f3a94d40c826eecac7e27d5dfc").unwrap(); } @@ -140,7 +140,7 @@ pub fn get_stagenet_genesis_block() -> ChainBlock { fn get_stagenet_genesis_block_raw() -> Block { // Set genesis timestamp - let genesis_timestamp = DateTime::parse_from_rfc2822("07 Nov 2024 08:00:00 +0200").expect("parse may not fail"); + let genesis_timestamp = DateTime::parse_from_rfc2822("11 Mar 2024 08:00:00 +0200").expect("parse may not fail"); let not_before_proof = b"i am the stagenet genesis block, watch out, here i come \ \ The New York Times , 2000/01/01 \ @@ -163,7 +163,7 @@ pub fn get_nextnet_genesis_block() -> ChainBlock { if add_faucet_utxos { // NB! Update 'consensus_constants.rs/pub fn igor()/ConsensusConstants {faucet_value: ?}' with total value // NB: `nextnet_genesis_sanity_check` must pass - let file_contents = include_str!("faucets/esmeralda_faucet.json"); + let file_contents = include_str!("faucets/nextnet_faucet.json"); add_faucet_utxos_to_genesis_block(file_contents, &mut block); // Enable print only if you need to generate new Merkle roots, then disable it again let print_values = false; @@ -171,9 +171,9 @@ pub fn get_nextnet_genesis_block() -> ChainBlock { // Hardcode the Merkle roots once they've been computed above block.header.kernel_mr = - FixedHash::from_hex("3f4011ec1e8ddfbd66fb7331c5623b38f529a66e81233d924df85f2070b2aacb").unwrap(); + FixedHash::from_hex("36881d87e25183f5189d2dca5f7da450c399e7006dafd9bd9240f73a5fb3f0ad").unwrap(); block.header.output_mr = - FixedHash::from_hex("3e40efda288a57d3319c63388dd47ffe4b682baaf6a3b58622ec94d77ad712a2").unwrap(); + FixedHash::from_hex("7b65d5140485b44e33eef3690d46c41e4dc5c4520ad7464d7740f376f4f0a728").unwrap(); block.header.validator_node_mr = FixedHash::from_hex("277da65c40b2cf99db86baedb903a3f0a38540f3a94d40c826eecac7e27d5dfc").unwrap(); } @@ -192,7 +192,7 @@ pub fn get_nextnet_genesis_block() -> ChainBlock { fn get_nextnet_genesis_block_raw() -> Block { // Set genesis timestamp - let genesis_timestamp = DateTime::parse_from_rfc2822("12 Dec 2023 18:10:00 +0200").expect("parse may not fail"); + let genesis_timestamp = DateTime::parse_from_rfc2822("11 Mar 2024 08:00:00 +0200").expect("parse may not fail"); // Let us add a "not before" proof to the genesis block let not_before_proof = b"nextnet has a blast, its prowess echoed in every gust \ \ @@ -250,7 +250,7 @@ pub fn get_igor_genesis_block() -> ChainBlock { fn get_igor_genesis_block_raw() 
-> Block { // Set genesis timestamp - let genesis_timestamp = DateTime::parse_from_rfc2822("12 Dec 2023 08:20:00 +0200").expect("parse may not fail"); + let genesis_timestamp = DateTime::parse_from_rfc2822("11 Mar 2024 08:00:00 +0200").expect("parse may not fail"); // Let us add a "not before" proof to the genesis block let not_before_proof = b"but igor is the best, it is whispered in the wind \ \ @@ -304,7 +304,41 @@ pub fn get_esmeralda_genesis_block() -> ChainBlock { fn get_esmeralda_genesis_block_raw() -> Block { // Set genesis timestamp - let genesis_timestamp = DateTime::parse_from_rfc2822("08 Dec 2023 08:01:00 +0200").expect("parse may not fail"); + let genesis_timestamp = DateTime::parse_from_rfc2822("11 Mar 2024 08:00:00 +0200").expect("parse may not fail"); + // Let us add a "not before" proof to the genesis block + let not_before_proof = + b"as I sip my drink, thoughts of esmeralda consume my mind, like a refreshing nourishing draught \ + \ + The New York Times , 2000/01/01 \ + \ + Lorem Ipsum \ + \ + Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore \ + magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo \ + consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \ + pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id \ + est laborum."; + get_raw_block(&genesis_timestamp, ¬_before_proof.to_vec()) +} + +pub fn get_localnet_genesis_block() -> ChainBlock { + // lets get the block + let block = crate::blocks::genesis_block::get_localnet_genesis_block_raw(); + let accumulated_data = BlockHeaderAccumulatedData { + hash: block.hash(), + total_kernel_offset: block.header.total_kernel_offset.clone(), + achieved_difficulty: Difficulty::min(), + total_accumulated_difficulty: 1.into(), + accumulated_randomx_difficulty: AccumulatedDifficulty::min(), + accumulated_sha3x_difficulty: AccumulatedDifficulty::min(), + target_difficulty: Difficulty::min(), + }; + ChainBlock::try_construct(Arc::new(block), accumulated_data).unwrap() +} + +fn get_localnet_genesis_block_raw() -> Block { + // Set genesis timestamp + let genesis_timestamp = DateTime::parse_from_rfc2822("20 Feb 2024 08:01:00 +0200").expect("parse may not fail"); // Let us add a "not before" proof to the genesis block let not_before_proof = b"as I sip my drink, thoughts of esmeralda consume my mind, like a refreshing nourishing draught \ @@ -368,6 +402,7 @@ mod test { use tari_common_types::{epoch::VnEpoch, types::Commitment}; use tari_utilities::ByteArray; + use Network; use super::*; use crate::{ @@ -383,14 +418,16 @@ mod test { }; #[test] - fn stagenet_genesis_sanity_check() { - // Note: Generate new data for `pub fn get_stagenet_genesis_block()` and `fn get_stagenet_genesis_block_raw()` + #[cfg(tari_target_network_testnet)] + fn esme_genesis_sanity_check() { + // Note: Generate new data for `pub fn get_esmeralda_genesis_block()` and `fn get_esmeralda_genesis_block_raw()` // if consensus values change, e.g. 
new faucet or other - let block = get_stagenet_genesis_block(); - check_block(Network::StageNet, &block, 100, 1); + let block = get_esmeralda_genesis_block(); + check_block(Network::Esmeralda, &block, 100, 1); } #[test] + #[cfg(tari_target_network_nextnet)] fn nextnet_genesis_sanity_check() { // Note: Generate new data for `pub fn get_nextnet_genesis_block()` and `fn get_stagenet_genesis_block_raw()` // if consensus values change, e.g. new faucet or other @@ -399,21 +436,29 @@ mod test { } #[test] - fn esmeralda_genesis_sanity_check() { - // Note: Generate new data for `pub fn get_esmeralda_genesis_block()` and `fn get_esmeralda_genesis_block_raw()` + #[cfg(tari_target_network_mainnet)] + fn stagenet_genesis_sanity_check() { + Network::set_current(Network::StageNet).unwrap(); + // Note: Generate new data for `pub fn get_stagenet_genesis_block()` and `fn get_stagenet_genesis_block_raw()` // if consensus values change, e.g. new faucet or other - let block = get_esmeralda_genesis_block(); - check_block(Network::Esmeralda, &block, 100, 1); + let block = get_stagenet_genesis_block(); + check_block(Network::StageNet, &block, 100, 1); } #[test] fn igor_genesis_sanity_check() { - // Note: Generate new data for `pub fn get_igor_genesis_block()` and `fn get_igor_genesis_block_raw()` - // if consensus values change, e.g. new faucet or other + // Note: If outputs and kernels are added, this test will fail unless you explicitly check that network == Igor let block = get_igor_genesis_block(); check_block(Network::Igor, &block, 0, 0); } + #[test] + fn localnet_genesis_sanity_check() { + // Note: If outputs and kernels are added, this test will fail unless you explicitly check that network == LocalNet + let block = get_localnet_genesis_block(); + check_block(Network::LocalNet, &block, 0, 0); + } + fn check_block(network: Network, block: &ChainBlock, expected_outputs: usize, expected_kernels: usize) { assert!(block.block().body.inputs().is_empty()); assert_eq!(block.block().body.kernels().len(), expected_kernels); diff --git a/base_layer/core/src/blocks/new_block_template.rs b/base_layer/core/src/blocks/new_block_template.rs index 1c05b433e0..370587117e 100644 --- a/base_layer/core/src/blocks/new_block_template.rs +++ b/base_layer/core/src/blocks/new_block_template.rs @@ -22,6 +22,7 @@ use std::fmt::{Display, Formatter}; +use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; use crate::{ @@ -36,7 +37,7 @@ use crate::{ /// The new block template is used constructing a new partial block, allowing a miner to added the coinbase utxo and as /// a final step the Base node to add the MMR roots to the header. -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct NewBlockTemplate { /// The NewBlockHeaderTemplate is used for the construction of a new mineable block. It contains all the metadata /// for the block that the Base Node is able to complete on behalf of a Miner. 
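The Borsh derives added to NewBlockTemplate above (and to NewBlockHeaderTemplate in the next hunk) make block templates round-trippable as raw bytes. A minimal sketch of the property this buys, using a hypothetical ToyTemplate stand-in rather than the real Tari structs, and assuming the borsh crate with its "derive" feature:

    // Sketch only: ToyTemplate is a stand-in; the real NewBlockTemplate also
    // carries an aggregate body and a NewBlockHeaderTemplate.
    use borsh::{BorshDeserialize, BorshSerialize};

    #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
    struct ToyTemplate {
        version: u16,
        target_difficulty: u64,
        total_fees: u64,
    }

    fn main() -> std::io::Result<()> {
        let template = ToyTemplate { version: 1, target_difficulty: 60_000, total_fees: 125 };
        // borsh 1.x API; on borsh 0.10 this would be `template.try_to_vec()?`.
        let bytes = borsh::to_vec(&template)?;
        let decoded = ToyTemplate::try_from_slice(&bytes)?;
        assert_eq!(template, decoded);
        Ok(())
    }

Borsh encoding is canonical (fixed field order, no optional framing), so any two nodes that decode the same bytes reconstruct an identical template, which is what makes it safe to hand templates across the miner/node boundary.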
diff --git a/base_layer/core/src/blocks/new_blockheader_template.rs b/base_layer/core/src/blocks/new_blockheader_template.rs index b26b6aa7d3..5cdf55e172 100644 --- a/base_layer/core/src/blocks/new_blockheader_template.rs +++ b/base_layer/core/src/blocks/new_blockheader_template.rs @@ -22,15 +22,15 @@ use std::fmt::{Display, Error, Formatter}; +use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; use tari_common_types::types::{BlockHash, PrivateKey}; use tari_utilities::hex::Hex; use crate::{blocks::block_header::BlockHeader, proof_of_work::ProofOfWork}; - /// The NewBlockHeaderTemplate is used for the construction of a new mineable block. It contains all the metadata for /// the block that the Base Node is able to complete on behalf of a Miner. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize)] pub struct NewBlockHeaderTemplate { /// Version of the block pub version: u16, diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index e108dae80a..9cc5722ce5 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -26,7 +26,7 @@ use primitive_types::U256; use rand::{rngs::OsRng, RngCore}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, }; use tari_utilities::epoch_time::EpochTime; @@ -59,9 +59,10 @@ use crate::{ }, common::rolling_vec::RollingVec, proof_of_work::{PowAlgorithm, TargetDifficultyWindow}, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{OutputType, TransactionInput, TransactionKernel, TransactionOutput}, OutputSmt, }; + const LOG_TARGET: &str = "c::bn::async_db"; fn trace_log(name: &str, f: F) -> R @@ -142,6 +143,10 @@ impl AsyncBlockchainDb { pub fn inner(&self) -> &BlockchainDatabase { &self.db } + + pub fn fetch_genesis_block(&self) -> ChainBlock { + self.db.fetch_genesis_block() + } } impl AsyncBlockchainDb { @@ -154,15 +159,23 @@ impl AsyncBlockchainDb { //---------------------------------- TXO --------------------------------------------// + make_async_fn!(fetch_output(output_hash: HashOutput) -> Option, "fetch_output"); + + make_async_fn!(fetch_input(output_hash: HashOutput) -> Option, "fetch_input"); + + make_async_fn!(fetch_unspent_output_hash_by_commitment(commitment: Commitment) -> Option, "fetch_unspent_output_by_commitment"); + make_async_fn!(fetch_outputs_with_spend_status_at_tip(hashes: Vec) -> Vec>, "fetch_outputs_with_spend_status_at_tip"); make_async_fn!(fetch_outputs_mined_info(hashes: Vec) -> Vec>, "fetch_outputs_mined_info"); make_async_fn!(fetch_inputs_mined_info(hashes: Vec) -> Vec>, "fetch_inputs_mined_info"); - make_async_fn!(fetch_outputs_in_block_with_spend_state(hash: HashOutput, spend_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_outputs_in_block_with_spend_state"); + make_async_fn!(fetch_outputs_in_block_with_spend_state(header_hash: HashOutput, spend_status_at_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_outputs_in_block_with_spend_state"); - make_async_fn!(fetch_outputs_in_block(hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + make_async_fn!(fetch_outputs_in_block(header_hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + + 
make_async_fn!(fetch_inputs_in_block(header_hash: HashOutput) -> Vec, "fetch_inputs_in_block"); make_async_fn!(utxo_count() -> usize, "utxo_count"); @@ -226,9 +239,9 @@ impl AsyncBlockchainDb { make_async_fn!(chain_header_or_orphan_exists(block_hash: BlockHash) -> bool, "header_exists"); - make_async_fn!(bad_block_exists(block_hash: BlockHash) -> bool, "bad_block_exists"); + make_async_fn!(bad_block_exists(block_hash: BlockHash) -> (bool, String), "bad_block_exists"); - make_async_fn!(add_bad_block(hash: BlockHash, height: u64) -> (), "add_bad_block"); + make_async_fn!(add_bad_block(hash: BlockHash, height: u64, reason: String) -> (), "add_bad_block"); make_async_fn!(fetch_block(height: u64, compact: bool) -> HistoricalBlock, "fetch_block"); @@ -350,6 +363,22 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } + pub fn prune_output_from_all_dbs( + &mut self, + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + ) -> &mut Self { + self.transaction + .prune_output_from_all_dbs(output_hash, commitment, output_type); + self + } + + pub fn delete_all_kernerls_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.transaction.delete_all_kernerls_in_block(block_hash); + self + } + pub fn update_block_accumulated_data_via_horizon_sync( &mut self, header_hash: HashOutput, @@ -374,8 +403,8 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - pub fn insert_bad_block(&mut self, hash: HashOutput, height: u64) -> &mut Self { - self.transaction.insert_bad_block(hash, height); + pub fn insert_bad_block(&mut self, hash: HashOutput, height: u64, reason: String) -> &mut Self { + self.transaction.insert_bad_block(hash, height, reason); self } diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index d291a136a6..3131984748 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -3,7 +3,7 @@ use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use super::TemplateRegistrationEntry; @@ -91,7 +91,7 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option, ) -> Result, ChainStorageError>; /// Fetch a specific output. 
Returns the output @@ -165,7 +165,7 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_total_size_stats(&self) -> Result; /// Check if a block hash is in the bad block list - fn bad_block_exists(&self, block_hash: HashOutput) -> Result; + fn bad_block_exists(&self, block_hash: HashOutput) -> Result<(bool, String), ChainStorageError>; /// Fetches all tracked reorgs fn fetch_all_reorgs(&self) -> Result, ChainStorageError>; diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index fffa20a275..f59ec29389 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -40,6 +40,7 @@ use tari_common_types::{ chain_metadata::ChainMetadata, types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; +use tari_hashing::TransactionHashDomain; use tari_mmr::{ pruned_hashset::PrunedHashSet, sparse_merkle_tree::{DeleteResult, NodeKey, ValueHash}, @@ -89,10 +90,7 @@ use crate::{ DomainSeparatedConsensusHasher, }, proof_of_work::{monero_rx::MoneroPowData, PowAlgorithm, TargetDifficultyWindow}, - transactions::{ - transaction_components::{TransactionInput, TransactionKernel, TransactionOutput}, - TransactionHashDomain, - }, + transactions::transaction_components::{TransactionInput, TransactionKernel, TransactionOutput}, validation::{ helpers::calc_median_timestamp, CandidateBlockValidator, @@ -301,6 +299,11 @@ where B: BlockchainBackend Ok(blockchain_db) } + /// Get the genesis block form the consensus manager + pub fn fetch_genesis_block(&self) -> ChainBlock { + self.consensus_manager.get_genesis_block() + } + /// Returns a reference to the consensus cosntants at the current height pub fn consensus_constants(&self) -> Result<&ConsensusConstants, ChainStorageError> { let height = self.get_height()?; @@ -367,7 +370,7 @@ where B: BlockchainBackend /// that case to re-sync the metadata; or else just exit the program. pub fn get_height(&self) -> Result { let db = self.db_read_access()?; - Ok(db.fetch_chain_metadata()?.height_of_longest_chain()) + Ok(db.fetch_chain_metadata()?.best_block_height()) } /// Return the accumulated proof of work of the longest chain. @@ -383,12 +386,24 @@ where B: BlockchainBackend db.fetch_chain_metadata() } - pub fn fetch_unspent_output_by_commitment( + /// Returns a copy of the current output mined info + pub fn fetch_output(&self, output_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_output(&output_hash) + } + + /// Returns a copy of the current input mined info + pub fn fetch_input(&self, output_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_input(&output_hash) + } + + pub fn fetch_unspent_output_hash_by_commitment( &self, - commitment: &Commitment, + commitment: Commitment, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_unspent_output_hash_by_commitment(commitment) + db.fetch_unspent_output_hash_by_commitment(&commitment) } /// Return a list of matching utxos, with each being `None` if not found. 
If found, the transaction @@ -456,16 +471,21 @@ where B: BlockchainBackend pub fn fetch_outputs_in_block_with_spend_state( &self, - hash: HashOutput, - spend_status_at_header: Option, + header_hash: HashOutput, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block_with_spend_state(&hash, spend_status_at_header) + db.fetch_outputs_in_block_with_spend_state(&header_hash, spend_status_at_header) } - pub fn fetch_outputs_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { + pub fn fetch_outputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block(&hash) + db.fetch_outputs_in_block(&header_hash) + } + + pub fn fetch_inputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_inputs_in_block(&header_hash) } /// Returns the number of UTXOs in the current unspent set @@ -955,10 +975,12 @@ where B: BlockchainBackend if db.contains(&DbKey::HeaderHash(block_hash))? { return Ok(BlockAddResult::BlockExists); } - if db.bad_block_exists(block_hash)? { + let block_exist = db.bad_block_exists(block_hash)?; + if block_exist.0 { return Err(ChainStorageError::ValidationError { source: ValidationError::BadBlockFound { hash: block_hash.to_hex(), + reason: block_exist.1, }, }); } @@ -983,7 +1005,7 @@ where B: BlockchainBackend info!( target: LOG_TARGET, "Best chain is now at height: {}", - db.fetch_chain_metadata()?.height_of_longest_chain() + db.fetch_chain_metadata()?.best_block_height() ); // If blocks were added and the node is in pruned mode, perform pruning prune_database_if_needed(&mut *db, self.config.pruning_horizon, self.config.pruning_interval)?; @@ -1074,12 +1096,12 @@ where B: BlockchainBackend } if end.is_none() { // `(n..)` means fetch blocks until this node's tip - end = Some(metadata.height_of_longest_chain()); + end = Some(metadata.best_block_height()); } let (start, end) = (start.unwrap(), end.unwrap()); - if end > metadata.height_of_longest_chain() { + if end > metadata.best_block_height() { return Err(ChainStorageError::ValueNotFound { entity: "Block", field: "end height", @@ -1132,16 +1154,16 @@ where B: BlockchainBackend } /// Returns true if this block exists in the chain, or is orphaned. - pub fn bad_block_exists(&self, hash: BlockHash) -> Result { + pub fn bad_block_exists(&self, hash: BlockHash) -> Result<(bool, String), ChainStorageError> { let db = self.db_read_access()?; db.bad_block_exists(hash) } /// Adds a block hash to the list of bad blocks so it wont get process again. 
- pub fn add_bad_block(&self, hash: BlockHash, height: u64) -> Result<(), ChainStorageError> { + pub fn add_bad_block(&self, hash: BlockHash, height: u64, reason: String) -> Result<(), ChainStorageError> { let mut db = self.db_write_access()?; let mut txn = DbTransaction::new(); - txn.insert_bad_block(hash, height); + txn.insert_bad_block(hash, height, reason); db.write(txn) } @@ -1282,13 +1304,13 @@ pub fn calculate_mmr_roots( let body = &block.body; let metadata = db.fetch_chain_metadata()?; - if header.prev_hash != *metadata.best_block() { + if header.prev_hash != *metadata.best_block_hash() { return Err(ChainStorageError::CannotCalculateNonTipMmr(format!( "Block (#{}) is not building on tip, previous hash is {} but the current tip is #{} {}", header.height, header.prev_hash, - metadata.height_of_longest_chain(), - metadata.best_block(), + metadata.best_block_height(), + metadata.best_block_hash(), ))); } @@ -1304,27 +1326,39 @@ pub fn calculate_mmr_roots( let mut output_smt = db.fetch_tip_smt()?; let mut input_mmr = PrunedInputMmr::new(PrunedHashSet::default()); - for kernel in body.kernels().iter() { + for kernel in body.kernels() { kernel_mmr.push(kernel.hash().to_vec())?; } - for output in body.outputs().iter() { + for output in body.outputs() { if !output.is_burned() { let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; let smt_node = ValueHash::try_from(output.smt_hash(header.height).as_slice())?; - output_smt.insert(smt_key, smt_node)?; + if let Err(e) = output_smt.insert(smt_key, smt_node) { + error!( + target: LOG_TARGET, + "Output commitment({}) already in SMT", + output.commitment.to_hex(), + ); + return Err(e.into()); + } } } - for input in body.inputs().iter() { + for input in body.inputs() { input_mmr.push(input.canonical_hash().to_vec())?; - // Search the DB for the output leaf index so that it can be marked as spent/deleted. - // If the output hash is not found, check the current output_mmr. This allows zero-conf transactions let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?; match output_smt.delete(&smt_key)? { DeleteResult::Deleted(_value_hash) => {}, - DeleteResult::KeyNotFound => return Err(ChainStorageError::UnspendableInput), + DeleteResult::KeyNotFound => { + error!( + target: LOG_TARGET, + "Could not find input({}) in SMT", + input.commitment()?.to_hex(), + ); + return Err(ChainStorageError::UnspendableInput); + }, }; } @@ -1655,7 +1689,7 @@ fn fetch_block_by_hash( fn check_for_valid_height(db: &T, height: u64) -> Result<(u64, bool), ChainStorageError> { let metadata = db.fetch_chain_metadata()?; - let tip_height = metadata.height_of_longest_chain(); + let tip_height = metadata.best_block_height(); if height > tip_height { return Err(ChainStorageError::InvalidQuery(format!( "Cannot get block at height {}. 
Chain tip is at {}", @@ -1678,7 +1712,7 @@ fn rewind_to_height( // Delete headers let last_header_height = last_header.height; let metadata = db.fetch_chain_metadata()?; - let last_block_height = metadata.height_of_longest_chain(); + let last_block_height = metadata.best_block_height(); // We use the cmp::max value here because we'll only delete headers here and leave remaining headers to be deleted // with the whole block let steps_back = last_header_height @@ -1725,9 +1759,7 @@ fn rewind_to_height( target_height ); - let effective_pruning_horizon = metadata - .height_of_longest_chain() - .saturating_sub(metadata.pruned_height()); + let effective_pruning_horizon = metadata.best_block_height().saturating_sub(metadata.pruned_height()); let prune_past_horizon = metadata.is_pruned_node() && steps_back > effective_pruning_horizon; if prune_past_horizon { warn!( @@ -1761,7 +1793,7 @@ fn rewind_to_height( last_block_height - h - 1 })?; let metadata = db.fetch_chain_metadata()?; - let expected_block_hash = *metadata.best_block(); + let expected_block_hash = *metadata.best_block_hash(); txn.set_best_block( chain_header.height(), chain_header.accumulated_data().hash, @@ -1884,7 +1916,7 @@ fn reorganize_chain( e ); if e.get_ban_reason().is_some() && e.get_ban_reason().unwrap().ban_duration != BanPeriod::Short { - txn.insert_bad_block(block.header().hash(), block.header().height); + txn.insert_bad_block(block.header().hash(), block.header().height, e.to_string()); } // We removed a block from the orphan chain, so the chain is now "broken", so we remove the rest of the // remaining blocks as well. @@ -1927,7 +1959,7 @@ fn swap_to_highest_pow_chain( // lets clear out all remaining headers that dont have a matching block // rewind to height will first delete the headers, then try delete from blocks, if we call this to the current // height it will only trim the extra headers with no blocks - rewind_to_height(db, metadata.height_of_longest_chain())?; + rewind_to_height(db, metadata.best_block_height())?; let strongest_orphan_tips = db.fetch_strongest_orphan_chain_tips()?; if strongest_orphan_tips.is_empty() { // we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so lets @@ -2117,7 +2149,6 @@ fn insert_orphan_and_find_new_tips( // validate the block header let mut prev_timestamps = get_previous_timestamps(db, &candidate_block.header, rules)?; let result = validator.validate(db, &candidate_block.header, parent.header(), &prev_timestamps, None); - let achieved_target_diff = match result { Ok(achieved_target_diff) => achieved_target_diff, // future timelimit validation can succeed at a later time. As the block is not yet valid, we discard it @@ -2134,7 +2165,7 @@ fn insert_orphan_and_find_new_tips( }, Err(e) => { - txn.insert_bad_block(candidate_block.header.hash(), candidate_block.header.height); + txn.insert_bad_block(candidate_block.header.hash(), candidate_block.header.height, e.to_string()); db.write(txn)?; return Err(e.into()); }, @@ -2148,7 +2179,6 @@ fn insert_orphan_and_find_new_tips( .with_achieved_target_difficulty(achieved_target_diff) .with_total_kernel_offset(candidate_block.header.total_kernel_offset.clone()) .build()?; - let chain_block = ChainBlock::try_construct(candidate_block, accumulated_data).ok_or( ChainStorageError::UnexpectedResult("Somehow hash is missing from Chain block".to_string()), )?; @@ -2333,7 +2363,7 @@ fn find_strongest_orphan_tip( // block height will also be discarded. 
fn cleanup_orphans(db: &mut T, orphan_storage_capacity: usize) -> Result<(), ChainStorageError> { let metadata = db.fetch_chain_metadata()?; - let horizon_height = metadata.horizon_block_height(metadata.height_of_longest_chain()); + let horizon_height = metadata.pruned_height_at_given_chain_tip(metadata.best_block_height()); db.delete_oldest_orphans(horizon_height, orphan_storage_capacity) } @@ -2348,18 +2378,18 @@ fn prune_database_if_needed( return Ok(()); } - let db_height = metadata.height_of_longest_chain(); - let abs_pruning_horizon = db_height.saturating_sub(pruning_horizon); - + let prune_to_height_target = metadata.best_block_height().saturating_sub(pruning_horizon); debug!( target: LOG_TARGET, - "Current pruned height is: {}, pruning horizon is: {}, while the pruning interval is: {}", + "Blockchain height: {}, pruning horizon: {}, pruned height: {}, prune to height target: {}, pruning interval: {}", + metadata.best_block_height(), + metadata.pruning_horizon(), metadata.pruned_height(), - abs_pruning_horizon, + prune_to_height_target, pruning_interval, ); - if metadata.pruned_height() < abs_pruning_horizon.saturating_sub(pruning_interval) { - prune_to_height(db, abs_pruning_horizon)?; + if metadata.pruned_height() < prune_to_height_target.saturating_sub(pruning_interval) { + prune_to_height(db, prune_to_height_target)?; } Ok(()) @@ -2387,14 +2417,14 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) return Ok(()); } - if target_horizon_height > metadata.height_of_longest_chain() { + if target_horizon_height > metadata.best_block_height() { return Err(ChainStorageError::InvalidArguments { func: "prune_to_height", arg: "target_horizon_height", message: format!( "Target pruning horizon {} is greater than current block height {}", target_horizon_height, - metadata.height_of_longest_chain() + metadata.best_block_height() ), }); } @@ -2492,6 +2522,7 @@ mod test { create_new_blockchain, create_orphan_chain, create_test_blockchain_db, + update_block_and_smt, TempDatabase, }, BlockSpecs, @@ -2542,8 +2573,14 @@ mod test { .try_into_chain_block() .map(Arc::new) .unwrap(); - let (_, chain) = - create_orphan_chain(&db, &[("A->GB", 1, 120), ("B->A", 1, 120), ("C->B", 1, 120)], genesis).await; + let mut smt = db.fetch_tip_smt().unwrap(); + let (_, chain) = create_orphan_chain( + &db, + &[("A->GB", 1, 120), ("B->A", 1, 120), ("C->B", 1, 120)], + genesis, + &mut smt, + ) + .await; let access = db.db_read_access().unwrap(); let orphan_chain = get_orphan_link_main_chain(&*access, chain.get("C").unwrap().hash()).unwrap(); assert_eq!(orphan_chain[2].hash(), chain.get("C").unwrap().hash()); @@ -2564,6 +2601,9 @@ mod test { ]) .await; // Create reorg chain + // we only need a smt, this one will not be technically correct, but due to the use of mockvalidators(true), + // they will pass all mr tests + let mut smt = db.fetch_tip_smt().unwrap(); let fork_root = mainchain.get("B").unwrap().clone(); let (_, reorg_chain) = create_orphan_chain( &db, @@ -2574,6 +2614,7 @@ mod test { ("F2->E2", 1, 120), ], fork_root, + &mut smt, ) .await; let access = db.db_read_access().unwrap(); @@ -2608,7 +2649,8 @@ mod test { .try_into_chain_block() .map(Arc::new) .unwrap(); - let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block).await; + let mut smt = db.fetch_tip_smt().unwrap(); + let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block, &mut smt).await; let block = chain.get("A").unwrap().clone(); let mut access = db.db_write_access().unwrap(); 
insert_orphan_and_find_new_tips(&mut *access, block.to_arc_block(), &validator, &db.consensus_manager) @@ -2625,8 +2667,13 @@ mod test { let (_, main_chain) = create_main_chain(&db, &[("A->GB", 1, 120), ("B->A", 1, 120)]).await; let block_b = main_chain.get("B").unwrap().clone(); - let (_, orphan_chain) = - create_chained_blocks(&[("C2->GB", 1, 120), ("D2->C2", 1, 120), ("E2->D2", 1, 120)], block_b).await; + let mut smt = db.fetch_tip_smt().unwrap(); + let (_, orphan_chain) = create_chained_blocks( + &[("C2->GB", 1, 120), ("D2->C2", 1, 120), ("E2->D2", 1, 120)], + block_b, + &mut smt, + ) + .await; let mut access = db.db_write_access().unwrap(); let block_d2 = orphan_chain.get("D2").unwrap().clone(); @@ -2648,7 +2695,8 @@ mod test { let (_, main_chain) = create_main_chain(&db, &[("A->GB", 1, 120)]).await; let fork_root = main_chain.get("A").unwrap().clone(); - let (_, orphan_chain) = create_chained_blocks(&[("B2->GB", 1, 120)], fork_root).await; + let mut smt = db.fetch_tip_smt().unwrap(); + let (_, orphan_chain) = create_chained_blocks(&[("B2->GB", 1, 120)], fork_root, &mut smt).await; let mut access = db.db_write_access().unwrap(); let block = orphan_chain.get("B2").unwrap().clone(); @@ -2667,6 +2715,7 @@ mod test { assert_eq!(strongest_tips, 1); } + #[ignore] #[tokio::test] async fn it_correctly_detects_strongest_orphan_tips() { let db = create_new_blockchain(); @@ -2684,19 +2733,24 @@ mod test { // Fork 1 (with 3 blocks) let fork_root_1 = main_chain.get("A").unwrap().clone(); + // we only need a smt, this one will not be technically correct, but due to the use of mockvalidators(true), + // they will pass all mr tests + let mut smt = db.fetch_tip_smt().unwrap(); + let (_, orphan_chain_1) = create_chained_blocks( &[("B2->GB", 1, 120), ("C2->B2", 1, 120), ("D2->C2", 1, 120)], fork_root_1, + &mut smt, ) .await; // Fork 2 (with 1 block) let fork_root_2 = main_chain.get("GB").unwrap().clone(); - let (_, orphan_chain_2) = create_chained_blocks(&[("B3->GB", 1, 120)], fork_root_2).await; + let (_, orphan_chain_2) = create_chained_blocks(&[("B3->GB", 1, 120)], fork_root_2, &mut smt).await; // Fork 3 (with 1 block) let fork_root_3 = main_chain.get("B").unwrap().clone(); - let (_, orphan_chain_3) = create_chained_blocks(&[("B4->GB", 1, 120)], fork_root_3).await; + let (_, orphan_chain_3) = create_chained_blocks(&[("B4->GB", 1, 120)], fork_root_3, &mut smt).await; // Add blocks to db let mut access = db.db_write_access().unwrap(); @@ -2761,21 +2815,25 @@ mod test { } mod handle_possible_reorg { - use super::*; + use crate::test_helpers::blockchain::update_block_and_smt; + #[ignore] #[tokio::test] async fn it_links_many_orphan_branches_to_main_chain() { let test = TestHarness::setup(); - + let mut smt = test.db.fetch_tip_smt().unwrap(); let (_, main_chain) = create_main_chain(&test.db, block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a"], ["4a->3a"])).await; let genesis = main_chain.get("GB").unwrap().clone(); let fork_root = main_chain.get("1a").unwrap().clone(); + let mut a1_block = fork_root.block().clone(); + update_block_and_smt(&mut a1_block, &mut smt); let (_, orphan_chain_b) = create_chained_blocks( block_specs!(["2b->GB"], ["3b->2b"], ["4b->3b"], ["5b->4b"], ["6b->5b"]), fork_root, + &mut smt, ) .await; @@ -2788,8 +2846,12 @@ mod test { // Add chain c orphans branching from chain b let fork_root = orphan_chain_b.get("3b").unwrap().clone(); - let (_, orphan_chain_c) = - create_chained_blocks(block_specs!(["4c->GB"], ["5c->4c"], ["6c->5c"], ["7c->6c"]), fork_root).await; + let (_, orphan_chain_c) 
= create_chained_blocks( + block_specs!(["4c->GB"], ["5c->4c"], ["6c->5c"], ["7c->6c"]), + fork_root, + &mut smt, + ) + .await; for name in ["7c", "5c", "6c", "4c"] { let block = orphan_chain_c.get(name).unwrap(); @@ -2801,6 +2863,7 @@ mod test { let (_, orphan_chain_d) = create_chained_blocks( block_specs!(["7d->GB", difficulty: Difficulty::from_u64(10).unwrap()]), fork_root, + &mut smt, ) .await; @@ -2825,8 +2888,8 @@ mod test { let tip = access.fetch_tip_header().unwrap(); assert_eq!(tip.hash(), block.hash()); let metadata = access.fetch_chain_metadata().unwrap(); - assert_eq!(metadata.best_block(), block.hash()); - assert_eq!(metadata.height_of_longest_chain(), block.height()); + assert_eq!(metadata.best_block_hash(), block.hash()); + assert_eq!(metadata.best_block_height(), block.height()); assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap()); let mut all_blocks = main_chain @@ -2849,12 +2912,13 @@ mod test { } } + #[ignore] #[tokio::test] async fn it_links_many_orphan_branches_to_main_chain_with_greater_reorg_than_median_timestamp_window() { let test = TestHarness::setup(); // This test assumes a MTC of 11 assert_eq!(test.consensus.consensus_constants(0).median_timestamp_count(), 11); - + let mut smt = test.db.fetch_tip_smt().unwrap(); let (_, main_chain) = create_main_chain( &test.db, block_specs!( @@ -2875,8 +2939,9 @@ mod test { ) .await; let genesis = main_chain.get("GB").unwrap().clone(); - let fork_root = main_chain.get("1a").unwrap().clone(); + let mut a1_block = fork_root.block().clone(); + update_block_and_smt(&mut a1_block, &mut smt); let (_, orphan_chain_b) = create_chained_blocks( block_specs!( ["2b->GB"], @@ -2892,6 +2957,7 @@ mod test { ["12b->11b", difficulty: Difficulty::from_u64(5).unwrap()] ), fork_root, + &mut smt, ) .await; @@ -2920,8 +2986,8 @@ mod test { let tip = access.fetch_tip_header().unwrap(); assert_eq!(tip.hash(), block.hash()); let metadata = access.fetch_chain_metadata().unwrap(); - assert_eq!(metadata.best_block(), block.hash()); - assert_eq!(metadata.height_of_longest_chain(), block.height()); + assert_eq!(metadata.best_block_hash(), block.hash()); + assert_eq!(metadata.best_block_height(), block.height()); assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap()); let mut all_blocks = main_chain.into_iter().chain(orphan_chain_b).collect::>(); @@ -2944,13 +3010,17 @@ mod test { #[tokio::test] async fn it_errors_if_reorging_to_an_invalid_height() { let test = TestHarness::setup(); + let mut smt = test.db.fetch_tip_smt().unwrap(); let (_, main_chain) = create_main_chain(&test.db, block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a"], ["4a->3a"])).await; let fork_root = main_chain.get("1a").unwrap().clone(); + let mut a1_block = fork_root.block().clone(); + update_block_and_smt(&mut a1_block, &mut smt); let (_, orphan_chain_b) = create_chained_blocks( block_specs!(["2b->GB", height: 10, difficulty: Difficulty::from_u64(10).unwrap()]), fork_root, + &mut smt, ) .await; @@ -2962,6 +3032,7 @@ mod test { #[tokio::test] async fn it_allows_orphan_blocks_with_any_height() { let test = TestHarness::setup(); + let mut smt = test.db.fetch_tip_smt().unwrap(); let (_, main_chain) = create_main_chain( &test.db, block_specs!(["1a->GB", difficulty: Difficulty::from_u64(2).unwrap()]), @@ -2970,7 +3041,7 @@ mod test { let fork_root = main_chain.get("GB").unwrap().clone(); let (_, orphan_chain_b) = - create_orphan_chain(&test.db, block_specs!(["1b->GB", height: 10]), fork_root).await; + create_orphan_chain(&test.db, block_specs!(["1b->GB", height: 
10]), fork_root, &mut smt).await; let block = orphan_chain_b.get("1b").unwrap().clone(); test.handle_possible_reorg(block.to_arc_block()) @@ -2989,6 +3060,7 @@ mod test { result[1].assert_added(); } + #[ignore] #[tokio::test] async fn test_handle_possible_reorg_case2() { let (result, blocks) = @@ -3001,6 +3073,7 @@ mod test { assert_added_hashes_eq(&result[2], vec!["A2"], &blocks); } + #[ignore] #[tokio::test] async fn test_handle_possible_reorg_case3() { // Switch to new chain and then reorg back @@ -3014,6 +3087,7 @@ mod test { assert_added_hashes_eq(&result[2], vec!["A", "B"], &blocks); } + #[ignore] #[tokio::test] async fn test_handle_possible_reorg_case4() { let (result, blocks) = test_case_handle_possible_reorg(&[ @@ -3034,6 +3108,7 @@ mod test { assert_added_hashes_eq(&result[4], vec!["A", "B", "C"], &blocks); } + #[ignore] #[tokio::test] async fn test_handle_possible_reorg_case5() { let (result, blocks) = test_case_handle_possible_reorg(&[ @@ -3075,6 +3150,7 @@ mod test { #[tokio::test] async fn test_handle_possible_reorg_case6_orphan_chain_link() { let db = create_new_blockchain(); + let mut smt = db.fetch_tip_smt().unwrap(); let (_, mainchain) = create_main_chain(&db, &[ ("A->GB", 1, 120), ("B->A", 1, 120), @@ -3086,10 +3162,15 @@ mod test { let mock_validator = MockValidator::new(true); let chain_strength_comparer = strongest_chain().by_sha3x_difficulty().build(); + let mut a_block = mainchain.get("A").unwrap().block().clone(); let fork_block = mainchain.get("B").unwrap().clone(); + let mut b_block = fork_block.block().clone(); + update_block_and_smt(&mut a_block, &mut smt); + update_block_and_smt(&mut b_block, &mut smt); let (_, reorg_chain) = create_chained_blocks( &[("C2->GB", 1, 120), ("D2->C2", 1, 120), ("E2->D2", 1, 120)], fork_block, + &mut smt, ) .await; @@ -3165,9 +3246,12 @@ mod test { let mock_validator = MockValidator::new(true); let chain_strength_comparer = strongest_chain().by_sha3x_difficulty().build(); - + // we only need a smt, this one will not be technically correct, but due to the use of mockvalidators(true), + // they will pass all mr tests + let mut smt = db.fetch_tip_smt().unwrap(); let fork_block = mainchain.get("C").unwrap().clone(); - let (_, reorg_chain) = create_chained_blocks(&[("D2->GB", 1, 120), ("E2->D2", 2, 120)], fork_block).await; + let (_, reorg_chain) = + create_chained_blocks(&[("D2->GB", 1, 120), ("E2->D2", 2, 120)], fork_block, &mut smt).await; // Add true orphans let mut access = db.db_write_access().unwrap(); @@ -3243,6 +3327,7 @@ mod test { assert_target_difficulties_eq(&result[4], vec![19, 24]); } + #[ignore] #[tokio::test] async fn test_handle_possible_reorg_target_difficulty_is_correct_case_2() { // Test a straight chain to get the correct target difficulty. 
The block times must be reduced so that the @@ -3292,6 +3377,7 @@ mod test { assert_target_difficulties_eq(&result[6], vec![10, 19, 23, 26]); } + #[ignore] #[tokio::test] async fn test_handle_possible_reorg_accum_difficulty_is_correct_case_1() { let (result, _blocks) = test_case_handle_possible_reorg(&[ @@ -3348,7 +3434,7 @@ mod test { } fn check_whole_chain(db: &mut TempDatabase) { - let mut h = db.fetch_chain_metadata().unwrap().height_of_longest_chain(); + let mut h = db.fetch_chain_metadata().unwrap().best_block_height(); while h > 0 { // fetch_chain_header_by_height will error if there are internal inconsistencies db.fetch_chain_header_by_height(h).unwrap(); @@ -3439,7 +3525,6 @@ mod test { blocks: T, ) -> Result<(Vec<BlockAddResult>, HashMap<String, Arc<ChainBlock>>), ChainStorageError> { let test = TestHarness::setup(); - // let db = create_new_blockchain(); let genesis_block = test .db .fetch_block(0, true) @@ -3447,7 +3532,8 @@ mod test { .try_into_chain_block() .map(Arc::new) .unwrap(); - let (block_names, chain) = create_chained_blocks(blocks, genesis_block).await; + let mut smt = test.db.fetch_tip_smt().unwrap(); + let (block_names, chain) = create_chained_blocks(blocks, genesis_block, &mut smt).await; let mut results = vec![]; for name in block_names { diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 6ca0d7bf52..bb619f1ca5 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -33,7 +33,7 @@ use tari_utilities::hex::Hex; use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{OutputType, TransactionKernel, TransactionOutput}, OutputSmt, }; @@ -132,6 +132,26 @@ impl DbTransaction { self } + pub fn prune_output_from_all_dbs( + &mut self, + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + ) -> &mut Self { + self.operations.push(WriteOperation::PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + }); + self + } + + pub fn delete_all_kernels_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.operations + .push(WriteOperation::DeleteAllKernelsInBlock { block_hash }); + self + } + pub fn delete_all_inputs_in_block(&mut self, block_hash: BlockHash) -> &mut Self { self.operations .push(WriteOperation::DeleteAllInputsInBlock { block_hash }); @@ -157,10 +177,11 @@ impl DbTransaction { } /// Inserts a block hash into the bad block list - pub fn insert_bad_block(&mut self, block_hash: HashOutput, height: u64) -> &mut Self { + pub fn insert_bad_block(&mut self, block_hash: HashOutput, height: u64, reason: String) -> &mut Self { self.operations.push(WriteOperation::InsertBadBlock { hash: block_hash, height, + reason, }); self } @@ -290,6 +311,7 @@ pub enum WriteOperation { InsertBadBlock { hash: HashOutput, height: u64, + reason: String, }, DeleteHeader(u64), DeleteOrphan(HashOutput), @@ -304,6 +326,14 @@ pub enum WriteOperation { PruneOutputsSpentAtHash { block_hash: BlockHash, }, + PruneOutputFromAllDbs { + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + }, + DeleteAllKernelsInBlock { + block_hash: BlockHash, + }, DeleteAllInputsInBlock { block_hash: BlockHash, }, @@ -387,6 +417,18 @@ impl fmt::Display for WriteOperation { write!(f, "Update 
Block data for block {}", header_hash) }, PruneOutputsSpentAtHash { block_hash } => write!(f, "Prune output(s) at hash: {}", block_hash), + PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + } => write!( + f, + "Prune output from all dbs, hash: {}, commitment: {}, output_type: {}", + output_hash, + commitment.to_hex(), + output_type, + ), + DeleteAllKernelsInBlock { block_hash } => write!(f, "Delete kernels in block {}", block_hash), DeleteAllInputsInBlock { block_hash } => write!(f, "Delete outputs in block {}", block_hash), SetAccumulatedDataForOrphan(accumulated_data) => { write!(f, "Set accumulated data for orphan {}", accumulated_data) @@ -406,7 +448,9 @@ impl fmt::Display for WriteOperation { SetPrunedHeight { height, .. } => write!(f, "Set pruned height to {}", height), DeleteHeader(height) => write!(f, "Delete header at height: {}", height), DeleteOrphan(hash) => write!(f, "Delete orphan with hash: {}", hash), - InsertBadBlock { hash, height } => write!(f, "Insert bad block #{} {}", height, hash), + InsertBadBlock { hash, height, reason } => { + write!(f, "Insert bad block #{} {} for {}", height, hash, reason) + }, SetHorizonData { .. } => write!(f, "Set horizon data"), InsertReorg { .. } => write!(f, "Insert reorg"), ClearAllReorgs => write!(f, "Clear all reorgs"), diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index 47f5217697..7261030149 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -21,7 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use lmdb_zero::error; -use tari_common_types::types::FixedHashSizeError; +use tari_common_types::{chain_metadata::ChainMetaDataError, types::FixedHashSizeError}; use tari_mmr::{error::MerkleMountainRangeError, sparse_merkle_tree::SMTError, MerkleProofError}; use tari_storage::lmdb_store::LMDBError; use thiserror::Error; @@ -118,7 +118,7 @@ pub enum ChainStorageError { #[error("Key {key} in {table_name} already exists")] KeyExists { table_name: &'static str, key: String }, #[error("Database resize required")] - DbResizeRequired, + DbResizeRequired(Option<usize>), #[error("DB transaction was too large ({0} operations)")] DbTransactionTooLarge(usize), #[error("DB needs to be resynced: {0}")] @@ -139,6 +139,8 @@ pub enum ChainStorageError { FromKeyBytesFailed(String), #[error("Sparse Merkle Tree error: {0}")] SMTError(#[from] SMTError), + #[error("Invalid ChainMetaData: {0}")] + InvalidChainMetaData(#[from] ChainMetaDataError), } impl ChainStorageError { @@ -181,7 +183,7 @@ impl ChainStorageError { _err @ ChainStorageError::IoError(_) | _err @ ChainStorageError::CannotCalculateNonTipMmr(_) | _err @ ChainStorageError::KeyExists { .. 
} | - _err @ ChainStorageError::DbResizeRequired | + _err @ ChainStorageError::DbResizeRequired(_) | _err @ ChainStorageError::DbTransactionTooLarge(_) | _err @ ChainStorageError::DatabaseResyncRequired(_) | _err @ ChainStorageError::BlockError(_) | @@ -190,6 +192,7 @@ impl ChainStorageError { _err @ ChainStorageError::FixedHashSizeError(_) | _err @ ChainStorageError::CompositeKeyLengthExceeded | _err @ ChainStorageError::FromKeyBytesFailed(_) | + _err @ ChainStorageError::InvalidChainMetaData(_) | _err @ ChainStorageError::OutOfRange => None, } } @@ -210,7 +213,7 @@ impl From<lmdb_zero::Error> for ChainStorageError { field: "", value: "".to_string(), }, - Code(error::MAP_FULL) => ChainStorageError::DbResizeRequired, + Code(error::MAP_FULL) => ChainStorageError::DbResizeRequired(None), _ => ChainStorageError::AccessError(err.to_string()), } } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs index 1bf6b2e60d..064b7be75b 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs @@ -20,23 +20,53 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use std::time::Instant; + use lmdb_zero::error; use log::*; use serde::{de::DeserializeOwned, Serialize}; +use tari_storage::lmdb_store::BYTES_PER_MB; use crate::chain_storage::ChainStorageError; pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb"; -pub fn serialize<T>(data: &T) -> Result<Vec<u8>, ChainStorageError> +/// Serialize the given data into a byte vector +/// Note: +/// `size_hint` is given as an option because computing the serialized size up front is expensive +/// for large data structures, at ~30% overhead +pub fn serialize<T>(data: &T, size_hint: Option<usize>) -> Result<Vec<u8>, ChainStorageError> where T: Serialize { - let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; - #[allow(clippy::cast_possible_truncation)] - let mut buf = Vec::with_capacity(size as usize); + let start = Instant::now(); + let mut buf = if let Some(size) = size_hint { + Vec::with_capacity(size) + } else { + let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; + #[allow(clippy::cast_possible_truncation)] + Vec::with_capacity(size as usize) + }; + let check_time = start.elapsed(); bincode::serialize_into(&mut buf, data).map_err(|e| { error!(target: LOG_TARGET, "Could not serialize lmdb: {:?}", e); ChainStorageError::AccessError(e.to_string()) })?; + if buf.len() >= BYTES_PER_MB { + let serialize_time = start.elapsed() - check_time; + trace!( + "lmdb_replace - {} MB, serialize check in {:.2?}, serialize in {:.2?}", + buf.len() / BYTES_PER_MB, + check_time, + serialize_time + ); + } + if let Some(size) = size_hint { + if buf.len() > size { + warn!( + target: LOG_TARGET, + "lmdb_replace - Serialized size hint was too small. 
Expected {}, got {}", size, buf.len() + ); + } + } Ok(buf) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index 991b9bdef1..2b104b7289 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::fmt::Debug; +use std::{fmt::Debug, time::Instant}; use lmdb_zero::{ del, @@ -37,6 +37,7 @@ use lmdb_zero::{ }; use log::*; use serde::{de::DeserializeOwned, Serialize}; +use tari_storage::lmdb_store::BYTES_PER_MB; use tari_utilities::hex::to_hex; use crate::chain_storage::{ @@ -62,7 +63,7 @@ where K: AsLmdbBytes + ?Sized + Debug, V: Serialize + Debug, { - let val_buf = serialize(val)?; + let val_buf = serialize(val, None)?; match txn.access().put(db, key, &val_buf, put::NOOVERWRITE) { Ok(_) => { trace!( @@ -82,11 +83,11 @@ where }) }, err @ Err(lmdb_zero::Error::Code(lmdb_zero::error::MAP_FULL)) => { - error!( + info!( target: LOG_TARGET, "Could not insert {} bytes with key '{}' into '{}' ({:?})", val_buf.len(), to_hex(key.as_lmdb_bytes()), table_name, err ); - Err(ChainStorageError::DbResizeRequired) + Err(ChainStorageError::DbResizeRequired(Some(val_buf.len()))) }, Err(e) => { error!( @@ -112,11 +113,11 @@ where K: AsLmdbBytes + ?Sized, V: Serialize, { - let val_buf = serialize(val)?; + let val_buf = serialize(val, None)?; txn.access().put(db, key, &val_buf, put::Flags::empty()).map_err(|e| { if let lmdb_zero::Error::Code(code) = &e { if *code == lmdb_zero::error::MAP_FULL { - return ChainStorageError::DbResizeRequired; + return ChainStorageError::DbResizeRequired(Some(val_buf.len())); } } error!( @@ -128,16 +129,23 @@ where } /// Inserts or replaces the item at the given key. If the key does not exist, a new entry is created -pub fn lmdb_replace<K, V>(txn: &WriteTransaction<'_>, db: &Database, key: &K, val: &V) -> Result<(), ChainStorageError> +pub fn lmdb_replace<K, V>( + txn: &WriteTransaction<'_>, + db: &Database, + key: &K, + val: &V, + size_hint: Option<usize>, +) -> Result<(), ChainStorageError> where K: AsLmdbBytes + ?Sized, V: Serialize, { - let val_buf = serialize(val)?; - txn.access().put(db, key, &val_buf, put::Flags::empty()).map_err(|e| { + let val_buf = serialize(val, size_hint)?; + let start = Instant::now(); + let res = txn.access().put(db, key, &val_buf, put::Flags::empty()).map_err(|e| { if let lmdb_zero::Error::Code(code) = &e { if *code == lmdb_zero::error::MAP_FULL { - return ChainStorageError::DbResizeRequired; + return ChainStorageError::DbResizeRequired(Some(val_buf.len())); } } error!( @@ -145,7 +153,16 @@ where "Could not replace value in lmdb transaction: {:?}", e ); ChainStorageError::AccessError(e.to_string()) - }) + }); + if val_buf.len() >= BYTES_PER_MB { + let write_time = start.elapsed(); + trace!( + "lmdb_replace - {} MB, lmdb write in {:.2?}", + val_buf.len() / BYTES_PER_MB, + write_time + ); + } + res } /// Deletes the given key. 
An error is returned if the key does not exist @@ -175,7 +192,7 @@ where K: AsLmdbBytes + ?Sized, V: Serialize, { - txn.access().del_item(db, key, &serialize(value)?)?; + txn.access().del_item(db, key, &serialize(value, None)?)?; Ok(()) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 34496f4501..64fb8f5597 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -20,10 +20,18 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, fmt, fs, fs::File, ops::Deref, path::Path, sync::Arc, time::Instant}; +use std::{cmp::max, convert::TryFrom, fmt, fs, fs::File, ops::Deref, path::Path, sync::Arc, time::Instant}; use fs2::FileExt; -use lmdb_zero::{open, ConstTransaction, Database, Environment, ReadTransaction, WriteTransaction}; +use lmdb_zero::{ + open, + traits::AsLmdbBytes, + ConstTransaction, + Database, + Environment, + ReadTransaction, + WriteTransaction, +}; use log::*; use primitive_types::U256; use serde::{Deserialize, Serialize}; @@ -33,7 +41,7 @@ use tari_common_types::{ types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; use tari_mmr::sparse_merkle_tree::{DeleteResult, NodeKey, ValueHash}; -use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; +use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore, BYTES_PER_MB}; use tari_utilities::{ hex::{to_hex, Hex}, ByteArray, @@ -96,7 +104,14 @@ use crate::{ consensus::{ConsensusConstants, ConsensusManager}, transactions::{ aggregated_body::AggregateBody, - transaction_components::{TransactionInput, TransactionKernel, TransactionOutput, ValidatorNodeRegistration}, + transaction_components::{ + OutputType, + SpentOutput, + TransactionInput, + TransactionKernel, + TransactionOutput, + ValidatorNodeRegistration, + }, }, OutputSmt, PrunedKernelMmr, @@ -314,9 +329,11 @@ impl LMDBDatabase { fn apply_db_transaction(&mut self, txn: &DbTransaction) -> Result<(), ChainStorageError> { #[allow(clippy::enum_glob_use)] use WriteOperation::*; + + let number_of_operations = txn.operations().len(); let write_txn = self.write_transaction()?; - for op in txn.operations() { - trace!(target: LOG_TARGET, "[apply_db_transaction] WriteOperation: {}", op); + for (i, op) in txn.operations().iter().enumerate() { + trace!(target: LOG_TARGET, "[apply_db_transaction] WriteOperation: {} ({} of {})", op, i + 1, number_of_operations); match op { InsertOrphanBlock(block) => self.insert_orphan_block(&write_txn, block)?, InsertChainHeader { header } => { @@ -385,6 +402,16 @@ impl LMDBDatabase { PruneOutputsSpentAtHash { block_hash } => { self.prune_outputs_spent_at_hash(&write_txn, block_hash)?; }, + PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + } => { + self.prune_output_from_all_dbs(&write_txn, output_hash, commitment, *output_type)?; + }, + DeleteAllKernelsInBlock { block_hash } => { + self.delete_all_kernels_in_block(&write_txn, block_hash)?; + }, DeleteAllInputsInBlock { block_hash } => { self.delete_all_inputs_in_block(&write_txn, block_hash)?; }, @@ -454,11 +481,11 @@ impl LMDBDatabase { &MetadataValue::HorizonData(horizon_data.clone()), )?; }, - InsertBadBlock { hash, height } => { - self.insert_bad_block_and_cleanup(&write_txn, hash, *height)?; + InsertBadBlock { hash, 
height, reason } => { + self.insert_bad_block_and_cleanup(&write_txn, hash, *height, reason.to_string())?; }, InsertReorg { reorg } => { - lmdb_replace(&write_txn, &self.reorgs, &reorg.local_time.timestamp(), &reorg)?; + lmdb_replace(&write_txn, &self.reorgs, &reorg.local_time.timestamp(), &reorg, None)?; }, ClearAllReorgs => { lmdb_clear(&write_txn, &self.reorgs)?; @@ -473,48 +500,44 @@ impl LMDBDatabase { Ok(()) } - fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 26] { + fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 27] { [ - ("metadata_db", &self.metadata_db), - ("headers_db", &self.headers_db), - ("header_accumulated_data_db", &self.header_accumulated_data_db), - ("block_accumulated_data_db", &self.block_accumulated_data_db), - ("block_hashes_db", &self.block_hashes_db), - ("utxos_db", &self.utxos_db), - ("inputs_db", &self.inputs_db), - ("txos_hash_to_index_db", &self.txos_hash_to_index_db), - ("kernels_db", &self.kernels_db), - ("kernel_excess_index", &self.kernel_excess_index), - ("kernel_excess_sig_index", &self.kernel_excess_sig_index), - ("kernel_mmr_size_index", &self.kernel_mmr_size_index), - ("utxo_commitment_index", &self.utxo_commitment_index), - ("contract_index", &self.contract_index), - ("unique_id_index", &self.unique_id_index), + (LMDB_DB_METADATA, &self.metadata_db), + (LMDB_DB_HEADERS, &self.headers_db), + (LMDB_DB_HEADER_ACCUMULATED_DATA, &self.header_accumulated_data_db), + (LMDB_DB_BLOCK_ACCUMULATED_DATA, &self.block_accumulated_data_db), + (LMDB_DB_BLOCK_HASHES, &self.block_hashes_db), + (LMDB_DB_UTXOS, &self.utxos_db), + (LMDB_DB_INPUTS, &self.inputs_db), + (LMDB_DB_TXOS_HASH_TO_INDEX, &self.txos_hash_to_index_db), + (LMDB_DB_KERNELS, &self.kernels_db), + (LMDB_DB_KERNEL_EXCESS_INDEX, &self.kernel_excess_index), + (LMDB_DB_KERNEL_EXCESS_SIG_INDEX, &self.kernel_excess_sig_index), + (LMDB_DB_KERNEL_MMR_SIZE_INDEX, &self.kernel_mmr_size_index), + (LMDB_DB_UTXO_COMMITMENT_INDEX, &self.utxo_commitment_index), + (LMDB_DB_CONTRACT_ID_INDEX, &self.contract_index), + (LMDB_DB_UNIQUE_ID_INDEX, &self.unique_id_index), ( - "deleted_txo_hash_to_header_index", + LMDB_DB_DELETED_TXO_HASH_TO_HEADER_INDEX, &self.deleted_txo_hash_to_header_index, ), - ("orphans_db", &self.orphans_db), + (LMDB_DB_ORPHANS, &self.orphans_db), ( - "orphan_header_accumulated_data_db", + LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, &self.orphan_header_accumulated_data_db, ), - ("monero_seed_height_db", &self.monero_seed_height_db), - ("orphan_chain_tips_db", &self.orphan_chain_tips_db), - ("orphan_parent_map_index", &self.orphan_parent_map_index), - ("bad_blocks", &self.bad_blocks), - ("reorgs", &self.reorgs), - ("validator_nodes", &self.validator_nodes), - ("validator_nodes_mapping", &self.validator_nodes_mapping), - ("template_registrations", &self.template_registrations), + (LMDB_DB_MONERO_SEED_HEIGHT, &self.monero_seed_height_db), + (LMDB_DB_ORPHAN_CHAIN_TIPS, &self.orphan_chain_tips_db), + (LMDB_DB_ORPHAN_PARENT_MAP_INDEX, &self.orphan_parent_map_index), + (LMDB_DB_BAD_BLOCK_LIST, &self.bad_blocks), + (LMDB_DB_REORGS, &self.reorgs), + (LMDB_DB_VALIDATOR_NODES, &self.validator_nodes), + (LMDB_DB_TIP_UTXO_SMT, &self.tip_utxo_smt), + (LMDB_DB_VALIDATOR_NODES_MAPPING, &self.validator_nodes_mapping), + (LMDB_DB_TEMPLATE_REGISTRATIONS, &self.template_registrations), ] } - fn prune_output(&self, txn: &WriteTransaction<'_>, key: OutputKey) -> Result<(), ChainStorageError> { - lmdb_delete(txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; - Ok(()) - } - fn insert_output( &self, txn: 
&WriteTransaction<'_>, @@ -555,7 +578,7 @@ impl LMDBDatabase { mined_height: header_height, mined_timestamp: header_timestamp, }, - "utxos_db", + LMDB_DB_UTXOS, )?; Ok(()) @@ -608,23 +631,63 @@ impl LMDBDatabase { ) } + fn input_with_output_data( + &self, + txn: &WriteTransaction<'_>, + input: TransactionInput, + ) -> Result { + let input_with_output_data = match input.spent_output { + SpentOutput::OutputData { .. } => input, + SpentOutput::OutputHash(output_hash) => match self.fetch_output_in_txn(txn, output_hash.as_slice()) { + Ok(Some(utxo_mined_info)) => TransactionInput { + version: input.version, + spent_output: SpentOutput::create_from_output(utxo_mined_info.output), + input_data: input.input_data, + script_signature: input.script_signature, + }, + Ok(None) => { + error!( + target: LOG_TARGET, + "Could not retrieve output data from input's output_hash `{}`", + output_hash.to_hex() + ); + return Err(ChainStorageError::ValueNotFound { + entity: "UTXO", + field: "hash", + value: output_hash.to_hex(), + }); + }, + Err(e) => { + error!( + target: LOG_TARGET, + "Could not retrieve output data from input's output_hash `{}` ({})", + output_hash.to_hex(), e + ); + return Err(e); + }, + }, + }; + Ok(input_with_output_data) + } + fn insert_input( &self, txn: &WriteTransaction<'_>, height: u64, header_timestamp: u64, header_hash: &HashOutput, - input: &TransactionInput, + input: TransactionInput, ) -> Result<(), ChainStorageError> { + let input_with_output_data = self.input_with_output_data(txn, input)?; lmdb_delete( txn, &self.utxo_commitment_index, - input.commitment()?.as_bytes(), + input_with_output_data.commitment()?.as_bytes(), "utxo_commitment_index", )?; - let hash = input.canonical_hash(); - let output_hash = input.output_hash(); + let hash = input_with_output_data.canonical_hash(); + let output_hash = input_with_output_data.output_hash(); let key = InputKey::new(header_hash, &hash)?; lmdb_insert( txn, @@ -639,7 +702,7 @@ impl LMDBDatabase { &self.inputs_db, &key.convert_to_comp_key(), &TransactionInputRowDataRef { - input: &input.to_compact(), + input: &input_with_output_data.to_compact(), header_hash, spent_timestamp: header_timestamp, spent_height: height, @@ -655,7 +718,7 @@ impl LMDBDatabase { k: MetadataKey, v: &MetadataValue, ) -> Result<(), ChainStorageError> { - lmdb_replace(txn, &self.metadata_db, &k.as_u32(), v)?; + lmdb_replace(txn, &self.metadata_db, &k.as_u32(), v, None)?; Ok(()) } @@ -836,6 +899,7 @@ impl LMDBDatabase { .fetch_height_from_hash(write_txn, block_hash) .or_not_found("Block", "hash", hash_hex)?; let next_height = height.saturating_add(1); + let prev_height = height.saturating_sub(1); if self.fetch_block_accumulated_data(write_txn, next_height)?.is_some() { return Err(ChainStorageError::InvalidOperation(format!( "Attempted to delete block at height {} while next block still exists", @@ -852,6 +916,21 @@ impl LMDBDatabase { let mut smt = self.fetch_tip_smt()?; self.delete_block_inputs_outputs(write_txn, block_hash.as_slice(), &mut smt)?; + + let new_tip_header = self.fetch_chain_header_by_height(prev_height)?; + let root = FixedHash::try_from(smt.hash().as_slice())?; + if root != new_tip_header.header().output_mr { + error!( + target: LOG_TARGET, + "Deleting block, new smt root(#{}) did not match expected (#{}) smt root", + root.to_hex(), + new_tip_header.header().output_mr.to_hex(), + ); + return Err(ChainStorageError::InvalidOperation( + "Deleting block, new smt root did not match expected smt root".to_string(), + )); + } + self.insert_tip_smt(write_txn, 
&smt)?; self.delete_block_kernels(write_txn, block_hash.as_slice())?; @@ -889,7 +968,17 @@ impl LMDBDatabase { continue; } let smt_key = NodeKey::try_from(utxo.output.commitment.as_bytes())?; - output_smt.delete(&smt_key)?; + match output_smt.delete(&smt_key)? { + DeleteResult::Deleted(_value_hash) => {}, + DeleteResult::KeyNotFound => { + error!( + target: LOG_TARGET, + "Could not find input({}) in SMT", + utxo.output.commitment.to_hex(), + ); + return Err(ChainStorageError::UnspendableInput); + }, + }; lmdb_delete( txn, &self.utxo_commitment_index, @@ -940,8 +1029,15 @@ impl LMDBDatabase { utxo_mined_info.output.minimum_value_promise, ); let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?; - let smt_node = ValueHash::try_from(input.smt_hash(row.spent_height).as_slice())?; - output_smt.insert(smt_key, smt_node)?; + let smt_node = ValueHash::try_from(input.smt_hash(utxo_mined_info.mined_height).as_slice())?; + if let Err(e) = output_smt.insert(smt_key, smt_node) { + error!( + target: LOG_TARGET, + "Output commitment({}) already in SMT", + input.commitment()?.to_hex(), + ); + return Err(e.into()); + } trace!(target: LOG_TARGET, "Input moved to UTXO set: {}", input); lmdb_insert( @@ -1158,7 +1254,14 @@ impl LMDBDatabase { if !output.is_burned() { let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; let smt_node = ValueHash::try_from(output.smt_hash(header.height).as_slice())?; - output_smt.insert(smt_key, smt_node)?; + if let Err(e) = output_smt.insert(smt_key, smt_node) { + error!( + target: LOG_TARGET, + "Output commitment({}) already in SMT", + output.commitment.to_hex(), + ); + return Err(e.into()); + } } let output_hash = output.hash(); @@ -1189,34 +1292,45 @@ impl LMDBDatabase { } // unique_id_index expects inputs to be inserted before outputs - for input in &inputs { - let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?; + for input in inputs { + let input_with_output_data = self.input_with_output_data(txn, input)?; + let smt_key = NodeKey::try_from(input_with_output_data.commitment()?.as_bytes())?; match output_smt.delete(&smt_key)? 
{ DeleteResult::Deleted(_value_hash) => {}, - DeleteResult::KeyNotFound => return Err(ChainStorageError::UnspendableInput), + DeleteResult::KeyNotFound => { + error!( + target: LOG_TARGET, + "Could not find input({}) in SMT", + input_with_output_data.commitment()?.to_hex(), + ); + return Err(ChainStorageError::UnspendableInput); + }, }; - let features = input.features()?; + let features = input_with_output_data.features()?; if let Some(vn_reg) = features .sidechain_feature .as_ref() .and_then(|f| f.validator_node_registration()) { - self.validator_node_store(txn) - .delete(header.height, vn_reg.public_key(), input.commitment()?)?; + self.validator_node_store(txn).delete( + header.height, + vn_reg.public_key(), + input_with_output_data.commitment()?, + )?; } trace!( target: LOG_TARGET, "Inserting input (`{}`, `{}`)", - input.commitment()?.to_hex(), - input.output_hash().to_hex() + input_with_output_data.commitment()?.to_hex(), + input_with_output_data.output_hash().to_hex() ); self.insert_input( txn, current_header_at_height.height, current_header_at_height.timestamp.as_u64(), &block_hash, - input, + input_with_output_data, )?; } @@ -1293,8 +1407,46 @@ impl LMDBDatabase { } fn insert_tip_smt(&self, txn: &WriteTransaction<'_>, smt: &OutputSmt) -> Result<(), ChainStorageError> { + let start = Instant::now(); let k = MetadataKey::TipSmt; - lmdb_replace(txn, &self.tip_utxo_smt, &k.as_u32(), smt) + + // This is best effort, if it fails (typically when the entry does not yet exist) we just log it + if let Err(e) = lmdb_delete(txn, &self.tip_utxo_smt, &k.as_u32(), LMDB_DB_TIP_UTXO_SMT) { + debug!( + "Could NOT delete '{}' db with key '{}' ({})", + LMDB_DB_TIP_UTXO_SMT, + to_hex(k.as_u32().as_lmdb_bytes()), + e + ); + } + + #[allow(clippy::cast_possible_truncation)] + let estimated_bytes = smt.size().saturating_mul(225) as usize; + match lmdb_replace(txn, &self.tip_utxo_smt, &k.as_u32(), smt, Some(estimated_bytes)) { + Ok(_) => { + trace!( + "Inserted ~{} MB with key '{}' into '{}' (size {}) in {:.2?}", + estimated_bytes / BYTES_PER_MB, + to_hex(k.as_u32().as_lmdb_bytes()), + LMDB_DB_TIP_UTXO_SMT, + smt.size(), + start.elapsed() + ); + Ok(()) + }, + Err(e) => { + if let ChainStorageError::DbResizeRequired(Some(val)) = e { + trace!( + "Could NOT insert {} MB with key '{}' into '{}' (size {})", + val / BYTES_PER_MB, + to_hex(k.as_u32().as_lmdb_bytes()), + LMDB_DB_TIP_UTXO_SMT, + smt.size() + ); + } + Err(e) + }, + } } fn update_block_accumulated_data( @@ -1320,7 +1472,13 @@ impl LMDBDatabase { block_accum_data.kernels = kernel_hash_set; } - lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; + lmdb_replace( + write_txn, + &self.block_accumulated_data_db, + &height, + &block_accum_data, + None, + )?; Ok(()) } @@ -1332,7 +1490,7 @@ impl LMDBDatabase { ) -> Result<(), ChainStorageError> { let current_height = lmdb_get(write_txn, &self.monero_seed_height_db, seed)?.unwrap_or(std::u64::MAX); if height < current_height { - lmdb_replace(write_txn, &self.monero_seed_height_db, seed, &height)?; + lmdb_replace(write_txn, &self.monero_seed_height_db, seed, &height, None)?; }; Ok(()) } @@ -1355,21 +1513,93 @@ impl LMDBDatabase { let inputs = lmdb_fetch_matching_after::(write_txn, &self.inputs_db, block_hash.as_slice())?; - for input in inputs { + for input_data in inputs { + let input = input_data.input; + // From 'utxo_commitment_index::utxo_commitment_index' + if let SpentOutput::OutputData { commitment, .. 
} = input.spent_output.clone() { + debug!(target: LOG_TARGET, "Pruning output from 'utxo_commitment_index': key '{}'", commitment.to_hex()); + lmdb_delete( + write_txn, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + // From 'utxos_db::utxos_db' + if let Some(key_bytes) = + lmdb_get::<_, Vec>(write_txn, &self.txos_hash_to_index_db, input.output_hash().as_slice())? + { + let mut buffer = [0u8; 32]; + buffer.copy_from_slice(&key_bytes[0..32]); + let key = OutputKey::new(&FixedHash::from(buffer), &input.output_hash())?; + debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), LMDB_DB_UTXOS)?; + }; + // From 'txos_hash_to_index_db::utxos_db' + debug!( + target: LOG_TARGET, + "Pruning output from 'txos_hash_to_index_db': key '{}'", + input.output_hash().to_hex() + ); lmdb_delete( write_txn, &self.txos_hash_to_index_db, - input.hash.as_slice(), - "utxos_db", + input.output_hash().as_slice(), + LMDB_DB_UTXOS, )?; - let key = OutputKey::new(block_hash, &input.hash)?; - debug!(target: LOG_TARGET, "Pruning output: {:?}", key); - self.prune_output(write_txn, key)?; } Ok(()) } + fn prune_output_from_all_dbs( + &self, + write_txn: &WriteTransaction<'_>, + output_hash: &HashOutput, + commitment: &Commitment, + output_type: OutputType, + ) -> Result<(), ChainStorageError> { + match lmdb_get::<_, Vec>(write_txn, &self.txos_hash_to_index_db, output_hash.as_slice())? { + Some(key_bytes) => { + if !matches!(output_type, OutputType::Burn) { + debug!(target: LOG_TARGET, "Pruning output from 'utxo_commitment_index': key '{}'", commitment.to_hex()); + lmdb_delete( + write_txn, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + debug!(target: LOG_TARGET, "Pruning output from 'txos_hash_to_index_db': key '{}'", output_hash.to_hex()); + lmdb_delete( + write_txn, + &self.txos_hash_to_index_db, + output_hash.as_slice(), + LMDB_DB_UTXOS, + )?; + + let mut buffer = [0u8; 32]; + buffer.copy_from_slice(&key_bytes[0..32]); + let key = OutputKey::new(&FixedHash::from(buffer), output_hash)?; + debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), LMDB_DB_UTXOS)?; + }, + None => return Err(ChainStorageError::InvalidOperation("Output key not found".to_string())), + } + + Ok(()) + } + + fn delete_all_kernels_in_block( + &self, + txn: &WriteTransaction<'_>, + block_hash: &BlockHash, + ) -> Result<(), ChainStorageError> { + self.delete_block_kernels(txn, block_hash.as_slice())?; + debug!(target: LOG_TARGET, "Deleted kernels in block {}", block_hash.to_hex()); + Ok(()) + } + #[allow(clippy::ptr_arg)] fn fetch_orphan(&self, txn: &ConstTransaction<'_>, hash: &HashOutput) -> Result, ChainStorageError> { let val: Option = lmdb_get(txn, &self.orphans_db, hash.deref())?; @@ -1411,24 +1641,26 @@ impl LMDBDatabase { txn: &WriteTransaction<'_>, hash: &HashOutput, height: u64, + reason: String, ) -> Result<(), ChainStorageError> { #[cfg(test)] const CLEAN_BAD_BLOCKS_BEFORE_REL_HEIGHT: u64 = 10000; #[cfg(not(test))] const CLEAN_BAD_BLOCKS_BEFORE_REL_HEIGHT: u64 = 0; - lmdb_replace(txn, &self.bad_blocks, hash.deref(), &height)?; + lmdb_replace(txn, &self.bad_blocks, hash.deref(), &(height, reason), None)?; // Clean up bad blocks that are far from the tip let metadata = fetch_metadata(txn, &self.metadata_db)?; let deleted_before_height = metadata - 
.height_of_longest_chain() + .best_block_height() .saturating_sub(CLEAN_BAD_BLOCKS_BEFORE_REL_HEIGHT); if deleted_before_height == 0 { return Ok(()); } - let num_deleted = - lmdb_delete_each_where::<[u8], u64, _>(txn, &self.bad_blocks, |_, v| Some(v < deleted_before_height))?; + let num_deleted = lmdb_delete_each_where::<[u8], (u64, String), _>(txn, &self.bad_blocks, |_, (v, _)| { + Some(v < deleted_before_height) + })?; debug!(target: LOG_TARGET, "Cleaned out {} stale bad blocks", num_deleted); Ok(()) @@ -1568,10 +1800,35 @@ impl BlockchainBackend for LMDBDatabase { return Ok(()); } + // Ensure there will be enough space in the database to insert the block and replace the SMT before it is + // attempted; this is more efficient than relying on an error if the LMDB environment map size was reached with + // the write operation, with cleanup, resize and re-try afterwards. + let block_operations = txn.operations().iter().filter(|op| { + matches!(op, WriteOperation::InsertOrphanBlock { .. }) || + matches!(op, WriteOperation::InsertTipBlockBody { .. }) || + matches!(op, WriteOperation::InsertChainOrphanBlock { .. }) + }); + let count = block_operations.count(); + if count > 0 { + let (mapsize, size_used_bytes, size_left_bytes) = LMDBStore::get_stats(&self.env)?; + trace!( + target: LOG_TARGET, + "[apply_db_transaction] Block insert operations: {}, mapsize: {} MB, used: {} MB, remaining: {} MB", + count, mapsize / BYTES_PER_MB, size_used_bytes / BYTES_PER_MB, size_left_bytes / BYTES_PER_MB + ); + unsafe { + LMDBStore::resize_if_required( + &self.env, + &self.env_config, + Some(max(self.env_config.grow_size_bytes(), 128 * BYTES_PER_MB)), + )?; + } + } + let mark = Instant::now(); - // Resize this many times before assuming something is not right - const MAX_RESIZES: usize = 5; - for i in 0..MAX_RESIZES { + // Resize this many times before assuming something is not right (up to 1 GB) + let max_resizes = 1024 * BYTES_PER_MB / self.env_config.grow_size_bytes(); + for i in 0..max_resizes { let num_operations = txn.operations().len(); match self.apply_db_transaction(&txn) { Ok(_) => { @@ -1584,7 +1841,7 @@ impl BlockchainBackend for LMDBDatabase { return Ok(()); }, - Err(ChainStorageError::DbResizeRequired) => { + Err(ChainStorageError::DbResizeRequired(size_that_could_not_be_written)) => { info!( target: LOG_TARGET, "Database resize required (resized {} time(s) in this transaction)", @@ -1595,7 +1852,7 @@ impl BlockchainBackend for LMDBDatabase { // BlockchainDatabase, so we know there are no other threads taking out LMDB transactions when this // is called. unsafe { - LMDBStore::resize(&self.env, &self.env_config)?; + LMDBStore::resize(&self.env, &self.env_config, size_that_could_not_be_written)?; } }, Err(e) => { @@ -1829,23 +2086,23 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_outputs_in_block_with_spend_state( &self, - header_hash: &HashOutput, - spend_status_at_header: Option, + previous_header_hash: &HashOutput, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; let mut outputs: Vec<(TransactionOutput, bool)> = - lmdb_fetch_matching_after::(&txn, &self.utxos_db, header_hash.deref())? + lmdb_fetch_matching_after::(&txn, &self.utxos_db, previous_header_hash.deref())? .into_iter() .map(|row| (row.output, false)) .collect(); - if let Some(header) = spend_status_at_header { + if let Some(header_hash) = spend_status_at_header { let header_height = - self.fetch_height_from_hash(&txn, header_hash)? 
+ self.fetch_height_from_hash(&txn, &header_hash)? .ok_or(ChainStorageError::ValueNotFound { entity: "Header", field: "hash", - value: header.to_hex(), + value: header_hash.to_hex(), })?; for output in &mut outputs { let hash = output.0.hash(); @@ -1856,7 +2113,7 @@ impl BlockchainBackend for LMDBDatabase { ChainStorageError::ValueNotFound { entity: "input", field: "hash", - value: header.to_hex(), + value: header_hash.to_hex(), }, )?; if input.spent_height <= header_height { @@ -1895,10 +2152,13 @@ impl BlockchainBackend for LMDBDatabase { lmdb_fetch_matching_after(&txn, &self.utxos_db, header_hash.as_slice()) } - fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError> { + fn fetch_inputs_in_block( + &self, + previous_header_hash: &HashOutput, + ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; Ok( - lmdb_fetch_matching_after(&txn, &self.inputs_db, header_hash.as_slice())? + lmdb_fetch_matching_after(&txn, &self.inputs_db, previous_header_hash.as_slice())? .into_iter() .map(|f: TransactionInputRowData| f.input) .collect(), @@ -1957,14 +2217,14 @@ impl BlockchainBackend for LMDBDatabase { let txn = self.read_transaction()?; let metadata = self.fetch_chain_metadata()?; - let height = metadata.height_of_longest_chain(); + let height = metadata.best_block_height(); let header = lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { entity: "Header", field: "height", value: height.to_string(), })?; let accumulated_data = self - .fetch_header_accumulated_data_by_height(&txn, metadata.height_of_longest_chain())? + .fetch_header_accumulated_data_by_height(&txn, metadata.best_block_height())? .ok_or_else(|| ChainStorageError::ValueNotFound { entity: "BlockHeaderAccumulatedData", field: "height", @@ -2210,9 +2470,22 @@ impl BlockchainBackend for LMDBDatabase { .collect() } - fn bad_block_exists(&self, block_hash: HashOutput) -> Result { + fn bad_block_exists(&self, block_hash: HashOutput) -> Result<(bool, String), ChainStorageError> { let txn = self.read_transaction()?; - lmdb_exists(&txn, &self.bad_blocks, block_hash.deref()) + // We do this to ensure backwards compatibility on older exising dbs that did not store a reason + let exist = lmdb_exists(&txn, &self.bad_blocks, block_hash.deref())?; + match lmdb_get::<_, (u64, String)>(&txn, &self.bad_blocks, block_hash.deref()) { + Ok(Some((_height, reason))) => Ok((true, reason)), + Ok(None) => Ok((false, "".to_string())), + Err(ChainStorageError::AccessError(e)) => { + if exist { + Ok((true, "No reason recorded".to_string())) + } else { + Err(ChainStorageError::AccessError(e)) + } + }, + Err(e) => Err(e), + } } fn clear_all_pending_headers(&self) -> Result { @@ -2225,11 +2498,11 @@ impl BlockchainBackend for LMDBDatabase { }; let metadata = fetch_metadata(&txn, &self.metadata_db)?; - if metadata.height_of_longest_chain() == last_header.height { + if metadata.best_block_height() == last_header.height { return Ok(0); } - let start = metadata.height_of_longest_chain() + 1; + let start = metadata.best_block_height() + 1; let end = last_header.height; let mut num_deleted = 0; @@ -2318,7 +2591,7 @@ fn fetch_metadata(txn: &ConstTransaction<'_>, db: &Database) -> Result Result<(), ChainStorageError> { &db.metadata_db, &k.as_u32(), &MetadataValue::MigrationVersion(MIGRATION_VERSION), + None, )?; txn.commit()?; } diff --git a/base_layer/core/src/chain_storage/reorg.rs b/base_layer/core/src/chain_storage/reorg.rs index da72fa0986..c5b1c8a2bb 100644 --- 
a/base_layer/core/src/chain_storage/reorg.rs +++ b/base_layer/core/src/chain_storage/reorg.rs @@ -43,8 +43,8 @@ impl Reorg { pub fn from_reorged_blocks(added: &VecDeque>, removed: &[Arc]) -> Self { // Expects blocks to be ordered sequentially highest height to lowest (as in rewind_to_height) Self { - new_height: added.get(0).map(|b| b.header().height).unwrap_or_default(), - new_hash: added.get(0).map(|b| *b.hash()).unwrap_or_default(), + new_height: added.front().map(|b| b.header().height).unwrap_or_default(), + new_hash: added.front().map(|b| *b.hash()).unwrap_or_default(), prev_height: removed.first().map(|b| b.header().height).unwrap_or_default(), prev_hash: removed.first().map(|b| *b.hash()).unwrap_or_default(), num_blocks_added: added.len() as u64, diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 23e2174da0..dd18656bc6 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -428,7 +428,7 @@ mod fetch_total_size_stats { let _block_and_outputs = add_many_chained_blocks(2, &db, &key_manager).await; let stats = db.fetch_total_size_stats().unwrap(); assert_eq!( - stats.sizes().iter().find(|s| s.name == "utxos_db").unwrap().num_entries, + stats.sizes().iter().find(|s| s.name == "utxos").unwrap().num_entries, genesis_output_count + 2 ); } @@ -477,22 +477,6 @@ mod prepare_new_block { mod fetch_header_containing_kernel_mmr { use super::*; use crate::transactions::key_manager::create_memory_db_key_manager; - - #[test] - fn it_returns_genesis() { - let db = setup(); - let genesis = db.fetch_block(0, true).unwrap(); - assert_eq!(genesis.block().body.kernels().len(), 1); - let mut mmr_position = 0; - genesis.block().body.kernels().iter().for_each(|_| { - let header = db.fetch_header_containing_kernel_mmr(mmr_position).unwrap(); - assert_eq!(header.height(), 0); - mmr_position += 1; - }); - let err = db.fetch_header_containing_kernel_mmr(mmr_position).unwrap_err(); - matches!(err, ChainStorageError::ValueNotFound { .. 
}); - } - #[tokio::test] async fn it_returns_corresponding_header() { let db = setup(); diff --git a/base_layer/core/src/common/mod.rs b/base_layer/core/src/common/mod.rs index c6b7d1b293..0f447aecd3 100644 --- a/base_layer/core/src/common/mod.rs +++ b/base_layer/core/src/common/mod.rs @@ -22,7 +22,7 @@ use blake2::Blake2b; use digest::consts::U64; -use tari_hash_domains::ConfidentialOutputHashDomain; +use tari_hashing::ConfidentialOutputHashDomain; use crate::consensus::DomainSeparatedConsensusHasher; @@ -30,10 +30,12 @@ pub mod borsh; pub mod byte_counter; pub mod limited_reader; pub mod one_sided; + #[cfg(feature = "base_node")] pub mod rolling_avg; #[cfg(feature = "base_node")] pub mod rolling_vec; +pub(crate) mod waiting_requests; /// Hasher used in the DAN to derive masks and encrypted value keys pub type ConfidentialOutputHasher = DomainSeparatedConsensusHasher>; diff --git a/base_layer/core/src/common/one_sided.rs b/base_layer/core/src/common/one_sided.rs index fcb9e734cb..3d5740ee08 100644 --- a/base_layer/core/src/common/one_sided.rs +++ b/base_layer/core/src/common/one_sided.rs @@ -31,7 +31,7 @@ use tari_crypto::{ hashing::{DomainSeparatedHash, DomainSeparatedHasher}, keys::{PublicKey as PKtrait, SecretKey as SKtrait}, }; -use tari_hash_domains::WalletOutputEncryptionKeysDomain; +use tari_hashing::WalletOutputEncryptionKeysDomain; use tari_utilities::byte_array::ByteArrayError; hash_domain!( diff --git a/base_layer/common_types/src/waiting_requests.rs b/base_layer/core/src/common/waiting_requests.rs similarity index 100% rename from base_layer/common_types/src/waiting_requests.rs rename to base_layer/core/src/common/waiting_requests.rs diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index d5729296ac..530c9aa9f9 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -36,7 +36,7 @@ use crate::{ consensus::network::NetworkConsensus, proof_of_work::{Difficulty, PowAlgorithm}, transactions::{ - tari_amount::{uT, MicroMinotari, T}, + tari_amount::{uT, MicroMinotari}, transaction_components::{ OutputFeatures, OutputFeaturesVersion, @@ -50,6 +50,8 @@ use crate::{ }, }; +const ANNUAL_BLOCKS: u64 = 30 /* blocks/hr */ * 24 /* hr /d */ * 366 /* days / yr */; + /// This is the inner struct used to control all consensus values. #[derive(Debug, Clone)] pub struct ConsensusConstants { @@ -77,8 +79,10 @@ pub struct ConsensusConstants { /// This is the emission curve decay factor as a sum of fraction powers of two. e.g. [1,2] would be 1/2 + 1/4. [2] /// would be 1/4 pub(in crate::consensus) emission_decay: &'static [u64], - /// This is the emission curve tail amount - pub(in crate::consensus) emission_tail: MicroMinotari, + /// The tail emission inflation rate in basis points (bips). 
100 bips = 1 percentage point + pub(in crate::consensus) inflation_bips: u64, + /// The length, in blocks, of each tail emission epoch (where the reward is held constant) + pub(in crate::consensus) tail_epoch_length: u64, /// This is the maximum age a Monero merge mined seed can be reused /// Monero forces a change every height mod 2048 blocks max_randomx_seed_height: u64, @@ -165,9 +169,14 @@ impl ConsensusConstants { self.effective_from_height } - /// This gets the emission curve values as (initial, decay, tail) - pub fn emission_amounts(&self) -> (MicroMinotari, &'static [u64], MicroMinotari) { - (self.emission_initial, self.emission_decay, self.emission_tail) + /// This gets the emission curve values as (initial, decay, inflation_bips, epoch_length) + pub fn emission_amounts(&self) -> (MicroMinotari, &'static [u64], u64, u64) { + ( + self.emission_initial, + self.emission_decay, + self.inflation_bips, + self.tail_epoch_length, + ) } /// The min height maturity a coinbase utxo must have. @@ -361,12 +370,12 @@ impl ConsensusConstants { algos.insert(PowAlgorithm::Sha3x, PowAlgorithmConstants { min_difficulty: Difficulty::min(), max_difficulty: Difficulty::min(), - target_time: 300, + target_time: 240, }); algos.insert(PowAlgorithm::RandomX, PowAlgorithmConstants { min_difficulty: Difficulty::min(), max_difficulty: Difficulty::min(), - target_time: 200, + target_time: 240, }); let (input_version_range, output_version_range, kernel_version_range) = version_zero(); let consensus_constants = vec![ConsensusConstants { @@ -380,11 +389,12 @@ impl ConsensusConstants { median_timestamp_count: 11, emission_initial: 18_462_816_327 * uT, emission_decay: &ESMERALDA_DECAY_PARAMS, - emission_tail: 800 * T, + inflation_bips: 1000, + tail_epoch_length: 100, max_randomx_seed_height: u64::MAX, max_extra_field_size: 200, proof_of_work: algos, - faucet_value: ESMERALDA_FAUCET_VALUE.into(), // The esmeralda genesis block is re-used for localnet + faucet_value: 0.into(), transaction_weight: TransactionWeight::latest(), max_script_byte_size: 2048, input_version_range, @@ -401,13 +411,13 @@ impl ConsensusConstants { coinbase_output_features_extra_max_length: 64, }]; #[cfg(any(test, debug_assertions))] - assert_hybrid_pow_constants(&consensus_constants, &[120], &[60], &[40]); + assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50]); consensus_constants } pub fn igor() -> Vec<ConsensusConstants> { // `igor` is a test network, so calculating these constants is allowed rather than being hardcoded. 
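As an aside, the 50/50 split and the 240 s per-algorithm target times above are two views of the same constraint. Below is a minimal stand-alone sketch of that arithmetic (illustrative names, not part of the patch), which also underpins the igor() split calculation that follows:

fn per_algo_target_time(combined_block_time: u64, split_percent: u64) -> u64 {
    // An algorithm expected to win `split_percent`% of blocks must target a
    // block time of combined * 100 / split to keep the combined rate constant.
    combined_block_time * 100 / split_percent
}

fn main() {
    // A 120 s combined target with a 50/50 split gives 240 s per algorithm,
    // matching `target_time: 240` and
    // assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50]).
    assert_eq!(per_algo_target_time(120, 50), 240);
}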
- let randomx_split: u64 = 60; + let randomx_split: u64 = 50; let sha3x_split: u64 = 100 - randomx_split; let randomx_target_time = 20; let sha3x_target_time = randomx_target_time * (100 - sha3x_split) / sha3x_split; @@ -443,7 +453,8 @@ impl ConsensusConstants { median_timestamp_count: 11, emission_initial: 5_538_846_115 * uT, emission_decay: &EMISSION_DECAY, - emission_tail: 100.into(), + inflation_bips: 100, + tail_epoch_length: ANNUAL_BLOCKS, max_randomx_seed_height: u64::MAX, max_extra_field_size: 200, proof_of_work: algos, @@ -480,12 +491,12 @@ impl ConsensusConstants { algos.insert(PowAlgorithm::Sha3x, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(60_000_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 300, + target_time: 240, }); algos.insert(PowAlgorithm::RandomX, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(60_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 200, + target_time: 240, }); let (input_version_range, output_version_range, kernel_version_range) = version_zero(); let consensus_constants = vec![ConsensusConstants { @@ -499,7 +510,8 @@ impl ConsensusConstants { median_timestamp_count: 11, emission_initial: ESMERALDA_INITIAL_EMISSION, emission_decay: &ESMERALDA_DECAY_PARAMS, - emission_tail: 800 * T, + inflation_bips: 100, + tail_epoch_length: ANNUAL_BLOCKS, max_randomx_seed_height: 3000, max_extra_field_size: 200, proof_of_work: algos, @@ -520,7 +532,7 @@ impl ConsensusConstants { coinbase_output_features_extra_max_length: 64, }]; #[cfg(any(test, debug_assertions))] - assert_hybrid_pow_constants(&consensus_constants, &[120], &[60], &[40]); + assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50]); consensus_constants } @@ -535,12 +547,12 @@ impl ConsensusConstants { algos.insert(PowAlgorithm::Sha3x, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(1_200_000_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 300, + target_time: 240, }); algos.insert(PowAlgorithm::RandomX, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(1_200_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 200, + target_time: 240, }); let (input_version_range, output_version_range, kernel_version_range) = version_zero(); let consensus_constants = vec![ConsensusConstants { @@ -554,7 +566,8 @@ impl ConsensusConstants { median_timestamp_count: 11, emission_initial: INITIAL_EMISSION, emission_decay: &EMISSION_DECAY, - emission_tail: 800 * T, + inflation_bips: 100, + tail_epoch_length: ANNUAL_BLOCKS, max_randomx_seed_height: 3000, max_extra_field_size: 200, proof_of_work: algos, @@ -575,7 +588,7 @@ impl ConsensusConstants { coinbase_output_features_extra_max_length: 64, }]; #[cfg(any(test, debug_assertions))] - assert_hybrid_pow_constants(&consensus_constants, &[120], &[60], &[40]); + assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50]); consensus_constants } @@ -584,12 +597,12 @@ impl ConsensusConstants { algos.insert(PowAlgorithm::Sha3x, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(1_200_000_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 300, + target_time: 240, }); algos.insert(PowAlgorithm::RandomX, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(1_200_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 200, + target_time: 240, }); let (input_version_range, output_version_range, 
kernel_version_range) = version_zero(); let consensus_constants = vec![ConsensusConstants { @@ -603,7 +616,8 @@ impl ConsensusConstants { median_timestamp_count: 11, emission_initial: INITIAL_EMISSION, emission_decay: &EMISSION_DECAY, - emission_tail: 800 * T, + inflation_bips: 100, + tail_epoch_length: ANNUAL_BLOCKS, max_randomx_seed_height: 3000, max_extra_field_size: 200, proof_of_work: algos, @@ -624,7 +638,7 @@ impl ConsensusConstants { coinbase_output_features_extra_max_length: 64, }]; #[cfg(any(test, debug_assertions))] - assert_hybrid_pow_constants(&consensus_constants, &[120], &[60], &[40]); + assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50]); consensus_constants } @@ -635,12 +649,12 @@ impl ConsensusConstants { algos.insert(PowAlgorithm::Sha3x, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(1_200_000_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 300, + target_time: 240, }); algos.insert(PowAlgorithm::RandomX, PowAlgorithmConstants { min_difficulty: Difficulty::from_u64(1_200_000).expect("valid difficulty"), max_difficulty: Difficulty::max(), - target_time: 200, + target_time: 240, }); let (input_version_range, output_version_range, kernel_version_range) = version_zero(); let consensus_constants = vec![ConsensusConstants { @@ -654,7 +668,8 @@ impl ConsensusConstants { median_timestamp_count: 11, emission_initial: 10_000_000.into(), emission_decay: &EMISSION_DECAY, - emission_tail: 100.into(), + inflation_bips: 100, + tail_epoch_length: ANNUAL_BLOCKS, max_randomx_seed_height: u64::MAX, max_extra_field_size: 200, proof_of_work: algos, @@ -675,7 +690,7 @@ impl ConsensusConstants { coinbase_output_features_extra_max_length: 64, }]; #[cfg(any(test, debug_assertions))] - assert_hybrid_pow_constants(&consensus_constants, &[120], &[60], &[40]); + assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50]); consensus_constants } @@ -835,11 +850,13 @@ impl ConsensusConstantsBuilder { mut self, intial_amount: MicroMinotari, decay: &'static [u64], - tail_amount: MicroMinotari, + inflation_bips: u64, + epoch_length: u64, ) -> Self { self.consensus.emission_initial = intial_amount; self.consensus.emission_decay = decay; - self.consensus.emission_tail = tail_amount; + self.consensus.inflation_bips = inflation_bips; + self.consensus.tail_epoch_length = epoch_length; self } @@ -876,7 +893,7 @@ mod test { ConsensusConstants, }, transactions::{ - tari_amount::{uT, MicroMinotari}, + tari_amount::{uT, MicroMinotari, T}, transaction_components::{OutputType, RangeProofType}, }, }; @@ -940,33 +957,43 @@ mod test { let schedule = EmissionSchedule::new( esmeralda[0].emission_initial, esmeralda[0].emission_decay, - esmeralda[0].emission_tail, + esmeralda[0].inflation_bips, + esmeralda[0].tail_epoch_length, + esmeralda[0].faucet_value(), ); // No genesis block coinbase assert_eq!(schedule.block_reward(0), MicroMinotari(0)); // Coinbases starts at block 1 let coinbase_offset = 1; let first_reward = schedule.block_reward(coinbase_offset); - assert_eq!(first_reward, esmeralda[0].emission_initial * uT); - assert_eq!(schedule.supply_at_block(coinbase_offset), first_reward); + assert_eq!(first_reward, esmeralda[0].emission_initial); + assert_eq!( + schedule.supply_at_block(coinbase_offset), + first_reward + esmeralda[0].faucet_value() + ); // 'half_life_block' at approximately '(total supply - faucet value) / 2' #[allow(clippy::cast_possible_truncation)] - let half_life_block = (365.0 * 24.0 * 30.0 * 2.76) as u64; + let 
half_life_block = 365 * 24 * 30 * 3; assert_eq!( schedule.supply_at_block(half_life_block + coinbase_offset), - 7_483_280_506_356_578 * uT + 7_935_818_494_624_306 * uT + esmeralda[0].faucet_value() ); - // Tail emission starts after block 3,255,552 + coinbase_offset + // 21 billion let mut rewards = schedule .iter() .skip(3255552 + usize::try_from(coinbase_offset).unwrap()); let (block_num, reward, supply) = rewards.next().unwrap(); assert_eq!(block_num, 3255553 + coinbase_offset); assert_eq!(reward, 800_000_415 * uT); - let total_supply_up_to_tail_emission = supply + esmeralda[0].faucet_value; - assert_eq!(total_supply_up_to_tail_emission, 20_999_999_999_819_869 * uT); + assert_eq!(supply, 20_999_999_999_819_869 * uT); let (_, reward, _) = rewards.next().unwrap(); - assert_eq!(reward, esmeralda[0].emission_tail); + assert_eq!(reward, 799_999_715 * uT); + // Inflating tail emission + let mut rewards = schedule.iter().skip(3259845); + let (block_num, reward, supply) = rewards.next().unwrap(); + assert_eq!(block_num, 3259846); + assert_eq!(reward, 797 * T); + assert_eq!(supply, 21_003_427_156_818_122 * uT); } #[test] @@ -975,7 +1002,9 @@ mod test { let schedule = EmissionSchedule::new( nextnet[0].emission_initial, nextnet[0].emission_decay, - nextnet[0].emission_tail, + nextnet[0].inflation_bips, + nextnet[0].tail_epoch_length, + nextnet[0].faucet_value(), ); // No genesis block coinbase assert_eq!(schedule.block_reward(0), MicroMinotari(0)); @@ -983,25 +1012,23 @@ mod test { let coinbase_offset = 1; let first_reward = schedule.block_reward(coinbase_offset); assert_eq!(first_reward, nextnet[0].emission_initial * uT); - assert_eq!(schedule.supply_at_block(coinbase_offset), first_reward); + assert_eq!( + schedule.supply_at_block(coinbase_offset), + first_reward + nextnet[0].faucet_value() + ); // 'half_life_block' at approximately '(total supply - faucet value) / 2' #[allow(clippy::cast_possible_truncation)] let half_life_block = (365.0 * 24.0 * 30.0 * 2.76) as u64; assert_eq!( schedule.supply_at_block(half_life_block + coinbase_offset), - 7_483_280_506_356_578 * uT + 7_483_280_506_356_578 * uT + nextnet[0].faucet_value() ); - // Tail emission starts after block 3,255,552 + coinbase_offset - let mut rewards = schedule - .iter() - .skip(3255552 + usize::try_from(coinbase_offset).unwrap()); + // Tail emission + let mut rewards = schedule.iter().skip(3259845); let (block_num, reward, supply) = rewards.next().unwrap(); - assert_eq!(block_num, 3255553 + coinbase_offset); - assert_eq!(reward, 800_000_415 * uT); - let total_supply_up_to_tail_emission = supply + nextnet[0].faucet_value; - assert_eq!(total_supply_up_to_tail_emission, 20_999_999_999_819_869 * uT); - let (_, reward, _) = rewards.next().unwrap(); - assert_eq!(reward, nextnet[0].emission_tail); + assert_eq!(block_num, 3259846); + assert_eq!(reward, 797 * T); + assert_eq!(supply, 21_003_427_156_818_122 * uT); } #[test] @@ -1010,7 +1037,9 @@ mod test { let schedule = EmissionSchedule::new( stagenet[0].emission_initial, stagenet[0].emission_decay, - stagenet[0].emission_tail, + stagenet[0].inflation_bips, + stagenet[0].tail_epoch_length, + stagenet[0].faucet_value(), ); // No genesis block coinbase assert_eq!(schedule.block_reward(0), MicroMinotari(0)); @@ -1018,31 +1047,35 @@ mod test { let coinbase_offset = 1; let first_reward = schedule.block_reward(coinbase_offset); assert_eq!(first_reward, stagenet[0].emission_initial * uT); - assert_eq!(schedule.supply_at_block(coinbase_offset), first_reward); + assert_eq!( + 
schedule.supply_at_block(coinbase_offset), + first_reward + stagenet[0].faucet_value() + ); // 'half_life_block' at approximately '(total supply - faucet value) / 2' #[allow(clippy::cast_possible_truncation)] let half_life_block = (365.0 * 24.0 * 30.0 * 2.76) as u64; assert_eq!( schedule.supply_at_block(half_life_block + coinbase_offset), - 7_483_280_506_356_578 * uT + 7_483_280_506_356_578 * uT + stagenet[0].faucet_value() ); - // Tail emission starts after block 3,255,552 + coinbase_offset - let mut rewards = schedule - .iter() - .skip(3255552 + usize::try_from(coinbase_offset).unwrap()); + // Tail emission + let mut rewards = schedule.iter().skip(3259845); let (block_num, reward, supply) = rewards.next().unwrap(); - assert_eq!(block_num, 3255553 + coinbase_offset); - assert_eq!(reward, 800_000_415 * uT); - let total_supply_up_to_tail_emission = supply + stagenet[0].faucet_value; - assert_eq!(total_supply_up_to_tail_emission, 20_999_999_999_819_869 * uT); - let (_, reward, _) = rewards.next().unwrap(); - assert_eq!(reward, stagenet[0].emission_tail); + assert_eq!(block_num, 3259846); + assert_eq!(reward, 797 * T); + assert_eq!(supply, 21_003_427_156_818_122 * uT); } #[test] fn igor_schedule() { let igor = ConsensusConstants::igor(); - let schedule = EmissionSchedule::new(igor[0].emission_initial, igor[0].emission_decay, igor[0].emission_tail); + let schedule = EmissionSchedule::new( + igor[0].emission_initial, + igor[0].emission_decay, + igor[0].inflation_bips, + igor[0].tail_epoch_length, + igor[0].faucet_value(), + ); // No genesis block coinbase assert_eq!(schedule.block_reward(0), MicroMinotari(0)); // Coinbases starts at block 1 @@ -1055,11 +1088,9 @@ mod test { let mut previous_reward = MicroMinotari(0); for (block_num, reward, supply) in rewards { if reward == previous_reward { - assert_eq!(block_num, 11_084_819 + 1); - assert_eq!(supply, MicroMinotari(6_326_198_792_915_738)); - // These set of constants does not result in a tail emission equal to the specified tail emission - assert_ne!(reward, igor[0].emission_tail); - assert_eq!(reward, MicroMinotari(2_097_151)); + assert_eq!(block_num, 11_084_796); + assert_eq!(supply, MicroMinotari(8_010_884_615_082_026)); + assert_eq!(reward, MicroMinotari(303_000_000)); break; } previous_reward = reward; diff --git a/base_layer/core/src/consensus/consensus_encoding.rs b/base_layer/core/src/consensus/consensus_encoding.rs index d053c03f70..ad361324b9 100644 --- a/base_layer/core/src/consensus/consensus_encoding.rs +++ b/base_layer/core/src/consensus/consensus_encoding.rs @@ -24,7 +24,7 @@ mod bytes; mod hashing; mod string; -pub use hashing::{ConsensusHasher, DomainSeparatedConsensusHasher}; +pub use hashing::DomainSeparatedConsensusHasher; pub use string::MaxSizeString; pub use self::bytes::MaxSizeBytes; diff --git a/base_layer/core/src/consensus/consensus_encoding/hashing.rs b/base_layer/core/src/consensus/consensus_encoding/hashing.rs index 6418f280b3..1eb42a460a 100644 --- a/base_layer/core/src/consensus/consensus_encoding/hashing.rs +++ b/base_layer/core/src/consensus/consensus_encoding/hashing.rs @@ -20,75 +20,37 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
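The target-time changes above move SHA3x from a 300 s target to 240 s and RandomX from 200 s to 240 s, so the updated `assert_hybrid_pow_constants(&consensus_constants, &[120], &[50], &[50])` calls now encode an even block split instead of the old 60/40. A minimal sketch of the arithmetic behind those expected percentages, assuming the assert simply relates each algorithm's target time to the 120 s overall block time (`expected_share_percent` is a hypothetical helper, not part of the codebase):

```rust
/// Hypothetical helper: an algorithm that targets one block every
/// `algo_target_time` seconds contributes `overall / algo_target_time`
/// of all blocks when the chain targets one block every `overall` seconds.
fn expected_share_percent(overall_block_time: u64, algo_target_time: u64) -> u64 {
    100 * overall_block_time / algo_target_time
}

fn main() {
    // Old constants: RandomX at 200 s and SHA3x at 300 s => 60% / 40%.
    assert_eq!(expected_share_percent(120, 200), 60);
    assert_eq!(expected_share_percent(120, 300), 40);
    // New constants: both algorithms at 240 s => an even 50% / 50% split.
    assert_eq!(expected_share_percent(120, 240), 50);
}
```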
-use std::{io, io::Write, marker::PhantomData}; - -use blake2::Blake2b; use borsh::BorshSerialize; -use digest::{ - consts::{U32, U64}, - Digest, -}; +use digest::Digest; use tari_common::configuration::Network; -use tari_crypto::{hash_domain, hashing::DomainSeparation}; +use tari_crypto::hashing::DomainSeparation; +use tari_hashing::DomainSeparatedBorshHasher; /// Domain separated consensus encoding hasher. +/// This is a thin wrapper around the domain-separated Borsh hasher but adds the network byte in its constructor +/// functions pub struct DomainSeparatedConsensusHasher { - _m: PhantomData, - _d: PhantomData, + hasher: DomainSeparatedBorshHasher, } impl DomainSeparatedConsensusHasher where D: Default { - #[allow(clippy::new_ret_no_self)] - pub fn new(label: &'static str) -> ConsensusHasher { - Self::new_with_network(label, Network::get_current_or_default()) - } - - pub fn new_with_network(label: &'static str, network: Network) -> ConsensusHasher { - let mut digest = D::default(); - M::add_domain_separation_tag(&mut digest, &format!("{}.n{}", label, network.as_byte())); - ConsensusHasher::from_digest(digest) - } -} - -#[derive(Clone)] -pub struct ConsensusHasher { - writer: WriteHashWrapper, -} - -impl ConsensusHasher { - fn from_digest(digest: D) -> Self { - Self { - writer: WriteHashWrapper(digest), - } - } -} - -impl ConsensusHasher> { - pub fn finalize(self) -> [u8; 32] { - self.writer.0.finalize().into() - } - - pub fn update_consensus_encode(&mut self, data: &T) { - BorshSerialize::serialize(data, &mut self.writer) - .expect("Incorrect implementation of BorshSerialize encountered. Implementations MUST be infallible."); + pub fn new(label: &'static str) -> Self { + Self::new_with_network(label, Network::get_current_or_user_setting_or_default()) } - pub fn chain(mut self, data: &T) -> Self { - self.update_consensus_encode(data); - self + pub fn new_with_network(label: &'static str, network: Network) -> Self { + let hasher = DomainSeparatedBorshHasher::::new_with_label(&format!("{}.n{}", label, network.as_byte())); + Self { hasher } } -} -impl ConsensusHasher> { - pub fn finalize(self) -> [u8; 64] { - self.writer.0.finalize().into() + pub fn finalize(self) -> digest::Output { + self.hasher.finalize() } pub fn update_consensus_encode(&mut self, data: &T) { - BorshSerialize::serialize(data, &mut self.writer) - .expect("Incorrect implementation of BorshSerialize encountered. Implementations MUST be infallible."); + self.hasher.update_consensus_encode(data); } pub fn chain(mut self, data: &T) -> Self { @@ -97,45 +59,11 @@ impl ConsensusHasher> { } } -impl Default for ConsensusHasher> { - /// This `default` implementation is provided for convenience, but should not be used as the de-facto consensus - /// hasher, rather create a new unique hash domain. - fn default() -> Self { - hash_domain!( - DefaultConsensusHashDomain, - "com.tari.base_layer.core.consensus.consensus_encoding.hashing", - 0 - ); - DomainSeparatedConsensusHasher::>::new("default") - } -} - -impl Default for ConsensusHasher> { +impl Default for DomainSeparatedConsensusHasher { /// This `default` implementation is provided for convenience, but should not be used as the de-facto consensus - /// hasher, rather create a new unique hash domain. 
+ /// hasher, rather specify a specific label fn default() -> Self { - hash_domain!( - DefaultConsensusHashDomain, - "com.tari.base_layer.core.consensus.consensus_encoding.hashing", - 0 - ); - DomainSeparatedConsensusHasher::>::new("default") - } -} - -/// This private struct wraps a Digest and implements the Write trait to satisfy the consensus encoding trait. -/// Do not use the DomainSeparatedHasher with this. -#[derive(Clone)] -struct WriteHashWrapper(D); - -impl Write for WriteHashWrapper { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.update(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) + DomainSeparatedConsensusHasher::::new("default") } } @@ -174,7 +102,7 @@ mod tests { #[test] fn it_hashes_using_the_domain_hasher() { - let network = Network::get_current_or_default(); + let network = Network::get_current_or_user_setting_or_default(); // Script is chosen because the consensus encoding impl for TariScript has 2 writes let mut hasher = Blake2b::::default(); @@ -185,12 +113,12 @@ mod tests { .chain(&255u64) .finalize(); - assert_eq!(hash, expected_hash.as_ref()); + assert_eq!(hash, expected_hash); } #[test] fn it_adds_to_hash_challenge_in_complete_chunks() { - let network = Network::get_current_or_default(); + let network = Network::get_current_or_user_setting_or_default(); // Script is chosen because the consensus encoding impl for TariScript has 2 writes let test_subject = script!(Nop); @@ -202,7 +130,7 @@ mod tests { .chain(&test_subject) .finalize(); - assert_eq!(hash, expected_hash.as_ref()); + assert_eq!(hash, expected_hash); } #[test] @@ -210,9 +138,40 @@ mod tests { let blake_hasher = Blake2b::::default(); let blake_hash = blake_hasher.chain_update(b"").finalize(); - let default_consensus_hasher = ConsensusHasher::>::default(); + let default_consensus_hasher = DomainSeparatedConsensusHasher::>::default(); let default_consensus_hash = default_consensus_hasher.chain(b"").finalize(); assert_ne!(blake_hash.as_slice(), default_consensus_hash.as_slice()); } + + #[test] + fn it_uses_the_network_environment_variable_if_set() { + let label = "test"; + let input = [1u8; 32]; + + for network in [ + Network::MainNet, + Network::StageNet, + Network::NextNet, + Network::LocalNet, + Network::Igor, + Network::Esmeralda, + ] { + // Generate a specific network hash + let hash_specify_network = + DomainSeparatedConsensusHasher::>::new_with_network(label, network) + .chain(&input) + .finalize(); + + // Generate an inferred network hash + std::env::set_var("TARI_NETWORK", network.as_key_str()); + let inferred_network_hash = DomainSeparatedConsensusHasher::>::new(label) + .chain(&input) + .finalize(); + std::env::remove_var("TARI_NETWORK"); + + // They should be equal + assert_eq!(hash_specify_network, inferred_network_hash); + } + } } diff --git a/base_layer/core/src/consensus/consensus_manager.rs b/base_layer/core/src/consensus/consensus_manager.rs index e315848c9f..4bb4c0814a 100644 --- a/base_layer/core/src/consensus/consensus_manager.rs +++ b/base_layer/core/src/consensus/consensus_manager.rs @@ -82,9 +82,7 @@ impl ConsensusManager { } } - /// Get a pointer to the emission schedule - /// The height provided here, decides the emission curve to use. 
It swaps to the integer curve upon reaching - /// 1_000_000_000 + /// Get a reference to the emission parameters pub fn emission_schedule(&self) -> &EmissionSchedule { &self.inner.emission } @@ -241,8 +239,11 @@ impl ConsensusManagerBuilder { let emission = EmissionSchedule::new( self.consensus_constants[0].emission_initial, self.consensus_constants[0].emission_decay, - self.consensus_constants[0].emission_tail, + self.consensus_constants[0].inflation_bips, + self.consensus_constants[0].tail_epoch_length, + self.consensus_constants[0].faucet_value(), ); + let inner = ConsensusManagerInner { consensus_constants: self.consensus_constants, network: self.network, diff --git a/base_layer/core/src/consensus/emission.rs b/base_layer/core/src/consensus/emission.rs index d6cfc671bc..09b7237f0b 100644 --- a/base_layer/core/src/consensus/emission.rs +++ b/base_layer/core/src/consensus/emission.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::cmp; - use crate::transactions::tari_amount::MicroMinotari; pub trait Emission { @@ -29,32 +27,50 @@ pub trait Emission { fn supply_at_block(&self, height: u64) -> MicroMinotari; } -/// The Minotari emission schedule. The emission schedule determines how much Minotari is mined as a block reward at -/// every block. -/// -/// NB: We don't know what the final emission schedule will be on Minotari yet, so do not give any weight to values or -/// formulae provided in this file, they will almost certainly change ahead of main-net release. +/// The Minotari emission schedule with inflating tail emission. The emission schedule determines how much Minotari is +/// mined as a block reward at every block. #[derive(Debug, Clone)] pub struct EmissionSchedule { initial: MicroMinotari, decay: &'static [u64], - tail: MicroMinotari, + inflation_bips: u64, // Tail inflation in basis points. 100 bips = 1 percentage point + epoch_length: u64, // The number of blocks in an inflation epoch + initial_supply: MicroMinotari, // The supply at block 0, from faucets or premine } impl EmissionSchedule { /// Create a new emission schedule instance. /// - /// The Emission schedule follows a similar pattern to Monero; with an exponentially decaying emission rate with - /// a constant tail emission rate. + /// ## Primary emission schedule + /// + /// The Emission schedule follows a similar pattern to Monero; with an initially exponentially decaying emission + /// rate and a tail emission. + /// /// - /// The block reward is given by - /// $$ r_n = \mathrm{MAX}(\mathrm(intfloor(r_{n-1} * (1 - \epsilon)), t) n > 0 $$ + /// The decay portion is given by + /// $$ r_n = \lfloor r_{n-1} * (1 - \epsilon) \rfloor, n > 0 $$ /// $$ r_0 = A_0 $$ /// /// where /// * $$A_0$$ is the genesis block reward /// * $$1 - \epsilon$$ is the decay rate - /// * $$t$$ is the constant tail emission rate + /// + /// The decay parameters are determined as described in [#decay_parameters]. + /// + /// ## Tail emission + /// + /// If the feature `mainnet_emission` is not enabled, the tail emission is constant. It is triggered if the reward + /// would fall below the `tail` value. + /// + /// If the feature `mainnet_emission` is enabled, the tail emission is calculated as follows: + /// + /// At each block, the reward is multiplied by `EPOCH_LENGTH` (approximately a year's worth of blocks) to + /// calculate `annual_supply`. 
+ /// If `annual_supply/current_supply` is less than `0.01*inflation_bips`% then we enter tail emission mode. + /// + /// Every `EPOCH_LENGTH` blocks, the inflation rate is recalculated based on the current supply. + /// + /// ## Decay parameters /// /// The `intfloor` function is an integer-math-based multiplication of an integer by a fraction that's very close /// to one (e.g. 0.998,987,123,432)` that @@ -97,12 +113,24 @@ impl EmissionSchedule { /// /// The shift right operation will overflow if shifting more than 63 bits. `new` will panic if any of the decay /// values are greater than or equal to 64. - pub fn new(initial: MicroMinotari, decay: &'static [u64], tail: MicroMinotari) -> EmissionSchedule { + pub fn new( + initial: MicroMinotari, + decay: &'static [u64], + inflation_bips: u64, + epoch_length: u64, + initial_supply: MicroMinotari, + ) -> EmissionSchedule { assert!( decay.iter().all(|i| *i < 64), "Decay value would overflow. All `decay` values must be less than 64" ); - EmissionSchedule { initial, decay, tail } + EmissionSchedule { + initial, + decay, + inflation_bips, + epoch_length, + initial_supply, + } } /// Utility function to calculate the decay parameters that are provided in [EmissionSchedule::new]. This function @@ -113,7 +141,7 @@ impl EmissionSchedule { /// /// Input : `k`: A string representing a floating point number of (nearly) arbitrary precision, and less than one. /// - /// Returns: An array of powers of negative two when when applied as a shift right and sum operation is very + /// Returns: An array of powers of negative two when applied as a shift right and sum operation is very /// close to (1-k)*n. /// /// None - If k is not a valid floating point number less than one. @@ -173,18 +201,6 @@ impl EmissionSchedule { /// the emission curve if you're interested in the supply as well as the reward. /// /// This is an infinite iterator, and each value returned is a tuple of (block number, reward, and total supply) - /// - /// ```edition2018 - /// use tari_core::{ - /// consensus::emission::EmissionSchedule, - /// transactions::tari_amount::MicroMinotari, - /// }; - /// // Print the reward and supply for first 100 blocks - /// let schedule = EmissionSchedule::new(10.into(), &[3], 1.into()); - /// for (n, reward, supply) in schedule.iter().take(100) { - /// println!("{:3} {:9} {:9}", n, reward, supply); - /// } - /// ``` pub fn iter(&self) -> EmissionRate { EmissionRate::new(self) } @@ -203,15 +219,19 @@ pub struct EmissionRate<'a> { supply: MicroMinotari, reward: MicroMinotari, schedule: &'a EmissionSchedule, + epoch: u64, + epoch_counter: u64, } impl<'a> EmissionRate<'a> { fn new(schedule: &'a EmissionSchedule) -> EmissionRate<'a> { EmissionRate { block_num: 0, - supply: MicroMinotari(0), + supply: schedule.initial_supply, reward: MicroMinotari(0), schedule, + epoch: 0, + epoch_counter: 0, } } @@ -227,21 +247,53 @@ impl<'a> EmissionRate<'a> { self.reward } + fn next_decay_reward(&self) -> MicroMinotari { + let r = self.reward.as_u64(); + self.schedule + .decay + .iter() + .fold(self.reward, |sum, i| sum - MicroMinotari::from(r >> *i)) + } + /// Calculates the next reward by multiplying the decay factor by the previous block reward using integer math. /// /// We write the decay factor, 1 - k, as a sum of fraction powers of two. e.g. if we wanted 0.25 as our k, then /// (1-k) would be 0.75 = 1/2 plus 1/4 (1/2^2). 
/// /// Then we calculate k.R = (1 - e).R = R - e.R = R - (0.5 * R + 0.25 * R) = R - R >> 1 - R >> 2 - fn next_reward(&self) -> MicroMinotari { - let r = self.reward.as_u64(); - let next = self - .schedule - .decay - .iter() - .fold(self.reward, |sum, i| sum - MicroMinotari::from(r >> *i)); + fn next_reward(&mut self) { + // Inflation phase + if self.epoch > 0 { + self.epoch_counter += 1; + if self.epoch_counter >= self.schedule.epoch_length { + self.epoch_counter = 0; + self.epoch += 1; + self.reward = self.new_tail_emission(); + } + } else { + // Decay phase + let cutoff = self.new_tail_emission(); + let next_decay_reward = self.next_decay_reward(); + if self.epoch == 0 && next_decay_reward > cutoff { + self.reward = next_decay_reward; + } else { + self.epoch = 1; + self.reward = cutoff; + } + } + } - cmp::max(next, self.schedule.tail) + fn new_tail_emission(&self) -> MicroMinotari { + // Remember: 100% = 10,000 bips + let epoch_issuance = self + .supply + .as_u128() + .saturating_mul(u128::from(self.schedule.inflation_bips)) / + 10_000u128; + #[allow(clippy::cast_possible_truncation)] + let epoch_issuance = epoch_issuance as u64; // intentionally allow rounding via truncation + let reward = epoch_issuance / self.schedule.epoch_length; // in uT + MicroMinotari::from((reward / 1_000_000) * 1_000_000) // truncate to nearest whole XTR } } @@ -252,11 +304,11 @@ impl Iterator for EmissionRate<'_> { self.block_num += 1; if self.block_num == 1 { self.reward = self.schedule.initial; - self.supply = self.schedule.initial; + self.supply = self.supply.checked_add(self.reward)?; return Some((self.block_num, self.reward, self.supply)); } - self.reward = self.next_reward(); - // Once we've reached max supply, the iterator is done + self.next_reward(); // Has side effect + // Once we've reached max supply, the iterator is done self.supply = self.supply.checked_add(self.reward)?; Some((self.block_num, self.reward, self.supply)) } @@ -280,25 +332,90 @@ impl Emission for EmissionSchedule { #[cfg(test)] mod test { + #[test] + fn calc_array() { + assert_eq!(EmissionSchedule::decay_params("1.00"), None); + assert_eq!(EmissionSchedule::decay_params("56345"), None); + assert_eq!(EmissionSchedule::decay_params("0.75").unwrap(), vec![2]); + assert_eq!(EmissionSchedule::decay_params("0.25").unwrap(), vec![1, 2]); + assert_eq!(EmissionSchedule::decay_params("0.5").unwrap(), vec![1]); + assert_eq!(EmissionSchedule::decay_params("0.875").unwrap(), vec![3]); + assert_eq!(EmissionSchedule::decay_params("0.125").unwrap(), vec![1, 2, 3]); + assert_eq!(EmissionSchedule::decay_params("0.64732").unwrap(), vec![ + 2, 4, 5, 7, 10, 13, 16, 19, 20, 21, 22, 25, 29, 32, 33, 34, 35, 36, 38, 45, 47, 51, 53, 58, 59, 60, 62, 63 + ]); + assert_eq!(EmissionSchedule::decay_params("0.9999991208182701").unwrap(), vec![ + 21, 22, 23, 25, 26, 37, 38, 39, 41, 45, 49, 50, 51, 52, 55, 57, 59, 60, 63 + ]); + assert_eq!(EmissionSchedule::decay_params("0.0").unwrap(), vec![0]); + } + use crate::{ consensus::emission::{Emission, EmissionSchedule}, - transactions::tari_amount::{uT, MicroMinotari, T}, + transactions::tari_amount::{MicroMinotari, T}, }; #[test] - fn schedule() { - let schedule = EmissionSchedule::new( - MicroMinotari::from(10_000_100), - &[22, 23, 24, 26, 27], - MicroMinotari::from(100), - ); - assert_eq!(schedule.block_reward(0), MicroMinotari::from(0)); - assert_eq!(schedule.supply_at_block(0), MicroMinotari::from(0)); - assert_eq!(schedule.block_reward(1), MicroMinotari::from(10_000_100)); - 
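The `next_decay_reward` fold above is the same shift-and-subtract trick documented for `decay_params`: each decay entry `d` removes `r >> d` from the reward, so the surviving fraction is one minus the sum of the corresponding negative powers of two. A self-contained sketch (a hypothetical free function mirroring the in-tree fold), checked against the `decay_params` test vectors:

```rust
/// Sketch of next_decay_reward: subtract r >> d for every decay entry d,
/// using the reward at the start of the block as `r` throughout the fold.
fn next_decay_reward(reward: u64, decay: &[u64]) -> u64 {
    decay.iter().fold(reward, |sum, d| sum - (reward >> d))
}

fn main() {
    // decay_params("0.75") == [2]: one quarter is shaved off each block.
    assert_eq!(next_decay_reward(1_000_000, &[2]), 750_000);
    // decay_params("0.25") == [1, 2]: r - r/2 - r/4 leaves one quarter.
    assert_eq!(next_decay_reward(1_000_000, &[1, 2]), 250_000);
}
```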
assert_eq!(schedule.supply_at_block(1), MicroMinotari::from(10_000_100)); - // These values have been independently calculated - assert_eq!(schedule.block_reward(100 + 1), MicroMinotari::from(9_999_800)); - assert_eq!(schedule.supply_at_block(100 + 1), MicroMinotari::from(1_009_994_950)); + #[allow(clippy::cast_possible_truncation)] + fn mainnet_emission() { + let epoch_length = 30 * 24 * 366; + let halflife = 3 * 30 * 24 * 365; + let a0 = MicroMinotari::from(12_923_971_428); + let decay = &[21u64, 22, 23, 25, 26, 37, 38, 40]; + let premine = 6_300_000_000 * T; + let schedule = EmissionSchedule::new(a0, decay, 100, epoch_length, premine); + let mut iter = schedule.iter(); + assert_eq!(iter.block_num, 0); + assert_eq!(iter.reward, MicroMinotari::from(0)); + assert_eq!(iter.supply, premine); + let (num, reward, supply) = iter.next().unwrap(); + // Block 1 + assert_eq!(num, 1); + assert_eq!(reward, MicroMinotari::from(12_923_971_428)); + assert_eq!(supply, MicroMinotari::from(6_300_012_923_971_428)); + // Block 2 + let (num, reward, supply) = iter.next().unwrap(); + assert_eq!(num, 2); + assert_eq!(reward, MicroMinotari::from(12_923_960_068)); + assert_eq!(supply, MicroMinotari::from(6_300_025_847_931_496)); + + // Block 788,400. 50% Mined + let mut iter = iter.skip_while(|(num, _, _)| *num < halflife); + let (num, reward, supply) = iter.next().unwrap(); + assert_eq!(num, halflife); + assert_eq!(reward.as_u64(), 6_463_480_936); + let total_supply = 21_000_000_000 * T - premine; + let residual = (supply - premine) * 2 - total_supply; + // Within 0.01% of mining half the total supply + assert!(residual < total_supply / 10000, "Residual: {}", residual); + // Head to tail emission + let mut iter = iter.skip_while(|(num, _, _)| *num < 3_220_980); + let (num, reward, supply) = iter.next().unwrap(); + assert_eq!(num, 3_220_980); + assert_eq!(reward, MicroMinotari::from(764_000_449)); + assert_eq!(supply, MicroMinotari::from(20_140_382_328_948_420)); + let (num, reward, _) = iter.next().unwrap(); + assert_eq!(num, 3_220_981); + assert_eq!(reward, 764 * T); + let (num, reward, _) = iter.next().unwrap(); + assert_eq!(num, 3_220_982); + assert_eq!(reward, 764 * T); + // Next boosting + let mut iter = iter.skip((epoch_length - 3) as usize); + let (num, reward, supply) = iter.next().unwrap(); + assert_eq!(num, 3_484_500); + assert_eq!(reward, 764 * T); + assert_eq!(supply, MicroMinotari::from(20_341_711_608_948_420)); + let (num, reward, _) = iter.next().unwrap(); + assert_eq!(num, 3_484_501); + assert_eq!(reward, 771 * T); + let (num, reward, supply) = iter.next().unwrap(); + assert_eq!(num, 3_484_502); + assert_eq!(reward, 771 * T); + // Check supply inflation. 
Because of rounding, it could be between 98 and 100 bips + let epoch_supply = 771 * T * epoch_length; + let inflation = (10000 * epoch_supply / supply).as_u64(); // 1 bip => 100 + assert!(inflation < 100 && inflation > 98, "Inflation: {} bips", inflation); } #[test] @@ -308,7 +425,9 @@ mod test { let schedule = EmissionSchedule::new( MicroMinotari::from(10000000u64), &[22, 23, 24, 26, 27], - MicroMinotari::from(100), + 0, + 100000, + MicroMinotari::from(0), ); // Slow but does not overflow assert_eq!(schedule.block_reward(height + 1), MicroMinotari::from(4_194_303)); @@ -320,83 +439,36 @@ mod test { let schedule = EmissionSchedule::new( MicroMinotari::from(INITIAL), &[2], // 0.25 decay - MicroMinotari::from(100), + 1000, + 10, + 100 * T, ); assert_eq!(schedule.block_reward(0), MicroMinotari(0)); - assert_eq!(schedule.supply_at_block(0), MicroMinotari(0)); + assert_eq!(schedule.supply_at_block(0), 100 * T); let values = schedule.iter().take(101).collect::>(); let (height, reward, supply) = values[0]; assert_eq!(height, 1); assert_eq!(reward, MicroMinotari::from(INITIAL)); - assert_eq!(supply, MicroMinotari::from(INITIAL)); + assert_eq!(supply, MicroMinotari::from(INITIAL) + 100 * T); let (height, reward, supply) = values[1]; assert_eq!(height, 2); assert_eq!(reward, MicroMinotari::from(7_500_075)); - assert_eq!(supply, MicroMinotari::from(17_500_175)); + assert_eq!(supply, MicroMinotari::from(117_500_175)); let (height, reward, supply) = values[2]; assert_eq!(height, 3); assert_eq!(reward, MicroMinotari::from(5_625_057)); - assert_eq!(supply, MicroMinotari::from(23_125_232)); - let (height, reward, supply) = values[10]; - assert_eq!(height, 11); - assert_eq!(reward, MicroMinotari::from(563_142)); - assert_eq!(supply, MicroMinotari::from(38_310_986)); - let (height, reward, supply) = values[41]; - assert_eq!(height, 42); - assert_eq!(reward, MicroMinotari::from(100)); - assert_eq!(supply, MicroMinotari::from(40_000_252)); - - let mut tot_supply = MicroMinotari::from(0); - for (_, reward, supply) in schedule.iter().take(1000) { - tot_supply += reward; - assert_eq!(tot_supply, supply); - } - } - - #[test] - #[allow(clippy::identity_op)] - fn emission() { - let emission = EmissionSchedule::new(1 * T, &[1, 2], 100 * uT); - let mut emission = emission.iter(); - // decay is 1 - 0.25 - 0.125 = 0.625 - assert_eq!(emission.block_height(), 0); - assert_eq!(emission.block_reward(), MicroMinotari(0)); - assert_eq!(emission.supply(), MicroMinotari(0)); - - assert_eq!(emission.next(), Some((1, 1_000_000 * uT, 1_000_000 * uT))); - assert_eq!(emission.next(), Some((2, 250_000 * uT, 1_250_000 * uT))); - assert_eq!(emission.next(), Some((3, 62_500 * uT, 1_312_500 * uT))); - assert_eq!(emission.next(), Some((4, 15_625 * uT, 1_328_125 * uT))); - assert_eq!(emission.next(), Some((5, 3_907 * uT, 1_332_032 * uT))); - assert_eq!(emission.next(), Some((6, 978 * uT, 1_333_010 * uT))); - assert_eq!(emission.next(), Some((7, 245 * uT, 1_333_255 * uT))); - // Tail emission kicks in - assert_eq!(emission.next(), Some((8, 100 * uT, 1_333_355 * uT))); - assert_eq!(emission.next(), Some((9, 100 * uT, 1_333_455 * uT))); - - assert_eq!(emission.block_height(), 9); - assert_eq!(emission.block_reward(), 100 * uT); - assert_eq!(emission.supply(), 1333455 * uT); - let schedule = EmissionSchedule::new(1 * T, &[1, 2], 100 * uT); - assert_eq!(emission.block_reward(), schedule.block_reward(9)); - assert_eq!(emission.supply(), schedule.supply_at_block(9)) - } - - #[test] - fn calc_array() { - 
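The 98–100 bips window asserted above follows from the double truncation in `new_tail_emission`: the epoch issuance is an integer basis-point slice of the supply, the per-block reward is an integer division by the epoch length, and the result is rounded down to a whole Minotari. A standalone sketch of that integer maths using the figures from the `mainnet_emission` test (the function name is illustrative):

```rust
/// Sketch of new_tail_emission's integer maths (values in microMinotari).
fn tail_reward_ut(supply: u128, inflation_bips: u128, epoch_length: u128) -> u128 {
    let epoch_issuance = supply * inflation_bips / 10_000; // uT minted per epoch
    let reward = epoch_issuance / epoch_length; // uT per block, rounded down
    (reward / 1_000_000) * 1_000_000 // truncated to the nearest whole XTR
}

fn main() {
    let epoch_length: u128 = 30 * 24 * 366; // 263_520 blocks, roughly a year
    let supply: u128 = 20_140_382_328_948_420; // uT entering tail emission
    // 100 bips (1%) of supply, spread over an epoch and truncated: 764 XTR.
    assert_eq!(tail_reward_ut(supply, 100, epoch_length), 764_000_000);
}
```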
assert_eq!(EmissionSchedule::decay_params("1.00"), None); - assert_eq!(EmissionSchedule::decay_params("56345"), None); - assert_eq!(EmissionSchedule::decay_params("0.75").unwrap(), vec![2]); - assert_eq!(EmissionSchedule::decay_params("0.25").unwrap(), vec![1, 2]); - assert_eq!(EmissionSchedule::decay_params("0.5").unwrap(), vec![1]); - assert_eq!(EmissionSchedule::decay_params("0.875").unwrap(), vec![3]); - assert_eq!(EmissionSchedule::decay_params("0.125").unwrap(), vec![1, 2, 3]); - assert_eq!(EmissionSchedule::decay_params("0.64732").unwrap(), vec![ - 2, 4, 5, 7, 10, 13, 16, 19, 20, 21, 22, 25, 29, 32, 33, 34, 35, 36, 38, 45, 47, 51, 53, 58, 59, 60, 62, 63 - ]); - assert_eq!(EmissionSchedule::decay_params("0.9999991208182701").unwrap(), vec![ - 21, 22, 23, 25, 26, 37, 38, 39, 41, 45, 49, 50, 51, 52, 55, 57, 59, 60, 63 - ]); - assert_eq!(EmissionSchedule::decay_params("0.0").unwrap(), vec![0]); + assert_eq!(supply, MicroMinotari::from(123_125_232)); + let (height, reward, supply) = values[8]; + assert_eq!(height, 9); + assert_eq!(reward, MicroMinotari::from(1_001_140)); + assert_eq!(supply, MicroMinotari::from(136_996_989)); + let (height, reward, supply) = values[9]; + assert_eq!(height, 10); + assert_eq!(reward, MicroMinotari::from(1_000_000)); + assert_eq!(supply, MicroMinotari::from(137_996_989)); + let (height, reward, supply) = values[99]; + assert_eq!(height, 100); + assert_eq!(reward, MicroMinotari::from(2_000_000)); + assert_eq!(supply, MicroMinotari::from(248_996_989)); } } diff --git a/base_layer/core/src/covenants/decoder.rs b/base_layer/core/src/covenants/decoder.rs index c4b7e54386..9a8aef6ec0 100644 --- a/base_layer/core/src/covenants/decoder.rs +++ b/base_layer/core/src/covenants/decoder.rs @@ -95,6 +95,7 @@ pub(super) trait CovenantReadExt: io::Read { impl CovenantReadExt for R { /// Reads next byte code + #[allow(clippy::unused_io_amount)] fn read_next_byte_code(&mut self) -> Result, io::Error> { let mut buf = [0u8; 1]; loop { diff --git a/base_layer/core/src/covenants/output_set.rs b/base_layer/core/src/covenants/output_set.rs index caf5f18e12..d00cfa4826 100644 --- a/base_layer/core/src/covenants/output_set.rs +++ b/base_layer/core/src/covenants/output_set.rs @@ -163,7 +163,7 @@ impl Eq for Indexed {} impl PartialOrd for Indexed { fn partial_cmp(&self, other: &Self) -> Option { - self.index.partial_cmp(&other.index) + Some(self.cmp(other)) } } diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index d9ebf079c5..6cbee92a07 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -57,7 +57,7 @@ mod domain_hashing { use blake2::Blake2b; use digest::consts::U32; use tari_crypto::{hash_domain, hashing::DomainSeparatedHasher}; - use tari_hash_domains::ValidatorNodeBmtHashDomain; + use tari_hashing::ValidatorNodeBmtHashDomain; use tari_mmr::{ pruned_hashset::PrunedHashSet, sparse_merkle_tree::SparseMerkleTree, diff --git a/base_layer/core/src/mempool/mempool.rs b/base_layer/core/src/mempool/mempool.rs index 97a3be262b..07f3d5c903 100644 --- a/base_layer/core/src/mempool/mempool.rs +++ b/base_layer/core/src/mempool/mempool.rs @@ -22,6 +22,7 @@ use std::sync::{Arc, RwLock}; +use log::debug; use tari_common_types::types::{PrivateKey, Signature}; use tokio::task; @@ -41,6 +42,8 @@ use crate::{ validation::TransactionValidator, }; +pub const LOG_TARGET: &str = "c::mp::mempool"; + /// The Mempool consists of an Unconfirmed Transaction Pool, Pending Pool, Orphan Pool and Reorg Pool and is responsible /// for managing and maintaining all 
unconfirmed transactions that have not yet been included in a block, and /// transactions that have recently been included in a block. @@ -117,8 +120,33 @@ impl Mempool { /// Returns a list of transaction ranked by transaction priority up to a given weight. /// Only transactions that fit into a block will be returned pub async fn retrieve(&self, total_weight: u64) -> Result>, MempoolError> { - self.with_write_access(move |storage| storage.retrieve_and_revalidate(total_weight)) - .await + let start = std::time::Instant::now(); + let retrieved = self + .with_read_access(move |storage| storage.retrieve(total_weight)) + .await?; + debug!( + target: LOG_TARGET, + "Retrieved {} highest priority transaction(s) from the mempool in {:.0?} ms", + retrieved.retrieved_transactions.len(), + start.elapsed() + ); + + if !retrieved.transactions_to_remove_and_insert.is_empty() { + // we need to remove all transactions that need to be rechecked. + debug!( + target: LOG_TARGET, + "Removing {} transaction(s) from unconfirmed pool because they need re-evaluation", + retrieved.transactions_to_remove_and_insert.len() + ); + + let transactions_to_remove_and_insert = retrieved.transactions_to_remove_and_insert.clone(); + self.with_write_access(move |storage| { + storage.remove_and_reinsert_transactions(transactions_to_remove_and_insert) + }) + .await?; + } + + Ok(retrieved.retrieved_transactions) } pub async fn retrieve_by_excess_sigs( diff --git a/base_layer/core/src/mempool/mempool_storage.rs b/base_layer/core/src/mempool/mempool_storage.rs index b7f99c6277..554af9e2d6 100644 --- a/base_layer/core/src/mempool/mempool_storage.rs +++ b/base_layer/core/src/mempool/mempool_storage.rs @@ -32,7 +32,7 @@ use crate::{ mempool::{ error::MempoolError, reorg_pool::ReorgPool, - unconfirmed_pool::{UnconfirmedPool, UnconfirmedPoolError}, + unconfirmed_pool::{RetrieveResults, TransactionKey, UnconfirmedPool, UnconfirmedPoolError}, FeePerGramStat, MempoolConfig, StateResponse, @@ -52,7 +52,7 @@ pub const LOG_TARGET: &str = "c::mp::mempool_storage"; /// for managing and maintaining all unconfirmed transactions have not yet been included in a block, and transactions /// that have recently been included in a block. pub struct MempoolStorage { - unconfirmed_pool: UnconfirmedPool, + pub(crate) unconfirmed_pool: UnconfirmedPool, reorg_pool: ReorgPool, validator: Box, rules: ConsensusManager, @@ -156,6 +156,23 @@ impl MempoolStorage { .transaction_weight_params() } + /// Ensures that all transactions are safely deleted in order and from all storage and then + /// re-inserted + pub(crate) fn remove_and_reinsert_transactions( + &mut self, + transactions: Vec<(TransactionKey, Arc)>, + ) -> Result<(), MempoolError> { + for (tx_key, _) in &transactions { + self.unconfirmed_pool + .remove_transaction(*tx_key) + .map_err(|e| MempoolError::InternalError(e.to_string()))?; + } + self.insert_txs(transactions.iter().map(|(_, tx)| tx.clone()).collect()) + .map_err(|e| MempoolError::InternalError(e.to_string()))?; + + Ok(()) + } + // Insert a set of new transactions into the UTxPool. fn insert_txs(&mut self, txs: Vec>) -> Result<(), UnconfirmedPoolError> { for tx in txs { @@ -285,11 +302,10 @@ impl MempoolStorage { /// Returns a list of transaction ranked by transaction priority up to a given weight. 
/// Will only return transactions that will fit into the given weight - pub fn retrieve_and_revalidate(&mut self, total_weight: u64) -> Result>, MempoolError> { - let results = self.unconfirmed_pool.fetch_highest_priority_txs(total_weight)?; - self.insert_txs(results.transactions_to_insert) - .map_err(|e| MempoolError::InternalError(e.to_string()))?; - Ok(results.retrieved_transactions) + pub fn retrieve(&self, total_weight: u64) -> Result { + self.unconfirmed_pool + .fetch_highest_priority_txs(total_weight) + .map_err(|e| MempoolError::InternalError(e.to_string())) } pub fn retrieve_by_excess_sigs( diff --git a/base_layer/core/src/mempool/priority/prioritized_transaction.rs b/base_layer/core/src/mempool/priority/prioritized_transaction.rs index d656a8cfd6..0c78db88b7 100644 --- a/base_layer/core/src/mempool/priority/prioritized_transaction.rs +++ b/base_layer/core/src/mempool/priority/prioritized_transaction.rs @@ -42,7 +42,13 @@ pub struct FeePriority(Vec); impl FeePriority { pub fn new(transaction: &Transaction, insert_epoch: u64, weight: u64) -> Result { - let fee_per_byte = transaction.body.get_total_fee()?.as_u64().saturating_mul(1000) / weight; + let fee_per_byte = transaction + .body + .get_total_fee()? + .as_u64() + .saturating_mul(1000) + .checked_div(weight) + .ok_or(TransactionError::ZeroWeight)?; // Big-endian used here, the MSB is in the starting index. The ordering for Vec is taken from elements left // to right and the unconfirmed pool expects the lowest priority to be sorted lowest to highest in the // BTreeMap @@ -95,7 +101,13 @@ impl PrioritizedTransaction { Ok(Self { key, priority: FeePriority::new(&transaction, insert_epoch, weight)?, - fee_per_byte: transaction.body.get_total_fee()?.as_u64().saturating_mul(1000) / weight, + fee_per_byte: transaction + .body + .get_total_fee()? 
+ .as_u64() + .saturating_mul(1000) + .checked_div(weight) + .ok_or(TransactionError::ZeroWeight)?, weight, transaction, dependent_output_hashes: dependent_outputs.unwrap_or_default(), @@ -162,4 +174,37 @@ mod tests { assert!(p2 > p1); } + + #[test] + fn prioritized_from_empty_transaction() { + let weighting = TransactionWeight::latest(); + match PrioritizedTransaction::new( + 0, + &weighting, + Arc::new(Transaction::new( + vec![], + vec![], + vec![], + Default::default(), + Default::default(), + )), + None, + ) { + Ok(_) => panic!("Empty transaction should not be valid"), + Err(e) => assert_eq!(e, TransactionError::ZeroWeight), + } + } + + #[test] + fn fee_priority_with_zero_weight() { + let weight = 0; + match FeePriority::new( + &Transaction::new(vec![], vec![], vec![], Default::default(), Default::default()), + SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(), + weight, + ) { + Ok(_) => panic!("Empty transaction should not be valid"), + Err(e) => assert_eq!(e, TransactionError::ZeroWeight), + } + } } diff --git a/base_layer/core/src/mempool/service/request.rs b/base_layer/core/src/mempool/service/request.rs index 932e69f6b0..8e4a4447b8 100644 --- a/base_layer/core/src/mempool/service/request.rs +++ b/base_layer/core/src/mempool/service/request.rs @@ -23,10 +23,10 @@ use core::fmt::{Display, Error, Formatter}; use serde::{Deserialize, Serialize}; -use tari_common_types::{types::Signature, waiting_requests::RequestKey}; +use tari_common_types::types::Signature; use tari_utilities::hex::Hex; -use crate::transactions::transaction_components::Transaction; +use crate::{common::waiting_requests::RequestKey, transactions::transaction_components::Transaction}; /// API Request enum for Mempool requests. #[derive(Debug, Serialize, Deserialize)] diff --git a/base_layer/core/src/mempool/service/response.rs b/base_layer/core/src/mempool/service/response.rs index 5a46622132..51323b0754 100644 --- a/base_layer/core/src/mempool/service/response.rs +++ b/base_layer/core/src/mempool/service/response.rs @@ -22,9 +22,10 @@ use std::{fmt, fmt::Formatter}; -use tari_common_types::waiting_requests::RequestKey; - -use crate::mempool::{FeePerGramStat, StateResponse, StatsResponse, TxStorageResponse}; +use crate::{ + common::waiting_requests::RequestKey, + mempool::{FeePerGramStat, StateResponse, StatsResponse, TxStorageResponse}, +}; /// API Response enum for Mempool responses. 
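Both fee-per-byte call sites above now route the division through `checked_div`, so a zero-weight transaction (such as the completely empty bodies in the new tests) surfaces as `TransactionError::ZeroWeight` instead of a divide-by-zero panic. A small sketch of the guarded calculation (`fee_per_byte` is a hypothetical free function standing in for the inline expression in `FeePriority::new`):

```rust
/// Guarded fee-per-byte: the fee is scaled by 1000 before dividing by the
/// weight, and a zero weight yields an error rather than a division panic.
fn fee_per_byte(total_fee_ut: u64, weight: u64) -> Result<u64, &'static str> {
    total_fee_ut
        .saturating_mul(1000)
        .checked_div(weight)
        .ok_or("zero weight") // stands in for TransactionError::ZeroWeight
}

fn main() {
    assert_eq!(fee_per_byte(250, 100), Ok(2_500));
    // An empty transaction has zero weight; the error replaces a panic.
    assert!(fee_per_byte(250, 0).is_err());
}
```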
#[derive(Clone, Debug)] diff --git a/base_layer/core/src/mempool/unconfirmed_pool/mod.rs b/base_layer/core/src/mempool/unconfirmed_pool/mod.rs index 65606b0580..33e1e904a7 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/mod.rs +++ b/base_layer/core/src/mempool/unconfirmed_pool/mod.rs @@ -27,7 +27,7 @@ mod unconfirmed_pool; // Public re-exports pub use error::UnconfirmedPoolError; use tari_crypto::hash_domain; -pub use unconfirmed_pool::{UnconfirmedPool, UnconfirmedPoolConfig}; +pub use unconfirmed_pool::{RetrieveResults, TransactionKey, UnconfirmedPool, UnconfirmedPoolConfig}; hash_domain!( UnconfirmedPoolOutputTokenIdHashDomain, diff --git a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs index 8c7568a9be..bb7edfce04 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs +++ b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs @@ -48,7 +48,7 @@ use crate::{ pub const LOG_TARGET: &str = "c::mp::unconfirmed_pool::unconfirmed_pool_storage"; -type TransactionKey = usize; +pub type TransactionKey = usize; /// Configuration for the UnconfirmedPool #[derive(Clone, Copy, Serialize, Deserialize, Debug)] @@ -92,9 +92,10 @@ pub struct UnconfirmedPool { } // helper class to reduce type complexity +#[derive(Debug, Clone)] pub struct RetrieveResults { pub retrieved_transactions: Vec>, - pub transactions_to_insert: Vec>, + pub transactions_to_remove_and_insert: Vec<(TransactionKey, Arc)>, } pub type CompleteTransactionBranch = HashMap>, u64, u64)>; @@ -183,7 +184,7 @@ impl UnconfirmedPool { /// Returns a set of the highest priority unconfirmed transactions, that can be included in a block #[allow(clippy::too_many_lines)] - pub fn fetch_highest_priority_txs(&mut self, total_weight: u64) -> Result { + pub fn fetch_highest_priority_txs(&self, total_weight: u64) -> Result { // The process of selection is as follows: // Assume that all transaction have the same weight for simplicity. A(20)->B(2) means A depends on B and A has // fee 20 and B has fee 2. A(20)->B(2)->C(14), D(12) @@ -297,24 +298,10 @@ impl UnconfirmedPool { 0, )?; } - if !transactions_to_remove_and_recheck.is_empty() { - // we need to remove all transactions that need to be rechecked. - debug!( - target: LOG_TARGET, - "Removing {} transaction(s) from unconfirmed pool because they need re-evaluation", - transactions_to_remove_and_recheck.len() - ); - } - for (tx_key, _) in &transactions_to_remove_and_recheck { - self.remove_transaction(*tx_key)?; - } let results = RetrieveResults { retrieved_transactions: selected_txs.into_values().collect(), - transactions_to_insert: transactions_to_remove_and_recheck - .into_iter() - .map(|(_, tx)| tx) - .collect(), + transactions_to_remove_and_insert: transactions_to_remove_and_recheck, }; Ok(results) } @@ -544,7 +531,7 @@ impl UnconfirmedPool { .flat_map(|tx| tx.body.inputs()) .map(|i| i.output_hash()) .collect::>(); - for (_, transaction) in current_transactions.iter() { + for transaction in current_transactions.values() { for input in transaction.body.inputs() { if insert_set.contains(&input.output_hash()) { return true; @@ -646,8 +633,7 @@ impl UnconfirmedPool { Ok(Some(v)) => Some(Ok(v)), Ok(None) => None, }) - .collect::, _>>()? - .into_iter(), + .collect::, _>>()?, ); debug!( target: LOG_TARGET, @@ -680,8 +666,7 @@ impl UnconfirmedPool { Ok(Some(v)) => Some(Ok(v)), Ok(None) => None, }) - .collect::, _>>()? 
- .into_iter(), + .collect::, _>>()?, ); debug!( target: LOG_TARGET, @@ -705,7 +690,10 @@ impl UnconfirmedPool { } /// Ensures that all transactions are safely deleted in order and from all storage - fn remove_transaction(&mut self, tx_key: TransactionKey) -> Result>, UnconfirmedPoolError> { + pub(crate) fn remove_transaction( + &mut self, + tx_key: TransactionKey, + ) -> Result>, UnconfirmedPoolError> { let prioritized_transaction = match self.tx_by_key.remove(&tx_key) { Some(tx) => tx, None => return Ok(None), diff --git a/base_layer/core/src/proof_of_work/difficulty.rs b/base_layer/core/src/proof_of_work/difficulty.rs index 13e05d34a1..dc9ec85baf 100644 --- a/base_layer/core/src/proof_of_work/difficulty.rs +++ b/base_layer/core/src/proof_of_work/difficulty.rs @@ -22,6 +22,7 @@ use std::fmt; +use borsh::{BorshDeserialize, BorshSerialize}; use num_format::{Locale, ToFormattedString}; use primitive_types::U256; use serde::{Deserialize, Serialize}; @@ -34,7 +35,9 @@ use crate::proof_of_work::{error::DifficultyError, DifficultyAdjustmentError}; pub const MIN_DIFFICULTY: u64 = 1; /// The difficulty is defined as the maximum target divided by the block hash. -#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Deserialize, Serialize)] +#[derive( + Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Deserialize, Serialize, BorshSerialize, BorshDeserialize, +)] pub struct Difficulty(u64); impl Difficulty { diff --git a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs index ab5b34150b..dc4aea7399 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs @@ -57,10 +57,10 @@ pub const LOG_TARGET: &str = "c::pow::monero_rx"; pub fn randomx_difficulty( header: &BlockHeader, randomx_factory: &RandomXFactory, - gen_hash: &FixedHash, + genesis_block_hash: &FixedHash, consensus: &ConsensusManager, ) -> Result { - let monero_pow_data = verify_header(header, gen_hash, consensus)?; + let monero_pow_data = verify_header(header, genesis_block_hash, consensus)?; debug!(target: LOG_TARGET, "Valid Monero data: {}", monero_pow_data); let blockhashing_blob = monero_pow_data.to_blockhashing_blob(); let vm = randomx_factory.create(monero_pow_data.randomx_key())?; @@ -100,13 +100,17 @@ fn parse_extra_field_truncate_on_error(raw_extra_field: &RawExtraField) -> Extra /// If these assertions pass, a valid `MoneroPowData` instance is returned pub fn verify_header( header: &BlockHeader, - gen_hash: &FixedHash, + genesis_block_hash: &FixedHash, consensus: &ConsensusManager, ) -> Result { let monero_data = MoneroPowData::from_header(header, consensus)?; let expected_merge_mining_hash = header.merge_mining_hash(); - let extra_field = ExtraField::try_parse(&monero_data.coinbase_tx_extra) - .map_err(|_| MergeMineError::DeserializeError("Invalid extra field".to_string()))?; + let extra_field = ExtraField::try_parse(&monero_data.coinbase_tx_extra); + let extra_field = extra_field.unwrap_or_else(|ex_field| { + warn!(target: LOG_TARGET, "Error deserializing, Monero extra field"); + ex_field + }); + debug!(target: LOG_TARGET, "Extra field: {:?}", extra_field); // Check that the Tari MM hash is found in the Monero coinbase transaction // and that only 1 Tari header is found @@ -125,7 +129,7 @@ pub fn verify_header( depth, &merge_mining_hash, &expected_merge_mining_hash, - gen_hash, + genesis_block_hash, ) } } @@ -300,21 +304,21 @@ pub fn create_ordered_transaction_hashes_from_block(block: 
&monero::Block) -> Ve .collect() } -/// Inserts merge mining hash into a Monero block -pub fn insert_merge_mining_tag_and_aux_chain_merkle_root_into_block>( +/// Inserts aux chain merkle root and info into a Monero block +pub fn insert_aux_chain_mr_and_info_into_block>( block: &mut monero::Block, - hash: T, + aux_chain_mr: T, aux_chain_count: u8, aux_nonce: u32, ) -> Result<(), MergeMineError> { if aux_chain_count == 0 { return Err(MergeMineError::ZeroAuxChains); } - if hash.as_ref().len() != monero::Hash::len_bytes() { + if aux_chain_mr.as_ref().len() != monero::Hash::len_bytes() { return Err(MergeMineError::HashingError(format!( "Expected source to be {} bytes, but it was {} bytes", monero::Hash::len_bytes(), - hash.as_ref().len() + aux_chain_mr.as_ref().len() ))); } // When we insert the merge mining tag, we need to make sure that the extra field is valid. @@ -334,17 +338,18 @@ pub fn insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&bytes[..]).unwrap(); let block_header = BlockHeader::new(0); let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); let coinbase = block.miner_tx.clone(); let extra = coinbase.prefix.extra; @@ -558,7 +563,7 @@ mod test { validator_node_size: 0, }; let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); let hashes = create_ordered_transaction_hashes_from_block(&block); assert_eq!(hashes.len(), block.tx_hashes.len() + 1); let root = tree_hash(&hashes).unwrap(); @@ -660,7 +665,7 @@ mod test { validator_node_size: 0, }; let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); let count = 1 + (u16::try_from(block.tx_hashes.len()).unwrap()); let mut hashes = Vec::with_capacity(count as usize); hashes.push(block.miner_tx.hash()); @@ -806,7 +811,7 @@ mod test { validator_node_size: 0, }; let hash = Hash::null(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); let count = 1 + (u16::try_from(block.tx_hashes.len()).unwrap()); let mut hashes = Vec::with_capacity(count as usize); let mut proof = Vec::with_capacity(count as usize); @@ -882,7 +887,7 @@ mod test { validator_node_size: 0, }; let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); #[allow(clippy::redundant_clone)] let mut block_header2 = block_header.clone(); block_header2.version = 1; @@ -890,7 +895,7 @@ mod test { assert!(extract_aux_merkle_root_from_block(&block).is_ok()); // Try via the API - this will fail because more than one merge mining tag is not allowed - assert!(insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash2, 1, 0).is_err()); + assert!(insert_aux_chain_mr_and_info_into_block(&mut block, hash2, 1, 0).is_err()); // Now bypass the API - this will effectively allow us to insert more than one merge mining tag, // like trying to sneek it in. 
Later on, when we call `verify_header(&block_header)`, it should fail. @@ -997,7 +1002,7 @@ mod test { // Now insert the merge mining tag - this would also clean up the extra field and remove the invalid sub-fields let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); assert!(ExtraField::try_parse(&block.miner_tx.prefix.extra.clone()).is_ok()); // Verify that the merge mining tag is there @@ -1038,7 +1043,7 @@ mod test { validator_node_size: 0, }; let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); let count = 1 + (u16::try_from(block.tx_hashes.len()).unwrap()); let mut hashes = Vec::with_capacity(count as usize); let mut proof = Vec::with_capacity(count as usize); @@ -1171,7 +1176,7 @@ mod test { validator_node_size: 0, }; let hash = block_header.merge_mining_hash(); - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut block, hash, 1, 0).unwrap(); + insert_aux_chain_mr_and_info_into_block(&mut block, hash, 1, 0).unwrap(); let count = 1 + (u16::try_from(block.tx_hashes.len()).unwrap()); let mut hashes = Vec::with_capacity(count as usize); let mut proof = Vec::with_capacity(count as usize); @@ -1248,4 +1253,27 @@ mod test { ); assert_eq!(difficulty.as_u64(), 430603); } + + #[test] + fn test_extra_field_deserialize() { + let bytes = vec![ + 3, 33, 0, 149, 5, 198, 66, 174, 39, 113, 243, 68, 202, 221, 222, 116, 10, 209, 194, 56, 247, 252, 23, 248, + 28, 44, 81, 91, 44, 214, 211, 242, 3, 12, 70, 0, 0, 0, 1, 251, 88, 0, 0, 96, 49, 163, 82, 175, 205, 74, + 138, 126, 250, 226, 106, 10, 255, 139, 49, 41, 168, 110, 203, 150, 252, 208, 234, 140, 2, 17, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let raw_extra_field = RawExtraField(bytes); + let res = ExtraField::try_parse(&raw_extra_field); + assert!(res.is_err()); + let field = res.unwrap_err(); + let mm_tag = SubField::MergeMining( + Some(VarInt(0)), + Hash::from_slice( + hex::decode("9505c642ae2771f344caddde740ad1c238f7fc17f81c2c515b2cd6d3f2030c46") + .unwrap() + .as_slice(), + ), + ); + assert_eq!(field.0[0], mm_tag); + } } diff --git a/base_layer/core/src/proof_of_work/monero_rx/mod.rs b/base_layer/core/src/proof_of_work/monero_rx/mod.rs index 7d29264add..24701323e0 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/mod.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/mod.rs @@ -29,7 +29,7 @@ pub use helpers::{ create_ordered_transaction_hashes_from_block, deserialize_monero_block_from_hex, extract_aux_merkle_root_from_block, - insert_merge_mining_tag_and_aux_chain_merkle_root_into_block, + insert_aux_chain_mr_and_info_into_block, randomx_difficulty, serialize_monero_block_to_hex, verify_header, diff --git a/base_layer/core/src/proof_of_work/randomx_factory.rs b/base_layer/core/src/proof_of_work/randomx_factory.rs index 4f68b1e7df..fb4889d3fa 100644 --- a/base_layer/core/src/proof_of_work/randomx_factory.rs +++ b/base_layer/core/src/proof_of_work/randomx_factory.rs @@ -63,6 +63,7 @@ impl RandomXVMInstance { // light mode. These are not set by RandomX automatically even in fast mode. 
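The `test_extra_field_deserialize` test above leans on a detail of `ExtraField::try_parse`: on malformed input it returns the sub-fields it did manage to parse inside the `Err` variant, which is why `verify_header` can now log a warning and continue with the partial field instead of rejecting the header outright. A sketch of that recover-what-you-can pattern (the `try_parse` below is a toy stand-in, not the monero-rs implementation):

```rust
/// Toy stand-in for a parser that returns its partial result in Err:
/// everything before the first zero byte "parses"; a zero byte aborts.
fn try_parse(raw: &[u8]) -> Result<Vec<u8>, Vec<u8>> {
    match raw.iter().position(|&b| b == 0) {
        None => Ok(raw.to_vec()),
        Some(i) => Err(raw[..i].to_vec()), // partial result travels in Err
    }
}

fn main() {
    // As in verify_header: keep whatever was recovered rather than failing.
    let parsed = try_parse(&[1, 2, 3, 0, 9]).unwrap_or_else(|partial| partial);
    assert_eq!(parsed, vec![1, 2, 3]);
}
```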
Ok(Self { + #[allow(clippy::arc_with_non_send_sync)] instance: Arc::new(RwLock::new(vm)), }) } diff --git a/base_layer/core/src/proof_of_work/sha3x_pow.rs b/base_layer/core/src/proof_of_work/sha3x_pow.rs index 783e642c41..c1953826b2 100644 --- a/base_layer/core/src/proof_of_work/sha3x_pow.rs +++ b/base_layer/core/src/proof_of_work/sha3x_pow.rs @@ -96,6 +96,7 @@ pub mod test { } #[test] + #[cfg(tari_target_network_testnet)] fn validate_max_target() { let mut header = get_header(); header.nonce = 631; diff --git a/base_layer/core/src/proto/block_header.rs b/base_layer/core/src/proto/block_header.rs index deedf75aed..28295d490f 100644 --- a/base_layer/core/src/proto/block_header.rs +++ b/base_layer/core/src/proto/block_header.rs @@ -70,7 +70,7 @@ impl TryFrom for BlockHeader { impl From for proto::BlockHeader { fn from(header: BlockHeader) -> Self { Self { - version: u32::try_from(header.version).unwrap(), + version: u32::from(header.version), height: header.height, prev_hash: header.prev_hash.to_vec(), timestamp: header.timestamp.as_u64(), diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index d60d6cfa03..b14a1128bd 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -19,9 +19,9 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - use std::{ collections::HashMap, + convert::TryFrom, fs, ops::Deref, path::{Path, PathBuf}, @@ -34,8 +34,10 @@ use tari_common_types::{ tari_address::TariAddress, types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; +use tari_mmr::sparse_merkle_tree::{NodeKey, ValueHash}; use tari_storage::lmdb_store::LMDBConfig; use tari_test_utils::paths::create_temporary_data_path; +use tari_utilities::ByteArray; use super::{create_block, mine_to_difficulty}; use crate::{ @@ -277,7 +279,7 @@ impl BlockchainBackend for TempDatabase { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { self.db .as_ref() @@ -390,7 +392,7 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_total_size_stats() } - fn bad_block_exists(&self, block_hash: HashOutput) -> Result { + fn bad_block_exists(&self, block_hash: HashOutput) -> Result<(bool, String), ChainStorageError> { self.db.as_ref().unwrap().bad_block_exists(block_hash) } @@ -425,6 +427,7 @@ impl BlockchainBackend for TempDatabase { pub async fn create_chained_blocks>( blocks: T, genesis_block: Arc, + output_smt: &mut OutputSmt, ) -> (Vec, HashMap>) { let mut block_hashes = HashMap::new(); block_hashes.insert("GB".to_string(), genesis_block); @@ -439,7 +442,7 @@ pub async fn create_chained_blocks>( .unwrap_or_else(|| panic!("Could not find block {}", block_spec.parent)); let name = block_spec.name; let difficulty = block_spec.difficulty; - let (block, _) = create_block( + let (mut block, _) = create_block( &rules, prev_block.block(), block_spec, @@ -449,6 +452,7 @@ pub async fn create_chained_blocks>( None, ) .await; + update_block_and_smt(&mut block, output_smt); let block = mine_block(block, prev_block.accumulated_data(), difficulty); block_names.push(name.to_string()); 
block_hashes.insert(name.to_string(), block); @@ -479,7 +483,8 @@ pub async fn create_main_chain>( .try_into_chain_block() .map(Arc::new) .unwrap(); - let (names, chain) = create_chained_blocks(blocks, genesis_block).await; + let mut smt = db.fetch_tip_smt().unwrap(); + let (names, chain) = create_chained_blocks(blocks, genesis_block, &mut smt).await; names.iter().for_each(|name| { let block = chain.get(name).unwrap(); db.add_block(block.to_arc_block()).unwrap(); @@ -492,8 +497,9 @@ pub async fn create_orphan_chain>( db: &BlockchainDatabase, blocks: T, root_block: Arc, + smt: &mut OutputSmt, ) -> (Vec, HashMap>) { - let (names, chain) = create_chained_blocks(blocks, root_block).await; + let (names, chain) = create_chained_blocks(blocks, root_block, smt).await; let mut txn = DbTransaction::new(); for name in &names { let block = chain.get(name).unwrap().clone(); @@ -504,9 +510,24 @@ pub async fn create_orphan_chain>( (names, chain) } +pub fn update_block_and_smt(block: &mut Block, smt: &mut OutputSmt) { + for output in block.body.outputs() { + let smt_key = NodeKey::try_from(output.commitment.as_bytes()).unwrap(); + let smt_node = ValueHash::try_from(output.smt_hash(block.header.height).as_slice()).unwrap(); + // suppress this error as some unit tests rely on this not being completely correct. + let _result = smt.insert(smt_key, smt_node); + } + for input in block.body.inputs() { + let smt_key = NodeKey::try_from(input.commitment().unwrap().as_bytes()).unwrap(); + smt.delete(&smt_key).unwrap(); + } + let root = FixedHash::try_from(smt.hash().as_slice()).unwrap(); + block.header.output_mr = root; +} + pub struct TestBlockchain { db: BlockchainDatabase, - chain: Vec<(&'static str, Arc)>, + chain: Vec<(&'static str, Arc, OutputSmt)>, rules: ConsensusManager, pub km: MemoryDbKeyManager, script_key_id: TariKeyId, @@ -533,8 +554,9 @@ impl TestBlockchain { wallet_payment_address, range_proof_type: RangeProofType::BulletProofPlus, }; + let smt = blockchain.db.fetch_tip_smt().unwrap(); - blockchain.chain.push(("GB", genesis)); + blockchain.chain.push(("GB", genesis, smt)); blockchain } @@ -611,25 +633,29 @@ impl TestBlockchain { block: Arc, ) -> Result { let result = self.db.add_block(block.to_arc_block())?; - self.chain.push((name, block)); + let smt = self.db.fetch_tip_smt().unwrap(); + self.chain.push((name, block, smt)); Ok(result) } - pub fn get_block_by_name(&self, name: &'static str) -> Option> { - self.chain.iter().find(|(n, _)| *n == name).map(|(_, ch)| ch.clone()) + pub fn get_block_and_smt_by_name(&self, name: &'static str) -> Option<(Arc, OutputSmt)> { + self.chain + .iter() + .find(|(n, _, _)| *n == name) + .map(|(_, ch, smt)| (ch.clone(), smt.clone())) } - pub fn get_tip_block(&self) -> (&'static str, Arc) { + pub fn get_tip_block(&self) -> (&'static str, Arc, OutputSmt) { self.chain.last().cloned().unwrap() } pub async fn create_chained_block(&self, block_spec: BlockSpec) -> (Arc, WalletOutput) { - let parent = self - .get_block_by_name(block_spec.parent) + let (parent, mut parent_smt) = self + .get_block_and_smt_by_name(block_spec.parent) .ok_or_else(|| format!("Parent block not found with name '{}'", block_spec.parent)) .unwrap(); let difficulty = block_spec.difficulty; - let (block, coinbase) = create_block( + let (mut block, coinbase) = create_block( &self.rules, parent.block(), block_spec, @@ -639,13 +665,14 @@ impl TestBlockchain { Some(self.range_proof_type), ) .await; + update_block_and_smt(&mut block, &mut parent_smt); let block = mine_block(block, parent.accumulated_data(), 
difficulty); (block, coinbase) } pub async fn create_unmined_block(&self, block_spec: BlockSpec) -> (Block, WalletOutput) { - let parent = self - .get_block_by_name(block_spec.parent) + let (parent, mut parent_smt) = self + .get_block_and_smt_by_name(block_spec.parent) .ok_or_else(|| format!("Parent block not found with name '{}'", block_spec.parent)) .unwrap(); let (mut block, outputs) = create_block( @@ -658,17 +685,19 @@ impl TestBlockchain { Some(self.range_proof_type), ) .await; + update_block_and_smt(&mut block, &mut parent_smt); block.body.sort(); (block, outputs) } - pub fn mine_block(&self, parent_name: &'static str, block: Block, difficulty: Difficulty) -> Arc { - let parent = self.get_block_by_name(parent_name).unwrap(); + pub fn mine_block(&self, parent_name: &'static str, mut block: Block, difficulty: Difficulty) -> Arc { + let (parent, mut parent_smt) = self.get_block_and_smt_by_name(parent_name).unwrap(); + update_block_and_smt(&mut block, &mut parent_smt); mine_block(block, parent.accumulated_data(), difficulty) } pub async fn create_next_tip(&self, spec: BlockSpec) -> (Arc, WalletOutput) { - let (name, _) = self.get_tip_block(); + let (name, _, _) = self.get_tip_block(); self.create_chained_block(spec.with_parent_block(name)).await } @@ -676,7 +705,7 @@ impl TestBlockchain { &mut self, spec: BlockSpec, ) -> Result<(Arc, WalletOutput), ChainStorageError> { - let (tip, _) = self.get_tip_block(); + let (tip, _, _) = self.get_tip_block(); self.append(spec.with_parent_block(tip)).await } @@ -688,6 +717,6 @@ impl TestBlockchain { } pub fn get_genesis_block(&self) -> Arc { - self.chain.first().map(|(_, block)| block).unwrap().clone() + self.chain.first().map(|(_, block, _)| block).unwrap().clone() } } diff --git a/base_layer/core/src/transactions/aggregated_body.rs b/base_layer/core/src/transactions/aggregated_body.rs index 69b7a75887..47148fb116 100644 --- a/base_layer/core/src/transactions/aggregated_body.rs +++ b/base_layer/core/src/transactions/aggregated_body.rs @@ -351,7 +351,7 @@ impl AggregateBody { /// Lists the number of inputs, outputs, and kernels in the block pub fn to_counts_string(&self) -> String { format!( - "{} input(s), {} output(s), {} kernel(s)", + "input(s): {}, output(s): {}, kernel(s): {}", self.inputs.len(), self.outputs.len(), self.kernels.len() @@ -408,7 +408,7 @@ impl From for AggregateBody { impl Display for AggregateBody { fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { if !self.is_sorted() { - writeln!(fmt, "WARNING: Block body is not sorted.")?; + writeln!(fmt, "WARNING: Body is not sorted.")?; } writeln!(fmt, "--- Transaction Kernels ---")?; for (i, kernel) in self.kernels.iter().enumerate() { diff --git a/base_layer/core/src/transactions/coinbase_builder.rs b/base_layer/core/src/transactions/coinbase_builder.rs index c8ae1811ab..ead4800baa 100644 --- a/base_layer/core/src/transactions/coinbase_builder.rs +++ b/base_layer/core/src/transactions/coinbase_builder.rs @@ -662,7 +662,7 @@ mod test { let block_reward = rules.emission_schedule().block_reward(42) + missing_fee; let builder = CoinbaseBuilder::new(key_manager.clone()); let builder = builder - .with_block_height(4200000) + .with_block_height(4_200_000) .with_fees(1 * uT) .with_spend_key_id(p.spend_key_id.clone()) .with_encryption_key_id(TariKeyId::default()) @@ -729,7 +729,7 @@ mod test { TransactionKeyManagerInterface, TxoStage, }, - transaction_components::{RangeProofType, TransactionKernelVersion}, + transaction_components::{KernelBuilder, RangeProofType, 
TransactionKernelVersion},
    };

    #[tokio::test]
@@ -863,4 +863,134 @@ mod test {
        )
        .unwrap();
    }
+
+    #[tokio::test]
+    #[allow(clippy::too_many_lines)]
+    #[allow(clippy::identity_op)]
+    async fn multi_coinbase_amount() {
+        // We construct two valid txs, each with a single coinbase. We then add a duplicate coinbase UTXO to the one,
+        // and a duplicate coinbase kernel to the other.
+        let (builder, rules, factories, key_manager) = get_builder();
+        let p = TestParams::new(&key_manager).await;
+        // We just want some small amount here.
+        let missing_fee = rules.emission_schedule().block_reward(4200000) + (2 * uT);
+        let wallet_payment_address = TariAddress::default();
+        let builder = builder
+            .with_block_height(42)
+            .with_fees(1 * uT)
+            .with_spend_key_id(p.spend_key_id.clone())
+            .with_encryption_key_id(TariKeyId::default())
+            .with_sender_offset_key_id(p.sender_offset_key_id.clone())
+            .with_script_key_id(p.script_key_id.clone())
+            .with_script(one_sided_payment_script(wallet_payment_address.public_key()))
+            .with_range_proof_type(RangeProofType::RevealedValue);
+        let (tx1, wo1) = builder
+            .build(rules.consensus_constants(0), rules.emission_schedule())
+            .await
+            .unwrap();
+
+        // We calculate a duplicate tx here so that we can have a coinbase with the correct fee amount.
+        let block_reward = rules.emission_schedule().block_reward(42) + missing_fee;
+        let builder = CoinbaseBuilder::new(key_manager.clone());
+        let builder = builder
+            .with_block_height(4200000)
+            .with_fees(1 * uT)
+            .with_spend_key_id(p.spend_key_id.clone())
+            .with_encryption_key_id(TariKeyId::default())
+            .with_sender_offset_key_id(p.sender_offset_key_id)
+            .with_script_key_id(p.script_key_id)
+            .with_script(one_sided_payment_script(wallet_payment_address.public_key()))
+            .with_range_proof_type(RangeProofType::RevealedValue);
+        let (tx2, wo2) = builder
+            .build(rules.consensus_constants(0), rules.emission_schedule())
+            .await
+            .unwrap();
+
+        let coinbase1 = tx1.body.outputs()[0].clone();
+        let coinbase2 = tx2.body.outputs()[0].clone();
+        let mut kernel_1 = tx1.body.kernels()[0].clone();
+        let kernel_2 = tx2.body.kernels()[0].clone();
+        let excess = &kernel_1.excess + &kernel_2.excess;
+        kernel_1.excess = &kernel_1.excess + &kernel_2.excess;
+        kernel_1.excess_sig = &kernel_1.excess_sig + &kernel_2.excess_sig;
+        let mut body1 = AggregateBody::new(Vec::new(), vec![coinbase1, coinbase2], vec![kernel_1.clone()]);
+        body1.sort();
+
+        body1
+            .check_coinbase_output(
+                block_reward,
+                rules.consensus_constants(0).coinbase_min_maturity(),
+                &factories,
+                42,
+            )
+            .unwrap();
+        body1.verify_kernel_signatures().unwrap_err();
+
+        // Let's create a new kernel with a correct signature
+        let (new_nonce1, nonce1) = key_manager
+            .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key())
+            .await
+            .unwrap();
+        let (new_nonce2, nonce2) = key_manager
+            .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key())
+            .await
+            .unwrap();
+        let nonce = &nonce1 + &nonce2;
+        let kernel_message = TransactionKernel::build_kernel_signature_message(
+            &TransactionKernelVersion::get_current_version(),
+            kernel_1.fee,
+            kernel_1.lock_height,
+            &kernel_1.features,
+            &None,
+        );
+
+        let mut kernel_signature = key_manager
+            .get_partial_txo_kernel_signature(
+                &wo1.spending_key_id,
+                &new_nonce1,
+                &nonce,
+                excess.as_public_key(),
+                &TransactionKernelVersion::get_current_version(),
+                &kernel_message,
+                &kernel_1.features,
+                TxoStage::Output,
+            )
+            .await
+            .unwrap();
+        kernel_signature = &kernel_signature +
+            &key_manager
.get_partial_txo_kernel_signature( + &wo2.spending_key_id, + &new_nonce2, + &nonce, + excess.as_public_key(), + &TransactionKernelVersion::get_current_version(), + &kernel_message, + &kernel_1.features, + TxoStage::Output, + ) + .await + .unwrap(); + let kernel_new = KernelBuilder::new() + .with_fee(0.into()) + .with_features(kernel_1.features) + .with_lock_height(kernel_1.lock_height) + .with_excess(&excess) + .with_signature(kernel_signature) + .build() + .unwrap(); + + let mut body2 = AggregateBody::new(Vec::new(), body1.outputs().clone(), vec![kernel_new]); + body2.sort(); + + body2 + .check_coinbase_output( + block_reward, + rules.consensus_constants(0).coinbase_min_maturity(), + &factories, + 42, + ) + .unwrap(); + body2.verify_kernel_signatures().unwrap(); + } } diff --git a/base_layer/core/src/transactions/key_manager/error.rs b/base_layer/core/src/transactions/key_manager/error.rs index ef92873c28..c26a7f618d 100644 --- a/base_layer/core/src/transactions/key_manager/error.rs +++ b/base_layer/core/src/transactions/key_manager/error.rs @@ -20,8 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use serde::{Deserialize, Serialize}; use tari_crypto::signatures::CommitmentAndPublicKeySignatureError; use tari_key_manager::error::KeyManagerError; +use tari_utilities::ByteArrayError; use thiserror::Error; use crate::transactions::transaction_components::TransactionError; @@ -41,3 +43,35 @@ impl From for CoreKeyManagerError { CoreKeyManagerError::CommitmentAndPublicKeySignatureError(err.to_string()) } } + +/// Ledger device errors. +#[derive(Debug, PartialEq, Error, Deserialize, Serialize, Clone, Eq)] +pub enum LedgerDeviceError { + /// HID API error + #[error("HID API error `{0}`")] + HidApi(String), + /// Native HID transport error + #[error("Native HID transport error `{0}`")] + NativeTransport(String), + /// Ledger application not started + #[error("Ledger application not started")] + ApplicationNotStarted, + /// Ledger application instruction error + #[error("Ledger application instruction error `{0}`")] + Instruction(String), + /// Ledger application processing error + #[error("Processing error `{0}`")] + Processing(String), + /// Conversion error to or from ledger + #[error("Conversion failed: {0}")] + ByteArrayError(String), + /// Not yet supported + #[error("Ledger is not fully supported")] + NotSupported, +} + +impl From for LedgerDeviceError { + fn from(e: ByteArrayError) -> Self { + LedgerDeviceError::ByteArrayError(e.to_string()) + } +} diff --git a/base_layer/core/src/transactions/key_manager/initializer.rs b/base_layer/core/src/transactions/key_manager/initializer.rs index 34a0434156..55c6824cbe 100644 --- a/base_layer/core/src/transactions/key_manager/initializer.rs +++ b/base_layer/core/src/transactions/key_manager/initializer.rs @@ -28,7 +28,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
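
The hunks that follow thread a new `WalletType` parameter through the key manager construction path (initializer → wrapper → inner), selecting between plain software signing and the Ledger-backed path added in `inner.rs`. A minimal sketch of what construction now looks like for a caller, assuming the crate paths visible in this diff (the `make_initializer` helper itself is hypothetical, not part of the change):

    use tari_common_types::{types::PublicKey, wallet_types::WalletType};
    use tari_core::transactions::{key_manager::TransactionKeyManagerInitializer, CryptoFactories};
    use tari_key_manager::{
        cipher_seed::CipherSeed,
        key_manager_service::storage::database::KeyManagerBackend,
    };

    // Hypothetical helper: the only new piece is the trailing WalletType argument.
    // WalletType::Software keeps the previous behaviour; WalletType::Ledger(account)
    // routes script-key operations through the ledger transport set up in inner.rs.
    fn make_initializer<T>(
        backend: T,
        seed: CipherSeed,
        factories: CryptoFactories,
    ) -> TransactionKeyManagerInitializer<T>
    where
        T: KeyManagerBackend<PublicKey> + 'static,
    {
        TransactionKeyManagerInitializer::new(backend, seed, factories, WalletType::Software)
    }
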
-use tari_common_types::types::PublicKey; +use tari_common_types::{types::PublicKey, wallet_types::WalletType}; use tari_key_manager::{ cipher_seed::CipherSeed, key_manager_service::storage::database::{KeyManagerBackend, KeyManagerDatabase}, @@ -44,17 +44,24 @@ where T: KeyManagerBackend backend: Option, master_seed: CipherSeed, crypto_factories: CryptoFactories, + wallet_type: WalletType, } impl TransactionKeyManagerInitializer where T: KeyManagerBackend + 'static { /// Creates a new [TransactionKeyManagerInitializer] from the provided [KeyManagerBackend] and [CipherSeed] - pub fn new(backend: T, master_seed: CipherSeed, crypto_factories: CryptoFactories) -> Self { + pub fn new( + backend: T, + master_seed: CipherSeed, + crypto_factories: CryptoFactories, + wallet_type: WalletType, + ) -> Self { Self { backend: Some(backend), master_seed, crypto_factories, + wallet_type, } } } @@ -73,6 +80,7 @@ where T: KeyManagerBackend + 'static self.master_seed.clone(), KeyManagerDatabase::new(backend), self.crypto_factories.clone(), + self.wallet_type, )?; context.register_handle(key_manager); diff --git a/base_layer/core/src/transactions/key_manager/inner.rs b/base_layer/core/src/transactions/key_manager/inner.rs index 85ad670052..94bbe955c2 100644 --- a/base_layer/core/src/transactions/key_manager/inner.rs +++ b/base_layer/core/src/transactions/key_manager/inner.rs @@ -19,14 +19,25 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#[cfg(feature = "ledger")] +use std::sync::{Arc, Mutex}; use std::{collections::HashMap, ops::Shl}; use blake2::Blake2b; use digest::consts::U64; +#[cfg(feature = "ledger")] +use ledger_transport::APDUCommand; +#[cfg(feature = "ledger")] +use ledger_transport_hid::TransportNativeHID; use log::*; +#[cfg(feature = "ledger")] +use once_cell::sync::Lazy; use rand::rngs::OsRng; use strum::IntoEnumIterator; -use tari_common_types::types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey, RangeProof, Signature}; +use tari_common_types::{ + types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey, RangeProof, Signature}, + wallet_types::WalletType, +}; use tari_comms::types::CommsDHKE; use tari_crypto::{ commitment::{ExtensionDegree, HomomorphicCommitmentFactory}, @@ -54,33 +65,32 @@ use tari_key_manager::{ use tari_utilities::{hex::Hex, ByteArray}; use tokio::sync::RwLock; -use crate::{ - one_sided::diffie_hellman_stealth_domain_hasher, - transactions::{ - transaction_components::{KernelFeatures, TransactionError, TransactionInput, TransactionInputVersion}, - CryptoFactories, - }, -}; - const LOG_TARGET: &str = "key_manager::key_manager_service"; const KEY_MANAGER_MAX_SEARCH_DEPTH: u64 = 1_000_000; use crate::{ common::ConfidentialOutputHasher, + one_sided::diffie_hellman_stealth_domain_hasher, transactions::{ key_manager::{ interface::{TransactionKeyManagerBranch, TxoStage}, + LedgerDeviceError, TariKeyId, }, tari_amount::MicroMinotari, transaction_components::{ EncryptedData, + KernelFeatures, RangeProofType, + TransactionError, + TransactionInput, + TransactionInputVersion, TransactionKernel, TransactionKernelVersion, TransactionOutput, TransactionOutputVersion, }, + CryptoFactories, }, }; @@ -95,8 +105,12 @@ pub struct TransactionKeyManagerInner { db: KeyManagerDatabase, master_seed: CipherSeed, 
crypto_factories: CryptoFactories,
+    wallet_type: WalletType,
 }

+#[cfg(feature = "ledger")]
+pub static TRANSPORT: Lazy>>> = Lazy::new(|| Arc::new(Mutex::new(None)));
+
 impl TransactionKeyManagerInner
 where TBackend: KeyManagerBackend + 'static
 {
@@ -108,12 +122,14 @@ where TBackend: KeyManagerBackend + 'static
         master_seed: CipherSeed,
         db: KeyManagerDatabase,
         crypto_factories: CryptoFactories,
+        wallet_type: WalletType,
     ) -> Result {
         let mut km = TransactionKeyManagerInner {
             key_managers: HashMap::new(),
             db,
             master_seed,
             crypto_factories,
+            wallet_type,
         };
         km.add_standard_core_branches()?;
         Ok(km)
@@ -429,6 +445,46 @@ where TBackend: KeyManagerBackend + 'static
     // Transaction input section (transactions > transaction_components > transaction_input)
     // -----------------------------------------------------------------------------------------------------------------

+    pub async fn get_script_private_key(&self, script_key_id: &TariKeyId) -> Result {
+        match self.wallet_type {
+            WalletType::Software => self.get_private_key(script_key_id).await.map_err(|e| e.into()),
+            WalletType::Ledger(_account) => {
+                #[cfg(not(feature = "ledger"))]
+                return Err(TransactionError::LedgerDeviceError(LedgerDeviceError::NotSupported));
+
+                #[cfg(feature = "ledger")]
+                {
+                    let data = script_key_id.managed_index().expect("an index").to_le_bytes().to_vec();
+                    let command = APDUCommand {
+                        cla: 0x80,
+                        ins: 0x02, // GetPrivateKey - see `./applications/mp_ledger/src/main.rs/Instruction`
+                        p1: 0x00,
+                        p2: 0x00,
+                        data,
+                    };
+                    let binding = TRANSPORT.lock().expect("lock exists");
+                    let transport = binding.as_ref().expect("transport exists");
+                    match transport.exchange(&command) {
+                        Ok(result) => {
+                            if result.data().len() < 33 {
+                                return Err(LedgerDeviceError::Processing(format!(
+                                    "'get_private_key' insufficient data - expected 33 got {} bytes ({:?})",
+                                    result.data().len(),
+                                    result
+                                ))
+                                .into());
+                            }
+                            PrivateKey::from_canonical_bytes(&result.data()[1..33])
+                                .map_err(|e| TransactionError::InvalidSignatureError(e.to_string()))
+                        },
+                        Err(e) => Err(LedgerDeviceError::Instruction(format!("GetPrivateKey: {}", e)).into()),
+                    }
+                }
+                // end script private key
+            },
+        }
+    }
+
     pub async fn get_script_signature(
         &self,
         script_key_id: &TariKeyId,
@@ -443,8 +499,8 @@ where TBackend: KeyManagerBackend + 'static
         let ephemeral_commitment = self.crypto_factories.commitment.commit(&r_x, &r_a);
         let ephemeral_pubkey = PublicKey::from_secret_key(&r_y);
         let commitment = self.get_commitment(spend_key_id, value).await?;
-        let script_private_key = self.get_private_key(script_key_id).await?;
         let spend_private_key = self.get_private_key(spend_key_id).await?;
+        let script_private_key = self.get_script_private_key(script_key_id).await?;
         let challenge = TransactionInput::finalize_script_signature_challenge(
             txi_version,
diff --git a/base_layer/core/src/transactions/key_manager/memory_db_key_manager.rs b/base_layer/core/src/transactions/key_manager/memory_db_key_manager.rs
index 35e6776036..d5c35fe2a6 100644
--- a/base_layer/core/src/transactions/key_manager/memory_db_key_manager.rs
+++ b/base_layer/core/src/transactions/key_manager/memory_db_key_manager.rs
@@ -25,6 +25,7 @@ use std::{iter, mem::size_of};
 use chacha20poly1305::{Key, KeyInit, XChaCha20Poly1305};
 use rand::{distributions::Alphanumeric, rngs::OsRng, Rng, RngCore};
 use tari_common_sqlite::connection::{DbConnection, DbConnectionUrl};
+use tari_common_types::wallet_types::WalletType;
 use tari_key_manager::{
     cipher_seed::CipherSeed,
key_manager_service::storage::{database::KeyManagerDatabase, sqlite_db::KeyManagerSqliteDatabase}, @@ -50,11 +51,13 @@ pub fn create_memory_db_key_manager_with_range_proof_size(size: usize) -> Memory let key_ga = Key::from_slice(&key); let db_cipher = XChaCha20Poly1305::new(key_ga); let factory = CryptoFactories::new(size); + let wallet_type = WalletType::Software; TransactionKeyManagerWrapper::>::new( cipher, KeyManagerDatabase::new(KeyManagerSqliteDatabase::init(connection, db_cipher)), factory, + wallet_type, ) .unwrap() } diff --git a/base_layer/core/src/transactions/key_manager/mod.rs b/base_layer/core/src/transactions/key_manager/mod.rs index 2d803f06f0..ec8033b7df 100644 --- a/base_layer/core/src/transactions/key_manager/mod.rs +++ b/base_layer/core/src/transactions/key_manager/mod.rs @@ -46,4 +46,4 @@ pub use memory_db_key_manager::{ }; mod error; -pub use error::CoreKeyManagerError; +pub use error::{CoreKeyManagerError, LedgerDeviceError}; diff --git a/base_layer/core/src/transactions/key_manager/wrapper.rs b/base_layer/core/src/transactions/key_manager/wrapper.rs index 2823c55db2..9e316892e0 100644 --- a/base_layer/core/src/transactions/key_manager/wrapper.rs +++ b/base_layer/core/src/transactions/key_manager/wrapper.rs @@ -24,7 +24,10 @@ use std::sync::Arc; use blake2::Blake2b; use digest::consts::U64; -use tari_common_types::types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey, RangeProof, Signature}; +use tari_common_types::{ + types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey, RangeProof, Signature}, + wallet_types::WalletType, +}; use tari_comms::types::CommsDHKE; use tari_crypto::{hashing::DomainSeparatedHash, ristretto::RistrettoComSig}; use tari_key_manager::{ @@ -80,12 +83,14 @@ where TBackend: KeyManagerBackend + 'static master_seed: CipherSeed, db: KeyManagerDatabase, crypto_factories: CryptoFactories, + wallet_type: WalletType, ) -> Result { Ok(TransactionKeyManagerWrapper { transaction_key_manager_inner: Arc::new(RwLock::new(TransactionKeyManagerInner::new( master_seed, db, crypto_factories, + wallet_type, )?)), }) } diff --git a/base_layer/core/src/transactions/mod.rs b/base_layer/core/src/transactions/mod.rs index 1dab9c0911..74078b0f38 100644 --- a/base_layer/core/src/transactions/mod.rs +++ b/base_layer/core/src/transactions/mod.rs @@ -6,7 +6,6 @@ pub mod aggregated_body; mod crypto_factories; pub use crypto_factories::CryptoFactories; -use tari_crypto::hash_domain; mod coinbase_builder; pub use coinbase_builder::{ @@ -33,7 +32,3 @@ pub mod key_manager; #[macro_use] #[cfg(feature = "base_node")] pub mod test_helpers; - -// Hash domain for all transaction-related hashes, including the script signature challenge, transaction hash and kernel -// signature challenge -hash_domain!(TransactionHashDomain, "com.tari.base_layer.core.transactions", 0); diff --git a/base_layer/core/src/transactions/tari_amount.rs b/base_layer/core/src/transactions/tari_amount.rs index f679664e81..4110f016fb 100644 --- a/base_layer/core/src/transactions/tari_amount.rs +++ b/base_layer/core/src/transactions/tari_amount.rs @@ -148,6 +148,11 @@ impl MicroMinotari { self.0 } + #[inline] + pub fn as_u128(&self) -> u128 { + u128::from(self.0) + } + pub fn to_currency_string(&self, sep: char) -> String { format!("{} µT", format_currency(&self.as_u64().to_string(), sep)) } @@ -353,7 +358,7 @@ impl FromStr for Minotari { impl Display for Minotari { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { - let d1 = Decimal::try_from(self.0.as_u64()).expect("will succeed"); + let 
d1 = Decimal::from(self.0.as_u64()); let d2 = Decimal::try_from(1_000_000f64).expect("will succeed"); let precision = f.precision().unwrap_or(6); write!(f, "{1:.*} T", precision, d1 / d2) diff --git a/base_layer/core/src/transactions/test_helpers.rs b/base_layer/core/src/transactions/test_helpers.rs index 1716225a5f..586f6c7e3b 100644 --- a/base_layer/core/src/transactions/test_helpers.rs +++ b/base_layer/core/src/transactions/test_helpers.rs @@ -72,6 +72,7 @@ pub async fn create_test_input< amount: MicroMinotari, maturity: u64, key_manager: &TransactionKeyManagerWrapper>, + coinbase_extra: Vec, ) -> WalletOutput { let params = TestParams::new(key_manager).await; params @@ -80,6 +81,7 @@ pub async fn create_test_input< value: amount, features: OutputFeatures { maturity, + coinbase_extra, ..Default::default() }, ..Default::default() @@ -354,12 +356,14 @@ pub async fn create_coinbase_wallet_output( .unwrap() } -pub async fn create_wallet_output_with_data( +pub async fn create_wallet_output_with_data< + TKeyManagerDbConnection: PooledDbConnection + Clone + 'static, +>( script: TariScript, output_features: OutputFeatures, test_params: &TestParams, value: MicroMinotari, - key_manager: &MemoryDbKeyManager, + key_manager: &TransactionKeyManagerWrapper>, ) -> Result { test_params .create_output( diff --git a/base_layer/core/src/transactions/transaction_components/encrypted_data.rs b/base_layer/core/src/transactions/transaction_components/encrypted_data.rs index 1c29b35a7d..273fd14740 100644 --- a/base_layer/core/src/transactions/transaction_components/encrypted_data.rs +++ b/base_layer/core/src/transactions/transaction_components/encrypted_data.rs @@ -40,7 +40,7 @@ use digest::{consts::U32, generic_array::GenericArray, FixedOutput}; use serde::{Deserialize, Serialize}; use tari_common_types::types::{Commitment, PrivateKey}; use tari_crypto::{hashing::DomainSeparatedHasher, keys::SecretKey}; -use tari_hash_domains::TransactionSecureNonceKdfDomain; +use tari_hashing::TransactionSecureNonceKdfDomain; use tari_utilities::{ hex::{from_hex, to_hex, Hex, HexError}, safe_array::SafeArray, diff --git a/base_layer/core/src/transactions/transaction_components/error.rs b/base_layer/core/src/transactions/transaction_components/error.rs index f7bf4d72a4..ab038bffa5 100644 --- a/base_layer/core/src/transactions/transaction_components/error.rs +++ b/base_layer/core/src/transactions/transaction_components/error.rs @@ -32,7 +32,7 @@ use tari_key_manager::key_manager_service::KeyManagerServiceError; use tari_script::ScriptError; use thiserror::Error; -use crate::transactions::transaction_components::EncryptedDataError; +use crate::transactions::{key_manager::LedgerDeviceError, transaction_components::EncryptedDataError}; //---------------------------------------- TransactionError ----------------------------------------------------// #[derive(Clone, Debug, PartialEq, Error, Deserialize, Serialize, Eq)] @@ -73,6 +73,10 @@ pub enum TransactionError { KeyManagerError(String), #[error("EncryptedData error: {0}")] EncryptedDataError(String), + #[error("Ledger device error: {0}")] + LedgerDeviceError(#[from] LedgerDeviceError), + #[error("Transaction has a zero weight, not possible")] + ZeroWeight, } impl From for TransactionError { diff --git a/base_layer/core/src/transactions/transaction_components/mod.rs b/base_layer/core/src/transactions/transaction_components/mod.rs index 9011bb0185..6b2eb0eb5b 100644 --- a/base_layer/core/src/transactions/transaction_components/mod.rs +++ 
b/base_layer/core/src/transactions/transaction_components/mod.rs @@ -90,8 +90,10 @@ hidden_type!(EncryptedDataKey, SafeArray); //---------------------------------------- Crate functions ----------------------------------------------------// +use tari_hashing::TransactionHashDomain; + use super::tari_amount::MicroMinotari; -use crate::{consensus::DomainSeparatedConsensusHasher, covenants::Covenant, transactions::TransactionHashDomain}; +use crate::{consensus::DomainSeparatedConsensusHasher, covenants::Covenant}; /// Implement the canonical hashing function for TransactionOutput and WalletOutput for use in /// ordering as well as for the output hash calculation for TransactionInput. diff --git a/base_layer/core/src/transactions/transaction_components/output_features.rs b/base_layer/core/src/transactions/transaction_components/output_features.rs index 8b685a6b7f..0db451450b 100644 --- a/base_layer/core/src/transactions/transaction_components/output_features.rs +++ b/base_layer/core/src/transactions/transaction_components/output_features.rs @@ -59,7 +59,7 @@ pub struct OutputFeatures { /// transaction. This is enforced in [AggregatedBody::check_output_features]. /// /// For coinbase outputs, the maximum length of this field is determined by the consensus constant, - /// `coinbase_output_features_metadata_max_length`. + /// `coinbase_output_features_extra_max_length`. pub coinbase_extra: Vec, /// Features that are specific to a side chain pub sidechain_feature: Option, diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs b/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs index 650807ad56..60c594b82d 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs @@ -29,12 +29,10 @@ use tari_common_types::{ epoch::VnEpoch, types::{FixedHash, PublicKey, Signature}, }; +use tari_hashing::TransactionHashDomain; use tari_utilities::ByteArray; -use crate::{ - consensus::DomainSeparatedConsensusHasher, - transactions::{transaction_components::ValidatorNodeSignature, TransactionHashDomain}, -}; +use crate::{consensus::DomainSeparatedConsensusHasher, transactions::transaction_components::ValidatorNodeSignature}; #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize, BorshSerialize, BorshDeserialize)] pub struct ValidatorNodeRegistration { @@ -90,6 +88,7 @@ fn generate_shard_key(public_key: &PublicKey, entropy: &[u8; 32]) -> [u8; 32] { .chain(public_key) .chain(entropy) .finalize() + .into() } #[cfg(test)] diff --git a/base_layer/core/src/transactions/transaction_components/test.rs b/base_layer/core/src/transactions/transaction_components/test.rs index d68ca986a7..067ca86aef 100644 --- a/base_layer/core/src/transactions/transaction_components/test.rs +++ b/base_layer/core/src/transactions/transaction_components/test.rs @@ -280,6 +280,11 @@ async fn sender_signature_verification() { #[test] fn kernel_hash() { + #[cfg(tari_target_network_mainnet)] + if let Network::MainNet = Network::get_current_or_user_setting_or_default() { + eprintln!("This test is configured for stagenet only"); + return; + } let s = PrivateKey::from_hex("6c6eebc5a9c02e1f3c16a69ba4331f9f63d0718401dea10adc4f9d3b879a2c09").unwrap(); let r = PublicKey::from_hex("28e8efe4e5576aac931d358d0f6ace43c55fa9d4186d1d259d1436caa876d43b").unwrap(); let sig = Signature::new(r, s); @@ 
-291,6 +296,17 @@ fn kernel_hash() { .with_lock_height(500) .build() .unwrap(); + #[cfg(tari_target_network_nextnet)] + assert_eq!( + &k.hash().to_hex(), + "c1f6174935d08358809fcf244a9a1edb078b74a1ae18ab4c7dd501b0294a2a94" + ); + #[cfg(tari_target_network_mainnet)] + assert_eq!( + &k.hash().to_hex(), + "b94992cb59695ebad3786e9f51a220e91c627f8b38f51bcf6c87297325d1b410" + ); + #[cfg(tari_target_network_testnet)] assert_eq!( &k.hash().to_hex(), "38b03d013f941e86c027969fbbc190ca2a28fa2d7ac075d50dbfb6232deee646" @@ -310,6 +326,24 @@ fn kernel_metadata() { .with_lock_height(500) .build() .unwrap(); + #[cfg(tari_target_network_mainnet)] + match Network::get_current_or_user_setting_or_default() { + Network::MainNet => { + eprintln!("This test is configured for stagenet only"); + }, + Network::StageNet => assert_eq!( + &k.hash().to_hex(), + "75a357c2769098b19a6aedc7e46f6be305f4f1a1831556cd380b0b0f20bfdf12" + ), + n => panic!("Only mainnet networks should target mainnet. Network was {}", n), + } + + #[cfg(tari_target_network_nextnet)] + assert_eq!( + &k.hash().to_hex(), + "22e39392dfeae9653c73437880be71e99f4b8a2b23289d54f57b8931deebfeed" + ); + #[cfg(tari_target_network_testnet)] assert_eq!( &k.hash().to_hex(), "ebc852fbac798c25ce497b416f69ec11a97e186aacaa10e2bb4ca5f5a0f197f2" diff --git a/base_layer/core/src/transactions/transaction_components/transaction_input.rs b/base_layer/core/src/transactions/transaction_components/transaction_input.rs index 64c56d63e9..8186dbabd6 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_input.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_input.rs @@ -35,6 +35,7 @@ use rand::rngs::OsRng; use serde::{Deserialize, Serialize}; use tari_common_types::types::{ComAndPubSignature, Commitment, CommitmentFactory, FixedHash, HashOutput, PublicKey}; use tari_crypto::tari_utilities::hex::Hex; +use tari_hashing::TransactionHashDomain; use tari_script::{ExecutionStack, ScriptContext, StackItem, TariScript}; use super::{TransactionInputVersion, TransactionOutputVersion}; @@ -50,7 +51,6 @@ use crate::{ OutputFeatures, TransactionError, }, - TransactionHashDomain, }, }; @@ -209,6 +209,7 @@ impl TransactionInput { .chain(commitment) .chain(&message) .finalize() + .into() }, } } @@ -227,6 +228,7 @@ impl TransactionInput { .chain(script) .chain(input_data) .finalize() + .into() }, } } @@ -553,7 +555,7 @@ impl Eq for TransactionInput {} impl PartialOrd for TransactionInput { fn partial_cmp(&self, other: &Self) -> Option { - self.output_hash().partial_cmp(&other.output_hash()) + Some(self.cmp(other)) } } diff --git a/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs b/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs index 2db05dc26e..85d60c166f 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs @@ -33,6 +33,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use digest::consts::{U32, U64}; use serde::{Deserialize, Serialize}; use tari_common_types::types::{Commitment, FixedHash, PublicKey, Signature}; +use tari_hashing::TransactionHashDomain; use tari_utilities::{hex::Hex, message_format::MessageFormat}; use super::TransactionKernelVersion; @@ -42,7 +43,6 @@ use crate::{ tari_amount::MicroMinotari, transaction_components::{KernelFeatures, TransactionError}, transaction_protocol::TransactionMetadata, - TransactionHashDomain, }, }; @@ -51,7 +51,7 @@ 
use crate::{ /// [Mimblewimble TLU post](https://tlu.tarilabs.com/protocols/mimblewimble-1/sources/PITCHME.link.html?highlight=mimblewimble#mimblewimble). /// The kernel also tracks other transaction metadata, such as the lock height for the transaction (i.e. the earliest /// this transaction can be mined) and the transaction fee, in cleartext. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize, Default)] pub struct TransactionKernel { pub version: TransactionKernelVersion, /// Options for a kernel's structure or use @@ -211,7 +211,7 @@ impl TransactionKernel { .chain(total_excess) .chain(message); match version { - TransactionKernelVersion::V0 => common.finalize(), + TransactionKernelVersion::V0 => common.finalize().into(), } } @@ -231,7 +231,7 @@ impl TransactionKernel { .chain(features) .chain(burn_commitment); match version { - TransactionKernelVersion::V0 => common.finalize(), + TransactionKernelVersion::V0 => common.finalize().into(), } } } @@ -258,7 +258,7 @@ impl Display for TransactionKernel { impl PartialOrd for TransactionKernel { fn partial_cmp(&self, other: &Self) -> Option { - self.excess_sig.partial_cmp(&other.excess_sig) + Some(self.cmp(other)) } } diff --git a/base_layer/core/src/transactions/transaction_components/transaction_output.rs b/base_layer/core/src/transactions/transaction_components/transaction_output.rs index 9211af456a..49fd8fa53d 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_output.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_output.rs @@ -49,8 +49,9 @@ use tari_crypto::{ extended_range_proof::{ExtendedRangeProofService, Statement}, keys::SecretKey, ristretto::bulletproofs_plus::RistrettoAggregatedPublicStatement, - tari_utilities::{hex::Hex, ByteArray}, + tari_utilities::hex::Hex, }; +use tari_hashing::TransactionHashDomain; use tari_script::TariScript; use super::TransactionOutputVersion; @@ -70,7 +71,6 @@ use crate::{ TransactionInput, WalletOutput, }, - TransactionHashDomain, }, }; @@ -429,7 +429,7 @@ impl TransactionOutput { .chain(commitment) .chain(&message); match version { - TransactionOutputVersion::V0 | TransactionOutputVersion::V1 => common.finalize(), + TransactionOutputVersion::V0 | TransactionOutputVersion::V1 => common.finalize().into(), } } @@ -464,7 +464,7 @@ impl TransactionOutput { .chain(encrypted_data) .chain(minimum_value_promise); match version { - TransactionOutputVersion::V0 | TransactionOutputVersion::V1 => common.finalize(), + TransactionOutputVersion::V0 | TransactionOutputVersion::V1 => common.finalize().into(), } } @@ -515,7 +515,7 @@ impl Display for TransactionOutput { impl PartialOrd for TransactionOutput { fn partial_cmp(&self, other: &Self) -> Option { - self.commitment.partial_cmp(&other.commitment) + Some(self.cmp(other)) } } @@ -545,29 +545,11 @@ pub fn batch_verify_range_proofs( minimum_value_promise: output.minimum_value_promise.into(), }], }); - proofs.push(output.proof_result()?.to_vec().clone()); - } - if let Err(err_1) = prover.verify_batch(proofs.iter().collect(), statements.iter().collect()) { - for output in &bulletproof_plus_proofs { - if let Err(err_2) = output.verify_range_proof(prover) { - return Err(RangeProofError::InvalidRangeProof { - reason: format!( - "commitment {}, minimum_value_promise {}, proof {} ({:?})", - output.commitment.to_hex(), - output.minimum_value_promise, - 
output.proof_hex_display(false), - err_2, - ), - }); - } - } - Err(RangeProofError::InvalidRangeProof { - reason: format!( - "Batch verification failed, but individual verification passed - {:?}", - err_1 - ), - })? + proofs.push(output.proof_result()?.as_vec()); } + + // Attempt to verify the range proofs in a batch + prover.verify_batch(proofs, statements.iter().collect())?; } let revealed_value_proofs = outputs diff --git a/base_layer/core/src/transactions/transaction_components/unblinded_output.rs b/base_layer/core/src/transactions/transaction_components/unblinded_output.rs index 463ff285d8..b3c2b0650c 100644 --- a/base_layer/core/src/transactions/transaction_components/unblinded_output.rs +++ b/base_layer/core/src/transactions/transaction_components/unblinded_output.rs @@ -191,7 +191,7 @@ impl PartialEq for UnblindedOutput { impl PartialOrd for UnblindedOutput { fn partial_cmp(&self, other: &Self) -> Option { - self.value.partial_cmp(&other.value) + Some(self.cmp(other)) } } diff --git a/base_layer/core/src/transactions/transaction_components/wallet_output.rs b/base_layer/core/src/transactions/transaction_components/wallet_output.rs index f620b4709c..955e4178ae 100644 --- a/base_layer/core/src/transactions/transaction_components/wallet_output.rs +++ b/base_layer/core/src/transactions/transaction_components/wallet_output.rs @@ -332,7 +332,7 @@ impl PartialEq for WalletOutput { impl PartialOrd for WalletOutput { fn partial_cmp(&self, other: &Self) -> Option { - self.value.partial_cmp(&other.value) + Some(self.cmp(other)) } } diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index 776fe40e28..2e6ae88294 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -1043,7 +1043,7 @@ mod test { let key_manager = create_memory_db_key_manager(); let p1 = TestParams::new(&key_manager).await; let p2 = TestParams::new(&key_manager).await; - let input = create_test_input(MicroMinotari(1200), 0, &key_manager).await; + let input = create_test_input(MicroMinotari(1200), 0, &key_manager, vec![]).await; let mut builder = SenderTransactionProtocol::builder(create_consensus_constants(0), key_manager.clone()); let script = TariScript::default(); let output_features = OutputFeatures::default(); @@ -1106,7 +1106,7 @@ mod test { let a_change_key = TestParams::new(&key_manager).await; // Bob's parameters let bob_key = TestParams::new(&key_manager).await; - let input = create_test_input(MicroMinotari(1200), 0, &key_manager).await; + let input = create_test_input(MicroMinotari(1200), 0, &key_manager, vec![]).await; let utxo = input.to_transaction_input(&key_manager).await.unwrap(); let script = script!(Nop); let consensus_constants = create_consensus_constants(0); @@ -1213,7 +1213,7 @@ mod test { let alice_key = TestParams::new(&key_manager).await; // Bob's parameters let bob_key = TestParams::new(&key_manager).await; - let input = create_test_input(MicroMinotari(25000), 0, &key_manager).await; + let input = create_test_input(MicroMinotari(25000), 0, &key_manager, vec![]).await; let consensus_constants = create_consensus_constants(0); let mut builder = SenderTransactionProtocol::builder(consensus_constants.clone(), key_manager.clone()); let script = script!(Nop); @@ -1324,9 +1324,9 @@ mod test { let factories = CryptoFactories::default(); // Bob's parameters let bob_key = TestParams::new(&key_manager).await; - let input = 
create_test_input(MicroMinotari(10000), 0, &key_manager).await; - let input2 = create_test_input(MicroMinotari(2000), 0, &key_manager).await; - let input3 = create_test_input(MicroMinotari(15000), 0, &key_manager).await; + let input = create_test_input(MicroMinotari(10000), 0, &key_manager, vec![]).await; + let input2 = create_test_input(MicroMinotari(2000), 0, &key_manager, vec![]).await; + let input3 = create_test_input(MicroMinotari(15000), 0, &key_manager, vec![]).await; let consensus_constants = create_consensus_constants(0); let mut builder = SenderTransactionProtocol::builder(consensus_constants.clone(), key_manager.clone()); let script = script!(Nop); @@ -1431,7 +1431,7 @@ mod test { // Alice's parameters let key_manager = create_memory_db_key_manager(); let (utxo_amount, fee_per_gram, amount) = (MicroMinotari(2500), MicroMinotari(10), MicroMinotari(500)); - let input = create_test_input(utxo_amount, 0, &key_manager).await; + let input = create_test_input(utxo_amount, 0, &key_manager, vec![]).await; let script = script!(Nop); let mut builder = SenderTransactionProtocol::builder(create_consensus_constants(0), key_manager.clone()); let change = TestParams::new(&key_manager).await; @@ -1469,7 +1469,7 @@ mod test { // Alice's parameters let key_manager = create_memory_db_key_manager(); let (utxo_amount, fee_per_gram, amount) = (MicroMinotari(2500), MicroMinotari(10), MicroMinotari(500)); - let input = create_test_input(utxo_amount, 0, &key_manager).await; + let input = create_test_input(utxo_amount, 0, &key_manager, vec![]).await; let script = script!(Nop); let mut builder = SenderTransactionProtocol::builder(create_consensus_constants(0), key_manager.clone()); let change = TestParams::new(&key_manager).await; @@ -1511,7 +1511,7 @@ mod test { // Bob's parameters let bob_test_params = TestParams::new(&key_manager_bob).await; let alice_value = MicroMinotari(25000); - let input = create_test_input(alice_value, 0, &key_manager_alice).await; + let input = create_test_input(alice_value, 0, &key_manager_alice, vec![]).await; let script = script!(Nop); let consensus_constants = create_consensus_constants(0); diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index 4bdb2e36df..a96691dd75 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -298,15 +298,15 @@ where KM: TransactionKeyManagerInterface .inputs .iter() .map(|i| i.output.value) - .fold(Ok(MicroMinotari::zero()), |acc, x| { - acc?.checked_add(x).ok_or("Total inputs being spent amount overflow") + .try_fold(MicroMinotari::zero(), |acc, x| { + acc.checked_add(x).ok_or("Total inputs being spent amount overflow") })?; let total_to_self = self .sender_custom_outputs .iter() .map(|o| o.output.value) - .fold(Ok(MicroMinotari::zero()), |acc, x| { - acc?.checked_add(x).ok_or("Total outputs to self amount overflow") + .try_fold(MicroMinotari::zero(), |acc, x| { + acc.checked_add(x).ok_or("Total outputs to self amount overflow") })?; let total_amount = match &self.recipient { Some(data) => data.amount, @@ -343,8 +343,8 @@ where KM: TransactionKeyManagerInterface // Subtract with a check on going negative let total_input_value = [total_to_self, total_amount, fee_without_change] .iter() - .fold(Ok(MicroMinotari::zero()), |acc, x| { - acc?.checked_add(x).ok_or("Total input value overflow") + 
.try_fold(MicroMinotari::zero(), |acc, x| { + acc.checked_add(x).ok_or("Total input value overflow") })?; let change_amount = total_being_spent.checked_sub(total_input_value); match change_amount { @@ -502,7 +502,7 @@ where KM: TransactionKeyManagerInterface return self.build_err("Fee is less than the minimum"); } - let change_output_pair = match { change_output } { + let change_output_pair = match change_output { Some((output, sender_offset_key_id)) => { if self.sender_custom_outputs.len() >= MAX_TRANSACTION_OUTPUTS { return self.build_err("Too many outputs in transaction"); @@ -691,7 +691,7 @@ mod test { // Create some inputs let key_manager = create_memory_db_key_manager(); let p = TestParams::new(&key_manager).await; - let input = create_test_input(MicroMinotari(5000), 0, &key_manager).await; + let input = create_test_input(MicroMinotari(5000), 0, &key_manager, vec![]).await; let constants = create_consensus_constants(0); let expected_fee = Fee::from(*constants.transaction_weight_params()).calculate( MicroMinotari(4), @@ -755,6 +755,7 @@ mod test { 2000 * uT + tx_fee + fee_for_change_output - 1 * uT, 0, &key_manager, + vec![], ) .await; let output = p @@ -815,7 +816,7 @@ mod test { .await .unwrap() .with_fee_per_gram(MicroMinotari(2)); - let input_base = create_test_input(MicroMinotari(50), 0, &key_manager).await; + let input_base = create_test_input(MicroMinotari(50), 0, &key_manager, vec![]).await; for _ in 0..=MAX_TRANSACTION_INPUTS { builder.with_input(input_base.clone()).await.unwrap(); } @@ -836,7 +837,7 @@ mod test { p.get_size_for_default_features_and_scripts(1) .expect("Failed to borsh serialized size"), ); - let input = create_test_input(500 * uT + tx_fee, 0, &key_manager).await; + let input = create_test_input(500 * uT + tx_fee, 0, &key_manager, vec![]).await; let script = script!(Nop); // Start the builder let constants = create_consensus_constants(0); @@ -873,7 +874,7 @@ mod test { // Create some inputs let key_manager = create_memory_db_key_manager(); let p = TestParams::new(&key_manager).await; - let input = create_test_input(MicroMinotari(400), 0, &key_manager).await; + let input = create_test_input(MicroMinotari(400), 0, &key_manager, vec![]).await; let script = script!(Nop); let output = create_wallet_output_with_data( script.clone(), @@ -925,8 +926,8 @@ mod test { // Create some inputs let key_manager = create_memory_db_key_manager(); let p = TestParams::new(&key_manager).await; - let input1 = create_test_input(MicroMinotari(2000), 0, &key_manager).await; - let input2 = create_test_input(MicroMinotari(3000), 0, &key_manager).await; + let input1 = create_test_input(MicroMinotari(2000), 0, &key_manager, vec![]).await; + let input2 = create_test_input(MicroMinotari(3000), 0, &key_manager, vec![]).await; let fee_per_gram = MicroMinotari(6); let script = script!(Nop); diff --git a/base_layer/core/src/transactions/weight.rs b/base_layer/core/src/transactions/weight.rs index 8d9fa9d87a..72f91f0409 100644 --- a/base_layer/core/src/transactions/weight.rs +++ b/base_layer/core/src/transactions/weight.rs @@ -163,4 +163,11 @@ mod test { ); } } + + #[test] + fn empty_body_weight() { + let weighting = TransactionWeight::latest(); + let body = AggregateBody::empty(); + assert_eq!(weighting.calculate_body(&body).unwrap(), 0); + } } diff --git a/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs b/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs index 67c0824c20..d1a799233c 100644 --- 
a/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs +++ b/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs @@ -112,25 +112,41 @@ fn validate_input_not_pruned( let mut inputs: Vec = body.inputs().clone(); for input in &mut inputs { if input.is_compact() { - let output_mined_info = db - .fetch_output(&input.output_hash())? - .ok_or(ValidationError::UnknownInput)?; + let output = match db.fetch_output(&input.output_hash()) { + Ok(val) => match val { + Some(output_mined_info) => output_mined_info.output, + None => { + let input_output_hash = input.output_hash(); + if let Some(found) = body.outputs().iter().find(|o| o.hash() == input_output_hash) { + found.clone() + } else { + warn!( + target: LOG_TARGET, + "Input not found in database or block, commitment: {}, hash: {}", + input.commitment()?.to_hex(), input_output_hash.to_hex() + ); + return Err(ValidationError::UnknownInput); + } + }, + }, + Err(e) => return Err(ValidationError::from(e)), + }; - let rp_hash = match output_mined_info.output.proof { + let rp_hash = match output.proof { Some(proof) => proof.hash(), None => FixedHash::zero(), }; input.add_output_data( - output_mined_info.output.version, - output_mined_info.output.features, - output_mined_info.output.commitment, - output_mined_info.output.script, - output_mined_info.output.sender_offset_public_key, - output_mined_info.output.covenant, - output_mined_info.output.encrypted_data, - output_mined_info.output.metadata_signature, + output.version, + output.features, + output.commitment, + output.script, + output.sender_offset_public_key, + output.covenant, + output.encrypted_data, + output.metadata_signature, rp_hash, - output_mined_info.output.minimum_value_promise, + output.minimum_value_promise, ); } } @@ -207,11 +223,16 @@ fn check_inputs_are_utxos(db: &B, body: &AggregateBody) -> } let output_hashes = output_hashes.as_ref().unwrap(); - let output_hash = input.output_hash(); - if output_hashes.iter().any(|output| output == &output_hash) { + let input_output_hash = input.output_hash(); + if output_hashes.iter().any(|val| val == &input_output_hash) { continue; } - not_found_inputs.push(output_hash); + warn!( + target: LOG_TARGET, + "Input not found in database, commitment: {}, hash: {}", + input.commitment()?.to_hex(), input_output_hash.to_hex() + ); + not_found_inputs.push(input_output_hash); }, Err(err) => { return Err(err); diff --git a/base_layer/core/src/validation/aggregate_body/aggregate_body_internal_validator.rs b/base_layer/core/src/validation/aggregate_body/aggregate_body_internal_validator.rs index 0edff76f0e..f2a2b4d929 100644 --- a/base_layer/core/src/validation/aggregate_body/aggregate_body_internal_validator.rs +++ b/base_layer/core/src/validation/aggregate_body/aggregate_body_internal_validator.rs @@ -311,7 +311,7 @@ fn check_weight( if block_weight <= max_weight { trace!( target: LOG_TARGET, - "SV - Block contents for block #{} : {}; weight {}.", + "Aggregated body at height #{} : {}; weight {} is valid.", height, body.to_counts_string(), block_weight, diff --git a/base_layer/core/src/validation/block_body/block_body_full_validator.rs b/base_layer/core/src/validation/block_body/block_body_full_validator.rs index 7175b23b18..c16924fc25 100644 --- a/base_layer/core/src/validation/block_body/block_body_full_validator.rs +++ b/base_layer/core/src/validation/block_body/block_body_full_validator.rs @@ -105,15 +105,15 @@ impl BlockBodyValidator for BlockBodyFullValidator { } fn validate_block_metadata(block: 
&Block, metadata: &ChainMetadata) -> Result<(), ValidationError> {
-    if block.header.prev_hash != *metadata.best_block() {
+    if block.header.prev_hash != *metadata.best_block_hash() {
         return Err(ValidationError::IncorrectPreviousHash {
-            expected: metadata.best_block().to_hex(),
+            expected: metadata.best_block_hash().to_hex(),
             block_hash: block.hash().to_hex(),
         });
     }
-    if block.header.height != metadata.height_of_longest_chain() + 1 {
+    if block.header.height != metadata.best_block_height() + 1 {
         return Err(ValidationError::IncorrectHeight {
-            expected: metadata.height_of_longest_chain() + 1,
+            expected: metadata.best_block_height() + 1,
             block_height: block.header.height,
         });
     }
diff --git a/base_layer/core/src/validation/chain_balance.rs b/base_layer/core/src/validation/chain_balance.rs
index ff69eb2e60..c7153d7d21 100644
--- a/base_layer/core/src/validation/chain_balance.rs
+++ b/base_layer/core/src/validation/chain_balance.rs
@@ -92,8 +92,9 @@ impl ChainBalanceValidator {
     }

     fn get_emission_commitment_at(&self, height: u64) -> Commitment {
-        let total_supply =
-            self.rules.get_total_emission_at(height) + self.rules.consensus_constants(height).faucet_value();
+        // With inflating tail emission, we **must** know the value of the premine as part of the supply calc in order
+        // to determine the correct inflation curve. Therefore, the premine is already included in the supply.
+        let total_supply = self.rules.get_total_emission_at(height);
         debug!(
             target: LOG_TARGET,
             "Expected emission at height {} is {}", height, total_supply
diff --git a/base_layer/core/src/validation/error.rs b/base_layer/core/src/validation/error.rs
index 890da0b8f1..e5cbb243f2 100644
--- a/base_layer/core/src/validation/error.rs
+++ b/base_layer/core/src/validation/error.rs
@@ -89,7 +89,7 @@ pub enum ValidationError {
     #[error("Expected block previous hash to be {expected}, but was {block_hash}")]
     IncorrectPreviousHash { expected: String, block_hash: String },
     #[error("Bad block with hash {hash} found")]
-    BadBlockFound { hash: String },
+    BadBlockFound { hash: String, reason: String },
     #[error("Script exceeded maximum script size, expected less than {max_script_size} but was {actual_script_size}")]
     TariScriptExceedsMaxSize {
         max_script_size: usize,
diff --git a/base_layer/core/src/validation/header/header_full_validator.rs b/base_layer/core/src/validation/header/header_full_validator.rs
index 0adbe89107..5782c1b1f0 100644
--- a/base_layer/core/src/validation/header/header_full_validator.rs
+++ b/base_layer/core/src/validation/header/header_full_validator.rs
@@ -174,8 +174,12 @@ pub fn check_timestamp_ftl(
 }

 fn check_not_bad_block(db: &B, hash: FixedHash) -> Result<(), ValidationError> {
-    if db.bad_block_exists(hash)? {
-        return Err(ValidationError::BadBlockFound { hash: hash.to_hex() });
+    let block_exist = db.bad_block_exists(hash)?;
+    if block_exist.0 {
+        return Err(ValidationError::BadBlockFound {
+            hash: hash.to_hex(),
+            reason: block_exist.1,
+        });
     }
     Ok(())
 }
diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs
index cc6706fa5d..4ceac26a70 100644
--- a/base_layer/core/src/validation/helpers.rs
+++ b/base_layer/core/src/validation/helpers.rs
@@ -163,7 +163,8 @@ pub fn is_all_unique_and_sorted<'a, I: IntoIterator, T: PartialOrd
     true
 }

-/// This function checks that an input is a valid spendable UTXO
+/// This function checks that an input is a valid spendable UTXO in the database. It cannot validate
+/// zero-confirmation transactions.
pub fn check_input_is_utxo(db: &B, input: &TransactionInput) -> Result<(), ValidationError> { let output_hash = input.output_hash(); if let Some(utxo_hash) = db.fetch_unspent_output_hash_by_commitment(input.commitment()?)? { @@ -203,7 +204,7 @@ pub fn check_input_is_utxo(db: &B, input: &TransactionInpu warn!( target: LOG_TARGET, - "Validation failed due to input: {} which does not exist yet", input + "Input ({}, {}) does not exist in the database yet", input.commitment()?.to_hex(), output_hash.to_hex() ); Err(ValidationError::UnknownInput) } diff --git a/base_layer/core/src/validation/transaction/transaction_chain_validator.rs b/base_layer/core/src/validation/transaction/transaction_chain_validator.rs index fd4f6535b2..7879cd50f5 100644 --- a/base_layer/core/src/validation/transaction/transaction_chain_validator.rs +++ b/base_layer/core/src/validation/transaction/transaction_chain_validator.rs @@ -62,7 +62,7 @@ impl TransactionValidator for TransactionChainLinkedValida { let db = self.db.db_read_access()?; - let tip_height = db.fetch_chain_metadata()?.height_of_longest_chain(); + let tip_height = db.fetch_chain_metadata()?.best_block_height(); self.aggregate_body_validator.validate(&tx.body, tip_height, &*db)?; }; diff --git a/base_layer/core/src/validation/transaction/transaction_internal_validator.rs b/base_layer/core/src/validation/transaction/transaction_internal_validator.rs index 74725418f4..7991352d06 100644 --- a/base_layer/core/src/validation/transaction/transaction_internal_validator.rs +++ b/base_layer/core/src/validation/transaction/transaction_internal_validator.rs @@ -86,8 +86,8 @@ impl TransactionInternalConsistencyValidator { &tx.offset, &tx.script_offset, None, - Some(*tip_metadata.best_block()), - tip_metadata.height_of_longest_chain(), + Some(*tip_metadata.best_block_hash()), + tip_metadata.best_block_height(), ) } } diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index c5e35ff216..b3f2018dad 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -133,9 +133,9 @@ fn test_store_and_retrieve_block() { let hash = blocks[0].hash(); // Check the metadata let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); assert_eq!(metadata.best_block(), hash); - assert_eq!(metadata.horizon_block(metadata.height_of_longest_chain()), 0); + assert_eq!(metadata.horizon_block(metadata.best_block_height()), 0); // Fetch the block back let block0 = db.fetch_block(0, true).unwrap(); assert_eq!(block0.confirmations(), 1); @@ -151,7 +151,7 @@ fn test_add_multiple_blocks() { let consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_store_with_consensus(consensus_manager.clone()); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); let block0 = store.fetch_block(0, true).unwrap(); assert_eq!(metadata.best_block(), block0.hash()); // Add another block @@ -165,7 +165,7 @@ fn test_add_multiple_blocks() { .unwrap(); let metadata = store.get_chain_metadata().unwrap(); let hash = block1.hash(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), hash); // Adding blocks is idempotent assert_eq!( @@ -174,7 +174,7 @@ fn 
test_add_multiple_blocks() { ); // Check the metadata let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), hash); } @@ -309,15 +309,15 @@ fn test_rewind_past_horizon_height() { let _block4 = append_block(&store, &block3, vec![], &consensus_manager, Difficulty::min()).unwrap(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 4); + assert_eq!(metadata.best_block_height(), 4); // we should not be able to rewind to the future - assert!(store.rewind_to_height(metadata.height_of_longest_chain() + 1).is_err()); + assert!(store.rewind_to_height(metadata.best_block_height() + 1).is_err()); let horizon_height = metadata.pruned_height(); assert_eq!(horizon_height, 2); // rewinding past pruning horizon should set us to height 0 so we can resync from gen block. assert!(store.rewind_to_height(horizon_height - 1).is_ok()); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); } #[test] @@ -366,7 +366,7 @@ fn test_handle_tip_reorg_with_zero_conf() { &consensus_manager ) .is_ok()); - assert_eq!(store.get_chain_metadata().unwrap().height_of_longest_chain(), 3); + assert_eq!(store.get_chain_metadata().unwrap().best_block_height(), 3); // Create Forked Chain @@ -406,7 +406,7 @@ fn test_handle_tip_reorg_with_zero_conf() { // Check that B2 was removed from the block orphans and A2 has been orphaned. assert!(store.fetch_orphan(*orphan_blocks[2].hash()).is_err()); assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); - assert_eq!(store.get_chain_metadata().unwrap().height_of_longest_chain(), 2); + assert_eq!(store.get_chain_metadata().unwrap().best_block_height(), 2); // Block B3 let txs = vec![ @@ -470,7 +470,7 @@ fn test_handle_tip_reorg_with_zero_conf() { } else { panic!(); } - assert_eq!(store.get_chain_metadata().unwrap().height_of_longest_chain(), 5); + assert_eq!(store.get_chain_metadata().unwrap().best_block_height(), 5); } #[test] #[allow(clippy::too_many_lines)] @@ -1374,7 +1374,7 @@ fn test_restore_metadata_and_pruning_horizon_update() { db.add_block(block1.to_arc_block()).unwrap(); block_hash = *block1.hash(); let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block_hash); assert_eq!(metadata.pruning_horizon(), 1000); } @@ -1394,7 +1394,7 @@ fn test_restore_metadata_and_pruning_horizon_update() { .unwrap(); let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block_hash); assert_eq!(metadata.pruning_horizon(), 2000); } @@ -1412,7 +1412,7 @@ fn test_restore_metadata_and_pruning_horizon_update() { .unwrap(); let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block_hash); assert_eq!(metadata.pruning_horizon(), 900); } @@ -1439,7 +1439,7 @@ fn test_invalid_block() { let mut outputs = vec![vec![output]]; let block0_hash = *blocks[0].hash(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); assert_eq!(metadata.best_block(), 
&block0_hash); assert_eq!(store.fetch_block(0, true).unwrap().block().hash(), block0_hash); assert!(store.fetch_block(1, true).is_err()); @@ -1464,7 +1464,7 @@ fn test_invalid_block() { ); let block1_hash = *blocks[1].hash(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block1_hash); assert_eq!(store.fetch_block(0, true).unwrap().hash(), &block0_hash); assert_eq!(store.fetch_block(1, true).unwrap().hash(), &block1_hash); @@ -1487,7 +1487,7 @@ fn test_invalid_block() { .unwrap_err() ); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block1_hash); assert_eq!(store.fetch_block(0, true).unwrap().hash(), &block0_hash); assert_eq!(store.fetch_block(1, true).unwrap().hash(), &block1_hash); @@ -1511,7 +1511,7 @@ fn test_invalid_block() { ); let block2_hash = blocks[2].hash(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 2); + assert_eq!(metadata.best_block_height(), 2); assert_eq!(metadata.best_block(), block2_hash); assert_eq!(store.fetch_block(0, true).unwrap().hash(), &block0_hash); assert_eq!(store.fetch_block(1, true).unwrap().hash(), &block1_hash); @@ -1934,7 +1934,7 @@ fn test_fails_validation() { unpack_enum!(ValidationError::CustomError(_s) = source); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); } #[test] @@ -1974,7 +1974,7 @@ fn pruned_mode_cleanup_and_fetch_block() { let metadata = store.get_chain_metadata().unwrap(); assert_eq!(metadata.pruned_height(), 2); - assert_eq!(metadata.height_of_longest_chain(), 5); + assert_eq!(metadata.best_block_height(), 5); assert_eq!(metadata.pruning_horizon(), 3); }
diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index b4f11bc64b..8fdaaad977 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -191,6 +191,7 @@ fn update_genesis_block_mmr_roots(template: NewBlockTemplate) -> Result<Block, ChainStorageError> pub async fn append_block<B: BlockchainBackend>( db: &BlockchainDatabase<B>, prev_block: &ChainBlock, txns: Vec<Transaction>, consensus: &ConsensusManager, achieved_difficulty: Difficulty, key_manager: &MemoryDbKeyManager, -) -> Result<ChainBlock, ChainStorageError> { - append_block_with_coinbase(db, prev_block, txns, consensus, achieved_difficulty, key_manager) - .await - .map(|(b, _)| b) +) -> Result<(ChainBlock, WalletOutput), ChainStorageError> { + append_block_with_coinbase(db, prev_block, txns, consensus, achieved_difficulty, key_manager).await } /// Create a new block with the provided transactions and add a coinbase output.
The new MMR roots are calculated, and @@ -577,7 +576,7 @@ pub async fn construct_chained_blocks( let mut prev_block = block0; let mut blocks = Vec::new(); for _i in 0..n { - let block = append_block(db, &prev_block, vec![], consensus, Difficulty::min(), key_manager) + let (block, _) = append_block(db, &prev_block, vec![], consensus, Difficulty::min(), key_manager) .await .unwrap(); prev_block = block.clone(); diff --git a/base_layer/core/tests/helpers/nodes.rs b/base_layer/core/tests/helpers/nodes.rs index 98702db9d8..207373969c 100644 --- a/base_layer/core/tests/helpers/nodes.rs +++ b/base_layer/core/tests/helpers/nodes.rs @@ -41,7 +41,7 @@ use tari_core::{ LocalNodeCommsInterface, StateMachineHandle, }, - chain_storage::{BlockchainDatabase, Validators}, + chain_storage::{BlockchainDatabase, BlockchainDatabaseConfig, Validators}, consensus::{ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, mempool::{ service::{LocalMempoolService, MempoolHandle}, @@ -52,7 +52,7 @@ use tari_core::{ OutboundMempoolServiceInterface, }, proof_of_work::randomx_factory::RandomXFactory, - test_helpers::blockchain::{create_store_with_consensus_and_validators, TempDatabase}, + test_helpers::blockchain::{create_store_with_consensus_and_validators_and_config, TempDatabase}, validation::{ mocks::MockValidator, transaction::TransactionChainLinkedValidator, @@ -186,7 +186,11 @@ impl BaseNodeBuilder { /// Build the test base node and start its services. #[allow(clippy::redundant_closure)] - pub async fn start(self, data_path: &str) -> (NodeInterfaces, ConsensusManager) { + pub async fn start( + self, + data_path: &str, + blockchain_db_config: BlockchainDatabaseConfig, + ) -> (NodeInterfaces, ConsensusManager) { let validators = self.validators.unwrap_or_else(|| { Validators::new( MockValidator::new(true), @@ -198,7 +202,11 @@ impl BaseNodeBuilder { let consensus_manager = self .consensus_manager .unwrap_or_else(|| ConsensusManagerBuilder::new(network).build().unwrap()); - let blockchain_db = create_store_with_consensus_and_validators(consensus_manager.clone(), validators); + let blockchain_db = create_store_with_consensus_and_validators_and_config( + consensus_manager.clone(), + validators, + blockchain_db_config, + ); let mempool_validator = TransactionChainLinkedValidator::new(blockchain_db.clone(), consensus_manager.clone()); let mempool = Mempool::new( self.mempool_config.unwrap_or_default(), @@ -234,127 +242,53 @@ pub async fn wait_until_online(nodes: &[&NodeInterfaces]) { } } -// Creates a network with two Base Nodes where each node in the network knows the other nodes in the network. -#[allow(dead_code)] -pub async fn create_network_with_2_base_nodes(data_path: &str) -> (NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - - let network = Network::LocalNet; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity.clone()) - .with_peers(vec![bob_node_identity.clone()]) - .start(data_path) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity) - .with_peers(vec![alice_node_identity]) - .with_consensus_manager(consensus_manager) - .start(data_path) - .await; - - wait_until_online(&[&alice_node, &bob_node]).await; - - (alice_node, bob_node, consensus_manager) -} - -// Creates a network with two Base Nodes where each node in the network knows the other nodes in the network. 
-#[allow(dead_code)] -pub async fn create_network_with_2_base_nodes_with_config<P: AsRef<Path>>( - mempool_service_config: MempoolServiceConfig, - liveness_service_config: LivenessConfig, - p2p_config: P2pConfig, +// Creates a network with multiple Base Nodes where each node in the network knows the other nodes in the network. +pub async fn create_network_with_multiple_base_nodes_with_config<P: AsRef<Path>>( + mempool_service_configs: Vec<MempoolServiceConfig>, + liveness_service_configs: Vec<LivenessConfig>, + blockchain_db_configs: Vec<BlockchainDatabaseConfig>, + p2p_configs: Vec<P2pConfig>, consensus_manager: ConsensusManager, data_path: P, -) -> (NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - let network = Network::LocalNet; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity.clone()) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_p2p_config(p2p_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("alice").as_os_str().to_str().unwrap()) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity) - .with_peers(vec![alice_node_identity]) - .with_mempool_service_config(mempool_service_config) - .with_liveness_service_config(liveness_service_config) - .with_p2p_config(p2p_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("bob").as_os_str().to_str().unwrap()) - .await; - - wait_until_online(&[&alice_node, &bob_node]).await; - - (alice_node, bob_node, consensus_manager) -} - -// Creates a network with three Base Nodes where each node in the network knows the other nodes in the network. -#[allow(dead_code)] -pub async fn create_network_with_3_base_nodes( - data_path: &str, -) -> (NodeInterfaces, NodeInterfaces, NodeInterfaces, ConsensusManager) { - let network = Network::LocalNet; - let consensus_manager = ConsensusManagerBuilder::new(network).build().unwrap(); - create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - data_path, - ) - .await -} - -// Creates a network with three Base Nodes where each node in the network knows the other nodes in the network.
-#[allow(dead_code)] -pub async fn create_network_with_3_base_nodes_with_config<P: AsRef<Path>>( - mempool_service_config: MempoolServiceConfig, - liveness_service_config: LivenessConfig, - consensus_manager: ConsensusManager, - data_path: P, -) -> (NodeInterfaces, NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - let carol_node_identity = random_node_identity(); - let network = Network::LocalNet; - - log::info!( - "Alice = {}, Bob = {}, Carol = {}", - alice_node_identity.node_id().short_str(), - bob_node_identity.node_id().short_str(), - carol_node_identity.node_id().short_str() - ); - let (carol_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(carol_node_identity.clone()) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("carol").as_os_str().to_str().unwrap()) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity.clone()) - .with_peers(vec![carol_node_identity.clone()]) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("bob").as_os_str().to_str().unwrap()) - .await; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity) - .with_peers(vec![bob_node_identity, carol_node_identity]) - .with_mempool_service_config(mempool_service_config) - .with_liveness_service_config(liveness_service_config) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("alice").as_os_str().to_str().unwrap()) - .await; + network: Network, +) -> (Vec<NodeInterfaces>, ConsensusManager) { + let num_of_nodes = mempool_service_configs.len(); + if num_of_nodes != liveness_service_configs.len() || + num_of_nodes != blockchain_db_configs.len() || + num_of_nodes != p2p_configs.len() + { + panic!("create_network_with_multiple_base_nodes_with_config: All configs must be the same length"); + } + let mut node_identities = Vec::with_capacity(num_of_nodes); + for i in 0..num_of_nodes { + node_identities.push(random_node_identity()); + log::info!( + "node identity {} = `{}`", + i + 1, + node_identities[node_identities.len() - 1].node_id().short_str() + ); + } + let mut node_interfaces = Vec::with_capacity(num_of_nodes); + for i in 0..num_of_nodes { + let (node, _) = BaseNodeBuilder::new(network.into()) + .with_node_identity(node_identities[i].clone()) + .with_peers(node_identities.iter().take(i).cloned().collect()) + .with_mempool_service_config(mempool_service_configs[i].clone()) + .with_liveness_service_config(liveness_service_configs[i].clone()) + .with_p2p_config(p2p_configs[i].clone()) + .with_consensus_manager(consensus_manager.clone()) + .start( + data_path.as_ref().join(i.to_string()).as_os_str().to_str().unwrap(), + blockchain_db_configs[i], + ) + .await; + node_interfaces.push(node); + } - wait_until_online(&[&alice_node, &bob_node, &carol_node]).await; + let node_interface_refs = node_interfaces.iter().collect::<Vec<_>>(); + wait_until_online(node_interface_refs.as_slice()).await; - (alice_node, bob_node, carol_node, consensus_manager) + (node_interfaces, consensus_manager) } // Helper function for creating a random node identity.
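For reference, the consolidated helper is driven from a test roughly as follows. This is a sketch based on the call sites later in this diff; the `temp_dir` and `consensus_manager` bindings are assumed to come from the surrounding test setup, as in `create_network_with_multiple_nodes` below:

    // Sketch: build a two-node LocalNet network. All config vectors must have
    // the same length, one entry per node (the helper panics otherwise).
    let (node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config(
        vec![MempoolServiceConfig::default(); 2],
        vec![LivenessConfig::default(); 2],
        vec![BlockchainDatabaseConfig::default(); 2],
        vec![P2pConfig::default(); 2],
        consensus_manager,
        temp_dir.path().to_str().unwrap(),
        Network::LocalNet,
    )
    .await;
    assert_eq!(node_interfaces.len(), 2);

Each node is started with only the previously created nodes as peers, so the first node has no peers and the last knows every other node; `wait_until_online` then blocks until the whole mesh is connected.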
@@ -445,15 +379,18 @@ async fn setup_base_node_services( blockchain_db.clone().into(), base_node_service, )); - let comms = comms + let mut comms = comms .add_protocol_extension(rpc_server) .spawn_with_transport(MemoryTransport) .await .unwrap(); // Set the public address for tests - comms - .node_identity() - .add_public_address(comms.listening_address().clone()); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); + comms.node_identity().add_public_address(address.bind_address().clone()); let outbound_nci = handles.expect_handle::<OutboundNodeCommsInterface>(); let local_nci = handles.expect_handle::<LocalNodeCommsInterface>();
diff --git a/base_layer/core/tests/helpers/sample_blockchains.rs b/base_layer/core/tests/helpers/sample_blockchains.rs index a958186ec3..40c82fb429 100644 --- a/base_layer/core/tests/helpers/sample_blockchains.rs +++ b/base_layer/core/tests/helpers/sample_blockchains.rs @@ -170,6 +170,12 @@ pub async fn create_blockchain_db_no_cut_through() -> ( (db, blocks, outputs, consensus_manager, key_manager) } +pub fn consensus_constants(network: Network) -> ConsensusConstantsBuilder { + ConsensusConstantsBuilder::new(network) + .with_emission_amounts(100_000_000.into(), &EMISSION, 10, 1000) + .with_coinbase_lockheight(1) +} + /// Create a new blockchain database containing only the Genesis block #[allow(dead_code)] pub async fn create_new_blockchain( @@ -182,10 +188,7 @@ pub async fn create_new_blockchain( MemoryDbKeyManager, ) { let key_manager = create_memory_db_key_manager(); - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .with_coinbase_lockheight(1) - .build(); + let consensus_constants = consensus_constants(network).build(); let (block0, output) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants) @@ -243,10 +246,7 @@ pub async fn create_new_blockchain_lmdb( MemoryDbKeyManager, ) { let key_manager = create_memory_db_key_manager(); - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .with_coinbase_lockheight(1) - .build(); + let consensus_constants = consensus_constants(network).build(); let (block0, output) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants)
diff --git a/base_layer/core/tests/helpers/sync.rs b/base_layer/core/tests/helpers/sync.rs index c3af805031..93a77d1fbc 100644 --- a/base_layer/core/tests/helpers/sync.rs +++ b/base_layer/core/tests/helpers/sync.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-use std::time::Duration; +use std::{sync::Arc, time::Duration}; use tari_common::configuration::Network; use tari_common_types::types::HashOutput; @@ -28,19 +28,32 @@ use tari_comms::peer_manager::NodeId; use tari_core::{ base_node::{ chain_metadata_service::PeerChainMetadata, - state_machine_service::states::{BlockSync, HeaderSyncState, StateEvent, StatusInfo}, + state_machine_service::states::{ + BlockSync, + DecideNextSync, + HeaderSyncState, + HorizonStateSync, + StateEvent, + StatusInfo, + }, sync::SyncPeer, BaseNodeStateMachine, BaseNodeStateMachineConfig, SyncValidators, }, blocks::ChainBlock, - chain_storage::DbTransaction, - consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder}, + chain_storage::{BlockchainDatabaseConfig, DbTransaction}, + consensus::{ConsensusManager, ConsensusManagerBuilder}, mempool::MempoolServiceConfig, proof_of_work::{randomx_factory::RandomXFactory, Difficulty}, test_helpers::blockchain::TempDatabase, - transactions::key_manager::{create_memory_db_key_manager, MemoryDbKeyManager}, + transactions::{ + key_manager::{create_memory_db_key_manager, MemoryDbKeyManager}, + tari_amount::T, + test_helpers::schema_to_transaction, + transaction_components::{Transaction, WalletOutput}, + }, + txn_schema, validation::mocks::MockValidator, }; use tari_p2p::{services::liveness::LivenessConfig, P2pConfig}; @@ -50,11 +63,11 @@ use tokio::sync::{broadcast, watch}; use crate::helpers::{ block_builders::{append_block, create_genesis_block}, - nodes::{create_network_with_2_base_nodes_with_config, NodeInterfaces}, + nodes::{create_network_with_multiple_base_nodes_with_config, NodeInterfaces}, + sample_blockchains, }; -static EMISSION: [u64; 2] = [10, 10]; - +/// Helper function to initialize header sync with a single peer pub fn initialize_sync_headers_with_ping_pong_data( local_node_interfaces: &NodeInterfaces, peer_node_interfaces: &NodeInterfaces, @@ -69,6 +82,7 @@ pub fn initialize_sync_headers_with_ping_pong_data( ) } +/// Helper function to execute header sync with a single peer pub async fn sync_headers_execute( state_machine: &mut BaseNodeStateMachine<TempDatabase>, header_sync: &mut HeaderSyncState, @@ -76,6 +90,7 @@ pub async fn sync_headers_execute( header_sync.next_event(state_machine).await } +/// Helper function to initialize block sync with a single peer pub fn initialize_sync_blocks(peer_node_interfaces: &NodeInterfaces) -> BlockSync { BlockSync::from(vec![SyncPeer::from(PeerChainMetadata::new( peer_node_interfaces.node_identity.node_id().clone(), @@ -84,6 +99,7 @@ pub fn initialize_sync_blocks(peer_node_interfaces: &NodeInterfaces) -> BlockSyn ))]) } +/// Helper function to execute block sync with a single peer pub async fn sync_blocks_execute( state_machine: &mut BaseNodeStateMachine<TempDatabase>, block_sync: &mut BlockSync, @@ -91,67 +107,106 @@ pub async fn sync_blocks_execute( block_sync.next_event(state_machine).await } -pub async fn create_network_with_local_and_peer_nodes() -> ( - BaseNodeStateMachine<TempDatabase>, - NodeInterfaces, - NodeInterfaces, +/// Helper function to decide the next sync step (horizon or block sync) +pub async fn decide_horizon_sync( + local_state_machine: &mut BaseNodeStateMachine<TempDatabase>, + local_header_sync: HeaderSyncState, +) -> StateEvent { + let mut next_sync = DecideNextSync::from(local_header_sync.clone()); + next_sync.next_event(local_state_machine).await +} + +/// Helper function to initialize horizon state sync with a single peer +pub fn initialize_horizon_sync_without_header_sync(peer_node_interfaces: &NodeInterfaces) -> HorizonStateSync { + HorizonStateSync::from(vec![SyncPeer::from(PeerChainMetadata::new( + peer_node_interfaces.node_identity.node_id().clone(), + peer_node_interfaces.blockchain_db.get_chain_metadata().unwrap(), + None, + ))]) +} + +/// Helper function to execute horizon state sync with a single peer +pub async fn horizon_sync_execute( + state_machine: &mut BaseNodeStateMachine<TempDatabase>, + horizon_sync: &mut HorizonStateSync, +) -> StateEvent { + horizon_sync.next_event(state_machine).await +}
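These building blocks compose into the same walk a real node makes through the state machine. A minimal sketch, assuming a state machine and two nodes obtained from `create_network_with_multiple_nodes` below, and assuming `StateEvent::ProceedToHorizonSync` carries the sync peer list (as the `state_event` helper later in this file suggests):

    // Sketch: header sync first, let DecideNextSync choose, then horizon sync.
    let mut header_sync = initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node);
    let event = sync_headers_execute(&mut alice_state_machine, &mut header_sync).await;
    assert!(matches!(event, StateEvent::HeadersSynchronized(_, _)));

    let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await;
    if let StateEvent::ProceedToHorizonSync(sync_peers) = event {
        let mut horizon_sync = HorizonStateSync::from(sync_peers);
        let event = horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await;
        assert_eq!(event, StateEvent::HorizonStateSynchronized);
    }

The `initialize_horizon_sync_without_header_sync` variant skips the first two steps, which is exactly what the first horizon-sync test below uses to show that horizon sync is a no-op before headers exist.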
+/// Helper function to create a network with multiple nodes +pub async fn create_network_with_multiple_nodes( + blockchain_db_configs: Vec<BlockchainDatabaseConfig>, +) -> ( + Vec<BaseNodeStateMachine<TempDatabase>>, + Vec<NodeInterfaces>, ChainBlock, ConsensusManager, MemoryDbKeyManager, + WalletOutput, ) { + let num_nodes = blockchain_db_configs.len(); + if num_nodes < 2 { + panic!("Must have at least 2 nodes"); + } let network = Network::LocalNet; let temp_dir = tempdir().unwrap(); let key_manager = create_memory_db_key_manager(); - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); - let (initial_block, _) = create_genesis_block(&consensus_constants, &key_manager).await; + let consensus_constants = sample_blockchains::consensus_constants(network).build(); + let (initial_block, coinbase_wallet_output) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants) .with_block(initial_block.clone()) .build() .unwrap(); - let (local_node, peer_node, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, - P2pConfig::default(), + let (node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); num_nodes], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + num_nodes + ], + blockchain_db_configs, + vec![P2pConfig::default(); num_nodes], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; let shutdown = Shutdown::new(); - let (state_change_event_publisher, _) = broadcast::channel(10); - let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); - // Alice needs a state machine for header sync - let local_state_machine = BaseNodeStateMachine::new( - local_node.blockchain_db.clone().into(), - local_node.local_nci.clone(), - local_node.comms.connectivity(), - local_node.comms.peer_manager(), - local_node.chain_metadata_handle.get_event_stream(), - BaseNodeStateMachineConfig::default(), - SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), - status_event_sender, - state_change_event_publisher, - RandomXFactory::default(), - consensus_manager.clone(), - shutdown.to_signal(), - ); + let mut state_machines = Vec::with_capacity(num_nodes); + for node_interface in node_interfaces.iter().take(num_nodes) { + let (state_change_event_publisher, _) = broadcast::channel(10); + let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); + state_machines.push(BaseNodeStateMachine::new( + node_interface.blockchain_db.clone().into(), + node_interface.local_nci.clone(), + node_interface.comms.connectivity(), + node_interface.comms.peer_manager(), + node_interface.chain_metadata_handle.get_event_stream(), + BaseNodeStateMachineConfig::default(), + SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), + status_event_sender, + state_change_event_publisher, + RandomXFactory::default(), + consensus_manager.clone(), + shutdown.to_signal(), + )); + } ( - local_state_machine, - local_node, - peer_node, + state_machines, + node_interfaces, initial_block, consensus_manager, key_manager, + coinbase_wallet_output, ) }
+/// Helper enum to specify what to delete #[allow(dead_code)] #[derive(Debug)] pub enum WhatToDelete { @@ -169,12 +224,12 @@ fn delete_block(txn: &mut DbTransaction, node: &NodeInterfaces, blocks: &[ChainB blocks[index + 1].height(), blocks[index + 1].accumulated_data().hash, blocks[index + 1].accumulated_data().total_accumulated_difficulty, - *node.blockchain_db.get_chain_metadata().unwrap().best_block(), + *node.blockchain_db.get_chain_metadata().unwrap().best_block_hash(), blocks[index + 1].to_chain_header().timestamp(), ); } -// Delete blocks and headers in reverse order; the first block in the slice wil not be deleted +/// Delete blocks and headers in reverse order; the first block in the slice will not be deleted pub fn delete_some_blocks_and_headers( blocks_with_anchor: &[ChainBlock], instruction: WhatToDelete, @@ -229,6 +284,7 @@ pub fn delete_some_blocks_and_headers( } } +/// Set the best block in the blockchain_db #[allow(dead_code)] pub fn set_best_block(block: &ChainBlock, previous_block_hash: &HashOutput, node: &NodeInterfaces) { let mut txn = DbTransaction::new(); @@ -242,47 +298,59 @@ pub fn set_best_block(block: &ChainBlock, previous_block_hash: &HashOutput, node node.blockchain_db.write(txn).unwrap(); } +/// Add some existing blocks to the blockchain_db pub fn add_some_existing_blocks(blocks: &[ChainBlock], node: &NodeInterfaces) { for block in blocks { let _res = node.blockchain_db.add_block(block.block().clone().into()).unwrap(); } } -// Return blocks added, including the start block +/// Return blocks and coinbases added, including the start block and coinbase pub async fn create_and_add_some_blocks( node: &NodeInterfaces, start_block: &ChainBlock, + start_coinbase: &WalletOutput, number_of_blocks: usize, consensus_manager: &ConsensusManager, key_manager: &MemoryDbKeyManager, difficulties: &[u64], -) -> Vec<ChainBlock> { - if number_of_blocks != difficulties.len() { + transactions: &Option<Vec<Vec<Transaction>>>, +) -> (Vec<ChainBlock>, Vec<WalletOutput>) { + let transactions = if let Some(val) = transactions { + val.clone() + } else { + vec![vec![]; number_of_blocks] + }; + if number_of_blocks != difficulties.len() || number_of_blocks != transactions.len() { panic!( - "Number of blocks ({}) and difficulties length ({}) must be equal", + "Number of blocks ({}), transactions length ({}) and difficulties length ({}) must be equal", number_of_blocks, + transactions.len(), difficulties.len() ); } let mut blocks = vec![start_block.clone()]; + let mut coinbases = vec![start_coinbase.clone()]; let mut prev_block = start_block.clone(); - for item in difficulties.iter().take(number_of_blocks) { - prev_block = append_block( + for (item, txns) in difficulties.iter().zip(transactions.iter()) { + let (new_block, coinbase) = append_block( + &node.blockchain_db, &prev_block, - vec![], + txns.clone(), consensus_manager, Difficulty::from_u64(*item).unwrap(), key_manager, ) .await .unwrap(); - blocks.push(prev_block.clone()); + prev_block = new_block.clone(); + blocks.push(new_block.clone()); + coinbases.push(coinbase.clone()); } - blocks + (blocks, coinbases) }
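A typical call now threads the coinbases through and can attach transactions to specific blocks. A sketch mirroring the call sites in the tests below (`node`, `initial_block` and friends come from `create_network_with_multiple_nodes`):

    // Sketch: append 5 blocks at difficulty 3 with no extra transactions. The
    // returned vectors include the start block and its coinbase at index 0.
    let (blocks, coinbases) = sync::create_and_add_some_blocks(
        &node,
        &initial_block,
        &initial_coinbase,
        5,
        &consensus_manager,
        &key_manager,
        &[3; 5],
        &None, // or &Some(txns): one Vec<Transaction> per appended block
    )
    .await;
    assert_eq!(blocks.len(), 6);
    assert_eq!(coinbases.len(), 6);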
-// We give some time for the peer to be banned as it is an async process +/// We give some time for the peer to be banned as it is an async process pub async fn wait_for_is_peer_banned(this_node: &NodeInterfaces, peer_node_id: &NodeId, seconds: u64) -> bool { let interval_ms = 100; let intervals = seconds * 1000 / interval_ms; @@ -300,3 +368,143 @@ pub async fn wait_for_is_peer_banned(this_node: &NodeInterfaces, peer_node_id: & } false } + +/// Condensed format of the state machine state for display +pub fn state_event(event: &StateEvent) -> String { + match event { + StateEvent::Initialized => "Initialized".to_string(), + StateEvent::HeadersSynchronized(_, _) => "HeadersSynchronized".to_string(), + StateEvent::HeaderSyncFailed(_) => "HeaderSyncFailed".to_string(), + StateEvent::ProceedToHorizonSync(_) => "ProceedToHorizonSync".to_string(), + StateEvent::ProceedToBlockSync(_) => "ProceedToBlockSync".to_string(), + StateEvent::HorizonStateSynchronized => "HorizonStateSynchronized".to_string(), + StateEvent::HorizonStateSyncFailure => "HorizonStateSyncFailure".to_string(), + StateEvent::BlocksSynchronized => "BlocksSynchronized".to_string(), + StateEvent::BlockSyncFailed => "BlockSyncFailed".to_string(), + StateEvent::FallenBehind(_) => "FallenBehind".to_string(), + StateEvent::NetworkSilence => "NetworkSilence".to_string(), + StateEvent::FatalError(_) => "FatalError".to_string(), + StateEvent::Continue => "Continue".to_string(), + StateEvent::UserQuit => "UserQuit".to_string(), + } +} + +/// Return blocks and coinbases added, including the start block and coinbase +pub async fn create_block_chain_with_transactions( + node: &NodeInterfaces, + initial_block: &ChainBlock, + initial_coinbase: &WalletOutput, + consensus_manager: &ConsensusManager, + key_manager: &MemoryDbKeyManager, + intermediate_height: u64, + number_of_blocks: usize, + spend_genesis_coinbase_in_block: usize, + follow_up_transaction_in_block: usize, + follow_up_coinbases_to_spend: usize, +) -> (Vec<ChainBlock>, Vec<WalletOutput>) { + assert!(spend_genesis_coinbase_in_block > 1); + assert!((spend_genesis_coinbase_in_block as u64) < intermediate_height); + assert!(follow_up_transaction_in_block > spend_genesis_coinbase_in_block + 1); + assert!((follow_up_transaction_in_block as u64) > intermediate_height); + assert!(number_of_blocks as u64 > follow_up_transaction_in_block as u64 + intermediate_height + 1); + let add_blocks_a = spend_genesis_coinbase_in_block - 1; + let add_blocks_b = follow_up_transaction_in_block - 1 - add_blocks_a; + let add_blocks_c = number_of_blocks - add_blocks_a - add_blocks_b; + assert!(follow_up_coinbases_to_spend > add_blocks_a); + assert!(follow_up_coinbases_to_spend < follow_up_transaction_in_block); + + // Create a blockchain with some blocks to enable spending the genesis coinbase early on + let (blocks_a, coinbases_a) = create_and_add_some_blocks( + node, + initial_block, + initial_coinbase, + add_blocks_a, + consensus_manager, + key_manager, + &vec![3; add_blocks_a], + &None, + ) + .await; + assert_eq!(node.blockchain_db.get_height().unwrap(), add_blocks_a as u64); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + add_blocks_a as u64 + ); + // Add a transaction to spend the genesis coinbase + let schema = txn_schema!( + from: vec![initial_coinbase.clone()], + to: vec![1 * T; 10] + ); + let (txns_genesis_coinbase, _outputs) = schema_to_transaction(&[schema], key_manager).await; + let mut txns_all = vec![vec![]; add_blocks_b]; + txns_all[0] = txns_genesis_coinbase + .into_iter() + .map(|t| Arc::try_unwrap(t).unwrap()) + .collect::<Vec<Transaction>>(); + // Expand the blockchain with the genesis coinbase spend transaction + let (blocks_b, coinbases_b) = create_and_add_some_blocks( + node, + &blocks_a[blocks_a.len() - 1], + &coinbases_a[coinbases_a.len() - 1], + add_blocks_b, + consensus_manager, + key_manager, + &vec![3; add_blocks_b], + &Some(txns_all), + ) + .await; + assert_eq!( + node.blockchain_db.get_height().unwrap(), + (add_blocks_a + add_blocks_b) as u64 + ); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + (add_blocks_a + add_blocks_b) as u64 + ); + // Add a transaction to spend some more coinbase outputs + let mut coinbases_to_spend = Vec::with_capacity(follow_up_coinbases_to_spend); + for coinbase in coinbases_a.iter().skip(1) + // Skip the genesis coinbase + { + coinbases_to_spend.push(coinbase.clone()); + } + for coinbase in coinbases_b + .iter() + .skip(1) // Skip the last coinbase of the previously added blocks + .take(follow_up_coinbases_to_spend - coinbases_to_spend.len()) + { + coinbases_to_spend.push(coinbase.clone()); + } + assert_eq!(coinbases_to_spend.len(), follow_up_coinbases_to_spend); + let schema = txn_schema!( + from: coinbases_to_spend, + to: vec![1 * T; 20] + ); + let (txns_additional_coinbases, _outputs) = schema_to_transaction(&[schema], key_manager).await; + let mut txns_all = vec![vec![]; add_blocks_c]; + txns_all[0] = txns_additional_coinbases + .into_iter() + .map(|t| Arc::try_unwrap(t).unwrap()) + .collect::<Vec<Transaction>>(); + // Expand the blockchain with the spend transaction + let (blocks_c, coinbases_c) = create_and_add_some_blocks( + node, + &blocks_b[blocks_b.len() - 1], + &coinbases_b[coinbases_b.len() - 1], + add_blocks_c, + consensus_manager, + key_manager, + &vec![3; add_blocks_c], + &Some(txns_all), + ) + .await; + assert_eq!(node.blockchain_db.get_height().unwrap(), number_of_blocks as u64); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + number_of_blocks as u64 + ); + let blocks = [&blocks_a[..], &blocks_b[1..], &blocks_c[1..]].concat(); + let coinbases = [&coinbases_a[..], &coinbases_b[1..], &coinbases_c[1..]].concat(); + + (blocks, coinbases) +}
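The parameter constraints are easiest to see with concrete numbers. This worked instance uses the exact values passed by the block-sync tests below, and satisfies every assert:

    // Worked instance: intermediate_height = 3, number_of_blocks = 10,
    // spend_genesis_coinbase_in_block = 2, follow_up_transaction_in_block = 5,
    // follow_up_coinbases_to_spend = 4.
    // Phase a: add_blocks_a = 2 - 1 = 1 (block 1), so the genesis-coinbase
    //          spend lands in block 2, the first block of phase b.
    // Phase b: add_blocks_b = 5 - 1 - 1 = 3 (blocks 2..=4), so the follow-up
    //          spend of 4 earlier coinbases lands in block 5, first of phase c.
    // Phase c: add_blocks_c = 10 - 1 - 3 = 6 (blocks 5..=10).
    // Checks: 2 > 1, 2 < 3, 5 > 2 + 1, 5 > 3, 10 > 5 + 3 + 1, 4 > 1, 4 < 5.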
diff --git a/base_layer/core/tests/tests/base_node_rpc.rs b/base_layer/core/tests/tests/base_node_rpc.rs index ec11eff7c3..bc8f0c39fb 100644 --- a/base_layer/core/tests/tests/base_node_rpc.rs +++ b/base_layer/core/tests/tests/base_node_rpc.rs @@ -41,6 +41,7 @@ use tari_core::{ sync::rpc::BaseNodeSyncRpcService, }, blocks::ChainBlock, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, proto::{ base_node::{FetchMatchingUtxos, Signatures as SignaturesProto, SyncUtxosByBlockRequest}, @@ -94,7 +95,7 @@ async fn setup() -> ( .unwrap(); let (mut base_node, _consensus_manager) = BaseNodeBuilder::new(network) .with_consensus_manager(consensus_manager.clone()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; base_node.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, @@ -160,7 +161,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(resp).unwrap(); assert_eq!(resp.confirmations, 0); - assert_eq!(resp.block_hash, None); + assert_eq!(resp.best_block_hash, None); assert_eq!(resp.location, TxLocation::NotStored); // First let's try to submit tx2, which will be an orphan tx @@ -178,7 +179,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(service.transaction_query(req).await.unwrap().into_message()).unwrap(); assert_eq!(resp.confirmations, 0); - assert_eq!(resp.block_hash, None); + assert_eq!(resp.best_block_hash, None); assert_eq!(resp.location, TxLocation::NotStored); // Now submit a block with Tx1 in it so that Tx2 is no longer an orphan @@ -201,7 +202,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(service.transaction_query(req).await.unwrap().into_message()).unwrap(); assert_eq!(resp.confirmations, 0); - assert_eq!(resp.block_hash, None); + assert_eq!(resp.best_block_hash, None); assert_eq!(resp.location, TxLocation::InMempool); // Now if we submit Tx1 it should return as rejected as AlreadyMined as Tx1's kernel is present @@ -245,7 +246,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(service.transaction_query(req).await.unwrap().into_message()).unwrap(); assert_eq!(resp.confirmations, 1); - assert_eq!(resp.block_hash, Some(block1.hash())); + assert_eq!(resp.best_block_hash, Some(block1.hash())); assert_eq!(resp.location, TxLocation::Mined); // try a batch query let msg = SignaturesProto {
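These wallet-RPC changes belong to the same rename sweep as the validator and chain-storage hunks earlier in this diff. A hedged summary sketch (method and field names as they appear in the hunks; the receiver types are assumed from context):

    // Sketch of the rename pattern applied throughout:
    let metadata = db.get_chain_metadata()?;
    let tip_height = metadata.best_block_height(); // was height_of_longest_chain()
    let tip_hash = metadata.best_block_hash();     // was best_block(), where updated
    // and on the wallet RPC TxQueryResponse:
    assert_eq!(resp.best_block_hash, None);        // field was resp.block_hash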
diff --git a/base_layer/core/tests/tests/block_sync.rs b/base_layer/core/tests/tests/block_sync.rs index 9011a4b276..ae22cb32b6 100644 --- a/base_layer/core/tests/tests/block_sync.rs +++ b/base_layer/core/tests/tests/block_sync.rs @@ -20,21 +20,40 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use tari_core::base_node::state_machine_service::states::StateEvent; +use tari_core::{base_node::state_machine_service::states::StateEvent, chain_storage::BlockchainDatabaseConfig}; -use crate::helpers::{sync, sync::WhatToDelete}; +use crate::helpers::{ + sync, + sync::{state_event, WhatToDelete}, +}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_block_sync_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some blocks to Bob's chain - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 5, &consensus_manager, &key_manager, &[3; 5]).await; + let (_blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 5, + &consensus_manager, + &key_manager, + &[3; 5], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 5); // Alice attempts header sync @@ -78,17 +97,26 @@ async fn test_block_sync_peer_supplies_no_blocks_with_ban() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ +
BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some blocks to Bob's chain - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); @@ -129,17 +157,26 @@ async fn test_block_sync_peer_supplies_not_all_blocks_with_ban() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some blocks to Bob's chain - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); @@ -174,3 +211,176 @@ async fn test_block_sync_peer_supplies_not_all_blocks_with_ban() { // Bob will be banned assert!(sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); } + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_block_sync_with_coinbase_spend_happy_path_1() { + //` cargo test --release --test core_integration_tests + //` tests::block_sync::test_block_sync_with_coinbase_spend_happy_path_1 > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Bob (archival node) and Carol (archival node) + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Carol is an archival node + BlockchainDatabaseConfig::default(), + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut carol_state_machine = state_machines.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 4; + let (blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + 3, + 10, // > follow_up_transaction_in_block + intermediate_height + 1 + 2, // < intermediate_height, + 5, // > intermediate_height + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 1 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[1..=10], WhatToDelete::BlocksAndHeaders, &bob_node);
assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 1); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 1. Carol attempts header sync from Bob + println!("\n1. Carol attempts header sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 1); + + // 2. Carol attempts block sync from Bob to the tip (to height 1) + println!("\n2. Carol attempts block sync from Bob to the tip (to height 1)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[2..=2], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 2); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 3. Carol attempts header sync from Bob + println!("\n3. Carol attempts header sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 2); + + // 4. Carol attempts block sync from Bob to the tip (to height 2) + println!("\n4.
Carol attempts block sync from Bob to the tip (to height 2)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_block_sync_with_coinbase_spend_happy_path_2() { + //` cargo test --release --test core_integration_tests + //` tests::block_sync::test_block_sync_with_coinbase_spend_happy_path_2 > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Bob (archival node) and Carol (archival node) + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Carol is an archival node + BlockchainDatabaseConfig::default(), + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut carol_state_machine = state_machines.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 4; + let (_blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + 3, + 10, // > follow_up_transaction_in_block + intermediate_height + 1 + 2, // < intermediate_height, + 5, // > intermediate_height + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // 1. Carol attempts header sync from Bob + println!("\n1. Carol attempts header sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 10); + + // 2. Carol attempts block sync from Bob to the tip (to height 10) + println!("\n2.
Carol attempts block sync from Bob to the tip (to height 10)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); +} diff --git a/base_layer/core/tests/tests/block_validation.rs b/base_layer/core/tests/tests/block_validation.rs index 891c1d57bd..b07c94af1e 100644 --- a/base_layer/core/tests/tests/block_validation.rs +++ b/base_layer/core/tests/tests/block_validation.rs @@ -131,19 +131,19 @@ async fn test_monero_blocks() { let mut block_1 = db.prepare_new_block(block_1_t).unwrap(); // Now we have block 1, lets add monero data to it - add_monero_data(&mut block_1, seed1); + add_monero_test_data(&mut block_1, seed1); let cb_1 = assert_block_add_result_added(&db.add_block(Arc::new(block_1)).unwrap()); // Now lets add a second faulty block using the same seed hash let (block_2_t, _) = chain_block_with_new_coinbase(&cb_1, vec![], &cm, None, &key_manager).await; let mut block_2 = db.prepare_new_block(block_2_t).unwrap(); - add_monero_data(&mut block_2, seed1); + add_monero_test_data(&mut block_2, seed1); let cb_2 = assert_block_add_result_added(&db.add_block(Arc::new(block_2)).unwrap()); // Now lets add a third faulty block using the same seed hash. This should fail. let (block_3_t, _) = chain_block_with_new_coinbase(&cb_2, vec![], &cm, None, &key_manager).await; let mut block_3 = db.prepare_new_block(block_3_t).unwrap(); let mut block_3_broken = block_3.clone(); - add_monero_data(&mut block_3_broken, seed1); + add_monero_test_data(&mut block_3_broken, seed1); match db.add_block(Arc::new(block_3_broken)) { Err(ChainStorageError::ValidationError { source: ValidationError::BlockHeaderError(BlockHeaderValidationError::OldSeedHash), @@ -171,7 +171,7 @@ async fn test_monero_blocks() { }, }; // now lets fix the seed, and try again - add_monero_data(&mut block_3, seed2); + add_monero_test_data(&mut block_3, seed2); // lets break the nonce count let hash1 = block_3.hash(); block_3.header.nonce = 1; @@ -194,14 +194,14 @@ async fn test_monero_blocks() { assert_block_add_result_added(&db.add_block(Arc::new(block_3.clone())).unwrap()); } -fn add_monero_data(tblock: &mut Block, seed_key: &str) { +fn add_monero_test_data(tblock: &mut Block, seed_key: &str) { let blocktemplate_blob = "0c0c8cd6a0fa057fe21d764e7abf004e975396a2160773b93712bf6118c3b4959ddd8ee0f76aad0000000002e1ea2701ffa5ea2701d5a299e2abb002028eb3066ced1b2cc82ea046f3716a48e9ae37144057d5fb48a97f941225a1957b2b0106225b7ec0a6544d8da39abe68d8bd82619b4a7c5bdae89c3783b256a8fa47820208f63aa86d2e857f070000" .to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut mblock = monero_rx::deserialize::(&bytes[..]).unwrap(); let hash = monero::Hash::from_slice(tblock.header.merge_mining_hash().as_slice()); - monero_rx::insert_merge_mining_tag_and_aux_chain_merkle_root_into_block(&mut mblock, hash, 1, 0).unwrap(); + monero_rx::insert_aux_chain_mr_and_info_into_block(&mut mblock, hash, 1, 0).unwrap(); let hashes = monero_rx::create_ordered_transaction_hashes_from_block(&mblock); let merkle_root = 
monero_rx::tree_hash(&hashes).unwrap(); let coinbase_merkle_proof = monero_rx::create_merkle_proof(&hashes, &hashes[0]).unwrap(); @@ -240,7 +240,7 @@ } fn add_bad_monero_data(tblock: &mut Block, seed_key: &str) { - add_monero_data(tblock, seed_key); + add_monero_test_data(tblock, seed_key); // Add some "garbage" bytes to the end of the pow_data tblock.header.pow.pow_data.extend([1u8; 100]); } @@ -1278,15 +1278,15 @@ async fn test_fee_overflow() { offset: txn.offset, body: { let mut inputs = Vec::with_capacity(txn.body.inputs().len()); - for input in txn.body.inputs().iter() { + for input in txn.body.inputs() { inputs.push(input.clone()); } let mut outputs = Vec::with_capacity(txn.body.outputs().len()); - for output in txn.body.outputs().iter() { + for output in txn.body.outputs() { outputs.push(output.clone()); } let mut kernels = Vec::with_capacity(txn.body.kernels().len()); - for kernel in txn.body.kernels().iter() { + for kernel in txn.body.kernels() { kernels.push(TransactionKernel { version: kernel.version, features: kernel.features,
diff --git a/base_layer/core/tests/tests/header_sync.rs b/base_layer/core/tests/tests/header_sync.rs index 5745f24125..c092c6e7ab 100644 --- a/base_layer/core/tests/tests/header_sync.rs +++ b/base_layer/core/tests/tests/header_sync.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use tari_core::base_node::{state_machine_service::states::StateEvent, sync::HeaderSyncStatus}; +use tari_core::{ + base_node::{state_machine_service::states::StateEvent, sync::HeaderSyncStatus}, + chain_storage::BlockchainDatabaseConfig, +}; use crate::helpers::{sync, sync::WhatToDelete}; @@ -30,12 +33,28 @@ async fn test_header_sync_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add 1 block to Bob's chain - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); // Alice attempts header sync, still on the genesis block, headers will be lagging @@ -74,8 +93,17 @@ async fn test_header_sync_happy_path() { } // Bob adds another block - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 1, &consensus_manager, &key_manager, &[3]).await; + let (_blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); // Alice
attempts header sync, still on the genesis block, headers will be lagging @@ -97,30 +125,62 @@ } } +#[ignore] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_header_sync_with_fork_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add 1 block to Bob's chain - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); // Bob adds another block - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); // Alice adds 3 (different) blocks, with POW on par with Bob's chain, but with greater height - let _alice_blocks = - sync::create_and_add_some_blocks(&alice_node, &initial_block, 3, &consensus_manager, &key_manager, &[ - 3, 2, 1, - ]) - .await; + let _alice_blocks = sync::create_and_add_some_blocks( + &alice_node, + &initial_block, + &initial_coinbase, + 3, + &consensus_manager, + &key_manager, + &[3, 2, 1], + &None, + ) + .await; assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 3); assert_eq!( alice_node @@ -148,8 +208,17 @@ async fn test_header_sync_with_fork_happy_path() { assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); // Bob adds more blocks and draws ahead of Alice - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 2, &consensus_manager, &key_manager, &[3; 2]).await; + let _blocks = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 2, + &consensus_manager, + &key_manager, + &[3; 2], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 4); // Alice attempts header sync to Bob's chain with higher POW, headers will be lagging with reorg steps @@ -176,17 +245,26 @@ async fn test_header_sync_uneven_headers_and_blocks_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(),
BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain, with more headers than blocks - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; sync::delete_some_blocks_and_headers(&blocks[5..=10], WhatToDelete::Blocks, &bob_node); @@ -224,17 +302,26 @@ async fn test_header_sync_uneven_headers_and_blocks_peer_lies_about_pow_no_ban() // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain, with more headers than blocks - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; sync::delete_some_blocks_and_headers(&blocks[5..=10], WhatToDelete::Blocks, &bob_node); @@ -287,12 +374,28 @@ async fn test_header_sync_even_headers_and_blocks_peer_lies_about_pow_with_ban() // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain - let blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 6, &consensus_manager, &key_manager, &[3; 6]).await; + let (blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 6, + &consensus_manager, + &key_manager, + &[3; 6], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 6); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 6); @@ -333,12 +436,28 @@ async fn test_header_sync_even_headers_and_blocks_peer_metadata_improve_with_reo // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), 
+ BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain - let blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 6, &consensus_manager, &key_manager, &[3; 6]).await; + let (blocks, coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 6, + &consensus_manager, + &key_manager, + &[3; 6], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 6); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 6); @@ -351,8 +470,17 @@ async fn test_header_sync_even_headers_and_blocks_peer_metadata_improve_with_reo let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); // Bob's chain will reorg with improved metadata sync::delete_some_blocks_and_headers(&blocks[4..=6], WhatToDelete::Blocks, &bob_node); - let _blocks = - sync::create_and_add_some_blocks(&bob_node, &blocks[4], 3, &consensus_manager, &key_manager, &[3; 3]).await; + let _blocks = sync::create_and_add_some_blocks( + &bob_node, + &blocks[4], + &coinbases[4], + 3, + &consensus_manager, + &key_manager, + &[3; 3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 7); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 7); let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; diff --git a/base_layer/core/tests/tests/horizon_sync.rs b/base_layer/core/tests/tests/horizon_sync.rs new file mode 100644 index 0000000000..c1be254adf --- /dev/null +++ b/base_layer/core/tests/tests/horizon_sync.rs @@ -0,0 +1,841 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
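+ +// The tests below drive pruning nodes through the horizon-sync happy path: header sync first, then +// horizon (pruned-state) sync, then block sync to the tip. After a successful horizon sync the local +// height is expected to sit one pruning horizon below the remote tip, which is the invariant asserted +// throughout this file (`node` standing in for whichever node just synced): +// assert_eq!( +// node.blockchain_db.get_height().unwrap(), +// node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon +// );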
+ +use std::cmp::min; + +use tari_core::{ + base_node::state_machine_service::states::{HorizonStateSync, StateEvent}, + chain_storage::BlockchainDatabaseConfig, +}; + +use crate::helpers::{ + sync, + sync::{decide_horizon_sync, state_event, WhatToDelete}, +}; + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_initial_horizon_sync_from_archival_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_initial_horizon_sync_from_archival_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) + let pruning_horizon = 5; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 15; + let (blocks, coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + pruning_horizon, + 30, // > follow_up_transaction_in_block + pruning_horizon + 1 + 3, // < pruning_horizon + 16, // > pruning_horizon + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 10 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[10..=30], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 10); + + // 1. Alice attempts horizon sync without having done header sync + println!("\n1. Alice attempts horizon sync without having done header sync\n"); + + let mut horizon_sync = sync::initialize_horizon_sync_without_header_sync(&bob_node); + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 0); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Alice does header sync (to height 10) + println!("\n2. Alice does header sync (to height 10)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 10); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 3. Alice attempts horizon sync after header sync (to height 5; includes genesis block UTXO spend) + println!("\n3. 
Alice attempts horizon sync after header sync (to height 5; includes genesis block UTXO spend)\n"); + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync.clone()).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts horizon sync again without any change in the blockchain + println!("\n4. Alice attempts horizon sync again without any change in the blockchain\n"); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("4. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 5. Alice attempts block sync to the tip (to height 10) + println!("\n5. Alice attempts block sync to the tip (to height 10)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks (containing the block with the spend transaction at height 16) + sync::add_some_existing_blocks(&blocks[11..=25], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 25); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 25); + + // 6. 
Alice does header sync to the new height (to height 25) + println!("\n6. Alice does header sync to the new height (to height 25)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 25); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 7. Alice attempts horizon sync to the new pruning height (to height 20 - STXOs should be pruned). Outputs created + // after height 10 and spent up to height 20 with corresponding inputs should not be streamed; we do not have a way + // to verify this except by looking at the detailed log files. + println!("\n7. Alice attempts horizon sync to the new pruning height (to height 20 - STXOs should be pruned)\n"); + let spent_coinbases = coinbases + .iter() + .skip(1) + .take(10) // To current height + .collect::<Vec<_>>(); + for output in &spent_coinbases { + let output_hash = output.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = output.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_some()); + } + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("7. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + for output in &spent_coinbases { + let output_hash = output.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + let commitment = output.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + } + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[26..=30], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 30); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 30); + + // 8. Alice does header sync to the new height (to height 30) + println!("\n8. Alice does header sync to the new height (to height 30)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 30); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 9. Alice attempts horizon sync to the new pruning height (to height 25) + println!("\n9. 
Alice attempts horizon sync to the new pruning height (to height 25)\n"); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("9. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_consecutive_horizon_sync_from_prune_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_consecutive_horizon_sync_from_prune_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) and Carol (pruning node) + let pruning_horizon_alice = 4; + let pruning_horizon_carol = 12; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Alice is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_alice, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Carol is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_carol, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let mut carol_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 5; + let (blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + min(pruning_horizon_alice, pruning_horizon_carol), + 28, // > follow_up_transaction_in_block + pruning_horizon_carol + 1 + 2, // < pruning_horizon_alice, < pruning_horizon_carol + 14, // > pruning_horizon_alice, > pruning_horizon_carol + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 8 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[8..=28], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 8); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 8); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 1. 
Alice attempts initial horizon sync from Bob (to pruning height 4; includes genesis block UTXO spend) + println!( + "\n1. Alice attempts initial horizon sync from Bob (to pruning height 4; includes genesis block UTXO spend)\n" + ); + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob.clone()).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 8); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("1. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Carol attempts initial horizon sync from Bob with inadequate height + println!("\n2. Carol attempts initial horizon sync from Bob with inadequate height\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 8); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + match event { + StateEvent::ProceedToBlockSync(_) => println!("Carol chose `ProceedToBlockSync` instead"), + _ => panic!("2. Carol should not choose '{:?}'", event), + } + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[9..=13], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 13); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 13); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 3. Alice attempts horizon sync from Bob (to pruning height 9) + println!("\n3. 
Alice attempts horizon sync from Bob (to pruning height 9)\n"); + + let mut header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 13); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts block sync from Bob to the tip (to height 13) + println!("\n4. Alice attempts block sync from Bob to the tip (to height 13)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 5. Carol attempts initial horizon sync from Alice with adequate height (but Alice is not an archival node) + println!( + "\n5. Carol attempts initial horizon sync from Alice with adequate height (but Alice is not an archival \ + node)\n" + ); + + let mut header_sync_carol_from_alice = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &alice_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_alice).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 13); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_alice).await; + match event { + StateEvent::Continue => println!("Carol chose `Continue` instead"), + _ => panic!("5. Carol should not choose '{:?}'", event), + } + // Alice will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, alice_node.node_identity.node_id(), 1).await); + + // 6. Carol attempts initial horizon sync from Bob with adequate height (to pruning height 1) + println!("\n6. 
Carol attempts initial horizon sync from Bob with adequate height (to height 1)\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 13); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("6. Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[14..=18], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 18); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 18); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 7. Alice attempts horizon sync from Bob (to pruning height 14) + println!("\n7. Alice attempts horizon sync from Bob (to pruning height 14)\n"); + + let mut header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 18); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("7. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 8. Alice attempts block sync from Bob to the tip (to height 18) + println!("\n8. 
Alice attempts block sync from Bob to the tip (to height 18)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 9. Carol attempts horizon sync from Alice with inadequate pruning horizon (to height 6) + println!("\n9. Carol attempts horizon sync from Alice with inadequate pruning horizon (to height 6)\n"); + + let mut header_sync_carol_from_alice = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &alice_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_alice).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 18); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_alice).await; + match event { + StateEvent::Continue => println!("Carol chose `Continue` instead"), + _ => panic!("9. Carol should not choose '{:?}'", event), + } + // Alice will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, alice_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[14..=22], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 22); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 22); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 10. Carol attempts horizon sync from Bob (to pruning height 10) + println!("\n10. Carol attempts horizon sync from Bob (to pruning height 10)\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 22); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("10. Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 11. Carol attempts block sync from Bob to the tip (to height 22) + println!("\n11. 
Carol attempts block sync from Bob to the tip (to height 22)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 12. Alice attempts horizon sync from Carol with adequate pruning horizon (to height 18) + println!("\n12. Alice attempts horizon sync from Carol with adequate pruning horizon (to height 18)\n"); + + let mut header_sync_alice_from_carol = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &carol_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_carol).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 22); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_carol).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("12. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_initial_horizon_sync_from_prune_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_initial_horizon_sync_from_prune_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) and Carol (pruning node) + let pruning_horizon_alice = 4; + let pruning_horizon_carol = 12; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Alice is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_alice, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Carol is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_carol, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let mut carol_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = 
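+ // Bob is the archival node that seeds the test blockchain below.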
peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 5; + let (_blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + min(pruning_horizon_alice, pruning_horizon_carol), + 28, // > follow_up_transaction_in_block + pruning_horizon_carol + 1 + 2, // < pruning_horizon_alice, < pruning_horizon_carol + 14, // > pruning_horizon_alice, > pruning_horizon_carol + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // 1. Carol attempts initial horizon sync from Bob archival node (to pruning height 16) + println!("\n1. Carol attempts initial horizon sync from Bob archival node (to pruning height 16)\n"); + + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(carol_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(carol_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 28); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("1. Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + + assert!(carol_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(carol_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_none()); + + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Carol attempts block sync from Bob to the tip (to height 28) + println!("\n2. Carol attempts block sync from Bob to the tip (to height 28)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 3. Alice attempts initial horizon sync from Carol prune node (to height 24) + println!("\n3. 
Alice attempts initial horizon sync from Carol prune node (to height 24)\n"); + + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let mut header_sync_alice_from_carol = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &carol_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_carol).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 28); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_carol).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_none()); + + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts block sync from Carol prune node to the tip (to height 28) + println!("\n4. 
Alice attempts block sync from Carol prune node to the tip (to height 28)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&carol_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); +} diff --git a/base_layer/core/tests/tests/mempool.rs b/base_layer/core/tests/tests/mempool.rs index 0bb1d7a6d3..9feb7c180f 100644 --- a/base_layer/core/tests/tests/mempool.rs +++ b/base_layer/core/tests/tests/mempool.rs @@ -28,6 +28,7 @@ use tari_common_types::types::{Commitment, PrivateKey, PublicKey, Signature}; use tari_comms_dht::domain_message::OutboundDomainMessage; use tari_core::{ base_node::state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager}, mempool::{Mempool, MempoolConfig, MempoolServiceConfig, TxStorageResponse}, proof_of_work::Difficulty, @@ -87,7 +88,7 @@ use crate::helpers::{ generate_block, generate_new_block, }, - nodes::{create_network_with_2_base_nodes_with_config, create_network_with_3_base_nodes_with_config}, + nodes::create_network_with_multiple_base_nodes_with_config, sample_blockchains::{create_new_blockchain, create_new_blockchain_with_constants}, }; @@ -1035,16 +1036,14 @@ async fn test_reorg() { mempool.process_reorg(vec![], vec![reorg_block4.into()]).await.unwrap(); } -static EMISSION: [u64; 2] = [10, 10]; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(clippy::too_many_lines)] #[allow(clippy::identity_op)] async fn receive_and_propagate_transaction() { let temp_dir = tempdir().unwrap(); let network = Network::LocalNet; - let consensus_constants = ConsensusConstantsBuilder::new(network) + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network) .with_coinbase_lockheight(100) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) .build(); let key_manager = create_memory_db_key_manager(); let (block0, utxo) = create_genesis_block(&consensus_constants, &key_manager).await; @@ -1053,14 +1052,21 @@ async fn receive_and_propagate_transaction() { .with_block(block0) .build() .unwrap(); - let (mut alice_node, mut bob_node, mut carol_node, _consensus_manager) = - create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - temp_dir.path().to_str().unwrap(), - ) - .await; + + let (mut node_interfaces, _consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 3], + vec![LivenessConfig::default(); 3], + vec![BlockchainDatabaseConfig::default(); 3], + vec![P2pConfig::default(); 3], + consensus_manager, + temp_dir.path().to_str().unwrap(), + network, + ) + .await; + let mut alice_node = node_interfaces.remove(0); + let mut bob_node = node_interfaces.remove(0); + let mut carol_node = node_interfaces.remove(0); + alice_node.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, state_info: StateInfo::Listening(ListeningInfo::new(true)), @@ -1163,10 +1169,8 @@ async fn 
receive_and_propagate_transaction() { #[allow(clippy::too_many_lines)] async fn consensus_validation_large_tx() { let network = Network::LocalNet; - // We dont want to compute the 19500 limit of local net, so we create smaller blocks - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .with_coinbase_lockheight(1) + // We don't want to compute the 19500 limit of local net, so we create smaller blocks + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network) .with_max_block_transaction_weight(500) .build(); let (mut store, mut blocks, mut outputs, consensus_manager, key_manager) = @@ -1349,9 +1353,7 @@ async fn consensus_validation_large_tx() { async fn validation_reject_min_fee() { let network = Network::LocalNet; // We dont want to compute the 19500 limit of local net, so we create smaller blocks - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .with_coinbase_lockheight(1) + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network) .with_max_block_transaction_weight(500) .build(); let (mut store, mut blocks, mut outputs, consensus_manager, key_manager) = @@ -1722,14 +1724,20 @@ async fn block_event_and_reorg_event_handling() { .with_block(block0.clone()) .build() .unwrap(); - let (mut alice, mut bob, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - P2pConfig::default(), + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 2], + vec![LivenessConfig::default(); 2], + vec![BlockchainDatabaseConfig::default(); 2], + vec![P2pConfig::default(); 2], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let mut alice = node_interfaces.remove(0); + let mut bob = node_interfaces.remove(0); + alice.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, state_info: StateInfo::Listening(ListeningInfo::new(true)), diff --git a/base_layer/core/tests/tests/mod.rs b/base_layer/core/tests/tests/mod.rs index e36b646680..5e3ade249b 100644 --- a/base_layer/core/tests/tests/mod.rs +++ b/base_layer/core/tests/tests/mod.rs @@ -27,6 +27,7 @@ mod base_node_rpc; mod block_sync; mod block_validation; mod header_sync; +mod horizon_sync; mod mempool; mod node_comms_interface; mod node_service; diff --git a/base_layer/core/tests/tests/node_comms_interface.rs b/base_layer/core/tests/tests/node_comms_interface.rs index 5f02572356..4480cfce56 100644 --- a/base_layer/core/tests/tests/node_comms_interface.rs +++ b/base_layer/core/tests/tests/node_comms_interface.rs @@ -100,8 +100,8 @@ async fn inbound_get_metadata() { if let Ok(NodeCommsResponse::ChainMetadata(received_metadata)) = inbound_nch.handle_request(NodeCommsRequest::GetChainMetadata).await { - assert_eq!(received_metadata.height_of_longest_chain(), 0); - assert_eq!(received_metadata.best_block(), &block.hash()); + assert_eq!(received_metadata.best_block_height(), 0); + assert_eq!(received_metadata.best_block_hash(), &block.hash()); assert_eq!(received_metadata.pruning_horizon(), 0); } else { panic!(); @@ -464,7 +464,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { randomx_factory, ); - let block1 = append_block( + let (block1, _) = append_block( &store, &block0, vec![], @@ -474,7 +474,7 @@ async fn 
inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block2 = append_block( + let (block2, _) = append_block( &store, &block1, vec![], @@ -484,7 +484,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block3 = append_block( + let (block3, _) = append_block( &store, &block2, vec![], @@ -494,7 +494,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block4 = append_block( + let (block4, _) = append_block( &store, &block3, vec![], @@ -504,7 +504,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let _block5 = append_block( + let (_block5, _) = append_block( &store, &block4, vec![], diff --git a/base_layer/core/tests/tests/node_service.rs b/base_layer/core/tests/tests/node_service.rs index 9877d99b44..cc36517d53 100644 --- a/base_layer/core/tests/tests/node_service.rs +++ b/base_layer/core/tests/tests/node_service.rs @@ -31,7 +31,8 @@ use tari_core::{ state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, }, blocks::{ChainBlock, NewBlock}, - consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, + chain_storage::BlockchainDatabaseConfig, + consensus::{ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, mempool::TxStorageResponse, proof_of_work::{randomx_factory::RandomXFactory, Difficulty, PowAlgorithm}, transactions::{ @@ -87,9 +88,7 @@ async fn propagate_and_forward_many_valid_blocks() { let carol_node_identity = random_node_identity(); let dan_node_identity = random_node_identity(); let network = Network::LocalNet; - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network).build(); let (block0, outputs) = create_genesis_block_with_utxos(&[T, T], &consensus_constants, &key_manager).await; let (tx01, _tx01_out) = spend_utxos( @@ -104,25 +103,37 @@ async fn propagate_and_forward_many_valid_blocks() { let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![alice_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity.clone()) .with_peers(vec![bob_node_identity.clone()]) .with_consensus_manager(rules) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut dan_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(dan_node_identity) .with_peers(vec![carol_node_identity, bob_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("dan").to_str().unwrap()) + .start( + temp_dir.path().join("dan").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; wait_until_online(&[&alice_node, &bob_node, &carol_node, 
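// All four nodes must be online before block propagation is exercised.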
&dan_node]).await; @@ -166,7 +177,8 @@ async fn propagate_and_forward_many_valid_blocks() { &key_manager, ) .await - .unwrap(), + .unwrap() + .0, ); blocks .extend(construct_chained_blocks(&alice_node.blockchain_db, blocks[0].clone(), &rules, 5, &key_manager).await); @@ -208,8 +220,8 @@ async fn propagate_and_forward_many_valid_blocks() { dan_node.shutdown().await; } -static EMISSION: [u64; 2] = [10, 10]; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] +#[allow(clippy::too_many_lines)] async fn propagate_and_forward_invalid_block_hash() { // Alice will propagate a "made up" block hash to Bob, Bob will request the block from Alice. Alice will not be able // to provide the block and so Bob will not propagate the hash further to Carol. @@ -222,9 +234,7 @@ async fn propagate_and_forward_invalid_block_hash() { let carol_node_identity = random_node_identity(); let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network).build(); let (block0, genesis_coinbase) = create_genesis_block(&consensus_constants, &key_manager).await; let rules = ConsensusManager::builder(network) .add_consensus_constants(consensus_constants) @@ -234,19 +244,28 @@ async fn propagate_and_forward_invalid_block_hash() { let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![alice_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity) .with_peers(vec![bob_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; wait_until_online(&[&alice_node, &bob_node, &carol_node]).await; @@ -276,7 +295,7 @@ async fn propagate_and_forward_invalid_block_hash() { ) .await; let txs = txs.into_iter().map(|tx| (*tx).clone()).collect(); - let block1 = append_block( + let (block1, _) = append_block( &alice_node.blockchain_db, &block0, txs, @@ -346,9 +365,7 @@ async fn propagate_and_forward_invalid_block() { let dan_node_identity = random_node_identity(); let key_manager = create_memory_db_key_manager(); let network = Network::LocalNet; - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network).build(); let (block0, _) = create_genesis_block(&consensus_constants, &key_manager).await; let rules = ConsensusManager::builder(network) .add_consensus_constants(consensus_constants) @@ -361,7 +378,10 @@ async fn propagate_and_forward_invalid_block() { let (mut dan_node, rules) = 
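// Dan is brought up first; Bob lists him as a peer below.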
BaseNodeBuilder::new(network.into()) .with_node_identity(dan_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("dan").to_str().unwrap()) + .start( + temp_dir.path().join("dan").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity.clone()) @@ -372,20 +392,29 @@ async fn propagate_and_forward_invalid_block() { mock_validator.clone(), stateless_block_validator.clone(), ) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![dan_node_identity]) .with_consensus_manager(rules) .with_validators(mock_validator.clone(), mock_validator, stateless_block_validator) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity) .with_peers(vec![bob_node_identity, carol_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; alice_node @@ -423,7 +452,7 @@ async fn propagate_and_forward_invalid_block() { // This is a valid block, however Bob, Carol and Dan's block validator is set to always reject the block // after fetching it. - let block1 = append_block( + let (block1, _) = append_block( &alice_node.blockchain_db, &block0, vec![], @@ -485,20 +514,20 @@ async fn local_get_metadata() { let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); let (mut node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let db = &node.blockchain_db; let block0 = db.fetch_block(0, true).unwrap().try_into_chain_block().unwrap(); - let block1 = append_block(db, &block0, vec![], &consensus_manager, Difficulty::min(), &key_manager) + let (block1, _) = append_block(db, &block0, vec![], &consensus_manager, Difficulty::min(), &key_manager) .await .unwrap(); - let block2 = append_block(db, &block1, vec![], &consensus_manager, Difficulty::min(), &key_manager) + let (block2, _) = append_block(db, &block1, vec![], &consensus_manager, Difficulty::min(), &key_manager) .await .unwrap(); let metadata = node.local_nci.get_metadata().await.unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 2); - assert_eq!(metadata.best_block(), block2.hash()); + assert_eq!(metadata.best_block_height(), 2); + assert_eq!(metadata.best_block_hash(), block2.hash()); node.shutdown().await; } @@ -517,7 +546,7 @@ async fn local_get_new_block_template_and_get_new_block() { .unwrap(); let (mut node, _rules) = BaseNodeBuilder::new(network.into()) .with_consensus_manager(rules) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let schema = [ @@ -566,7 +595,7 @@ async fn local_get_new_block_with_zero_conf() { HeaderFullValidator::new(rules.clone(), difficulty_calculator), BlockBodyInternalConsistencyValidator::new(rules, true, factories.clone()), ) - 
.start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let (tx01, tx01_out) = spend_utxos( @@ -652,7 +681,7 @@ async fn local_get_new_block_with_combined_transaction() { HeaderFullValidator::new(rules.clone(), difficulty_calculator), BlockBodyInternalConsistencyValidator::new(rules, true, factories.clone()), ) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let (tx01, tx01_out) = spend_utxos( @@ -718,7 +747,7 @@ async fn local_submit_block() { let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); let (mut node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let db = &node.blockchain_db; diff --git a/base_layer/core/tests/tests/node_state_machine.rs b/base_layer/core/tests/tests/node_state_machine.rs index 55e68c79de..8c673b02f4 100644 --- a/base_layer/core/tests/tests/node_state_machine.rs +++ b/base_layer/core/tests/tests/node_state_machine.rs @@ -36,7 +36,8 @@ use tari_core::{ }, SyncValidators, }, - consensus::{ConsensusConstantsBuilder, ConsensusManagerBuilder}, + chain_storage::BlockchainDatabaseConfig, + consensus::ConsensusManagerBuilder, mempool::MempoolServiceConfig, proof_of_work::{randomx_factory::RandomXFactory, Difficulty}, test_helpers::blockchain::create_test_blockchain_db, @@ -58,40 +59,45 @@ use crate::helpers::{ block_builders::{append_block, chain_block, create_genesis_block}, chain_metadata::MockChainMetadata, nodes::{ - create_network_with_2_base_nodes_with_config, - create_network_with_3_base_nodes_with_config, + create_network_with_multiple_base_nodes_with_config, random_node_identity, wait_until_online, BaseNodeBuilder, }, }; -static EMISSION: [u64; 2] = [10, 10]; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_listening_lagging() { let network = Network::LocalNet; let temp_dir = tempdir().unwrap(); let key_manager = create_memory_db_key_manager(); - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network).build(); let (prev_block, _) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants) .with_block(prev_block.clone()) .build() .unwrap(); - let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, - P2pConfig::default(), + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 2], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + 2 + ], + vec![BlockchainDatabaseConfig::default(); 2], + vec![P2pConfig::default(); 2], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let alice_node = node_interfaces.remove(0); + let bob_node = node_interfaces.remove(0); + let shutdown = Shutdown::new(); let (state_change_event_publisher, _) = broadcast::channel(10); let (status_event_sender, 
_status_event_receiver) = watch::channel(StatusInfo::new()); @@ -117,7 +123,7 @@ async fn test_listening_lagging() { let mut bob_local_nci = bob_node.local_nci; // Bob Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &bob_db, &prev_block, vec![], @@ -143,37 +149,47 @@ async fn test_listening_lagging() { assert!(matches!(next_event, StateEvent::FallenBehind(_))); } +#[allow(clippy::too_many_lines)] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_listening_initial_fallen_behind() { let network = Network::LocalNet; let temp_dir = tempdir().unwrap(); let key_manager = create_memory_db_key_manager(); - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); + let consensus_constants = crate::helpers::sample_blockchains::consensus_constants(network).build(); let (gen_block, _) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants) .with_block(gen_block.clone()) .build() .unwrap(); - let (alice_node, bob_node, charlie_node, consensus_manager) = create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 3], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + 3 + ], + vec![BlockchainDatabaseConfig::default(); 3], + vec![P2pConfig::default(); 3], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let alice_node = node_interfaces.remove(0); + let bob_node = node_interfaces.remove(0); + let charlie_node = node_interfaces.remove(0); + let shutdown = Shutdown::new(); let bob_db = bob_node.blockchain_db; let mut bob_local_nci = bob_node.local_nci; // Bob Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &bob_db, &gen_block, vec![], @@ -196,7 +212,7 @@ async fn test_listening_initial_fallen_behind() { let mut charlie_local_nci = charlie_node.local_nci; // charlie Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &charlie_db, &gen_block, vec![], @@ -232,6 +248,7 @@ async fn test_listening_initial_fallen_behind() { shutdown.to_signal(), ); + assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 0); let await_event_task = task::spawn(async move { Listening::new().next_event(&mut alice_state_machine).await }); let next_event = time::timeout(Duration::from_secs(10), await_event_task) @@ -256,7 +273,7 @@ async fn test_listening_initial_fallen_behind() { async fn test_event_channel() { let temp_dir = tempdir().unwrap(); let (node, consensus_manager) = BaseNodeBuilder::new(Network::Esmeralda.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; // let shutdown = Shutdown::new(); let db = create_test_blockchain_db(); @@ -283,7 +300,7 @@ async fn test_event_channel() { let node_identity = random_node_identity(); let block_hash = Blake2b::::digest(node_identity.node_id().as_bytes()).into(); - let metadata = ChainMetadata::new(10, block_hash, 2800, 0, 5000.into(), 0); + let metadata = ChainMetadata::new(10, 
block_hash, 2800, 0, 5000.into(), 0).unwrap(); node.comms .peer_manager() diff --git a/base_layer/key_manager/Cargo.toml b/base_layer/key_manager/Cargo.toml index ac7e8b8639..a3cc41b077 100644 --- a/base_layer/key_manager/Cargo.toml +++ b/base_layer/key_manager/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet key management" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2021" [lib] @@ -13,14 +13,14 @@ crate-type = ["lib", "cdylib"] [dependencies] tari_crypto = { version = "0.20" } tari_utilities = { version = "0.7" } -tari_common_sqlite = { path = "../../common_sqlite" } -tari_common_types = { path = "../../base_layer/common_types"} -tari_service_framework = { path = "../service_framework" } +tari_common_sqlite = { path = "../../common_sqlite", version = "1.0.0-pre.11a" } +tari_common_types = { path = "../../base_layer/common_types", version = "1.0.0-pre.11a"} +tari_service_framework = { path = "../service_framework", version = "1.0.0-pre.11a" } async-trait = {version = "0.1.50"} chrono = { version = "0.4.19", default-features = false, features = ["serde"] } chacha20poly1305 = "0.10.1" -tokio = { version = "1.23", features = ["sync", "macros"]} +tokio = { version = "1.36", features = ["sync", "macros"]} futures = { version = "^0.3.1", features = ["compat", "std"] } log = {version = "0.4.6"} diesel = { version = "2.0.3", features = ["sqlite", "serde_json", "chrono", "64-column-tables"]} diff --git a/base_layer/key_manager/src/cipher_seed.rs b/base_layer/key_manager/src/cipher_seed.rs index 0b5f7a63fa..acae2f54c1 100644 --- a/base_layer/key_manager/src/cipher_seed.rs +++ b/base_layer/key_manager/src/cipher_seed.rs @@ -283,16 +283,16 @@ impl CipherSeed { let expected_mac = Self::generate_mac(&birthday_bytes, entropy.reveal(), version, salt.as_ref(), &mac_key)?; // Verify the MAC in constant time to avoid leaking data - if mac.ct_eq(&expected_mac).unwrap_u8() == 0 { - return Err(KeyManagerError::DecryptionFailed); + if mac.ct_eq(&expected_mac).into() { + Ok(Self { + version, + birthday, + entropy: Box::from(*entropy.reveal()), + salt, + }) + } else { + Err(KeyManagerError::DecryptionFailed) } - - Ok(Self { - version, - birthday, - entropy: Box::from(*entropy.reveal()), - salt, - }) } /// Encrypt or decrypt data using ChaCha20 diff --git a/base_layer/key_manager/src/key_manager.rs b/base_layer/key_manager/src/key_manager.rs index eea68718c6..0bac8d5995 100644 --- a/base_layer/key_manager/src/key_manager.rs +++ b/base_layer/key_manager/src/key_manager.rs @@ -104,7 +104,7 @@ where // input let derive_key = mac_domain_hasher::(LABEL_DERIVE_KEY) .chain(self.seed.entropy()) - .chain(self.branch_seed.as_str().as_bytes()) + .chain(self.branch_seed.as_bytes()) .chain(key_index.to_le_bytes()) .finalize(); diff --git a/base_layer/key_manager/src/lib.rs b/base_layer/key_manager/src/lib.rs index fc09da6a72..6118226c6e 100644 --- a/base_layer/key_manager/src/lib.rs +++ b/base_layer/key_manager/src/lib.rs @@ -139,7 +139,7 @@ mod tests { Hidden::hide("olá".to_string()), ]); - let vec_words = vec![ + let vec_words = [ "hi".to_string(), "niao".to_string(), "hola".to_string(), diff --git a/base_layer/mmr/Cargo.toml b/base_layer/mmr/Cargo.toml index bf29922595..94bd83e0ab 100644 --- a/base_layer/mmr/Cargo.toml +++ b/base_layer/mmr/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "A Merkle Mountain Range implementation" 
repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [features] @@ -13,7 +13,7 @@ default = [] [dependencies] tari_utilities = { version = "0.7" } tari_crypto = { version = "0.20" } -tari_common = { path = "../../common" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } thiserror = "1.0" borsh = "1.2" digest = "0.10" diff --git a/base_layer/mmr/src/backend.rs b/base_layer/mmr/src/backend.rs index 9d215743a2..69235daf01 100644 --- a/base_layer/mmr/src/backend.rs +++ b/base_layer/mmr/src/backend.rs @@ -83,7 +83,7 @@ impl ArrayLike for Vec { } fn get(&self, index: usize) -> Result, Self::Error> { - Ok((self as &[Self::Value]).get(index).map(Clone::clone)) + Ok((self as &[Self::Value]).get(index).cloned()) } fn clear(&mut self) -> Result<(), Self::Error> { diff --git a/base_layer/mmr/src/balanced_binary_merkle_proof.rs b/base_layer/mmr/src/balanced_binary_merkle_proof.rs index 23fe5d8522..eb8ee789ce 100644 --- a/base_layer/mmr/src/balanced_binary_merkle_proof.rs +++ b/base_layer/mmr/src/balanced_binary_merkle_proof.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{collections::HashMap, convert::TryFrom, marker::PhantomData}; +use std::{ + collections::{HashMap, HashSet}, + convert::TryFrom, + marker::PhantomData, +}; use borsh::{BorshDeserialize, BorshSerialize}; use digest::Digest; @@ -198,22 +202,38 @@ where D: Digest + DomainDigest .iter() .max() .ok_or(BalancedBinaryMerkleProofError::CantMergeZeroProofs)?; - + let mut consumed = HashSet::new(); // We need to compute the hashes row by row to be sure they are processed correctly. for height in (0..max_height).rev() { let hashes = computed_hashes.clone(); - for (index, leaf) in computed_hashes.iter_mut().enumerate() { + let mut dangling_paths = HashSet::new(); + for (index, leaf) in computed_hashes.iter_mut().enumerate().rev() { if self.heights[index] <= height { continue; } let Some(hash_or_index) = self.paths[index].pop() else { + // Check if we already joined with other path. + if !consumed.contains(&index) { + // If the path ended, it's going to be merged to some other path. + if !dangling_paths.insert(index) { + return Err(BalancedBinaryMerkleProofError::BadProofSemantics); + } + } // Path at this index already completely processed continue; }; let hash = match hash_or_index { MergedBalancedBinaryMerkleIndexOrHash::Index(index) => { + if !dangling_paths + .remove(&usize::try_from(index).map_err(|_| BalancedBinaryMerkleProofError::MathOverflow)?) + { + // If some path is joining our path, that path should have ended. + return Err(BalancedBinaryMerkleProofError::BadProofSemantics); + } + consumed + .insert(usize::try_from(index).map_err(|_| BalancedBinaryMerkleProofError::MathOverflow)?); let index = usize::try_from(index).map_err(|_| BalancedBinaryMerkleProofError::MathOverflow)?; // The index must also point to one of the proofs @@ -235,6 +255,14 @@ where D: Digest + DomainDigest // Parent self.node_indices[index] = (self.node_indices[index] - 1) >> 1; } + if !dangling_paths.is_empty() { + // Something path ended, but it's not joined with any other path. + return Err(BalancedBinaryMerkleProofError::BadProofSemantics); + } + } + if consumed.len() + 1 < self.paths.len() { + // If the proof is valid then all but one paths will be consumed by other paths. 
+ return Err(BalancedBinaryMerkleProofError::BadProofSemantics); } Ok(computed_hashes[0] == *root) } @@ -292,7 +320,8 @@ mod test { heights: vec![1], _phantom: PhantomData, }; - assert!(!proof.verify_consume(&vec![0u8; 32], vec![vec![]]).unwrap()); + // This will fail because the node height is 1 and it's empty, so it's not going to compute the root hash. + proof.verify_consume(&vec![0u8; 32], vec![vec![]]).unwrap_err(); } #[test] @@ -334,10 +363,10 @@ mod test { #[test] fn test_merge_proof_full_tree() { - let leaves = (0..255).map(|i| vec![i; 32]).collect::>(); + let leaves = (0..=255).map(|i| vec![i; 32]).collect::>(); let bmt = BalancedBinaryMerkleTree::::create(leaves.clone()); let root = bmt.get_merkle_root(); - let proofs = (0..255) + let proofs = (0..=255) .map(|i| BalancedBinaryMerkleProof::generate_proof(&bmt, i)) .collect::, _>>() .unwrap(); @@ -382,11 +411,23 @@ mod test { heights: vec![0, 0], _phantom: PhantomData, }; - // This should fail but does not - // proof .verify_consume(&vec![5u8; 32], vec![vec![5u8; 32], vec![2u8; 32]]) .unwrap_err(); - assert!(proof + // This will fail because there are more hashes on this level than there can be. + proof .verify_consume(&vec![5u8; 32], vec![vec![5u8; 32], vec![2u8; 32]]) - .unwrap()); + .unwrap_err(); + + let proof = MergedBalancedBinaryMerkleProof:: { + paths: vec![vec![MergedBalancedBinaryMerkleIndexOrHash::Hash(vec![5u8; 32])], vec![ + MergedBalancedBinaryMerkleIndexOrHash::Index(1), + ]], + node_indices: vec![1, 1], + heights: vec![0, 1], + _phantom: PhantomData, + }; + // This will fail because we can't have any more nodes if we have a leaf at the root. + proof + .verify_consume(&vec![5u8; 32], vec![vec![5u8; 32], vec![2u8; 32]]) + .unwrap_err(); } #[test] diff --git a/base_layer/mmr/src/sparse_merkle_tree/bit_utils.rs b/base_layer/mmr/src/sparse_merkle_tree/bit_utils.rs index 4471bd3214..d5b8bbc5ff 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/bit_utils.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/bit_utils.rs @@ -98,8 +98,6 @@ pub fn traverse_direction( #[cfg(test)] mod test { - use std::convert::TryFrom; - use super::*; use crate::sparse_merkle_tree::{bit_utils::count_common_prefix, NodeKey}; @@ -120,9 +118,9 @@ mod test { #[test] fn traverse_directions() { - let parent_key = NodeKey::try_from(b"\xffbcdefgh12345678abcdefgh12345678").unwrap(); + let parent_key = NodeKey::from(b"\xffbcdefgh12345678abcdefgh12345678"); // 10111111 in hex is 0xBF - let child_key = NodeKey::try_from(b"\xBFbcdefgh12345678abcdefgh12345678").unwrap(); + let child_key = NodeKey::from(b"\xBFbcdefgh12345678abcdefgh12345678"); assert_eq!( traverse_direction(0, &parent_key, &child_key).unwrap(), TraverseDirection::Right ); @@ -140,7 +138,7 @@ }); assert_eq!(err, expected_err); // 11011111 in hex is 0xDF - let child_key = NodeKey::try_from(b"\xDFbcdefgh12345678abcdefgh12345678").unwrap(); + let child_key = NodeKey::from(b"\xDFbcdefgh12345678abcdefgh12345678"); assert_eq!( traverse_direction(0, &parent_key, &child_key).unwrap(), TraverseDirection::Right ); @@ -156,8 +154,8 @@ TraverseDirection::Left ); - let parent_key = NodeKey::try_from(b"abcdefgh\x082345678abcdefgh12345678").unwrap(); - let child_key = NodeKey::try_from(b"abcdefgh\x0A2345678abcdefgh12345678").unwrap(); + let parent_key = NodeKey::from(b"abcdefgh\x082345678abcdefgh12345678"); + let child_key = NodeKey::from(b"abcdefgh\x0A2345678abcdefgh12345678"); // 0x8 in binary is 00001000 // 0xA in binary is 00001010 // matches to 8*8 + 5 places, next is a 0, so is a left child
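The traversal convention these tests exercise is worth spelling out: at height `h`, bit `h` of the child key (counting from the most significant bit of the first byte) selects the branch, with 0 meaning left and 1 meaning right. A minimal, self-contained sketch of that convention, reusing the 32-byte keys from the test above (`bit_at` is an illustrative helper, not the mmr crate's API):

```rust
// Extract bit `h` of a 32-byte key, where bit 0 is the most significant
// bit of the first byte. This mirrors the bit convention the tests above
// rely on; `bit_at` is a hypothetical helper, not part of the crate.
fn bit_at(key: &[u8; 32], h: usize) -> u8 {
    (key[h / 8] >> (7 - (h % 8))) & 1
}

fn main() {
    // The parent in the test sits at height 8*8 + 5 = 69, so bit 69 of
    // each child key decides the traversal direction.
    let left_child: &[u8; 32] = b"abcdefgh\x0A2345678abcdefgh12345678";
    let right_child: &[u8; 32] = b"abcdefgh\x0C2345678abcdefgh12345678";
    // 0x0A = 0000_1010: bit 69 is 0, so traversal goes left.
    assert_eq!(bit_at(left_child, 69), 0);
    // 0x0C = 0000_1100: bit 69 is 1, so traversal goes right.
    assert_eq!(bit_at(right_child, 69), 1);
}
```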
@@ -167,7 +165,7 @@ mod test { ); // 0xC in binary is 00001100 // matches to 8*8 + 5 places, next is a 1, so is a right child - let child_key = NodeKey::try_from(b"abcdefgh\x0C2345678abcdefgh12345678").unwrap(); + let child_key = NodeKey::from(b"abcdefgh\x0C2345678abcdefgh12345678"); assert_eq!( traverse_direction(69, &parent_key, &child_key).unwrap(), TraverseDirection::Right @@ -185,7 +183,7 @@ mod test { #[test] fn height_keys() { - let key = NodeKey::try_from(b"abcdefgh12345678abcdefgh12345678").unwrap(); + let key = NodeKey::from(b"abcdefgh12345678abcdefgh12345678"); let hkey = height_key(&key, 0); assert_eq!(hkey.as_slice(), &[0u8; 32]); let hkey = height_key(&key, 3); diff --git a/base_layer/mmr/src/sparse_merkle_tree/node.rs b/base_layer/mmr/src/sparse_merkle_tree/node.rs index 1a7467b669..6ac8be86c8 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/node.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/node.rs @@ -177,7 +177,7 @@ impl<'a> ExactSizeIterator for PathIterator<'a> { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] #[serde(bound(deserialize = "H:"))] #[serde(bound(serialize = "H:"))] pub enum Node { @@ -186,16 +186,6 @@ pub enum Node { Branch(BranchNode), } -impl Clone for Node { - fn clone(&self) -> Self { - match self { - Empty(n) => Empty(n.clone()), - Leaf(n) => Leaf(n.clone()), - Branch(_) => panic!("Branch nodes cannot be cloned"), - } - } -} - impl Node { /// A non-mutable version of [`Node::hash`], which you can use if you _absolutely know_ that the hash is correct. /// This would be the case for Empty or Leaf nodes, but you should never call this if the node might be a branch @@ -660,7 +650,7 @@ mod test { #[test] fn hash_type_from_slice() { - let arr = vec![1u8; 32]; + let arr = [1u8; 32]; assert!(matches!(NodeKey::try_from(&arr[..3]), Err(SMTError::ArrayTooShort(3)))); assert!(NodeKey::try_from(&arr[..]).is_ok()); assert!(matches!( diff --git a/base_layer/mmr/src/sparse_merkle_tree/tree.rs b/base_layer/mmr/src/sparse_merkle_tree/tree.rs index caa2b38102..73366350f3 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/tree.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/tree.rs @@ -31,7 +31,7 @@ pub enum DeleteResult { KeyNotFound, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] #[serde(bound(deserialize = "H:"))] #[serde(bound(serialize = "H:"))] pub struct SparseMerkleTree { @@ -229,7 +229,7 @@ impl> SparseMerkleTree { Ok(result) } - /// Update and existing node at location `key` in the tree, or, if the key does not exist, insert a new node at + /// Update an existing node at location `key` in the tree, or, if the key does not exist, insert a new node at /// location `key` instead. Returns `Ok(UpdateResult::Updated)` if the node was updated, or /// `Ok(UpdateResult::Inserted)` if the node was inserted. 
/// diff --git a/base_layer/p2p/Cargo.toml b/base_layer/p2p/Cargo.toml index ee877fc532..ad4f10a9f9 100644 --- a/base_layer/p2p/Cargo.toml +++ b/base_layer/p2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_p2p" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" authors = ["The Tari Development community"] description = "Tari base layer-specific peer-to-peer communication features" repository = "https://github.com/tari-project/tari" @@ -10,13 +10,13 @@ license = "BSD-3-Clause" edition = "2018" [dependencies] -tari_comms = { path = "../../comms/core" } -tari_comms_dht = { path = "../../comms/dht" } -tari_common = { path = "../../common" } +tari_comms = { path = "../../comms/core", version = "1.0.0-pre.11a" } +tari_comms_dht = { path = "../../comms/dht", version = "1.0.0-pre.11a" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } tari_crypto = { version = "0.20" } -tari_service_framework = { path = "../service_framework" } -tari_shutdown = { path = "../../infrastructure/shutdown" } -tari_storage = { path = "../../infrastructure/storage" } +tari_service_framework = { path = "../service_framework", version = "1.0.0-pre.11a" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } +tari_storage = { path = "../../infrastructure/storage", version = "1.0.0-pre.11a" } tari_utilities = { version = "0.7" } anyhow = "1.0.53" @@ -32,7 +32,7 @@ rustls = "0.20.2" semver = { version = "1.0.1", optional = true } serde = "1.0.90" thiserror = "1.0.26" -tokio = { version = "1.23", features = ["macros"] } +tokio = { version = "1.36", features = ["macros"] } tokio-stream = { version = "0.1.9", default-features = false, features = ["time"] } tower = "0.4.11" trust-dns-client = { version = "=0.21.0-alpha.5", features = ["dns-over-rustls"] } @@ -41,12 +41,12 @@ webpki = "0.22" [dev-dependencies] tari_test_utils = { path = "../../infrastructure/test_utils" } -config = "0.13.0" +config = "0.14.0" clap = "3.2" tempfile = "3.1.0" [build-dependencies] -tari_common = { path = "../../common", features = ["build"] } +tari_common = { path = "../../common", features = ["build"], version = "1.0.0-pre.11a" } [features] test-mocks = [] diff --git a/base_layer/p2p/src/auto_update/gpg_keys/README.md b/base_layer/p2p/src/auto_update/gpg_keys/README.md new file mode 100644 index 0000000000..dfbec2d64b --- /dev/null +++ b/base_layer/p2p/src/auto_update/gpg_keys/README.md @@ -0,0 +1,42 @@ +# Tari developer public keys + +This folder contains the public keys for Tari developers + +## Why use public keys + +Here's a +[long, but interesting read](https://mikegerwitz.com/2012/05/a-git-horror-story-repository-integrity-with-signed-commits). + +## Creating a public key + +If you don't already have a GPG key, follow the steps +[described here](https://help.github.com/articles/generating-a-new-gpg-key/) to create one. 
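If you have never created a key before, note that recent versions of GnuPG can also walk you through the whole process interactively; the GitHub guide linked above covers the same steps in more detail:

    gpg --full-generate-key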
+ +## Importing keys into your keyring + +A lot of detail on how to import keys into your keyring is given in the +[Red Hat docs](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/4/html/Step_by_Step_Guide/s1-gnupg-import.html), +but here is a tl;dr: + + gpg --import key.asc + +The output looks similar to the following: + +```text +gpg: key F78FFE84: public key imported +gpg: Total number processed: 1 +gpg: imported: 1 +``` + +## Signing commits with your key + +https://help.github.com/articles/signing-commits/ + +## Submitting your public key + +To add your GPG key to the list, export your **public** key with + + gpg --armor --export <your-key-id> + +then, create a pull request with your GPG public key in a single file in this folder with the name +`<github-username>.asc` diff --git a/base_layer/p2p/src/auto_update/gpg_keys/swvheerden.asc new file mode 100644 index 0000000000..42eef171bb --- /dev/null +++ b/base_layer/p2p/src/auto_update/gpg_keys/swvheerden.asc @@ -0,0 +1,53 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQSuBGXB9V8RDACNREHtA4e53celi1DFDMQwFVBsKg2eN6S1IqINuZhgDquRL92O +Jtcja18GTzaVdC80+TuvL19XWdi4B869XFClZpLlbM1p6HP11haOtG30CRNbRWAQ +FOgDTO+Z/HkktsZlY1Zv0LLOwr/U2EbZAAKp/uKndpwST7rsmBJZyepbPKMTv0yj +IEJSC1NJ2r7iStdEEfpCzzBgDlWesg0+3f710+Z4kQm6EvPq8Z3tFCpcOMKcfbbh +d4cDYJSikEPelrkh6a51gFkcCVAO4KUbmROu95ijHA2INpXtf4L5kiY6iS2OEaej +9J6Whh/spD8bV5wUbV/dpgl21esLrdFpS/0sXlPnez1gDH1d2nIonBA94LxrdL/K +rNfScyLSCI9HfWaXjs8UkrAEHlvbStDI1pvPla7Qjtx+8H9D/UF+1kqfCAgAvuzH +Mwn00HybRLnjIx3RO4fmv8HdmPJHlAt2apT5M+KlxzE2aKu59OqXDaN1cMcmenox +DaXEWx30MB6eFB8BAJAnyT0w9C6NMAml4ecnaI+AsGzwuTXXTFppXm7mhRZ5C/9o +O7YWlyVBGxOoihwlNQU7wu1xt82NRE0PhynNWc1wQrFzPt9Xj2sy7FoEqYjFLkAg +9V8qJ8Gv1neu0DcIqt7tPBJ7ZLcvKtngLbXV1jSGiZsMhHSNYThCZfZu2AitjZgP +BJ7zwsWuAG/x491ellGnE3qnYd7eR628TiHueT2J0CmneovEF65anSPswdCY93/u +YvKYVXHt4Zwtdekc7PmZeVUu2atVuG+ELxYWBBSI01/YeQXi2cJ16rxJf1Eq8nkd +9sli7pK1r+c5tbjgDB8JVp5CEiwkkakVogNBXxYdSmB0GxIIQ38qbWTJ2ZDmeJH4 +aWQEs3EDtplaTKdLUIIl84Lgx7r7EMYlQKQC8Ah3HNmneihasZa3xvhsarBa5uYw +dmz23u73AXn3aWnX+d8RdToQ//znnP06LL2KwQocAPyM5yuo5N1P7kAThqSC17em +XlMu/6Y7emVq7H0Xi4ElQRrJQnOuZOOxcsUOIfu4VTDE6PNSwc6OnROVE+bPyssL +/irHxwOkSQEwYkuj3jAL3R15GnRiMHKy206WLYpkDlLm1HKvFM60xbSdXn8C7Z2c +56RgVBs2JNzo9dsjBv58RT4q/cK+NuKlvRFlgmAc8gOmJ5woh5zw/pLow8i0ljwz +NW2GD9TlcNUrzFjU4sQ4siFw9RY3Oc8TTZbL3YP6Ci+NqtRXRIbrXYECNijPHfAz +d6rtFTI2uQl7ii6ZkQJ9fzAMa7riigPtjNhZW6WiKl43TAEaimZ/YFSPU1zTrwMG +AVBauTzxU+R2rPjoxoZddvok2548Mzt0eFVvF5Qfx11mt+bL2MfMT3ryUxgAEHOF +bvLE0aQd49u+hChAItfh9Bq5d2mtQ1YXFvu8n82EjGd0SXUuN6AVs1HLF51v1cxZ +vxGjE3oTKgfNZavrlvRD46kbWYtor3qYGuokGwzB5BLOlWtxOzYXMDMligj7WqXz +/V8oEgUmwnwt58V8Z5HR28jQH9+yyGFbE1L7bliwG2rYgT2w+7x6hl6rRaE1Th6/ +5LQpU2NoYWxrIHZhbiBIZWVyZGVuIDxzd3ZoZWVyZGVuQGdtYWlsLmNvbT6IlgQT +EQgAPhYhBGrrA9URsApsptQzcQGqGYFT/ofiBQJlwfVfAhsDBQkHhh8JBQsJCAcC +BhUKCQgLAgQWAgMBAh4BAheAAAoJEAGqGYFT/ofi/+gA/3vDPjDCMUwGcupnBnYD +W1Bi8yhu/pMXRnTdZvXerCvJAQCGVjvOssfx0Nb9S92nb6QyreCg1W5snNZSVRjl +8uywo7kDDQRlwfVfEAwAhpp8f6B2xNk0M/+YZNOskzwbj+XSzkgiSXp8xLSCchrM +poiM1cvRgkDw/TA3lo5kd1S9q4NHdP/t/4xFpZ6o+oPNbW2MqwyqH0CtHprN6HtX +x3G71s1JbAHq40EtXGFVLx29yTpQY3pBBQD6kdH/T5xi2IR+Xi1RaMfcsZL4ilzc +6eq818AsFFuOfMgicxmUJKXd3vDywjpmY6VbqZ68UULPngYQkNfYVafx4LOD1y5O +8qPfxR0MCcpqHDX/P9Zo2OXr0PJFf+lgV45/nrdA4/SxbuT4E++Xgm7ZTHwktpI2 +giyNjZ08Mqy/j8VlxxHd231AmL11tSGy501C5NyYIgxZLq/lRe/M8uNWy1eiAIYD +Qos9lnvZTRJxDxwkQp7jqqNfnWMW9wtqp/I2Y5S1LlRrq9T+PC/t5N0RzE9WaWpJ +8voThtdOps0nB+IA3o45dNz4b3yhVEt8AjQwp0LAncXnp3VyqwLDvSf5ua/YQPZx
+u3DxgWyckPSOpHomZ1VnAAMFC/95PVeQHX6lGjIXL1eK5EfyxBgDuc6TVc5zfHor +aJTwDpGD5dzriaRRZUQzdFSyUjSRG796foT9T7CQk3uuhzfdJTRFGuszGmUgWGTQ +Rry3OJni3ZDRmOkbWzlcYlwVsekd5BpbSTqPCiq55OsycT4EwMY3rnCMtQ+8TcFJ +Sn27GM4trN/ForclTHoLDQTvY6qT559rWox8zrUmH23AA4CQofb2MPlIM/iYosMy +XTl9MQfNhESe6gS7EHbQalGbQjbXA6Q3o5wsktdRMLXAhYL4aOU9kS3YDaH5d8lk +68xVYe7mb6rj6tv0UKUp68NwnPhm3TGItnsBlUhCVcWzw2Fttt9xF2I99fDl8FJE +5MTMiiGET51w9sB51BA+K1Lj57T4YmIgAzgRqw6zHstGqXU5US13RvpGAX/RzgRP +icmzkQiBGXm66IqUzRu0SelomW2oTfXjo7VVs5U8zpc2KWHb8wqN7rDWDxfJZsj9 +RyEJgc6ekmgC2vlbFqqAQ/C44+uIfgQYEQgAJhYhBGrrA9URsApsptQzcQGqGYFT +/ofiBQJlwfVfAhsMBQkHhh8JAAoJEAGqGYFT/ofi768A/jBr3sFU97M5Hf0nGfYA +Bjw6yQmDOU02magWtM4aJmpBAP9YLTeYB02+GYo3wM7MQ8xyJaS8Ed+PzAeNtQMM +xDOnOQ== +=VupM +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/base_layer/p2p/src/auto_update/mod.rs b/base_layer/p2p/src/auto_update/mod.rs index c67ea11a41..97c28a4acb 100644 --- a/base_layer/p2p/src/auto_update/mod.rs +++ b/base_layer/p2p/src/auto_update/mod.rs @@ -202,14 +202,7 @@ async fn http_download(url: T) -> Result impl Iterator { MAINTAINERS.iter().map(|s| { diff --git a/base_layer/p2p/src/auto_update/signature.rs b/base_layer/p2p/src/auto_update/signature.rs index b8bae51e9c..fb770e9992 100644 --- a/base_layer/p2p/src/auto_update/signature.rs +++ b/base_layer/p2p/src/auto_update/signature.rs @@ -64,7 +64,60 @@ mod test { use pgp::Deserializable; use super::*; - use crate::auto_update::{maintainers, MAINTAINERS}; + use crate::auto_update::maintainers; + + const PUBLIC_KEY: &str = r#"-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF6y/8YBEAC+9x9jq0q8sle/M8aYlp4b9cHJPb6sataUaMzOxx/hQ9WCrhU1 +GhJrDk+QPBMBtvT1oWMWa5KhMFNS1S0KTYbXensnF2tOdT6kSAWKXufW4hQ32p4B +NW6aqrOxKMLj7jI2hwlCgRvlK+51J/l7e1OvCpQFL3wH/VMPBG5TgIRmgLeFZWWB +WtD6VjOAJROBiESb5DW+ox3hyxFEKMmwdC+B8b346GJedGFZem9eaN3ApjYBz/Ev +YsQQk2zL/eK5HeSYfboFBCWQrpIFtaJwyhzRlW2s5jz79Jv6kULZ+SVmfRerqk9c +jCzp48R5SJxIulk/PThqZ7sE6vEvwoGGSUzhQ0z1LhhFXt/0qg0qNeIvGkO5HRIR +R5i73/WG1PlgmcjtZHV54M86sTwm3yMevlHI5+i8Y4PAcYulftX9fVf85SitnWS5 +oAg3xP0pIWWztk8Ng4hWMM7sGE7q7BpjxuuGjrb9SNOTQuK8I7hg81p08LSNioOm +RD2OTkgbzew4YIMy+SmkmrFWvoKCRrWxNsQl4osVhOcLOlVBYoIjnBxy7AmHZzZC +ftgH5n6ODoB0CqZrc+UcMX4CFVtI7vaZOp1mcHN8geMhq1TjMJoGGlYuonaO34wM +2o+n+HbFJBCzfz/Q4pnuGwPDlumFU08E++ch63joMtXM1qAD5rNJMHfebQARAQAB +tDBTdGFubGV5IEJvbmRpIDxzZGJvbmRpQHVzZXJzLm5vcmVwbHkuZ2l0aHViLmNv +bT6JAk4EEwEIADgWIQQze5HvxfECfYrt9j0YhbFJUEwKZAUCXrL/xgIbAwULCQgH +AgYVCgkICwIEFgIDAQIeAQIXgAAKCRAYhbFJUEwKZIvVEAC3uGPAduK06FWxERWj +qXDR/tj7rNh6MaYXTLDM79sXP9nOj9aZOmA6lKZDRZ8lQyoZykwlVriHkJLYFotj +mxBPfgy1j5a2I52sF1sZMxwCg1nChvDivvnXTORMMcTWtIFkKu3cdzmO1Jil1tFB +zb205DG6gJ4JtXPpXKdAPkaJ68pqGcsAUU0N1KXla6ob/QwNlvp5aQ7cdR7uNbuI +kRx/KpsFNpA4jeP0+hK6kSaJgBdIUWzUWkfz9ubBdCRN8oWG+aazq4Y3DvaSnmbr +VCdb78Ni+QP98VtQhdk0UEc+T7vdbS9c71t6qMqNlRUWoiBZORnWa2QTqxhFGsM0 +FZhGX4UIZsdqMkTn/egf5zy/UmgqvmX2ujgQVj4OzkXT022wKgnr4z09/jymUPXE +o4QU15kTmjwTkNk8E3Cj1HbppyEgPNJ2bO3wnJbt6XMKejIXJC8X7G5v4WomOe8j +HVhqpAeOuML4u7KYg73wgRnIIMXCLR2VeS4iSZ42x/L6lWS5NzaGMV6nZv8t5ehh +otZ3uaWlHa4rRK2wrwveN/JdoYXqmZIoOb5Ivt9PlbUZ6NgHXDyHC7rCShtyPK2j +tY6BkoFz4HAloxhFGjRxBfDFjx9nefJ418owI1tOP1rNCoblROT1ggLlQ9a6URIF +R5WvoQC843hWwspzi7ll1Vz5JbkCDQResv/GARAArIvngo2dj+bZgu9/edkrKKbq +JZQj9fqaZDJrHXOmg/3t29qvEnyFJnyl9VYhSmLCppuy0k4YY4DaaCebBPafyV8e +Q/JNF3Le1FO7LHmoHuXFvcOvOVJhANpFKmNX3jaEYT7zDTbJ705FGldaC3udn12n +nEFlAEJjYQA6bgQAXXS02JjeVfl82IEgYpR0yFJjbL690tQ87Emlk3zeRrd/Esuv +Au9jHDTILSkUxa2dHTOgbtPwkk0N1NeGYIvWLYtwVcQ7KF+1xv/WVjO0dyr2qoia 
+4guJejBkNXAfYbodg5f7KjUYOcmTotSFurens5SdS+KUuaQtbfxGOt6nthwEU/N5 +x2/M64Y4l4vXtrjV+6d6RtvlPHnMTMAdfE6f3F/+wEsVlBQFbV2kn0nbDIJSlwys +L/kR6R9fHPtjSmS1omZWqE7bOu288j/M7/aP4Jcflj1t0+0WGfliS+0IgrNphUUA +1tpC7PXzXKzMtdK5xzLIZWAnjoXpzjVhcFglQpQSk9y4V9lqZbawx+RfHW1U2RYp +rVfvm42wg0DPYanWXzgO4nZdwSzu9RQQUdhdJAxCVV9ODh6CAVj0G7q2XEerjAUE +ZTxf1WKCJTpCy1B6w2lf1PN2zKDVpha0/76u/QcZGg5dAqklpSAaRNj3uDnq1HEP +RQOm6ladgLXO46J+ao0AEQEAAYkCNgQYAQgAIBYhBDN7ke/F8QJ9iu32PRiFsUlQ +TApkBQJesv/GAhsMAAoJEBiFsUlQTApk6HsP/A/sNwdzhTKIWGpdyxXz2YdUSK++ +kaQdZwtDIVcSZQ0yIFf0fPLkeoSd7jZfANmu2O1vnocBjdMcNOvPNjxKpkExJLVs +ttMiqla0ood8LuA9wteRFKRgoJc3Y71bWsxavLTfA4jDK+CaJG+K+vRDU7gwAdF+ +5rKhUIyn7pph7eWGHOv4bzGLEjV4NlLSzZGBA0aMDaWMGgStNzCD25yU7zYEJIWn +8gq2Rq0by8H6NLg6tygh5w8s2NUhPI5V31kZhsC1Kn5kExn4rVxFusqwG63gkPz1 +avx7E5kfChTgjaDlf0gnC73/alMeO4vTJKeDJaq581dza9jwJqaDC1+/ozYdGt7u +3KUxjhiSnWe38/AGna9cB4mAD4reCczH51gthlyeYNaSw+L0rsSMKvth9EYAHknP +ZFT97SIDPF1/2bRgO05I+J4BaSMA+2Euv/O3RWk953l+eR8MoZlr5mnMRM4Guy7K +nfTh5LZFccJyvW+CsxKKfwe/RNQPZLBuScqAogjsd+I6sVlmgLSyKkR2B3voRQ0g +l6J2669tX0wMPM/XsVlZ/UDdfUe6spRO8PXBwe+zdAAejUotLk4aMyhxxZVKCEwO +CrdiSo3ds50gaF1BXP72gfZW0E8djcD9ATfONqxFfftUwPbnbAqKh8t+L+If5H5r +tQrYpH9CNXgX9dC9 +=7S7i +-----END PGP PUBLIC KEY BLOCK-----"#; const VALID_SIGNATURE: &str = r#"-----BEGIN PGP SIGNATURE----- iQIzBAEBCAAdFiEEM3uR78XxAn2K7fY9GIWxSVBMCmQFAmDYhicACgkQGIWxSVBM @@ -87,10 +140,11 @@ l9smp8LtJcXkw4cNgE4MB9VKdx+NhdbvWemt7ccldeL22hmyS24= #[test] fn it_verifies_signed_message() { let (sig, _) = pgp::StandaloneSignature::from_string(VALID_SIGNATURE.trim()).unwrap(); - let verifier = SignedMessageVerifier::new(maintainers().collect()); + let (key, _) = pgp::SignedPublicKey::from_string(PUBLIC_KEY).unwrap(); + let verifier = SignedMessageVerifier::new(vec![key]); let signer = verifier.verify_signature(&sig, MESSAGE).unwrap(); - let (maintainer, _) = pgp::SignedPublicKey::from_string(MAINTAINERS[3]).unwrap(); + let (maintainer, _) = pgp::SignedPublicKey::from_string(PUBLIC_KEY).unwrap(); assert_eq!(*signer, maintainer); } diff --git a/base_layer/p2p/src/dns/mock.rs b/base_layer/p2p/src/dns/mock.rs index 5791d11ecf..e4ee81a4a7 100644 --- a/base_layer/p2p/src/dns/mock.rs +++ b/base_layer/p2p/src/dns/mock.rs @@ -60,12 +60,10 @@ where E: From + Error + Clone + Send + Sync + Unpin + 'static let responses = (*self.messages) .clone() .into_iter() - .fold(Result::<_, E>::Ok(Message::new()), |msg, resp| { - msg.and_then(|mut msg| { - resp.map(move |resp| { - msg.add_answers(resp.answers().iter().cloned()); - msg - }) + .try_fold(Message::new(), |mut msg, resp| { + resp.map(move |resp| { + msg.add_answers(resp.answers().iter().cloned()); + msg }) }) .map(DnsResponse::from); diff --git a/base_layer/p2p/src/initialization.rs b/base_layer/p2p/src/initialization.rs index ac9ab9b653..d4cf62ca97 100644 --- a/base_layer/p2p/src/initialization.rs +++ b/base_layer/p2p/src/initialization.rs @@ -51,8 +51,15 @@ use tari_comms::{ ProtocolId, }, tor, - tor::HiddenServiceControllerError, - transports::{predicate::FalsePredicate, MemoryTransport, SocksConfig, SocksTransport, TcpWithTorTransport}, + tor::{HiddenServiceControllerError, TorIdentity}, + transports::{ + predicate::FalsePredicate, + HiddenServiceTransport, + MemoryTransport, + SocksConfig, + SocksTransport, + TcpWithTorTransport, + }, utils::cidr::parse_cidrs, CommsBuilder, CommsBuilderError, @@ -209,9 +216,10 @@ pub async fn initialize_local_test_comms>( Ok((comms, dht, event_sender)) } -pub async fn spawn_comms_using_transport( +pub async fn 
spawn_comms_using_transport( comms: UnspawnedCommsNode, transport_config: TransportConfig, + after_comms: F, ) -> Result { let comms = match transport_config.transport_type { TransportType::Memory => { @@ -249,22 +257,16 @@ pub async fn spawn_comms_using_transport( let tor_config = transport_config.tor; debug!(target: LOG_TARGET, "Building TOR comms stack ({:?})", tor_config); let listener_address_override = tor_config.listener_address_override.clone(); - let mut hidden_service_ctl = initialize_hidden_service(tor_config)?; + let hidden_service_ctl = initialize_hidden_service(tor_config)?; // Set the listener address to be the address (usually local) to which tor will forward all traffic - let transport = hidden_service_ctl.initialize_transport().await?; - - info!( - target: LOG_TARGET, - "Tor hidden service initialized. proxied_address = '{:?}', listener_override_address = {:?}", - hidden_service_ctl.proxied_address(), - listener_address_override, - ); + let instant = Instant::now(); + let transport = HiddenServiceTransport::new(hidden_service_ctl, after_comms); + debug!(target: LOG_TARGET, "TOR transport initialized in {:.0?}", instant.elapsed()); comms .with_listener_address( listener_address_override.unwrap_or_else(|| multiaddr![Ip4([127, 0, 0, 1]), Tcp(0u16)]), ) - .with_hidden_service_controller(hidden_service_ctl) .spawn_with_transport(transport) .await? }, diff --git a/base_layer/p2p/tests/support/comms_and_services.rs b/base_layer/p2p/tests/support/comms_and_services.rs index 4bd2dca73f..a653cb4f7a 100644 --- a/base_layer/p2p/tests/support/comms_and_services.rs +++ b/base_layer/p2p/tests/support/comms_and_services.rs @@ -51,11 +51,14 @@ pub async fn setup_comms_services( .await .unwrap(); - let comms = comms.spawn_with_transport(MemoryTransport).await.unwrap(); + let mut comms = comms.spawn_with_transport(MemoryTransport).await.unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); // Set the public address for tests - comms - .node_identity() - .add_public_address(comms.listening_address().clone()); + comms.node_identity().add_public_address(address.bind_address().clone()); (comms, dht, messaging_events) } diff --git a/base_layer/service_framework/Cargo.toml b/base_layer/service_framework/Cargo.toml index 0dca433d5d..fd28704cf7 100644 --- a/base_layer/service_framework/Cargo.toml +++ b/base_layer/service_framework/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_service_framework" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" authors = ["The Tari Development Community"] description = "The Tari communication stack service framework" repository = "https://github.com/tari-project/tari" @@ -10,19 +10,19 @@ license = "BSD-3-Clause" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tari_shutdown = { path = "../../infrastructure/shutdown" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } anyhow = "1.0.53" async-trait = "0.1.50" futures = { version = "^0.3.16", features = ["async-await"] } log = "0.4.8" thiserror = "1.0.26" -tokio = { version = "1.23", features = ["rt"] } +tokio = { version = "1.36", features = ["rt", "sync"] } tower-service = { version = "0.3" } [dev-dependencies] tari_test_utils = { path = "../../infrastructure/test_utils" } -tokio = { version = "1.23", features = ["rt-multi-thread", "macros", "time"] } +tokio = { version = "1.36", features = ["rt-multi-thread", "macros", "time"] } futures-test = { 
version = "0.3.3" } tower = "0.4" diff --git a/base_layer/service_framework/src/context/handles.rs b/base_layer/service_framework/src/context/handles.rs index 8b29b98131..2042c8f4e8 100644 --- a/base_layer/service_framework/src/context/handles.rs +++ b/base_layer/service_framework/src/context/handles.rs @@ -197,7 +197,7 @@ impl ServiceHandles { acquire_lock!(self.handles) .get(&type_id) .and_then(|b| b.downcast_ref::()) - .map(Clone::clone) + .cloned() } /// Returns the shutdown signal for this stack diff --git a/base_layer/tari_mining_helper_ffi/Cargo.toml b/base_layer/tari_mining_helper_ffi/Cargo.toml index 18e04674a9..19bb735224 100644 --- a/base_layer/tari_mining_helper_ffi/Cargo.toml +++ b/base_layer/tari_mining_helper_ffi/Cargo.toml @@ -3,24 +3,30 @@ name = "minotari_mining_helper_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency miningcore C FFI bindings" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] -tari_comms = { path = "../../comms/core" } +tari_comms = { path = "../../comms/core" } tari_crypto = { version = "0.20" } -tari_common = { path = "../../common" } -tari_core = { path = "../core", default-features = false, features = ["transactions"]} +tari_common = { path = "../../common" } +tari_core = { path = "../core", default-features = false, features = ["transactions", "base_node_proto", "base_node"] } +tari_common_types = { path = "../../base_layer/common_types", version = "1.0.0-pre.11a" } tari_utilities = { version = "0.7" } libc = "0.2.65" thiserror = "1.0.26" borsh = "1.2" hex = "0.4.2" +tokio = { version = "1.36", features = ["rt"] } [dev-dependencies] -tari_core = { path = "../core", features = ["transactions", "base_node"]} - +tari_core = { path = "../core", features = ["transactions", "base_node"] } rand = "0.8" +[build-dependencies] +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a" } +cbindgen = "0.24.3" +tari_common = { path = "../../common", features = ["build", "static-application-info"] } + [lib] -crate-type = ["staticlib","cdylib"] +crate-type = ["cdylib"] diff --git a/base_layer/tari_mining_helper_ffi/build.rs b/base_layer/tari_mining_helper_ffi/build.rs new file mode 100644 index 0000000000..a8967a500a --- /dev/null +++ b/base_layer/tari_mining_helper_ffi/build.rs @@ -0,0 +1,60 @@ +// Copyright 2019, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::{env, path::PathBuf}; + +use cbindgen::{Config, Language, LineEndingStyle, ParseConfig, Style}; +use tari_common::build::StaticApplicationInfo; +use tari_features::resolver::build_features; + +fn main() { + build_features(); + let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); + + // generate version info + let gen = StaticApplicationInfo::initialize().unwrap(); + gen.write_consts_to_outdir("consts.rs").unwrap(); + + let output_file = PathBuf::from(&crate_dir) + .join("tari_mining_helper.h") + .display() + .to_string(); + + let config = Config { + language: Language::C, + header: Some("// Copyright 2024. The Tari Project\n// SPDX-License-Identifier: BSD-3-Clause".to_string()), + parse: ParseConfig { + parse_deps: true, + include: Some(vec!["tari_comms".to_string()]), + ..Default::default() + }, + autogen_warning: Some("// This file was generated by cbindgen. Please do not edit manually.".to_string()), + style: Style::Tag, + cpp_compat: true, + line_endings: LineEndingStyle::Native, + ..Default::default() + }; + + cbindgen::generate_with_config(&crate_dir, config) + .unwrap() + .write_to_file(output_file); +} diff --git a/base_layer/tari_mining_helper_ffi/src/error.rs b/base_layer/tari_mining_helper_ffi/src/error.rs index d5a8b475a0..e9fea91c8d 100644 --- a/base_layer/tari_mining_helper_ffi/src/error.rs +++ b/base_layer/tari_mining_helper_ffi/src/error.rs @@ -36,6 +36,14 @@ pub enum InterfaceError { AllocationError, #[error("An error because the supplied position was out of range")] PositionInvalidError, + #[error("An error has occurred when trying to create the tokio runtime: `{0}`")] + TokioError(String), + #[error("An error has occurred when trying to create a coinbase: `{0}`")] + CoinbaseBuildError(String), + #[error("An invalid address was passed in: `{0}`")] + InvalidAddress(String), + #[error("An invalid network was passed in: `{0}`")] + InvalidNetwork(String), } /// This struct is meant to hold an error for use by Miningcore.
The error has an integer code and string @@ -73,6 +81,22 @@ impl From for MiningHelperError { code: 6, message: format!("{:?}", v), }, + InterfaceError::TokioError(_) => Self { + code: 7, + message: format!("{:?}", v), + }, + InterfaceError::CoinbaseBuildError(_) => Self { + code: 8, + message: format!("{:?}", v), + }, + InterfaceError::InvalidAddress(_) => Self { + code: 9, + message: format!("{:?}", v), + }, + InterfaceError::InvalidNetwork(_) => Self { + code: 10, + message: format!("{:?}", v), + }, } } } diff --git a/base_layer/tari_mining_helper_ffi/src/lib.rs b/base_layer/tari_mining_helper_ffi/src/lib.rs index 5909511f1c..8888fb8e52 100644 --- a/base_layer/tari_mining_helper_ffi/src/lib.rs +++ b/base_layer/tari_mining_helper_ffi/src/lib.rs @@ -30,14 +30,30 @@ mod error; use core::ptr; -use std::{convert::TryFrom, ffi::CString, slice}; +use std::{convert::TryFrom, ffi::CString, slice, str::FromStr}; use borsh::{BorshDeserialize, BorshSerialize}; use libc::{c_char, c_int, c_uchar, c_uint, c_ulonglong}; -use tari_core::{blocks::BlockHeader, proof_of_work::sha3x_difficulty}; +use tari_common::{configuration::Network, network_check::set_network_if_choice_valid}; +use tari_common_types::tari_address::TariAddress; +use tari_core::{ + blocks::{BlockHeader, NewBlockTemplate}, + consensus::ConsensusManager, + proof_of_work::sha3x_difficulty, + transactions::{ + generate_coinbase, + key_manager::create_memory_db_key_manager, + transaction_components::RangeProofType, + }, +}; use tari_crypto::tari_utilities::hex::Hex; +use tokio::runtime::Runtime; use crate::error::{InterfaceError, MiningHelperError}; +mod consts { + // Import the auto-generated const values from the Manifest and Git + include!(concat!(env!("OUT_DIR"), "/consts.rs")); } pub type TariPublicKey = tari_comms::types::CommsPublicKey; #[derive(Debug, PartialEq, Clone)] @@ -233,6 +249,155 @@ pub unsafe extern "C" fn inject_nonce(header: *mut ByteVector, nonce: c_ulonglon (*header).0 = buffer; } +/// Injects a coinbase into a block template +/// +/// ## Arguments +/// `block_template_bytes` - The block template as bytes, serialized with borsh.io +/// `coibase_value` - The value of the coinbase +/// `stealth_payment` - Boolean value, is this a stealth payment or a normal one-sided one +/// `revealed_value_proof` - Boolean value, should this use the revealed value proof or BP+ +/// `wallet_payment_address` - The address to pay the coinbase to +/// `coinbase_extra` - The value of the coinbase extra field +/// `network` - The value of the network +/// +/// ## Returns +/// `block_template_bytes` - The updated block template +/// `error_out` - Error code returned, 0 means no error +/// +/// # Safety +/// None +#[allow(clippy::too_many_lines)] +#[no_mangle] +pub unsafe extern "C" fn inject_coinbase( + block_template_bytes: *mut ByteVector, + coibase_value: c_ulonglong, + stealth_payment: bool, + revealed_value_proof: bool, + wallet_payment_address: *const c_char, + coinbase_extra: *const c_char, + network: c_uint, + error_out: *mut c_int, +) { + let mut error = 0; + ptr::swap(error_out, &mut error as *mut c_int); + if block_template_bytes.is_null() { + error = MiningHelperError::from(InterfaceError::NullError("block template".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + } + if wallet_payment_address.is_null() { + error = MiningHelperError::from(InterfaceError::NullError("wallet_payment_address".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + } + let native_string_address =
CString::from_raw(wallet_payment_address as *mut i8) + .to_str() + .unwrap() + .to_owned(); + let wallet_address = match TariAddress::from_str(&native_string_address) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidAddress(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + if coinbase_extra.is_null() { + error = MiningHelperError::from(InterfaceError::NullError("coinbase_extra".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + } + let network_u8 = match u8::try_from(network) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + let network = match Network::try_from(network_u8) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + // Set the static network variable according to the user chosen network (for use with + // `get_current_or_user_setting_or_default()`) - + if let Err(e) = set_network_if_choice_valid(network) { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }; + let coinbase_extra_string = CString::from_raw(coinbase_extra as *mut i8) + .to_str() + .unwrap() + .to_owned(); + let mut bytes = (*block_template_bytes).0.as_slice(); + let mut block_template: NewBlockTemplate = match BorshDeserialize::deserialize(&mut bytes) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::Conversion(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + let key_manager = create_memory_db_key_manager(); + + let consensus_manager = match ConsensusManager::builder(network).build() { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::NullError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + let runtime = match Runtime::new() { + Ok(r) => r, + Err(e) => { + error = MiningHelperError::from(InterfaceError::TokioError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + let range_proof_type = if revealed_value_proof { + RangeProofType::RevealedValue + } else { + RangeProofType::BulletProofPlus + }; + let height = block_template.header.height; + let (coinbase_output, coinbase_kernel) = match runtime.block_on(async { + // we don't count the fee or the reward here, we assume the caller has calculated the amount to be the exact + // value for the coinbase(s) they want.
+ generate_coinbase( + 0.into(), + coibase_value.into(), + height, + coinbase_extra_string.as_bytes(), + &key_manager, + &wallet_address, + stealth_payment, + consensus_manager.consensus_constants(height), + range_proof_type, + ) + .await + }) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::CoinbaseBuildError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return; + }, + }; + block_template.body.add_output(coinbase_output); + block_template.body.add_kernel(coinbase_kernel); + block_template.body.sort(); + let mut buffer = Vec::new(); + BorshSerialize::serialize(&block_template, &mut buffer).unwrap(); + (*block_template_bytes).0 = buffer; +} + /// Returns the difficulty of a share /// /// ## Arguments @@ -245,9 +410,36 @@ pub unsafe extern "C" fn inject_nonce(header: *mut ByteVector, nonce: c_ulonglon /// # Safety /// None #[no_mangle] -pub unsafe extern "C" fn share_difficulty(header: *mut ByteVector, error_out: *mut c_int) -> c_ulonglong { +pub unsafe extern "C" fn share_difficulty( + header: *mut ByteVector, + network: c_uint, + error_out: *mut c_int, +) -> c_ulonglong { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); + let network_u8 = match u8::try_from(network) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 1; + }, + }; + let network = match Network::try_from(network_u8) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 1; + }, + }; + // Set the static network variable according to the user chosen network (for use with + // `get_current_or_user_setting_or_default()`) - + if let Err(e) = set_network_if_choice_valid(network) { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 1; + }; if header.is_null() { error = MiningHelperError::from(InterfaceError::NullError("header".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); @@ -297,12 +489,36 @@ pub unsafe extern "C" fn share_difficulty(header: *mut ByteVector, error_out: *m pub unsafe extern "C" fn share_validate( header: *mut ByteVector, hash: *const c_char, + network: c_uint, share_difficulty: c_ulonglong, template_difficulty: c_ulonglong, error_out: *mut c_int, ) -> c_int { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); + let network_u8 = match u8::try_from(network) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 1; + }, + }; + let network = match Network::try_from(network_u8) { + Ok(v) => v, + Err(e) => { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 1; + }, + }; + // Set the static network variable according to the user chosen network (for use with + // `get_current_or_user_setting_or_default()`) - + if let Err(e) = set_network_if_choice_valid(network) { + error = MiningHelperError::from(InterfaceError::InvalidNetwork(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 1; + }; if header.is_null() { error = MiningHelperError::from(InterfaceError::NullError("header".to_string())).code; ptr::swap(error_out, &mut error as *mut 
c_int); @@ -355,6 +571,7 @@ mod tests { use tari_core::{ blocks::{genesis_block::get_genesis_block, Block}, proof_of_work::Difficulty, + transactions::tari_amount::MicroMinotari, }; use super::*; @@ -386,9 +603,34 @@ mod tests { #[test] fn detect_change_in_consensus_encoding() { - const NONCE: u64 = 11937686248184272944; - let difficulty = Difficulty::from_u64(35357).expect("Failed to create difficulty"); + #[cfg(tari_target_network_mainnet)] + let (nonce, difficulty, network) = match Network::get_current_or_user_setting_or_default() { + Network::MainNet => ( + 3145418102407526886, + Difficulty::from_u64(1505).unwrap(), + Network::MainNet, + ), + Network::StageNet => ( + 135043993867732261, + Difficulty::from_u64(1059).unwrap(), + Network::StageNet, + ), + _ => panic!("Invalid network for mainnet target"), + }; + #[cfg(tari_target_network_nextnet)] + let (nonce, difficulty, network) = ( + 5154919981564263219, + Difficulty::from_u64(2950).unwrap(), + Network::NextNet, + ); + #[cfg(not(any(tari_target_network_mainnet, tari_target_network_nextnet)))] + let (nonce, difficulty, network) = ( + 8520885611996410570, + Difficulty::from_u64(3143).unwrap(), + Network::Esmeralda, + ); unsafe { + set_network_if_choice_valid(network).unwrap(); let mut error = -1; let error_ptr = &mut error as *mut c_int; let block = create_test_block(); @@ -396,13 +638,14 @@ mod tests { #[allow(clippy::cast_possible_truncation)] let len = header_bytes.len() as u32; let byte_vec = byte_vector_create(header_bytes.as_ptr(), len, error_ptr); - inject_nonce(byte_vec, NONCE, error_ptr); + inject_nonce(byte_vec, nonce, error_ptr); assert_eq!(error, 0); - let result = share_difficulty(byte_vec, error_ptr); + let result = share_difficulty(byte_vec, u32::from(network.as_byte()), error_ptr); if result != difficulty.as_u64() { // Use this to generate new NONCE and DIFFICULTY // Use ONLY if you know encoding has changed let (difficulty, nonce) = generate_nonce_with_min_difficulty(min_difficulty()).unwrap(); + eprintln!("network = {network:?}"); eprintln!("nonce = {:?}", nonce); eprintln!("difficulty = {:?}", difficulty); panic!( @@ -417,6 +660,7 @@ mod tests { #[test] fn check_difficulty() { unsafe { + let network = Network::get_current_or_user_setting_or_default(); let (difficulty, nonce) = generate_nonce_with_min_difficulty(min_difficulty()).unwrap(); let mut error = -1; let error_ptr = &mut error as *mut c_int; @@ -426,7 +670,7 @@ mod tests { let byte_vec = byte_vector_create(header_bytes.as_ptr(), len, error_ptr); inject_nonce(byte_vec, nonce, error_ptr); assert_eq!(error, 0); - let result = share_difficulty(byte_vec, error_ptr); + let result = share_difficulty(byte_vec, u32::from(network.as_byte()), error_ptr); assert_eq!(result, difficulty.as_u64()); byte_vector_destroy(byte_vec); } @@ -453,6 +697,7 @@ mod tests { #[test] fn check_share() { unsafe { + let network = Network::get_current_or_user_setting_or_default(); let (difficulty, nonce) = generate_nonce_with_min_difficulty(min_difficulty()).unwrap(); let mut error = -1; let error_ptr = &mut error as *mut c_int; @@ -471,6 +716,7 @@ mod tests { let result = share_validate( byte_vec, hash_hex_broken_ptr, + u32::from(network.as_byte()), share_difficulty, template_difficulty, error_ptr, @@ -485,20 +731,41 @@ mod tests { share_difficulty = difficulty.as_u64() + 1000; template_difficulty = difficulty.as_u64() + 2000; // let calculate for invalid share and target diff - let result = share_validate(byte_vec, hash_hex_ptr, share_difficulty, template_difficulty, error_ptr); + let 
result = share_validate( + byte_vec, + hash_hex_ptr, + u32::from(network.as_byte()), + share_difficulty, + template_difficulty, + error_ptr, + ); assert_eq!(result, 4); assert_eq!(error, 4); // let calculate for valid share and invalid target diff share_difficulty = difficulty.as_u64(); let hash_hex = CString::new(hash.clone()).unwrap(); let hash_hex_ptr: *const c_char = CString::into_raw(hash_hex) as *const c_char; - let result = share_validate(byte_vec, hash_hex_ptr, share_difficulty, template_difficulty, error_ptr); + let result = share_validate( + byte_vec, + hash_hex_ptr, + u32::from(network.as_byte()), + share_difficulty, + template_difficulty, + error_ptr, + ); assert_eq!(result, 1); // let calculate for valid target diff template_difficulty = difficulty.as_u64(); let hash_hex = CString::new(hash).unwrap(); let hash_hex_ptr: *const c_char = CString::into_raw(hash_hex) as *const c_char; - let result = share_validate(byte_vec, hash_hex_ptr, share_difficulty, template_difficulty, error_ptr); + let result = share_validate( + byte_vec, + hash_hex_ptr, + u32::from(network.as_byte()), + share_difficulty, + template_difficulty, + error_ptr, + ); assert_eq!(result, 0); byte_vector_destroy(byte_vec); } @@ -529,4 +796,49 @@ mod tests { assert_ne!(error, 0); } } + + #[test] + fn check_inject_coinbase() { + unsafe { + let network = Network::get_current_or_user_setting_or_default(); + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let header = BlockHeader::new(0); + let block = + NewBlockTemplate::from_block(header.into_builder().build(), Difficulty::min(), 0.into()).unwrap(); + + let block_bytes = borsh::to_vec(&block).unwrap(); + #[allow(clippy::cast_possible_truncation)] + let len = block_bytes.len() as u32; + let byte_vec = byte_vector_create(block_bytes.as_ptr(), len, error_ptr); + + let address = TariAddress::default(); + let add_string = CString::new(address.to_string()).unwrap(); + let add_ptr: *const c_char = CString::into_raw(add_string) as *const c_char; + + let extra_string = CString::new("a").unwrap(); + let extra_ptr: *const c_char = CString::into_raw(extra_string) as *const c_char; + + inject_coinbase( + byte_vec, + 100, + false, + true, + add_ptr, + extra_ptr, + u32::from(network.as_byte()), + error_ptr, + ); + + assert_eq!(error, 0); + + let block_temp: NewBlockTemplate = BorshDeserialize::deserialize(&mut (*byte_vec).0.as_slice()).unwrap(); + + assert_eq!(block_temp.body.kernels().len(), 1); + assert_eq!(block_temp.body.outputs().len(), 1); + assert!(block_temp.body.outputs()[0].features.is_coinbase()); + assert_eq!(block_temp.body.outputs()[0].features.coinbase_extra, vec![97]); + assert_eq!(block_temp.body.outputs()[0].minimum_value_promise, MicroMinotari(100)); + } + } } diff --git a/base_layer/tari_mining_helper_ffi/tari_mining_helper.h b/base_layer/tari_mining_helper_ffi/tari_mining_helper.h new file mode 100644 index 0000000000..705f33c554 --- /dev/null +++ b/base_layer/tari_mining_helper_ffi/tari_mining_helper.h @@ -0,0 +1,202 @@ +// Copyright 2024. The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +// This file was generated by cargo-bindgen. Please do not edit manually. + +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdlib.h> + +/** + * The latest version of the Identity Signature. 
+ */ +#define IdentitySignature_LATEST_VERSION 0 + +struct ByteVector; + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/** + * Creates a ByteVector + * + * ## Arguments + * `byte_array` - The pointer to the byte array + * `element_count` - The number of elements in byte_array + * `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions + * as an out parameter. + * + * ## Returns + * `*mut ByteVector` - Pointer to the created ByteVector. Note that it will be ptr::null_mut() + * if the byte_array pointer was null or if the elements in the byte_vector don't match + * element_count when it is created + * + * # Safety + * The ```byte_vector_destroy``` function must be called when finished with a ByteVector to prevent a memory leak + */ +struct ByteVector *byte_vector_create(const unsigned char *byte_array, + unsigned int element_count, + int *error_out); + +/** + * Frees memory for a ByteVector + * + * ## Arguments + * `bytes` - The pointer to a ByteVector + * + * ## Returns + * `()` - Does not return a value, equivalent to void in C + * + * # Safety + * None + */ +void byte_vector_destroy(struct ByteVector *bytes); + +/** + * Gets a c_uchar at position in a ByteVector + * + * ## Arguments + * `ptr` - The pointer to a ByteVector + * `position` - The integer position + * `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions + * as an out parameter. + * + * ## Returns + * `c_uchar` - Returns a character. Note that the character will be a null terminator (0) if ptr + * is null or if the position is invalid + * + * # Safety + * None + */ +unsigned char byte_vector_get_at(struct ByteVector *ptr, + unsigned int position, + int *error_out); + +/** + * Gets the number of elements in a ByteVector + * + * ## Arguments + * `ptr` - The pointer to a ByteVector + * `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions + * as an out parameter. + * + * ## Returns + * `c_uint` - Returns the integer number of elements in the ByteVector. 
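Every fallible function in this generated header follows the same `error_out` convention used in the Rust sources above: the callee first resets the out-parameter, then overwrites it with a non-zero code on failure, so a caller can always read a definitive result after the call. A minimal self-contained sketch of that pattern (the function and error codes here are hypothetical, not part of this API):

```rust
use std::os::raw::c_int;
use std::ptr;

const NO_ERROR: c_int = 0;
const NULL_POINTER: c_int = 1;

/// Clears `error_out` first, then overwrites it with a code on failure,
/// mirroring the out-parameter convention documented throughout this header.
unsafe fn read_byte(input: *const u8, error_out: *mut c_int) -> u8 {
    let mut error = NO_ERROR;
    ptr::swap(error_out, &mut error as *mut c_int);
    if input.is_null() {
        error = NULL_POINTER;
        ptr::swap(error_out, &mut error as *mut c_int);
        return 0;
    }
    *input
}

fn main() {
    let mut error = -1;
    let value = 7u8;
    let byte = unsafe { read_byte(&value as *const u8, &mut error as *mut c_int) };
    assert_eq!((byte, error), (7, 0)); // success clears the stale error code
    let byte = unsafe { read_byte(ptr::null(), &mut error as *mut c_int) };
    assert_eq!((byte, error), (0, 1)); // failure reports through error_out
}
```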
Note that it will be zero + * if ptr is null + * + * # Safety + * None + */ +unsigned int byte_vector_get_length(const struct ByteVector *vec, + int *error_out); + +/** + * Validates that a hex string is convertible into a TariPublicKey + * + * ## Arguments + * `hex` - The hex formatted cstring to be validated + * + * ## Returns + * `bool` - Returns true/false + * `error_out` - Error code returned, 0 means no error + * + * # Safety + * None + */ +bool public_key_hex_validate(const char *hex, int *error_out); + +/** + * Injects a nonce into a blocktemplate + * + * ## Arguments + * `hex` - The hex formatted cstring + * `nonce` - The nonce to be injected + * + * ## Returns + * `c_char` - The updated hex formatted cstring or null on error + * `error_out` - Error code returned, 0 means no error + * + * # Safety + * None + */ +void inject_nonce(struct ByteVector *header, unsigned long long nonce, int *error_out); + +/** + * Injects a coinbase into a blocktemplate + * + * ## Arguments + * `block_template_bytes` - The block template as bytes, serialized with borsh.io + * `value` - The value of the coinbase + * `stealth_payment` - Boolean value, is this a stealth payment or normal one-sided + * `revealed_value_proof` - Boolean value, should this use the revealed value proof, or BP+ + * `wallet_payment_address` - The address to pay the coinbase to + * `coinbase_extra` - The value of the coinbase extra field + * `network` - The value of the network + * + * ## Returns + * `block_template_bytes` - The updated block template + * `error_out` - Error code returned, 0 means no error + * + * # Safety + * None + */ +void inject_coinbase(struct ByteVector *block_template_bytes, + unsigned long long coibase_value, + bool stealth_payment, + bool revealed_value_proof, + const char *wallet_payment_address, + const char *coinbase_extra, + unsigned int network, + int *error_out); + +/** + * Returns the difficulty of a share + * + * ## Arguments + * `hex` - The hex formatted cstring to be validated + * + * ## Returns + * `c_ulonglong` - Difficulty, 0 on error + * `error_out` - Error code returned, 0 means no error + * + * # Safety + * None + */ +unsigned long long share_difficulty(struct ByteVector *header, + unsigned int network, + int *error_out); + +/** + * Validates a share submission + * + * ## Arguments + * `hex` - The hex representation of the share to be validated + * `hash` - The hash of the share to be validated + * `nonce` - The nonce for the share to be validated + * `stratum_difficulty` - The stratum difficulty to be checked against (meeting this means that the share is valid for + * payout) `template_difficulty` - The difficulty to be checked against (meeting this means the share is also a block + * to be submitted to the chain) + * + * ## Returns + * `c_uint` - Returns one of the following: + * 0: Valid Block + * 1: Valid Share + * 2: Invalid Share + * 3: Invalid Difficulty + * `error_out` - Error code returned, 0 means no error + * + * # Safety + * None + */ +int share_validate(struct ByteVector *header, + const char *hash, + unsigned int network, + unsigned long long share_difficulty, + unsigned long long template_difficulty, + int *error_out); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/base_layer/wallet/Cargo.toml b/base_layer/wallet/Cargo.toml index 1ee9e6fe41..914a325eb4 100644 --- a/base_layer/wallet/Cargo.toml +++ b/base_layer/wallet/Cargo.toml @@ -3,30 +3,30 @@ name = "minotari_wallet" authors = ["The Tari Development Community"] description = "Tari cryptocurrency 
wallet library" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] -tari_core = { path = "../../base_layer/core", default-features = false, features = ["transactions", "mempool_proto", "base_node_proto"] } -tari_common = { path = "../../common" } -tari_common_types = { path = "../../base_layer/common_types" } -tari_comms = { path = "../../comms/core" } -tari_comms_dht = { path = "../../comms/dht" } +tari_core = { path = "../../base_layer/core", default-features = false, features = ["transactions", "mempool_proto", "base_node_proto"], version = "1.0.0-pre.11a" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } +tari_common_types = { path = "../../base_layer/common_types", version = "1.0.0-pre.11a" } +tari_comms = { path = "../../comms/core", version = "1.0.0-pre.11a" } +tari_comms_dht = { path = "../../comms/dht", version = "1.0.0-pre.11a" } tari_crypto = { version = "0.20" } -tari_key_manager = { path = "../key_manager", features = ["key_manager_service"] } -tari_p2p = { path = "../p2p", features = ["auto-update"] } -tari_script = { path = "../../infrastructure/tari_script" } -tari_service_framework = { path = "../service_framework" } -tari_shutdown = { path = "../../infrastructure/shutdown" } -tari_common_sqlite = { path = "../../common_sqlite" } +tari_key_manager = { path = "../key_manager", features = ["key_manager_service"], version = "1.0.0-pre.11a" } +tari_p2p = { path = "../p2p", features = ["auto-update"], version = "1.0.0-pre.11a"} +tari_script = { path = "../../infrastructure/tari_script", version = "1.0.0-pre.11a" } +tari_service_framework = { path = "../service_framework", version = "1.0.0-pre.11a" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } +tari_common_sqlite = { path = "../../common_sqlite", version = "1.0.0-pre.11a" } tari_utilities = { version = "0.7" } -tari_contacts = { path = "../../base_layer/contacts" } +tari_contacts = { path = "../../base_layer/contacts", version = "1.0.0-pre.11a" } # Uncomment for tokio tracing via tokio-console (needs "tracing" features) #console-subscriber = "0.1.3" -#tokio = { version = "1.20", features = ["sync", "macros", "tracing"] } +#tokio = { version = "1.36", features = ["sync", "macros", "tracing"] } # Uncomment for normal use (non tokio-console tracing) -tokio = { version = "1.23", features = ["sync", "macros"] } +tokio = { version = "1.36", features = ["sync", "macros"] } async-trait = "0.1.50" argon2 = "0.4.1" @@ -57,7 +57,7 @@ chacha20poly1305 = "0.10.1" zeroize = "1" [build-dependencies] -tari_common = { path = "../../common", features = ["build", "static-application-info"] } +tari_common = { path = "../../common", features = ["build", "static-application-info"], version = "1.0.0-pre.11a" } [dev-dependencies] tari_p2p = { path = "../p2p", features = ["test-mocks"] } diff --git a/base_layer/wallet/src/base_node_service/monitor.rs b/base_layer/wallet/src/base_node_service/monitor.rs index 6581ab4bf3..0acccd01e7 100644 --- a/base_layer/wallet/src/base_node_service/monitor.rs +++ b/base_layer/wallet/src/base_node_service/monitor.rs @@ -175,7 +175,7 @@ where self.db.set_chain_metadata(chain_metadata.clone())?; let is_synced = tip_info.is_synced; - let height_of_longest_chain = chain_metadata.height_of_longest_chain(); + let best_block_height = chain_metadata.best_block_height(); let new_block = self .update_state(BaseNodeState { @@ -191,7 +191,7 @@ where target: LOG_TARGET, "Base node {} Tip: {} ({}) Latency: {} ms", 
base_node_id, - height_of_longest_chain, + best_block_height, if is_synced { "Synced" } else { "Syncing..." }, latency.as_millis() ); @@ -221,11 +221,11 @@ where let mut lock = self.state.write().await; let (new_block_detected, height, hash) = match (new_state.chain_metadata.clone(), lock.chain_metadata.clone()) { (Some(new_metadata), Some(old_metadata)) => ( - new_metadata.best_block() != old_metadata.best_block(), - new_metadata.height_of_longest_chain(), - *new_metadata.best_block(), + new_metadata.best_block_hash() != old_metadata.best_block_hash(), + new_metadata.best_block_height(), + *new_metadata.best_block_hash(), ), - (Some(new_metadata), _) => (true, new_metadata.height_of_longest_chain(), *new_metadata.best_block()), + (Some(new_metadata), _) => (true, new_metadata.best_block_height(), *new_metadata.best_block_hash()), (None, _) => (false, 0, BlockHashType::default()), }; diff --git a/base_layer/wallet/src/config.rs b/base_layer/wallet/src/config.rs index 813b323d01..3d804cb5f5 100644 --- a/base_layer/wallet/src/config.rs +++ b/base_layer/wallet/src/config.rs @@ -32,7 +32,7 @@ use tari_common::{ configuration::{serializers, Network, StringList}, SubConfigPath, }; -use tari_common_types::grpc_authentication::GrpcAuthentication; +use tari_common_types::{grpc_authentication::GrpcAuthentication, wallet_types::WalletType}; use tari_comms::multiaddr::Multiaddr; use tari_p2p::P2pConfig; use tari_utilities::SafePassword; @@ -118,6 +118,13 @@ pub struct WalletConfig { pub use_libtor: bool, /// A path to the file that stores the base node identity and secret key pub identity_file: Option, + /// The type of wallet software, or specific type of hardware + pub wallet_type: Option, + /// The cool down period between balance enquiry checks in seconds; requests faster than this will be ignored. 
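The `balance_enquiry_cooldown_period` doc comment above describes a rate limit on balance queries. A minimal sketch of how such a cooldown gate can behave, under the assumption that requests inside the window are served from a cache (the types here are simplified stand-ins, not the wallet's actual service):

```rust
use std::time::{Duration, Instant};

/// Simplified stand-in for a balance service: inside the cooldown window,
/// repeated enquiries are answered from the cache instead of triggering
/// another (potentially expensive) database query.
struct BalanceCache {
    cooldown: Duration,
    last_refresh: Option<Instant>,
    cached_balance: u64,
}

impl BalanceCache {
    fn get_balance(&mut self, compute: impl FnOnce() -> u64) -> u64 {
        let stale = self
            .last_refresh
            .map_or(true, |t| t.elapsed() >= self.cooldown);
        if stale {
            self.cached_balance = compute();
            self.last_refresh = Some(Instant::now());
        }
        self.cached_balance
    }
}

fn main() {
    let mut cache = BalanceCache {
        cooldown: Duration::from_secs(5), // the default this diff configures
        last_refresh: None,
        cached_balance: 0,
    };
    assert_eq!(cache.get_balance(|| 42), 42); // first call computes
    assert_eq!(cache.get_balance(|| 99), 42); // within cooldown: cached value
}
```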
+ /// For specialized wallets processing many batch transactions this setting could be increased to 60 s to retain + /// responsiveness of the wallet with slightly delayed balance updates + #[serde(with = "serializers::seconds")] + pub balance_enquiry_cooldown_period: Duration, } impl Default for WalletConfig { @@ -156,6 +163,8 @@ impl Default for WalletConfig { num_required_confirmations: 3, use_libtor: true, identity_file: None, + wallet_type: None, + balance_enquiry_cooldown_period: Duration::from_secs(5), } } } diff --git a/base_layer/wallet/src/operation_id.rs b/base_layer/wallet/src/operation_id.rs index 7b91bd8a23..d1d565a746 100644 --- a/base_layer/wallet/src/operation_id.rs +++ b/base_layer/wallet/src/operation_id.rs @@ -50,7 +50,7 @@ impl Hash for OperationId { impl PartialEq for OperationId { fn eq(&self, other: &Self) -> bool { - self.0.eq(&other.0) + self.0 == other.0 } } diff --git a/base_layer/wallet/src/output_manager_service/error.rs b/base_layer/wallet/src/output_manager_service/error.rs index f7f7c3ea64..c7ae684627 100644 --- a/base_layer/wallet/src/output_manager_service/error.rs +++ b/base_layer/wallet/src/output_manager_service/error.rs @@ -148,6 +148,8 @@ pub enum OutputManagerError { ValidationInProgress, #[error("Invalid data: `{0}`")] RangeProofError(String), + #[error("Transaction is over sized: `{0}`")] + TooManyInputsToFulfillTransaction(String), } impl From for OutputManagerError { diff --git a/base_layer/wallet/src/output_manager_service/handle.rs b/base_layer/wallet/src/output_manager_service/handle.rs index e208a09ccf..54caae3e11 100644 --- a/base_layer/wallet/src/output_manager_service/handle.rs +++ b/base_layer/wallet/src/output_manager_service/handle.rs @@ -45,10 +45,7 @@ use tower::Service; use crate::output_manager_service::{ error::OutputManagerError, service::{Balance, OutputInfoByTxId}, - storage::{ - database::OutputBackendQuery, - models::{DbWalletOutput, KnownOneSidedPaymentScript, SpendingPriority}, - }, + storage::models::{DbWalletOutput, KnownOneSidedPaymentScript, SpendingPriority}, UtxoSelectionCriteria, }; @@ -90,7 +87,6 @@ pub enum OutputManagerRequest { CancelTransaction(TxId), GetSpentOutputs, GetUnspentOutputs, - GetOutputsBy(OutputBackendQuery), GetInvalidOutputs, ValidateUtxos, RevalidateTxos, @@ -152,7 +148,6 @@ impl fmt::Display for OutputManagerRequest { CancelTransaction(v) => write!(f, "CancelTransaction ({})", v), GetSpentOutputs => write!(f, "GetSpentOutputs"), GetUnspentOutputs => write!(f, "GetUnspentOutputs"), - GetOutputsBy(q) => write!(f, "GetOutputs({:#?})", q), GetInvalidOutputs => write!(f, "GetInvalidOutputs"), ValidateUtxos => write!(f, "ValidateUtxos"), RevalidateTxos => write!(f, "RevalidateTxos"), diff --git a/base_layer/wallet/src/output_manager_service/mod.rs b/base_layer/wallet/src/output_manager_service/mod.rs index b73a23f515..ef26e0d5a2 100644 --- a/base_layer/wallet/src/output_manager_service/mod.rs +++ b/base_layer/wallet/src/output_manager_service/mod.rs @@ -62,6 +62,9 @@ use crate::{ util::wallet_identity::WalletIdentity, }; +/// The maximum number of transaction inputs that can be created in a single transaction, slightly less than the maximum +/// that a single comms message can hold. 
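The input limit documented here is enforced in `select_utxos` later in this diff: the UTXO query is capped at `TRANSACTION_INPUTS_LIMIT` rows, and when a capped result still cannot fund the transaction, a `TooManyInputsToFulfillTransaction` error is raised instead of a plain insufficient-funds outcome. A simplified model of that guard (it omits the separate not-enough-funds path):

```rust
const TRANSACTION_INPUTS_LIMIT: u32 = 4000;

/// Simplified guard: if the selection could not cover the target and the
/// query returned exactly the row cap, assume the result was truncated and
/// reject. (The real code also handles the ordinary insufficient-funds case
/// separately.)
fn check_selection(fetched: usize, enough_spendable: bool) -> Result<(), String> {
    if !enough_spendable && fetched == TRANSACTION_INPUTS_LIMIT as usize {
        return Err(format!("Input limit '{}' reached", TRANSACTION_INPUTS_LIMIT));
    }
    Ok(())
}

fn main() {
    assert!(check_selection(4000, false).is_err()); // capped and still short
    assert!(check_selection(12, false).is_ok()); // short, but not truncated
    assert!(check_selection(4000, true).is_ok()); // capped but sufficient
}
```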
+pub const TRANSACTION_INPUTS_LIMIT: u32 = 4000; const LOG_TARGET: &str = "wallet::output_manager_service::initializer"; pub struct OutputManagerServiceInitializer diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index ed734105d9..98ec0f2e20 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -63,7 +63,7 @@ use tari_script::{inputs, script, ExecutionStack, Opcode, TariScript}; use tari_service_framework::reply_channel; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray}; -use tokio::sync::Mutex; +use tokio::{sync::Mutex, time::Instant}; use crate::{ base_node_service::handle::{BaseNodeEvent, BaseNodeServiceHandle}, @@ -88,6 +88,7 @@ use crate::{ OutputStatus, }, tasks::TxoValidationTask, + TRANSACTION_INPUTS_LIMIT, }, util::wallet_identity::WalletIdentity, }; @@ -171,6 +172,8 @@ where let mut base_node_service_event_stream = self.base_node_service.get_event_stream(); debug!(target: LOG_TARGET, "Output Manager Service started"); + // Outputs marked as short-term encumbered are not yet stored as transactions in the TMS, so let's clear them + self.resources.db.clear_short_term_encumberances()?; loop { tokio::select! { event = base_node_service_event_stream.recv() => { @@ -226,7 +229,7 @@ where .map(|_| OutputManagerResponse::OutputMetadataSignatureUpdated), OutputManagerRequest::GetBalance => { let current_tip_for_time_lock_calculation = match self.base_node_service.get_chain_metadata().await { - Ok(metadata) => metadata.map(|m| m.height_of_longest_chain()), + Ok(metadata) => metadata.map(|m| m.best_block_height()), Err(_) => None, }; self.get_balance(current_tip_for_time_lock_calculation) @@ -304,10 +307,6 @@ where let outputs = self.fetch_unspent_outputs()?; Ok(OutputManagerResponse::UnspentOutputs(outputs)) }, - OutputManagerRequest::GetOutputsBy(q) => { - let outputs = self.fetch_outputs_by(q)?.into_iter().map(|v| v.into()).collect(); - Ok(OutputManagerResponse::Outputs(outputs)) - }, OutputManagerRequest::ValidateUtxos => { self.validate_outputs().map(OutputManagerResponse::TxoValidationStarted) }, @@ -1258,6 +1257,7 @@ where num_outputs: usize, total_output_features_and_scripts_byte_size: usize, ) -> Result { + let start = Instant::now(); debug!( target: LOG_TARGET, "select_utxos amount: {}, fee_per_gram: {}, num_outputs: {}, output_features_and_scripts_byte_size: {}, \ @@ -1284,11 +1284,21 @@ where target: LOG_TARGET, "select_utxos selection criteria: {}", selection_criteria ); - let tip_height = chain_metadata.as_ref().map(|m| m.height_of_longest_chain()); + let tip_height = chain_metadata.as_ref().map(|m| m.best_block_height()); + let start_new = Instant::now(); let uo = self .resources .db .fetch_unspent_outputs_for_spending(&selection_criteria, amount, tip_height)?; + let uo_len = uo.len(); + trace!( + target: LOG_TARGET, + "select_utxos profile - fetch_unspent_outputs_for_spending: {} outputs, {} ms (at {})", + uo_len, + start_new.elapsed().as_millis(), + start.elapsed().as_millis(), + ); + let start_new = Instant::now(); // For non-standard queries, we want to ensure that the intended UTXOs are selected if !selection_criteria.filter.is_standard() && uo.is_empty() { @@ -1311,7 +1321,7 @@ where .map_err(|e| OutputManagerError::ConversionError(e.to_string()))?, ); - trace!(target: LOG_TARGET, "We found {} UTXOs to select from", uo.len()); + trace!(target: LOG_TARGET, "We found {} UTXOs to select from", 
uo_len); let mut requires_change_output = false; let mut utxos_total_value = MicroMinotari::from(0); @@ -1350,9 +1360,23 @@ where let perfect_utxo_selection = utxos_total_value == amount + fee_without_change; let enough_spendable = utxos_total_value > amount + fee_with_change; + trace!( + target: LOG_TARGET, + "select_utxos profile - final_selection: {} outputs from {}, {} ms (at {})", + utxos.len(), + uo_len, + start_new.elapsed().as_millis(), + start.elapsed().as_millis(), + ); if !perfect_utxo_selection && !enough_spendable { - let current_tip_for_time_lock_calculation = chain_metadata.map(|cm| cm.height_of_longest_chain()); + if uo_len == TRANSACTION_INPUTS_LIMIT as usize { + return Err(OutputManagerError::TooManyInputsToFulfillTransaction(format!( + "Input limit '{}' reached", + TRANSACTION_INPUTS_LIMIT + ))); + } + let current_tip_for_time_lock_calculation = chain_metadata.map(|cm| cm.best_block_height()); let balance = self.get_balance(current_tip_for_time_lock_calculation)?; let pending_incoming = balance.pending_incoming_balance; if utxos_total_value + pending_incoming >= amount + fee_with_change { @@ -1379,8 +1403,8 @@ where Ok(self.resources.db.fetch_all_unspent_outputs()?) } - pub fn fetch_outputs_by(&self, q: OutputBackendQuery) -> Result, OutputManagerError> { - Ok(self.resources.db.fetch_outputs_by(q)?) + pub fn fetch_outputs_by_query(&self, q: OutputBackendQuery) -> Result, OutputManagerError> { + Ok(self.resources.db.fetch_outputs_by_query(q)?) } pub fn fetch_invalid_outputs(&self) -> Result, OutputManagerError> { diff --git a/base_layer/wallet/src/output_manager_service/storage/database/backend.rs b/base_layer/wallet/src/output_manager_service/storage/database/backend.rs index 702e6f2caf..2f1a72cffc 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database/backend.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database/backend.rs @@ -14,6 +14,7 @@ use crate::output_manager_service::{ storage::{ database::{DbKey, DbValue, OutputBackendQuery, WriteOperation}, models::DbWalletOutput, + sqlite_db::{ReceivedOutputInfoForBatch, SpentOutputInfoForBatch}, }, }; @@ -37,29 +38,20 @@ pub trait OutputManagerBackend: Send + Sync + Clone { /// Modify the state the of the backend with a write operation fn write(&self, op: WriteOperation) -> Result, OutputManagerStorageError>; fn fetch_pending_incoming_outputs(&self) -> Result, OutputManagerStorageError>; - - fn set_received_output_mined_height_and_status( + /// Perform a batch update of the received outputs' mined height and status + fn set_received_outputs_mined_height_and_statuses( &self, - hash: FixedHash, - mined_height: u64, - mined_in_block: FixedHash, - confirmed: bool, - mined_timestamp: u64, + updates: Vec, ) -> Result<(), OutputManagerStorageError>; - - fn set_output_to_unmined_and_invalid(&self, hash: FixedHash) -> Result<(), OutputManagerStorageError>; - fn update_last_validation_timestamp(&self, hash: FixedHash) -> Result<(), OutputManagerStorageError>; + /// Perform a batch update of the outputs' unmined and invalid state + fn set_outputs_to_unmined_and_invalid(&self, hashes: Vec) -> Result<(), OutputManagerStorageError>; + /// Perform a batch update of the outputs' last validation timestamp + fn update_last_validation_timestamps(&self, commitments: Vec) -> Result<(), OutputManagerStorageError>; fn set_outputs_to_be_revalidated(&self) -> Result<(), OutputManagerStorageError>; - - fn mark_output_as_spent( - &self, - hash: FixedHash, - mark_deleted_at_height: u64, - mark_deleted_in_block: 
FixedHash, - confirmed: bool, - ) -> Result<(), OutputManagerStorageError>; - - fn mark_output_as_unspent(&self, hash: FixedHash) -> Result<(), OutputManagerStorageError>; + /// Perform a batch update of the outputs' spent status + fn mark_outputs_as_spent(&self, updates: Vec) -> Result<(), OutputManagerStorageError>; + /// Perform a batch update of the outputs' unspent status + fn mark_outputs_as_unspent(&self, hashes: Vec<(FixedHash, bool)>) -> Result<(), OutputManagerStorageError>; /// This method encumbers the specified outputs into a `PendingTransactionOutputs` record. This is a short term /// encumberance in case the app is closed or crashes before transaction neogtiation is complete. These will be /// cleared on startup of the service. @@ -101,5 +93,5 @@ pub trait OutputManagerBackend: Send + Sync + Clone { current_tip_height: Option, ) -> Result, OutputManagerStorageError>; fn fetch_outputs_by_tx_id(&self, tx_id: TxId) -> Result, OutputManagerStorageError>; - fn fetch_outputs_by(&self, q: OutputBackendQuery) -> Result, OutputManagerStorageError>; + fn fetch_outputs_by_query(&self, q: OutputBackendQuery) -> Result, OutputManagerStorageError>; } diff --git a/base_layer/wallet/src/output_manager_service/storage/database/mod.rs b/base_layer/wallet/src/output_manager_service/storage/database/mod.rs index 20ec2edc8e..503f607b84 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database/mod.rs @@ -30,7 +30,7 @@ pub use backend::OutputManagerBackend; use log::*; use tari_common_types::{ transaction::TxId, - types::{Commitment, HashOutput}, + types::{Commitment, FixedHash, HashOutput}, }; use tari_core::transactions::{ tari_amount::MicroMinotari, @@ -44,6 +44,7 @@ use crate::output_manager_service::{ service::Balance, storage::{ models::{DbWalletOutput, KnownOneSidedPaymentScript}, + sqlite_db::{ReceivedOutputInfoForBatch, SpentOutputInfoForBatch}, OutputStatus, }, }; @@ -380,22 +381,18 @@ where T: OutputManagerBackend + 'static Ok(()) } - pub fn set_received_output_mined_height_and_status( + pub fn set_received_outputs_mined_height_and_statuses( &self, - hash: HashOutput, - mined_height: u64, - mined_in_block: HashOutput, - confirmed: bool, - mined_timestamp: u64, + updates: Vec, ) -> Result<(), OutputManagerStorageError> { let db = self.db.clone(); - db.set_received_output_mined_height_and_status(hash, mined_height, mined_in_block, confirmed, mined_timestamp)?; + db.set_received_outputs_mined_height_and_statuses(updates)?; Ok(()) } - pub fn set_output_to_unmined_and_invalid(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> { + pub fn set_outputs_to_unmined_and_invalid(&self, hashes: Vec) -> Result<(), OutputManagerStorageError> { let db = self.db.clone(); - db.set_output_to_unmined_and_invalid(hash)?; + db.set_outputs_to_unmined_and_invalid(hashes)?; Ok(()) } @@ -405,27 +402,27 @@ where T: OutputManagerBackend + 'static Ok(()) } - pub fn update_last_validation_timestamp(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> { + pub fn update_last_validation_timestamps( + &self, + commitments: Vec, + ) -> Result<(), OutputManagerStorageError> { let db = self.db.clone(); - db.update_last_validation_timestamp(hash)?; + db.update_last_validation_timestamps(commitments)?; Ok(()) } - pub fn mark_output_as_spent( + pub fn mark_outputs_as_spent( &self, - hash: HashOutput, - deleted_height: u64, - deleted_in_block: HashOutput, - confirmed: bool, + updates: Vec, ) -> Result<(), 
OutputManagerStorageError> { let db = self.db.clone(); - db.mark_output_as_spent(hash, deleted_height, deleted_in_block, confirmed)?; + db.mark_outputs_as_spent(updates)?; Ok(()) } - pub fn mark_output_as_unspent(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> { + pub fn mark_outputs_as_unspent(&self, hashes: Vec<(FixedHash, bool)>) -> Result<(), OutputManagerStorageError> { let db = self.db.clone(); - db.mark_output_as_unspent(hash)?; + db.mark_outputs_as_unspent(hashes)?; Ok(()) } @@ -434,8 +431,11 @@ where T: OutputManagerBackend + 'static Ok(outputs) } - pub fn fetch_outputs_by(&self, q: OutputBackendQuery) -> Result, OutputManagerStorageError> { - self.db.fetch_outputs_by(q) + pub fn fetch_outputs_by_query( + &self, + q: OutputBackendQuery, + ) -> Result, OutputManagerStorageError> { + self.db.fetch_outputs_by_query(q) } } diff --git a/base_layer/wallet/src/output_manager_service/storage/models.rs b/base_layer/wallet/src/output_manager_service/storage/models.rs index 522483572c..0e8b4202e4 100644 --- a/base_layer/wallet/src/output_manager_service/storage/models.rs +++ b/base_layer/wallet/src/output_manager_service/storage/models.rs @@ -100,7 +100,7 @@ impl PartialEq for DbWalletOutput { impl PartialOrd for DbWalletOutput { fn partial_cmp(&self, other: &Self) -> Option { - self.wallet_output.value.partial_cmp(&other.wallet_output.value) + Some(self.cmp(other)) } } diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs index 26b206f0fb..8e0018044d 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs @@ -25,6 +25,7 @@ use std::{convert::TryFrom, str::FromStr}; use chrono::{NaiveDateTime, Utc}; use derivative::Derivative; use diesel::{ + connection::SimpleConnection, prelude::*, r2d2::{ConnectionManager, PooledConnection}, result::Error as DieselError, @@ -415,64 +416,99 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { .collect::, _>>() } - fn set_received_output_mined_height_and_status( + // Perform a batch update of the received outputs; this is more efficient than updating each output individually. + fn set_received_outputs_mined_height_and_statuses( &self, - hash: FixedHash, - mined_height: u64, - mined_in_block: FixedHash, - confirmed: bool, - mined_timestamp: u64, + updates: Vec, ) -> Result<(), OutputManagerStorageError> { let start = Instant::now(); let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let status = if confirmed { - OutputStatus::Unspent as i32 - } else { - OutputStatus::UnspentMinedUnconfirmed as i32 - }; - debug!( - target: LOG_TARGET, - "`set_received_output_mined_height` status: {}", status + + let commitments: Vec = updates.iter().map(|update| update.commitment.clone()).collect(); + if !OutputSql::verify_outputs_exist(&commitments, &mut conn)? { + return Err(OutputManagerStorageError::ValuesNotFound); + } + + // This SQL query is a dummy `INSERT INTO` statement combined with an `ON CONFLICT` clause and `UPDATE` action. + // It specifies what action should be taken if a unique constraint violation occurs during the execution of the + // `INSERT INTO` statement. The `INSERT INTO` statement must list all columns that cannot be NULL should it + // succeed. We provide `commitment` values that will cause a unique constraint violation, triggering the + // `ON CONFLICT` clause. 
The `ON CONFLICT` clause ensures that if a row with a matching commitment already + // exists, the specified columns (`mined_height`, `mined_in_block`, `status`, `mined_timestamp`, + // `marked_deleted_at_height`, `marked_deleted_in_block`, `last_validation_timestamp`) will be updated with the + // provided values. The `UPDATE` action updates the existing row with the new values provided by the + // `INSERT INTO` statement. The `excluded` keyword refers to the new data being inserted or updated and allows + // accessing the values provided in the `VALUES` clause of the `INSERT INTO` statement. + // Note: + // `diesel` does not support batch updates, so we have to do it manually. For example, this + // `diesel::insert_into(...).values(&...).on_conflict(outputs::hash).do_update().set((...)).execute(&mut + // conn)?;` errors with + // `the trait bound `BatchInsert` is not satisfied` + let mut query = String::from( + "INSERT INTO outputs ( commitment, mined_height, mined_in_block, status, mined_timestamp, spending_key, \ + value, output_type, maturity, hash, script, input_data, script_private_key, sender_offset_public_key, \ + metadata_signature_ephemeral_commitment, metadata_signature_ephemeral_pubkey, metadata_signature_u_a, \ + metadata_signature_u_x, metadata_signature_u_y, spending_priority, covenant, encrypted_data, \ + minimum_value_promise + ) + VALUES ", ); - let hash = hash.to_vec(); - let mined_in_block = mined_in_block.to_vec(); - let timestamp = NaiveDateTime::from_timestamp_opt(mined_timestamp as i64, 0).ok_or( - OutputManagerStorageError::ConversionError { - reason: format!("Could not create timestamp mined_timestamp: {}", mined_timestamp), - }, - )?; - diesel::update(outputs::table.filter(outputs::hash.eq(hash))) - .set(( - outputs::mined_height.eq(mined_height as i64), - outputs::mined_in_block.eq(mined_in_block), - outputs::status.eq(status), - outputs::mined_timestamp.eq(timestamp), - outputs::marked_deleted_at_height.eq::>(None), - outputs::marked_deleted_in_block.eq::>>(None), - outputs::last_validation_timestamp.eq::>(None), - )) - .execute(&mut conn) - .num_rows_affected_or_not_found(1)?; + + query.push_str( + &updates + .iter() + .map(|update| { + format!( + "(x'{}', {}, x'{}', {}, '{}', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + update.commitment.to_hex(), + update.mined_height as i64, + update.mined_in_block.to_hex(), + if update.confirmed { + OutputStatus::Unspent as i32 + } else { + OutputStatus::UnspentMinedUnconfirmed as i32 + }, + if let Some(val) = NaiveDateTime::from_timestamp_opt(update.mined_timestamp as i64, 0) { + val.to_string() + } else { + "NULL".to_string() + }, + ) + }) + .collect::>() + .join(", "), + ); + + query.push_str( + " ON CONFLICT (commitment) DO UPDATE SET mined_height = excluded.mined_height, mined_in_block = \ + excluded.mined_in_block, status = excluded.status, mined_timestamp = excluded.mined_timestamp, \ + marked_deleted_at_height = NULL, marked_deleted_in_block = NULL, last_validation_timestamp = NULL", + ); + + conn.batch_execute(&query)?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - set_received_output_mined_height: lock {} + db_op {} = {} ms", + "sqlite profile - set_received_outputs_mined_height_and_statuses: lock {} + db_op {} = {} ms \ + ({} outputs)", acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() + start.elapsed().as_millis(), + updates.len() ); } Ok(()) } - fn set_output_to_unmined_and_invalid(&self, hash: FixedHash) 
-> Result<(), OutputManagerStorageError> { + fn set_outputs_to_unmined_and_invalid(&self, hashes: Vec) -> Result<(), OutputManagerStorageError> { let start = Instant::now(); let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let hash = hash.to_vec(); - diesel::update(outputs::table.filter(outputs::hash.eq(hash))) + + diesel::update(outputs::table.filter(outputs::hash.eq_any(hashes.iter().map(|hash| hash.to_vec())))) .set(( outputs::mined_height.eq::>(None), outputs::mined_in_block.eq::>>(None), @@ -482,14 +518,16 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { outputs::marked_deleted_in_block.eq::>>(None), )) .execute(&mut conn) - .num_rows_affected_or_not_found(1)?; + .num_rows_affected_or_not_found(hashes.len())?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - set_output_to_unmined: lock {} + db_op {} = {} ms", + "sqlite profile - set_outputs_to_unmined_and_invalid: lock {} + db_op {} = {} ms ({} outputs)", acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() + start.elapsed().as_millis(), + hashes.len() ); } @@ -525,88 +563,200 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { Ok(()) } - fn update_last_validation_timestamp(&self, hash: FixedHash) -> Result<(), OutputManagerStorageError> { + fn update_last_validation_timestamps(&self, commitments: Vec) -> Result<(), OutputManagerStorageError> { let start = Instant::now(); let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let hash = hash.to_vec(); - diesel::update(outputs::table.filter(outputs::hash.eq(hash))) - .set((outputs::last_validation_timestamp - .eq::>(NaiveDateTime::from_timestamp_opt(Utc::now().timestamp(), 0)),)) - .execute(&mut conn) - .num_rows_affected_or_not_found(1)?; + + if !OutputSql::verify_outputs_exist(&commitments, &mut conn)? { + return Err(OutputManagerStorageError::ValuesNotFound); + } + + let last_validation_timestamp = Utc::now().naive_utc(); + + // Three queries were evaluated to determine the most efficient way to update the last validation timestamp + // during system-level stress testing: + // - Using `diesel`: + // - `diesel::update(outputs::table.filter(outputs::hash.eq_any(hashes)).set(...).execute(&mut conn)` + // - Note: `diesel` does not support batch updates, so we have to do it manually. + // - Using a raw query that mimicked the `diesel` query: + // - `UPDATE outputs SET last_validation_timestamp = '{}' WHERE hash IN ({})` + // - 20% faster than `diesel` on average + // - Using a raw query with a batch insert (as implemented below): + // - `INSERT INTO outputs (..) VALUES (...) 
ON CONFLICT (commitment) DO UPDATE SET ...` + // - 1011% faster than `diesel` on average + + let mut query = String::from( + "INSERT INTO outputs ( commitment, last_validation_timestamp, mined_height, mined_in_block, status, \ + mined_timestamp, spending_key, value, output_type, maturity, hash, script, input_data, \ + script_private_key, sender_offset_public_key, metadata_signature_ephemeral_commitment, \ + metadata_signature_ephemeral_pubkey, metadata_signature_u_a, metadata_signature_u_x, \ + metadata_signature_u_y, spending_priority, covenant, encrypted_data, minimum_value_promise + ) + VALUES ", + ); + + query.push_str( + &commitments + .iter() + .map(|commitment| { + format!( + "(x'{}', '{}', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + commitment.to_hex(), + last_validation_timestamp, + ) + }) + .collect::>() + .join(", "), + ); + + query.push_str( + " ON CONFLICT (commitment) DO UPDATE SET last_validation_timestamp = excluded.last_validation_timestamp", + ); + + conn.batch_execute(&query)?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - set_output_to_be_revalidated_in_the_future: lock {} + db_op {} = {} ms", + "sqlite profile - update_last_validation_timestamps: lock {} + db_op {} = {} ms ({} outputs)", acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() + start.elapsed().as_millis(), + commitments.len(), ); } Ok(()) } - fn mark_output_as_spent( - &self, - hash: FixedHash, - mark_deleted_at_height: u64, - mark_deleted_in_block: FixedHash, - confirmed: bool, - ) -> Result<(), OutputManagerStorageError> { + // Perform a batch update of the spent outputs; this is more efficient than updating each output individually. + fn mark_outputs_as_spent(&self, updates: Vec) -> Result<(), OutputManagerStorageError> { let start = Instant::now(); let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let hash = hash.to_vec(); - let mark_deleted_in_block = mark_deleted_in_block.to_vec(); - let status = if confirmed { - OutputStatus::Spent as i32 - } else { - OutputStatus::SpentMinedUnconfirmed as i32 - }; - diesel::update(outputs::table.filter(outputs::hash.eq(hash))) - .set(( - outputs::marked_deleted_at_height.eq(mark_deleted_at_height as i64), - outputs::marked_deleted_in_block.eq(mark_deleted_in_block), - outputs::status.eq(status), - )) - .execute(&mut conn) - .num_rows_affected_or_not_found(1)?; + + let commitments: Vec = updates.iter().map(|update| update.commitment.clone()).collect(); + if !OutputSql::verify_outputs_exist(&commitments, &mut conn)? { + return Err(OutputManagerStorageError::ValuesNotFound); + } + + // This SQL query is a dummy `INSERT INTO` statement combined with an `ON CONFLICT` clause and `UPDATE` action. + // It specifies what action should be taken if a unique constraint violation occurs during the execution of the + // `INSERT INTO` statement. The `INSERT INTO` statement must list all columns that cannot be NULL should it + // succeed. We provide `commitment` values that will cause a unique constraint violation, triggering the + // `ON CONFLICT` clause. The `ON CONFLICT` clause ensures that if a row with a matching commitment already + // exists, the specified columns (`mined_height`, `mined_in_block`, `status`, `mined_timestamp`, + // `marked_deleted_at_height`, `marked_deleted_in_block`, `last_validation_timestamp`) will be updated with the + // provided values. 
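The surrounding comment explains the workaround for diesel's missing batch-update support: a dummy `INSERT` that always hits the unique `commitment` constraint, so `ON CONFLICT ... DO UPDATE` carries out the real per-row update in a single statement. A stripped-down sketch of how such a statement can be assembled (only the updated columns are shown; as the comment notes, the real statement must also supply every NOT NULL column of the `outputs` table):

```rust
/// Row data for the batch, mirroring the shape used by mark_outputs_as_spent.
struct SpentUpdate {
    commitment_hex: String,
    deleted_at_height: i64,
    deleted_in_block_hex: String,
    status: i32,
}

/// Builds one statement for the whole batch: the INSERT is expected to
/// collide on the unique `commitment` column for every row, so the
/// ON CONFLICT branch performs the actual updates.
fn build_mark_spent_query(updates: &[SpentUpdate]) -> String {
    let values = updates
        .iter()
        .map(|u| {
            format!(
                "(x'{}', {}, x'{}', {})",
                u.commitment_hex, u.deleted_at_height, u.deleted_in_block_hex, u.status
            )
        })
        .collect::<Vec<_>>()
        .join(", ");
    format!(
        "INSERT INTO outputs (commitment, marked_deleted_at_height, marked_deleted_in_block, status) \
         VALUES {} ON CONFLICT (commitment) DO UPDATE SET \
         marked_deleted_at_height = excluded.marked_deleted_at_height, \
         marked_deleted_in_block = excluded.marked_deleted_in_block, \
         status = excluded.status",
        values
    )
}

fn main() {
    let query = build_mark_spent_query(&[SpentUpdate {
        commitment_hex: "0a".into(),
        deleted_at_height: 42,
        deleted_in_block_hex: "0b".into(),
        status: 6, // e.g. a "spent" status discriminant
    }]);
    assert!(query.contains("ON CONFLICT (commitment) DO UPDATE"));
}
```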
The `UPDATE` action updates the existing row with the new values provided by the + // `INSERT INTO` statement. The `excluded` keyword refers to the new data being inserted or updated and allows + // accessing the values provided in the `VALUES` clause of the `INSERT INTO` statement. + // Note: + // `diesel` does not support batch updates, so we have to do it manually. For example, this + // `diesel::insert_into(...).values(&...).on_conflict(outputs::hash).do_update().set((...)).execute(&mut + // conn)?;` errors with + // `the trait bound `BatchInsert` is not satisfied` + let mut query = String::from( + "INSERT INTO outputs ( commitment, marked_deleted_at_height, marked_deleted_in_block, status, \ + mined_height, mined_in_block, mined_timestamp, spending_key, value, output_type, maturity, hash, script, \ + input_data, script_private_key, sender_offset_public_key, metadata_signature_ephemeral_commitment, \ + metadata_signature_ephemeral_pubkey, metadata_signature_u_a, metadata_signature_u_x, \ + metadata_signature_u_y, spending_priority, covenant, encrypted_data, minimum_value_promise ) VALUES ", + ); + + query.push_str( + &updates + .iter() + .map(|update| { + format!( + "(x'{}', {}, x'{}', {}, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + update.commitment.to_hex(), + update.mark_deleted_at_height as i64, + update.mark_deleted_in_block.to_hex(), + if update.confirmed { + OutputStatus::Spent as i32 + } else { + OutputStatus::SpentMinedUnconfirmed as i32 + } + ) + }) + .collect::>() + .join(", "), + ); + + query.push_str( + " ON CONFLICT (commitment) DO UPDATE SET marked_deleted_at_height = excluded.marked_deleted_at_height, \ + marked_deleted_in_block = excluded.marked_deleted_in_block, status = excluded.status", + ); + + conn.batch_execute(&query)?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - mark_output_as_spent: lock {} + db_op {} = {} ms", + "sqlite profile - mark_outputs_as_spent: lock {} + db_op {} = {} ms ({} outputs)", acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() + start.elapsed().as_millis(), + updates.len() ); } Ok(()) } - fn mark_output_as_unspent(&self, hash: FixedHash) -> Result<(), OutputManagerStorageError> { + fn mark_outputs_as_unspent(&self, hashes: Vec<(FixedHash, bool)>) -> Result<(), OutputManagerStorageError> { let start = Instant::now(); let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let hash = hash.to_vec(); - debug!(target: LOG_TARGET, "mark_output_as_unspent({})", hash.to_hex()); - diesel::update(outputs::table.filter(outputs::hash.eq(hash))) + // Split out the confirmed and unconfirmed outputs so that we can handle each of them as a separate batch + // operation + let confirmed_hashes = hashes + .iter() + .filter(|(_hash, confirmed)| *confirmed) + .map(|(hash, _confirmed)| hash) + .collect::>(); + let unconfirmed_hashes = hashes + .iter() + .filter(|(_hash, confirmed)| !*confirmed) + .map(|(hash, _confirmed)| hash) + .collect::>(); + + if !confirmed_hashes.is_empty() { + diesel::update( + outputs::table.filter(outputs::hash.eq_any(confirmed_hashes.iter().map(|hash| hash.to_vec()))), + ) .set(( outputs::marked_deleted_at_height.eq::>(None), outputs::marked_deleted_in_block.eq::>>(None), outputs::status.eq(OutputStatus::Unspent as i32), )) .execute(&mut conn) - .num_rows_affected_or_not_found(1)?; + .num_rows_affected_or_not_found(confirmed_hashes.len())?; + } + + if 
!unconfirmed_hashes.is_empty() { + diesel::update( + outputs::table.filter(outputs::hash.eq_any(unconfirmed_hashes.iter().map(|hash| hash.to_vec()))), + ) + .set(( + outputs::marked_deleted_at_height.eq::>(None), + outputs::marked_deleted_in_block.eq::>>(None), + outputs::status.eq(OutputStatus::UnspentMinedUnconfirmed as i32), + )) + .execute(&mut conn) + .num_rows_affected_or_not_found(unconfirmed_hashes.len())?; + } + + debug!(target: LOG_TARGET, "mark_outputs_as_unspent: Unspent {}, UnspentMinedUnconfirmed {}", confirmed_hashes.len(), unconfirmed_hashes.len()); if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - mark_output_as_unspent: lock {} + db_op {} = {} ms", + "sqlite profile - mark_outputs_as_unspent: lock {} + db_op {} = {} ms (Unspent {}, UnspentMinedUnconfirmed {})", acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() + start.elapsed().as_millis(), + confirmed_hashes.len(), unconfirmed_hashes.len() ); } @@ -1032,9 +1182,9 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { .collect::, _>>() } - fn fetch_outputs_by(&self, q: OutputBackendQuery) -> Result, OutputManagerStorageError> { + fn fetch_outputs_by_query(&self, q: OutputBackendQuery) -> Result, OutputManagerStorageError> { let mut conn = self.database_connection.get_pooled_connection()?; - Ok(OutputSql::fetch_outputs_by(q, &mut conn)? + Ok(OutputSql::fetch_outputs_by_query(q, &mut conn)? .into_iter() .filter_map(|x| { x.to_db_wallet_output() @@ -1051,6 +1201,34 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { } } +/// These are the fields to be set for the received outputs batch mode update +#[derive(Clone, Debug, Default)] +pub struct ReceivedOutputInfoForBatch { + /// The Pedersen commitment of the output + pub commitment: Commitment, + /// The height at which the output was mined + pub mined_height: u64, + /// The block hash in which the output was mined + pub mined_in_block: FixedHash, + /// Whether the output is confirmed + pub confirmed: bool, + /// The timestamp at which the output was mined + pub mined_timestamp: u64, +} + +/// These are the fields to be set for the spent outputs batch mode update +#[derive(Clone, Debug, Default)] +pub struct SpentOutputInfoForBatch { + /// The hash of the output + pub commitment: Commitment, + /// Whether the output is confirmed + pub confirmed: bool, + /// The height at which the output was marked as deleted + pub mark_deleted_at_height: u64, + /// The block hash in which the output was marked as deleted + pub mark_deleted_in_block: FixedHash, +} + fn update_outputs_with_tx_id_and_status_to_new_status( conn: &mut PooledConnection>, tx_id: TxId, diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs index 8d722bb046..83d7d2478c 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs @@ -42,6 +42,7 @@ use tari_core::transactions::{ use tari_crypto::tari_utilities::ByteArray; use tari_key_manager::key_manager_service::KeyId; use tari_script::{ExecutionStack, TariScript}; +use tari_utilities::hex::Hex; use crate::{ output_manager_service::{ @@ -57,6 +58,7 @@ use crate::{ }, UtxoSelectionFilter, UtxoSelectionOrdering, + TRANSACTION_INPUTS_LIMIT, }, schema::outputs, }; @@ -120,7 +122,7 @@ impl OutputSql { /// Retrieves UTXOs by a set of given rules 
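The `mark_outputs_as_unspent` implementation above walks the batch twice, once per confirmation flag, before issuing the two grouped updates. The same split can be expressed in a single pass with `Iterator::partition`, sketched here on plain tuples:

```rust
fn main() {
    // (output hash, confirmed) pairs, as in the new mark_outputs_as_unspent API.
    let hashes: Vec<([u8; 32], bool)> = vec![([0; 32], true), ([1; 32], false), ([2; 32], true)];
    let (confirmed, unconfirmed): (Vec<_>, Vec<_>) =
        hashes.into_iter().partition(|(_, confirmed)| *confirmed);
    // Confirmed outputs become Unspent; the rest become UnspentMinedUnconfirmed.
    assert_eq!(confirmed.len(), 2);
    assert_eq!(unconfirmed.len(), 1);
}
```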
#[allow(clippy::cast_sign_loss)] - pub fn fetch_outputs_by( + pub fn fetch_outputs_by_query( q: OutputBackendQuery, conn: &mut SqliteConnection, ) -> Result, OutputManagerStorageError> { @@ -263,7 +265,7 @@ impl OutputSql { }, }; - Ok(query.load(conn)?) + Ok(query.limit(i64::from(TRANSACTION_INPUTS_LIMIT)).load(conn)?) } /// Return all unspent outputs that have a maturity above the provided chain tip @@ -378,6 +380,31 @@ impl OutputSql { .load(conn)?) } + /// Verify that outputs with specified commitments exist in the database + pub fn verify_outputs_exist( + commitments: &[Commitment], + conn: &mut SqliteConnection, + ) -> Result { + #[derive(QueryableByName, Clone)] + struct CountQueryResult { + #[diesel(sql_type = diesel::sql_types::BigInt)] + count: i64, + } + let placeholders = commitments + .iter() + .map(|v| format!("x'{}'", v.to_hex())) + .collect::>() + .join(", "); + let query = sql_query(format!( + "SELECT COUNT(*) as count FROM outputs WHERE commitment IN ({})", + placeholders + )); + let query_result = query.load::(conn)?; + let commitments_len = i64::try_from(commitments.len()) + .map_err(|e| OutputManagerStorageError::ConversionError { reason: e.to_string() })?; + Ok(query_result[0].count == commitments_len) + } + /// Return the available, time locked, pending incoming and pending outgoing balance #[allow(clippy::cast_possible_wrap)] pub fn get_balance( @@ -394,7 +421,7 @@ impl OutputSql { let balance_query_result = if let Some(current_tip) = current_tip_for_time_lock_calculation { let balance_query = sql_query( "SELECT coalesce(sum(value), 0) as amount, 'available_balance' as category \ - FROM outputs WHERE status = ? \ + FROM outputs WHERE status = ? AND maturity <= ? AND script_lock_height <= ? \ UNION ALL \ SELECT coalesce(sum(value), 0) as amount, 'time_locked_balance' as category \ FROM outputs WHERE status = ? AND maturity > ? OR script_lock_height > ? 
\ @@ -407,6 +434,8 @@ impl OutputSql { ) // available_balance .bind::(OutputStatus::Unspent as i32) + .bind::(current_tip as i64) + .bind::(current_tip as i64) // time_locked_balance .bind::(OutputStatus::Unspent as i32) .bind::(current_tip as i64) diff --git a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs index f5aaeadb8b..45e68e3103 100644 --- a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs @@ -46,6 +46,8 @@ use crate::{ storage::{ database::{OutputManagerBackend, OutputManagerDatabase}, models::DbWalletOutput, + sqlite_db::{ReceivedOutputInfoForBatch, SpentOutputInfoForBatch}, + OutputStatus, }, }, }; @@ -158,29 +160,36 @@ where unmined.len(), self.operation_id ); + + let mut mined_updates = Vec::with_capacity(mined.len()); for mined_info in &mined { info!( target: LOG_TARGET, - "Updating output comm:{}: hash {} as mined at height {} with current tip at {} (Operation ID: - {})", + "Updating output comm:{}: hash {} as mined at height {} with current tip at {} (Operation ID: {})", mined_info.output.commitment.to_hex(), mined_info.output.hash.to_hex(), mined_info.mined_at_height, tip_height, self.operation_id ); - self.update_output_as_mined( - &mined_info.output, - &mined_info.mined_block_hash, - mined_info.mined_at_height, - tip_height, - mined_info.mined_timestamp, - ) - .await?; + mined_updates.push(ReceivedOutputInfoForBatch { + commitment: mined_info.output.commitment.clone(), + mined_height: mined_info.mined_at_height, + mined_in_block: mined_info.mined_block_hash, + confirmed: (tip_height - mined_info.mined_at_height) >= self.config.num_confirmations_required, + mined_timestamp: mined_info.mined_timestamp, + }); + } + if !mined_updates.is_empty() { + self.db + .set_received_outputs_mined_height_and_statuses(mined_updates) + .for_protocol(self.operation_id)?; } - for output in unmined { + + let unmined_info: Vec<_> = unmined.iter().map(|o| o.commitment.clone()).collect(); + if !unmined_info.is_empty() { self.db - .update_last_validation_timestamp(output.hash) + .update_last_validation_timestamps(unmined_info) .for_protocol(self.operation_id)?; } } @@ -223,20 +232,19 @@ where )); } + let mut unmined_and_invalid = Vec::with_capacity(batch.len()); + let mut unspent = Vec::with_capacity(batch.len()); + let mut spent = Vec::with_capacity(batch.len()); for (output, data) in batch.iter().zip(response.data.iter()) { // when checking mined height, 0 can be valid so we need to check the hash if data.block_mined_in.is_empty() { // base node thinks this is unmined or does not know of it. 
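The loop that follows is the heart of this refactor: instead of one database call per output, each base node response is classified into one of three in-memory batches, and each non-empty batch is flushed with a single call after the loop. The shape of that collect-then-flush pattern, distilled into a self-contained sketch (types and names here are simplified stand-ins):

```rust
/// Stand-in for the per-output classification the validation task performs.
#[derive(Debug)]
enum Outcome {
    UnminedInvalid(u64),
    Unspent(u64),
    Spent(u64),
}

/// One grouped write per category, instead of one write per output.
fn flush(label: &str, batch: &[u64]) {
    if !batch.is_empty() {
        println!("one DB call: {label} x{}", batch.len());
    }
}

fn main() {
    let outcomes = vec![
        Outcome::Unspent(1),
        Outcome::Spent(2),
        Outcome::UnminedInvalid(3),
        Outcome::Unspent(4),
    ];
    let (mut unmined, mut unspent, mut spent) = (Vec::new(), Vec::new(), Vec::new());
    for outcome in outcomes {
        match outcome {
            Outcome::UnminedInvalid(h) => unmined.push(h),
            Outcome::Unspent(h) => unspent.push(h),
            Outcome::Spent(h) => spent.push(h),
        }
    }
    flush("set_outputs_to_unmined_and_invalid", &unmined);
    flush("mark_outputs_as_unspent", &unspent);
    flush("mark_outputs_as_spent", &spent);
}
```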
- self.db - .set_output_to_unmined_and_invalid(output.hash) - .for_protocol(self.operation_id)?; + unmined_and_invalid.push(output.hash); continue; }; if data.height_deleted_at == 0 && output.marked_deleted_at_height.is_some() { // this is mined but not yet spent - self.db - .mark_output_as_unspent(output.hash) - .for_protocol(self.operation_id)?; + unspent.push((output.hash, true)); info!( target: LOG_TARGET, "Updating output comm:{}: hash {} as unspent at tip height {} (Operation ID: {})", @@ -257,9 +265,12 @@ where OutputManagerError::InconsistentBaseNodeDataError("Base node sent malformed hash"), ) })?; - self.db - .mark_output_as_spent(output.hash, data.height_deleted_at, block_hash, confirmed) - .for_protocol(self.operation_id)?; + spent.push(SpentOutputInfoForBatch { + commitment: output.commitment.clone(), + confirmed, + mark_deleted_at_height: data.height_deleted_at, + mark_deleted_in_block: block_hash, + }); info!( target: LOG_TARGET, "Updating output comm:{}: hash {} as spent at tip height {} (Operation ID: {})", @@ -270,6 +281,19 @@ where ); } } + if !unmined_and_invalid.is_empty() { + self.db + .set_outputs_to_unmined_and_invalid(unmined_and_invalid) + .for_protocol(self.operation_id)?; + } + if !unspent.is_empty() { + self.db + .mark_outputs_as_unspent(unspent) + .for_protocol(self.operation_id)?; + } + if !spent.is_empty() { + self.db.mark_outputs_as_spent(spent).for_protocol(self.operation_id)?; + } } Ok(()) } @@ -299,6 +323,8 @@ where unmined.len(), self.operation_id ); + + let mut mined_updates = Vec::with_capacity(mined.len()); for mined_info in &mined { info!( target: LOG_TARGET, @@ -309,14 +335,38 @@ where tip_height, self.operation_id ); - self.update_output_as_mined( - &mined_info.output, - &mined_info.mined_block_hash, - mined_info.mined_at_height, - tip_height, - mined_info.mined_timestamp, - ) - .await?; + mined_updates.push(ReceivedOutputInfoForBatch { + commitment: mined_info.output.commitment.clone(), + mined_height: mined_info.mined_at_height, + mined_in_block: mined_info.mined_block_hash, + confirmed: (tip_height - mined_info.mined_at_height) >= self.config.num_confirmations_required, + mined_timestamp: mined_info.mined_timestamp, + }); + } + if !mined_updates.is_empty() { + self.db + .set_received_outputs_mined_height_and_statuses(mined_updates) + .for_protocol(self.operation_id)?; + } + + let unmined_and_invalid: Vec<_> = unmined + .iter() + .filter(|uo| uo.status == OutputStatus::UnspentMinedUnconfirmed) + .map(|uo| { + info!( + target: LOG_TARGET, + "Updating output comm:{}: hash {} as unmined (Operation ID: {})", + uo.commitment.to_hex(), + uo.hash.to_hex(), + self.operation_id + ); + uo.hash + }) + .collect(); + if !unmined_and_invalid.is_empty() { + self.db + .set_outputs_to_unmined_and_invalid(unmined_and_invalid) + .for_protocol(self.operation_id)?; } } @@ -347,7 +397,7 @@ where self.operation_id ); self.db - .set_output_to_unmined_and_invalid(last_spent_output.hash) + .set_outputs_to_unmined_and_invalid(vec![last_spent_output.hash]) .for_protocol(self.operation_id)?; continue; }; @@ -362,7 +412,7 @@ where self.operation_id ); self.db - .set_output_to_unmined_and_invalid(last_spent_output.hash) + .set_outputs_to_unmined_and_invalid(vec![last_spent_output.hash]) .for_protocol(self.operation_id)?; continue; }; @@ -380,8 +430,10 @@ where last_spent_output.commitment.to_hex(), self.operation_id ); + // we mark the output as UnspentMinedUnconfirmed so it won't get picked up by the OMS to be spendable + // immediately as we first need to find out if this 
output is unspent, in a mempool, or spent. self.db - .mark_output_as_unspent(last_spent_output.hash) + .mark_outputs_as_unspent(vec![(last_spent_output.hash, false)]) .for_protocol(self.operation_id)?; } else { debug!( @@ -403,7 +455,7 @@ where self.operation_id ); self.db - .set_output_to_unmined_and_invalid(last_mined_output.hash) + .set_outputs_to_unmined_and_invalid(vec![last_mined_output.hash]) .for_protocol(self.operation_id)?; continue; } @@ -423,7 +475,7 @@ where self.operation_id ); self.db - .set_output_to_unmined_and_invalid(last_mined_output.hash) + .set_outputs_to_unmined_and_invalid(vec![last_mined_output.hash]) .for_protocol(self.operation_id)?; } else { debug!( @@ -531,30 +583,6 @@ where Ok((mined, unmined, batch_response.best_block_height)) } - #[allow(clippy::ptr_arg)] - async fn update_output_as_mined( - &self, - tx: &DbWalletOutput, - mined_in_block: &BlockHash, - mined_height: u64, - tip_height: u64, - mined_timestamp: u64, - ) -> Result<(), OutputManagerProtocolError> { - let confirmed = (tip_height - mined_height) >= self.config.num_confirmations_required; - - self.db - .set_received_output_mined_height_and_status( - tx.hash, - mined_height, - *mined_in_block, - confirmed, - mined_timestamp, - ) - .for_protocol(self.operation_id)?; - - Ok(()) - } - fn publish_event(&self, event: OutputManagerEvent) { if let Err(e) = self.event_publisher.send(Arc::new(event)) { debug!( diff --git a/base_layer/wallet/src/storage/database.rs b/base_layer/wallet/src/storage/database.rs index cdca0cd43a..f38e3c90ca 100644 --- a/base_layer/wallet/src/storage/database.rs +++ b/base_layer/wallet/src/storage/database.rs @@ -27,7 +27,7 @@ use std::{ use chrono::NaiveDateTime; use log::*; -use tari_common_types::chain_metadata::ChainMetadata; +use tari_common_types::{chain_metadata::ChainMetadata, wallet_types::WalletType}; use tari_comms::{ multiaddr::Multiaddr, peer_manager::{IdentitySignature, PeerFeatures}, @@ -90,6 +90,7 @@ pub enum DbKey { WalletBirthday, LastAccessedNetwork, LastAccessedVersion, + WalletType, } impl DbKey { @@ -109,6 +110,7 @@ impl DbKey { DbKey::CommsIdentitySignature => "CommsIdentitySignature".to_string(), DbKey::LastAccessedNetwork => "LastAccessedNetwork".to_string(), DbKey::LastAccessedVersion => "LastAccessedVersion".to_string(), + DbKey::WalletType => "WalletType".to_string(), } } } @@ -129,6 +131,7 @@ pub enum DbValue { WalletBirthday(String), LastAccessedNetwork(String), LastAccessedVersion(String), + WalletType(WalletType), } #[derive(Clone)] @@ -141,6 +144,7 @@ pub enum DbKeyValuePair { CommsFeatures(PeerFeatures), CommsIdentitySignature(Box), NetworkAndVersion((String, String)), + WalletType(WalletType), } pub enum WriteOperation { @@ -384,6 +388,21 @@ where T: WalletBackend + 'static pub fn delete_burnt_proof(&self, id: u32) -> Result<(), WalletStorageError> { self.db.delete_burnt_proof(id) } + + pub fn get_wallet_type(&self) -> Result, WalletStorageError> { + match self.db.fetch(&DbKey::WalletType) { + Ok(None) => Ok(None), + Ok(Some(DbValue::WalletType(k))) => Ok(Some(k)), + Ok(Some(other)) => unexpected_result(DbKey::WalletType, other), + Err(e) => log_error(DbKey::WalletType, e), + } + } + + pub fn set_wallet_type(&self, wallet_type: WalletType) -> Result<(), WalletStorageError> { + self.db + .write(WriteOperation::Insert(DbKeyValuePair::WalletType(wallet_type)))?; + Ok(()) + } } impl Display for DbValue { @@ -404,6 +423,7 @@ impl Display for DbValue { DbValue::CommsIdentitySignature(_) => f.write_str("CommsIdentitySignature"), 
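The new `WalletType` setting is persisted as a JSON string in the wallet settings table, written with `serde_json::to_string` and parsed back with `serde_json::from_str`, as the sqlite code further down shows. A round-trip sketch with a hypothetical stand-in enum (the real type lives in `tari_common_types`):

```rust
use serde::{Deserialize, Serialize};

// Hypothetical stand-in for the real WalletType, just to keep this sketch
// self-contained; the actual variants are defined in tari_common_types.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
enum WalletType {
    Software,
    Hardware,
}

fn main() {
    // Write path: the setting is serialized to JSON before being stored.
    let stored = serde_json::to_string(&WalletType::Hardware).unwrap();
    // Read path: fetching the setting parses the JSON back into the enum.
    let loaded: WalletType = serde_json::from_str(&stored).unwrap();
    assert_eq!(loaded, WalletType::Hardware);
}
```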
DbValue::LastAccessedNetwork(network) => f.write_str(&format!("LastAccessedNetwork: {}", network)), DbValue::LastAccessedVersion(version) => f.write_str(&format!("LastAccessedVersion: {}", version)), + DbValue::WalletType(wallet_type) => f.write_str(&format!("WalletType: {:?}", wallet_type)), } } } diff --git a/base_layer/wallet/src/storage/sqlite_db/wallet.rs b/base_layer/wallet/src/storage/sqlite_db/wallet.rs index 659d5eebe6..5be73d2888 100644 --- a/base_layer/wallet/src/storage/sqlite_db/wallet.rs +++ b/base_layer/wallet/src/storage/sqlite_db/wallet.rs @@ -422,6 +422,11 @@ impl WalletSqliteDatabase { WalletSettingSql::new(DbKey::LastAccessedNetwork, network).set(&mut conn)?; WalletSettingSql::new(DbKey::LastAccessedVersion, version).set(&mut conn)?; }, + DbKeyValuePair::WalletType(wallet_type) => { + kvp_text = "WalletType"; + WalletSettingSql::new(DbKey::WalletType, serde_json::to_string(&wallet_type).unwrap()) + .set(&mut conn)?; + }, } if start.elapsed().as_millis() > 0 { @@ -461,6 +466,7 @@ impl WalletSqliteDatabase { DbKey::SecondaryKeySalt | DbKey::SecondaryKeyHash | DbKey::WalletBirthday | + DbKey::WalletType | DbKey::CommsIdentitySignature | DbKey::LastAccessedNetwork | DbKey::LastAccessedVersion => { @@ -510,6 +516,9 @@ impl WalletBackend for WalletSqliteDatabase { DbKey::SecondaryKeySalt => WalletSettingSql::get(key, &mut conn)?.map(DbValue::SecondaryKeySalt), DbKey::SecondaryKeyHash => WalletSettingSql::get(key, &mut conn)?.map(DbValue::SecondaryKeyHash), DbKey::WalletBirthday => WalletSettingSql::get(key, &mut conn)?.map(DbValue::WalletBirthday), + DbKey::WalletType => { + WalletSettingSql::get(key, &mut conn)?.map(|d| DbValue::WalletType(serde_json::from_str(&d).unwrap())) + }, DbKey::LastAccessedNetwork => WalletSettingSql::get(key, &mut conn)?.map(DbValue::LastAccessedNetwork), DbKey::LastAccessedVersion => WalletSettingSql::get(key, &mut conn)?.map(DbValue::LastAccessedVersion), DbKey::CommsIdentitySignature => WalletSettingSql::get(key, &mut conn)? 
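The hunks above give the new `WalletType` key its full round-trip: `DbKeyValuePair::WalletType` is serialized with `serde_json` into the generic wallet settings table, and the `DbKey::WalletType` fetch arm deserializes it again. A minimal, self-contained sketch of that round-trip; the stand-in enum's variants are illustrative only and not the real `tari_common_types::wallet_types::WalletType`:

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-in for tari_common_types::wallet_types::WalletType;
// the real enum's variants may differ.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
enum WalletType {
    Software,
    Hardware,
}

fn main() {
    // Write path: the value is JSON-encoded before being stored as an
    // ordinary string-valued wallet setting.
    let stored = serde_json::to_string(&WalletType::Hardware).unwrap();
    assert_eq!(stored, "\"Hardware\"");

    // Read path: the stored string is decoded back into the enum, mirroring
    // the `serde_json::from_str(&d).unwrap()` in the fetch arm above.
    let loaded: WalletType = serde_json::from_str(&stored).unwrap();
    assert_eq!(loaded, WalletType::Hardware);
}
```

Note that both the write and read paths in the diff `unwrap()` the serde result, so a malformed stored value would panic rather than surface as a `WalletStorageError`.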
diff --git a/base_layer/wallet/src/storage/sqlite_utilities/mod.rs b/base_layer/wallet/src/storage/sqlite_utilities/mod.rs index 7345323bb0..078ff857cf 100644 --- a/base_layer/wallet/src/storage/sqlite_utilities/mod.rs +++ b/base_layer/wallet/src/storage/sqlite_utilities/mod.rs @@ -74,6 +74,27 @@ pub fn run_migration_and_create_sqlite_connection<P: AsRef<Path>>( Ok(WalletDbConnection::new(pool, Some(file_lock))) } +pub fn run_migration_and_create_sqlite_memory_connection( + sqlite_pool_size: usize, +) -> Result<WalletDbConnection, WalletStorageError> { + let mut pool = SqliteConnectionPool::new( + String::from(":memory:"), + sqlite_pool_size, + true, + true, + Duration::from_secs(60), + ); + pool.create_pool()?; + let mut connection = pool.get_pooled_connection()?; + + const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations"); + connection + .run_pending_migrations(MIGRATIONS) + .map_err(|err| WalletStorageError::DatabaseMigrationError(format!("Database migration failed {}", err)))?; + + Ok(WalletDbConnection::new(pool, None)) +} + pub fn acquire_exclusive_file_lock(db_path: &Path) -> Result<File, WalletStorageError> { let lock_file_path = match db_path.file_name() { None => { diff --git a/base_layer/wallet/src/test_utils.rs b/base_layer/wallet/src/test_utils.rs index 92b366cfe9..c6e372cc2a 100644 --- a/base_layer/wallet/src/test_utils.rs +++ b/base_layer/wallet/src/test_utils.rs @@ -30,6 +30,7 @@ use tempfile::{tempdir, TempDir}; use crate::storage::sqlite_utilities::{ run_migration_and_create_sqlite_connection, + run_migration_and_create_sqlite_memory_connection, wallet_db_connection::WalletDbConnection, }; @@ -58,6 +59,11 @@ pub fn make_wallet_database_connection(path: Option<String>) -> (WalletDbConnect (connection, temp_dir) } +/// A test helper to create a temporary in-memory wallet service database +pub fn make_wallet_database_memory_connection() -> WalletDbConnection { + run_migration_and_create_sqlite_memory_connection(16).unwrap() +} + pub fn create_consensus_rules() -> ConsensusManager { ConsensusManager::builder(Network::LocalNet).build().unwrap() } diff --git a/base_layer/wallet/src/transaction_service/error.rs b/base_layer/wallet/src/transaction_service/error.rs index 1743fa8a44..780430e501 100644 --- a/base_layer/wallet/src/transaction_service/error.rs +++ b/base_layer/wallet/src/transaction_service/error.rs @@ -185,6 +185,12 @@ pub enum TransactionServiceError { InvalidKeyId(String), #[error("Invalid key manager data: `{0}`")] KeyManagerServiceError(#[from] KeyManagerServiceError), + #[error("Serialization error: `{0}`")] + SerializationError(String), + #[error("Transaction exceeds maximum byte size. 
Expected < {expected} but got {got}.")] + TransactionTooLarge { got: usize, expected: usize }, + #[error("Pending Transaction was oversized")] + Oversized, } impl From for TransactionServiceError { diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index d1f1c8cccc..22b49cdea4 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -82,6 +82,7 @@ pub enum TransactionServiceRequest { GetCancelledCompletedTransactions, GetCompletedTransaction(TxId), GetAnyTransaction(TxId), + ImportTransaction(WalletTransaction), SendTransaction { destination: TariAddress, amount: MicroMinotari, @@ -165,6 +166,7 @@ impl fmt::Display for TransactionServiceRequest { Self::GetPendingInboundTransactions => write!(f, "GetPendingInboundTransactions"), Self::GetPendingOutboundTransactions => write!(f, "GetPendingOutboundTransactions"), Self::GetCompletedTransactions => write!(f, "GetCompletedTransactions"), + Self::ImportTransaction(tx) => write!(f, "ImportTransaction: {:?}", tx), Self::GetCancelledPendingInboundTransactions => write!(f, "GetCancelledPendingInboundTransactions"), Self::GetCancelledPendingOutboundTransactions => write!(f, "GetCancelledPendingOutboundTransactions"), Self::GetCancelledCompletedTransactions => write!(f, "GetCancelledCompletedTransactions"), @@ -243,6 +245,7 @@ impl fmt::Display for TransactionServiceRequest { #[derive(Debug)] pub enum TransactionServiceResponse { TransactionSent(TxId), + TransactionImported(TxId), BurntTransactionSent { tx_id: TxId, proof: Box<BurntProof>, @@ -292,7 +295,6 @@ impl Display for TransactionSendStatus { #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub enum TransactionEvent { MempoolBroadcastTimedOut(TxId), - NewBlockMined(TxId), ReceivedTransaction(TxId), ReceivedTransactionReply(TxId), ReceivedFinalizedTransaction(TxId), @@ -403,9 +405,6 @@ impl fmt::Display for TransactionEvent { TransactionEvent::TransactionValidationFailed(operation_id, reason) => { write!(f, "Transaction validation(#{operation_id}) failed: {reason}") }, - TransactionEvent::NewBlockMined(tx_id) => { - write!(f, "New block mined {tx_id}") - }, } } } @@ -727,6 +726,17 @@ impl TransactionServiceHandle { } } + pub async fn import_transaction(&mut self, tx: WalletTransaction) -> Result<TxId, TransactionServiceError> { + match self + .handle + .call(TransactionServiceRequest::ImportTransaction(tx)) + .await?? + { + TransactionServiceResponse::TransactionImported(t) => Ok(t), + _ => Err(TransactionServiceError::UnexpectedApiResponse), + } + } + pub async fn import_utxo_with_status( &mut self, amount: MicroMinotari, diff --git a/base_layer/wallet/src/transaction_service/protocols/mod.rs b/base_layer/wallet/src/transaction_service/protocols/mod.rs index 15bdb1dd1c..9c7cfc3ce1 100644 --- a/base_layer/wallet/src/transaction_service/protocols/mod.rs +++ b/base_layer/wallet/src/transaction_service/protocols/mod.rs @@ -20,7 +20,48 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
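Callers reach the new `ImportTransaction` request through `TransactionServiceHandle::import_transaction`, which returns the imported `TxId` or `UnexpectedApiResponse`. A hedged usage sketch; the module paths are assumed from this diff, and the surrounding setup (a running service stack providing the handle) is not shown:

```rust
use minotari_wallet::transaction_service::{
    error::TransactionServiceError,
    handle::TransactionServiceHandle,
    storage::models::WalletTransaction,
};
use tari_common_types::transaction::TxId;

// Sketch only: `handle` is assumed to come from a running wallet service stack,
// and `wallet_tx` is any PendingInbound, PendingOutbound or Completed variant.
async fn import_example(
    mut handle: TransactionServiceHandle,
    wallet_tx: WalletTransaction,
) -> Result<TxId, TransactionServiceError> {
    // The service checks the serialized size of the transaction (see
    // `check_transaction_size` below) before inserting it into the
    // matching pending/completed table.
    let tx_id = handle.import_transaction(wallet_tx).await?;
    Ok(tx_id)
}
```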
+use bincode::serialize_into; +use log::{debug, error}; +use serde::Serialize; +use tari_common_types::transaction::TxId; +use tari_comms::protocol::rpc; + +use crate::transaction_service::error::{TransactionServiceError, TransactionServiceProtocolError}; + pub mod transaction_broadcast_protocol; pub mod transaction_receive_protocol; pub mod transaction_send_protocol; pub mod transaction_validation_protocol; + +const LOG_TARGET: &str = "wallet::transaction_service::protocols"; + +/// Verify that the negotiated transaction is not too large to be broadcast +pub fn check_transaction_size<T: Serialize>( + transaction: &T, + tx_id: TxId, +) -> Result<(), TransactionServiceProtocolError> { + let mut buf: Vec<u8> = Vec::new(); + serialize_into(&mut buf, transaction).map_err(|e| { + TransactionServiceProtocolError::new(tx_id, TransactionServiceError::SerializationError(e.to_string())) + })?; + const SIZE_MARGIN: usize = 1024 * 10; + if buf.len() > rpc::RPC_MAX_FRAME_SIZE.saturating_sub(SIZE_MARGIN) { + let err = TransactionServiceProtocolError::new(tx_id, TransactionServiceError::TransactionTooLarge { + got: buf.len(), + expected: rpc::RPC_MAX_FRAME_SIZE.saturating_sub(SIZE_MARGIN), + }); + error!( + target: LOG_TARGET, + "Transaction '{}' too large, cannot be broadcast ({:?}).", + tx_id, err + ); + Err(err) + } else { + debug!( + target: LOG_TARGET, + "Transaction '{}' size ok, can be broadcast (got: {}, limit: {}).", + tx_id, buf.len(), rpc::RPC_MAX_FRAME_SIZE.saturating_sub(SIZE_MARGIN) + ); + Ok(()) + } +} diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs index f74f97bde9..4f4e32efb3 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs @@ -47,6 +47,7 @@ use crate::{ transaction_service::{ error::{TransactionServiceError, TransactionServiceProtocolError}, handle::TransactionEvent, + protocols::check_transaction_size, service::TransactionServiceResources, storage::{ database::TransactionBackend, @@ -127,6 +128,10 @@ where ); return Ok(self.tx_id); } + if let Err(e) = check_transaction_size(&completed_tx.transaction, self.tx_id) { + self.cancel_transaction(TxCancellationReason::Oversized).await; + return Err(e); + } loop { tokio::select! 
{ diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs index 38d948a5f7..c0a16a38aa 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs @@ -44,6 +44,7 @@ use crate::{ transaction_service::{ error::{TransactionServiceError, TransactionServiceProtocolError}, handle::TransactionEvent, + protocols::check_transaction_size, service::TransactionServiceResources, storage::{ database::TransactionBackend, @@ -159,6 +160,12 @@ where Utc::now().naive_utc(), ); + // Verify that the negotiated transaction is not too large to be broadcast + if let Err(e) = check_transaction_size(&inbound_transaction, self.id) { + self.cancel_oversized_transaction().await?; + return Err(e); + } + self.resources .db .add_pending_inbound_transaction(inbound_transaction.tx_id, inbound_transaction.clone()) @@ -242,6 +249,12 @@ where }, }; + // Verify that the negotiated transaction is not too large to be broadcast + if let Err(e) = check_transaction_size(&inbound_tx, self.id) { + self.cancel_oversized_transaction().await?; + return Err(e); + } + // Determine the time remaining before this transaction times out let elapsed_time = utc_duration_since(&inbound_tx.timestamp) .map_err(|e| TransactionServiceProtocolError::new(self.id, e.into()))?; @@ -469,6 +482,32 @@ where "Cancelling Transaction Receive Protocol (TxId: {}) due to timeout after no counterparty response", self.id ); + self.cancel_transaction(TxCancellationReason::Timeout).await?; + + info!( + target: LOG_TARGET, + "Pending Transaction (TxId: {}) timed out after no response from counterparty", self.id + ); + + Err(TransactionServiceProtocolError::new( + self.id, + TransactionServiceError::Timeout, + )) + } + + async fn cancel_oversized_transaction(&mut self) -> Result<(), TransactionServiceProtocolError> { + info!( + target: LOG_TARGET, + "Cancelling Transaction Receive Protocol (TxId: {}) due to transaction being oversized", self.id + ); + + self.cancel_transaction(TxCancellationReason::Oversized).await + } + + async fn cancel_transaction( + &mut self, + cancel_reason: TxCancellationReason, + ) -> Result<(), TransactionServiceProtocolError> { self.resources.db.cancel_pending_transaction(self.id).map_err(|e| { warn!( target: LOG_TARGET, @@ -486,10 +525,7 @@ where let _size = self .resources .event_publisher - .send(Arc::new(TransactionEvent::TransactionCancelled( - self.id, - TxCancellationReason::Timeout, - ))) + .send(Arc::new(TransactionEvent::TransactionCancelled(self.id, cancel_reason))) .map_err(|e| { trace!( target: LOG_TARGET, @@ -502,14 +538,6 @@ where ) }); - info!( - target: LOG_TARGET, - "Pending Transaction (TxId: {}) timed out after no response from counterparty", self.id - ); - - Err(TransactionServiceProtocolError::new( - self.id, - TransactionServiceError::Timeout, - )) + Ok(()) } } diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs index 30eed38c44..ab12db4f47 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs @@ -63,6 +63,7 @@ use crate::{ config::TransactionRoutingMechanism, error::{TransactionServiceError, 
TransactionServiceProtocolError}, handle::{TransactionEvent, TransactionSendStatus, TransactionServiceResponse}, + protocols::check_transaction_size, service::{TransactionSendResult, TransactionServiceResources}, storage::{ database::TransactionBackend, @@ -281,28 +282,45 @@ where )); } + // Calculate the size of the transaction; the initial send transaction to the peer (always a small message) should + // not be attempted if the final transaction size will be too large to be broadcast + let outbound_tx_check = OutboundTransaction::new( + tx_id, + self.dest_address.clone(), + self.amount, + MicroMinotari::zero(), // This does not matter for the check + sender_protocol.clone(), + TransactionStatus::Pending, // This does not matter for the check + self.message.clone(), + Utc::now().naive_utc(), + true, // This does not matter for the check + ); + // Attempt to send the initial transaction - let SendResult { - direct_send_result, - store_and_forward_send_result, - transaction_status, - } = match self.send_transaction(msg).await { - Ok(val) => val, - Err(e) => { - warn!( - target: LOG_TARGET, - "Problem sending Outbound Transaction TxId: {:?}: {:?}", self.id, e - ); - SendResult { - direct_send_result: false, - store_and_forward_send_result: false, - transaction_status: TransactionStatus::Queued, - } - }, + let mut initial_send = SendResult { + direct_send_result: false, + store_and_forward_send_result: false, + transaction_status: TransactionStatus::Queued, + }; + if let Err(e) = check_transaction_size(&outbound_tx_check, self.id) { + info!( + target: LOG_TARGET, + "Initial Transaction TxId: {:?} will not be sent due to it being oversized ({:?})", self.id, e + ); + } else { + match self.send_transaction(msg).await { + Ok(val) => initial_send = val, + Err(e) => { + warn!( + target: LOG_TARGET, + "Problem sending Outbound Transaction TxId: {:?}: {:?}", self.id, e + ); + }, + } }; // Confirm pending transaction (confirm encumbered outputs) - if transaction_status == TransactionStatus::Pending { + if initial_send.transaction_status == TransactionStatus::Pending { self.resources .output_manager_service .confirm_pending_transaction(self.id) @@ -326,17 +344,21 @@ where self.amount, fee, sender_protocol.clone(), - transaction_status.clone(), + initial_send.transaction_status.clone(), self.message.clone(), Utc::now().naive_utc(), - direct_send_result, + initial_send.direct_send_result, ); self.resources .db - .add_pending_outbound_transaction(outbound_tx.tx_id, outbound_tx) + .add_pending_outbound_transaction(outbound_tx.tx_id, outbound_tx.clone()) .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?; + if let Err(e) = check_transaction_size(&outbound_tx, self.id) { + self.cancel_oversized_transaction().await?; + return Err(e); + } } - if transaction_status == TransactionStatus::Pending { + if initial_send.transaction_status == TransactionStatus::Pending { self.resources .db .increment_send_count(self.id) @@ -350,13 +372,13 @@ where .send(Arc::new(TransactionEvent::TransactionSendResult( self.id, TransactionSendStatus { - direct_send_result, - store_and_forward_send_result, - queued_for_retry: transaction_status == TransactionStatus::Queued, + direct_send_result: initial_send.direct_send_result, + store_and_forward_send_result: initial_send.store_and_forward_send_result, + queued_for_retry: initial_send.transaction_status == TransactionStatus::Queued, }, ))); - if transaction_status == TransactionStatus::Pending { + if initial_send.transaction_status == 
TransactionStatus::Pending { info!( target: LOG_TARGET, "Pending Outbound Transaction TxId: {:?} added. Waiting for Reply or Cancellation", self.id, @@ -367,7 +389,7 @@ where "Pending Outbound Transaction TxId: {:?} queued. Waiting for wallet to come online", self.id, ); } - Ok(transaction_status) + Ok(initial_send.transaction_status) } #[allow(clippy::too_many_lines)] @@ -391,6 +413,12 @@ where .get_pending_outbound_transaction(tx_id) .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?; + // Verify that the negotiated transaction is not too large to be broadcast + if let Err(e) = check_transaction_size(&outbound_tx, self.id) { + self.cancel_oversized_transaction().await?; + return Err(e); + } + if !outbound_tx.sender_protocol.is_collecting_single_signature() { error!( target: LOG_TARGET, @@ -883,6 +911,33 @@ where target: LOG_TARGET, "Cancelling Transaction Send Protocol (TxId: {}) due to timeout after no counterparty response", self.id ); + + self.cancel_transaction(TxCancellationReason::Timeout).await?; + + info!( + target: LOG_TARGET, + "Pending Transaction (TxId: {}) timed out after no response from counterparty", self.id + ); + + Err(TransactionServiceProtocolError::new( + self.id, + TransactionServiceError::Timeout, + )) + } + + async fn cancel_oversized_transaction(&mut self) -> Result<(), TransactionServiceProtocolError> { + info!( + target: LOG_TARGET, + "Cancelling Transaction Send Protocol (TxId: {}) due to transaction being oversized", self.id + ); + + self.cancel_transaction(TxCancellationReason::Oversized).await + } + + async fn cancel_transaction( + &mut self, + cancel_reason: TxCancellationReason, + ) -> Result<(), TransactionServiceProtocolError> { let _ = send_transaction_cancelled_message( self.id, self.dest_address.public_key().clone(), @@ -917,10 +972,7 @@ where let _size = self .resources .event_publisher - .send(Arc::new(TransactionEvent::TransactionCancelled( - self.id, - TxCancellationReason::Timeout, - ))) + .send(Arc::new(TransactionEvent::TransactionCancelled(self.id, cancel_reason))) .map_err(|e| { trace!( target: LOG_TARGET, @@ -933,15 +985,7 @@ where ) }); - info!( - target: LOG_TARGET, - "Pending Transaction (TxId: {}) timed out after no response from counterparty", self.id - ); - - Err(TransactionServiceProtocolError::new( - self.id, - TransactionServiceError::Timeout, - )) + Ok(()) } } diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs index 06295138ae..8c3357a8a9 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs @@ -148,7 +148,6 @@ where ); self.update_transaction_as_unmined(unmined_tx.tx_id, &unmined_tx.status) .await?; - self.publish_event(TransactionEvent::NewBlockMined(unmined_tx.tx_id)); } } } @@ -249,7 +248,7 @@ where let mut unmined = vec![]; #[allow(clippy::mutable_key_type)] let mut batch_signatures = HashMap::new(); - for tx_info in batch.iter() { + for tx_info in batch { // Imported transactions do not have a signature; this is represented by the default signature in info if tx_info.signature != Signature::default() { batch_signatures.insert(tx_info.signature.clone(), tx_info); @@ -286,13 +285,13 @@ where let sig = response.signature; if let Some(unconfirmed_tx) = batch_signatures.get(&sig) { if response.location == 
TxLocation::Mined && - response.block_hash.is_some() && + response.best_block_hash.is_some() && response.mined_timestamp.is_some() { mined.push(( (*unconfirmed_tx).clone(), - response.block_height, - response.block_hash.unwrap(), + response.best_block_height, + response.best_block_hash.unwrap(), response.confirmations, response.mined_timestamp.unwrap(), )); @@ -308,13 +307,13 @@ where } } - let tip = batch_response.tip_hash.try_into()?; + let tip = batch_response.best_block_hash.try_into()?; Ok(( mined, unmined, Some(( - batch_response.height_of_longest_chain, + batch_response.best_block_height, tip, batch_response.tip_mined_timestamp, )), diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index 2a194648c6..c187c1cf10 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -107,6 +107,7 @@ use crate::{ TransactionServiceResponse, }, protocols::{ + check_transaction_size, transaction_broadcast_protocol::TransactionBroadcastProtocol, transaction_receive_protocol::{TransactionReceiveProtocol, TransactionReceiveProtocolStage}, transaction_send_protocol::{TransactionSendProtocol, TransactionSendProtocolStage}, @@ -114,7 +115,11 @@ use crate::{ }, storage::{ database::{TransactionBackend, TransactionDatabase}, - models::{CompletedTransaction, TxCancellationReason}, + models::{ + CompletedTransaction, + TxCancellationReason, + WalletTransaction::{Completed, PendingInbound, PendingOutbound}, + }, }, tasks::{ check_faux_transaction_status::check_detected_transactions, @@ -774,6 +779,29 @@ where TransactionServiceRequest::GetAnyTransaction(tx_id) => Ok(TransactionServiceResponse::AnyTransaction( Box::new(self.db.get_any_transaction(tx_id)?), )), + TransactionServiceRequest::ImportTransaction(tx) => { + let tx_id = match tx { + PendingInbound(inbound_tx) => { + let tx_id = inbound_tx.tx_id; + check_transaction_size(&inbound_tx, tx_id)?; + self.db.insert_pending_inbound_transaction(tx_id, inbound_tx)?; + tx_id + }, + PendingOutbound(outbound_tx) => { + let tx_id = outbound_tx.tx_id; + check_transaction_size(&outbound_tx, tx_id)?; + self.db.insert_pending_outbound_transaction(tx_id, outbound_tx)?; + tx_id + }, + Completed(completed_tx) => { + let tx_id = completed_tx.tx_id; + check_transaction_size(&completed_tx.transaction, tx_id)?; + self.db.insert_completed_transaction(tx_id, completed_tx)?; + tx_id + }, + }; + Ok(TransactionServiceResponse::TransactionImported(tx_id)) + }, TransactionServiceRequest::ImportUtxoWithStatus { amount, source_address, @@ -798,6 +826,7 @@ where .map(TransactionServiceResponse::UtxoImported), TransactionServiceRequest::SubmitTransactionToSelf(tx_id, tx, fee, amount, message) => self .submit_transaction_to_self(transaction_broadcast_join_handles, tx_id, tx, fee, amount, message) + .await .map(|_| TransactionServiceResponse::TransactionSubmitted), TransactionServiceRequest::SetLowPowerMode => { self.set_power_mode(PowerMode::Low).await?; @@ -924,7 +953,7 @@ where Err(_) => None, }; let tip_height = match metadata { - Some(val) => val.height_of_longest_chain(), + Some(val) => val.best_block_height(), None => 0u64, }; let event_publisher = self.event_publisher.clone(); @@ -1004,7 +1033,8 @@ where None, None, )?, - )?; + ) + .await?; let _result = reply_channel .send(Ok(TransactionServiceResponse::TransactionSent(tx_id))) @@ -1253,7 +1283,8 @@ where None, None, )?, - )?; + ) + .await?; let tx_output = output 
.to_transaction_output(&self.resources.transaction_key_manager_service) @@ -1441,7 +1472,8 @@ where None, None, )?, - )?; + ) + .await?; Ok(tx_id) } @@ -1702,7 +1734,8 @@ where None, None, )?, - )?; + ) + .await?; info!(target: LOG_TARGET, "Submitted burning transaction - TxId: {}", tx_id); Ok((tx_id, BurntProof { @@ -2836,7 +2869,7 @@ where } /// Submit a completed transaction to the Transaction Manager - fn submit_transaction( + async fn submit_transaction( &mut self, transaction_broadcast_join_handles: &mut FuturesUnordered< JoinHandle<Result<TxId, TransactionServiceProtocolError>>, >, @@ -2845,12 +2878,17 @@ where ) -> Result<(), TransactionServiceError> { let tx_id = completed_transaction.tx_id; trace!(target: LOG_TARGET, "Submit transaction ({}) to db.", tx_id); - self.db.insert_completed_transaction(tx_id, completed_transaction)?; + self.db + .insert_completed_transaction(tx_id, completed_transaction.clone())?; trace!( target: LOG_TARGET, "Launch the transaction broadcast protocol for submitted transaction ({}).", tx_id ); + if let Err(e) = check_transaction_size(&completed_transaction.transaction, tx_id) { + self.cancel_transaction(tx_id, TxCancellationReason::Oversized).await; + return Err(e.into()); + } self.complete_send_transaction_protocol( Ok(TransactionSendResult { tx_id, @@ -2861,9 +2899,24 @@ where Ok(()) } + async fn cancel_transaction(&mut self, tx_id: TxId, reason: TxCancellationReason) { + if let Err(e) = self.resources.output_manager_service.cancel_transaction(tx_id).await { + warn!( + target: LOG_TARGET, + "Failed to cancel outputs for TxId: {} after failed sending attempt with error {:?}", tx_id, e + ); + } + if let Err(e) = self.resources.db.reject_completed_transaction(tx_id, reason) { + warn!( + target: LOG_TARGET, + "Failed to cancel TxId: {} after failed sending attempt with error {:?}", tx_id, e + ); + } + } + /// Submit a completed coin split transaction to the Transaction Manager. This is different from /// `submit_transaction` in that it will expose less information about the completed transaction. 
- pub fn submit_transaction_to_self( + pub async fn submit_transaction_to_self( &mut self, transaction_broadcast_join_handles: &mut FuturesUnordered< JoinHandle<Result<TxId, TransactionServiceProtocolError>>, >, @@ -2890,7 +2943,8 @@ where None, None, )?, - )?; + ) + .await?; Ok(()) } diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index 8622d6bc70..7e22f7133f 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -320,6 +320,30 @@ where T: TransactionBackend + 'static ))) } + pub fn insert_pending_inbound_transaction( + &self, + tx_id: TxId, + transaction: InboundTransaction, + ) -> Result<Option<DbValue>, TransactionStorageError> { + self.db + .write(WriteOperation::Insert(DbKeyValuePair::PendingInboundTransaction( + tx_id, + Box::new(transaction), + ))) + } + + pub fn insert_pending_outbound_transaction( + + &self, + tx_id: TxId, + transaction: OutboundTransaction, + ) -> Result<Option<DbValue>, TransactionStorageError> { + self.db + .write(WriteOperation::Insert(DbKeyValuePair::PendingOutboundTransaction( + tx_id, + Box::new(transaction), + ))) + } + pub fn get_pending_outbound_transaction( &self, tx_id: TxId, diff --git a/base_layer/wallet/src/transaction_service/storage/models.rs b/base_layer/wallet/src/transaction_service/storage/models.rs index 1dbc29ab03..9830c2a8fa 100644 --- a/base_layer/wallet/src/transaction_service/storage/models.rs +++ b/base_layer/wallet/src/transaction_service/storage/models.rs @@ -303,7 +303,7 @@ impl From for CompletedTransaction { } } -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize, Clone)] #[allow(clippy::large_enum_variant)] pub enum WalletTransaction { PendingInbound(InboundTransaction), @@ -330,6 +330,7 @@ pub enum TxCancellationReason { Orphan, // 4 TimeLocked, // 5 InvalidTransaction, // 6 + Oversized, // 7 } impl TryFrom for TxCancellationReason { @@ -344,6 +345,7 @@ impl TryFrom for TxCancellationReason { 4 => Ok(TxCancellationReason::Orphan), 5 => Ok(TxCancellationReason::TimeLocked), 6 => Ok(TxCancellationReason::InvalidTransaction), + 7 => Ok(TxCancellationReason::Oversized), code => Err(TransactionConversionError { code: code as i32 }), } } @@ -361,6 +363,7 @@ impl Display for TxCancellationReason { Orphan => "Orphan", TimeLocked => "TimeLocked", InvalidTransaction => "Invalid Transaction", + Oversized => "Oversized", }; fmt.write_str(response) } diff --git a/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs b/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs index 550c3d0e66..ebd3e6f544 100644 --- a/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs +++ b/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs @@ -107,7 +107,8 @@ where Some(peer) => match self.attempt_sync(peer.clone()).await { Ok((num_outputs_recovered, final_height, final_amount, elapsed)) => { debug!(target: LOG_TARGET, "Scanned to height #{}", final_height); - self.finalize(num_outputs_recovered, final_height, final_amount, elapsed)?; + self.finalize(num_outputs_recovered, final_height, final_amount, elapsed) + .await?; return Ok(()); }, Err(e) => { @@ -146,13 +147,18 @@ where } } - fn finalize( - &self, + async fn finalize( + &mut self, num_outputs_recovered: u64, final_height: u64, total_value: MicroMinotari, elapsed: Duration, ) -> Result<(), UtxoScannerError> { + if num_outputs_recovered > 0 { + // This is a best effort; if this fails, it's very likely that it's already busy with a validation. 
+ let _result = self.resources.output_manager_service.validate_txos().await; + let _result = self.resources.transaction_service.validate_transactions().await; + } self.publish_event(UtxoScannerEvent::Progress { current_height: final_height, tip_height: final_height, @@ -327,7 +333,7 @@ where client: &mut BaseNodeWalletRpcClient, ) -> Result<BlockHeader, UtxoScannerError> { let tip_info = client.get_tip_info().await?; - let chain_height = tip_info.metadata.map(|m| m.height_of_longest_chain()).unwrap_or(0); + let chain_height = tip_info.metadata.map(|m| m.best_block_height()).unwrap_or(0); let end_header = client.get_header_by_height(chain_height).await?; let end_header = BlockHeader::try_from(end_header).map_err(UtxoScannerError::ConversionError)?; diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index ce2d79f9e1..33accdfb68 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -20,10 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{cmp, marker::PhantomData, sync::Arc}; +use std::{cmp, marker::PhantomData, sync::Arc, thread}; use blake2::Blake2b; use digest::consts::U32; +use futures::executor::block_on; use log::*; use rand::rngs::OsRng; use tari_common::configuration::bootstrap::ApplicationType; @@ -31,11 +32,13 @@ use tari_common_types::{ tari_address::TariAddress, transaction::{ImportStatus, TxId}, types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey, SignatureWithDomain}, + wallet_types::WalletType, }; use tari_comms::{ - multiaddr::Multiaddr, + multiaddr::{Error as MultiaddrError, Multiaddr}, net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, + tor::TorIdentity, types::{CommsPublicKey, CommsSecretKey}, CommsNode, NodeIdentity, @@ -72,6 +75,7 @@ use tari_p2p::{ initialization::P2pInitializer, services::liveness::{config::LivenessConfig, LivenessInitializer}, PeerSeedsConfig, + TransportType, }; use tari_script::{one_sided_payment_script, ExecutionStack, TariScript}; use tari_service_framework::StackBuilder; @@ -162,6 +166,7 @@ where key_manager_backend: TKeyManagerBackend, shutdown_signal: ShutdownSignal, master_seed: CipherSeed, + wallet_type: WalletType, ) -> Result<Self, WalletError> { let buf_size = cmp::max(WALLET_BUFFER_MIN_SIZE, config.buffer_size); let (publisher, subscription_factory) = pubsub_connector(buf_size); @@ -200,6 +205,7 @@ where key_manager_backend, master_seed, factories.clone(), + wallet_type, )) .add_initializer(TransactionServiceInitializer::::new( config.transaction_service_config, @@ -252,14 +258,52 @@ where let mut handles = stack.build().await?; + let transaction_service_handle = handles.expect_handle::<TransactionServiceHandle>(); let comms = handles .take_handle::<UnspawnedCommsNode>() .expect("P2pInitializer was not added to the stack"); - let comms = initialization::spawn_comms_using_transport(comms, config.p2p.transport).await?; + let comms = if config.p2p.transport.transport_type == TransportType::Tor { + let wallet_db = wallet_database.clone(); + let node_id = comms.node_identity(); + let moved_ts_clone = transaction_service_handle.clone(); + let after_comms = move |identity: TorIdentity| { + // We do this so that we don't have to move in a mutable reference and make the closure an FnMut. 
+ let mut ts = moved_ts_clone.clone(); + let address_string = format!("/onion3/{}:{}", identity.service_id, identity.onion_port); + if let Err(e) = wallet_db.set_tor_identity(identity) { + error!(target: LOG_TARGET, "Failed to set wallet db tor identity: {:?}", e); + } + let result: Result<Multiaddr, MultiaddrError> = address_string.parse(); + if result.is_err() { + error!(target: LOG_TARGET, "Failed to parse tor identity as multiaddr: {:?}", result); + return; + } + let address = result.unwrap(); + if !node_id.public_addresses().contains(&address) { + node_id.add_public_address(address.clone()); + } + // Persist the comms node address and features after it has been spawned to capture any modifications + // made during comms startup. In the case of a Tor Transport the public address could + // have been generated + let _result = wallet_db.set_node_address(address); + thread::spawn(move || { + let result = block_on(ts.restart_transaction_protocols()); + if result.is_err() { + warn!( + target: LOG_TARGET, + "Could not restart transaction negotiation protocols: {:?}", result + ); + } + }); + }; + initialization::spawn_comms_using_transport(comms, config.p2p.transport, after_comms).await? + } else { + let after_comms = |_identity| {}; + initialization::spawn_comms_using_transport(comms, config.p2p.transport, after_comms).await? + }; let mut output_manager_handle = handles.expect_handle::<OutputManagerHandle>(); let key_manager_handle = handles.expect_handle::(); - let transaction_service_handle = handles.expect_handle::<TransactionServiceHandle>(); let contacts_handle = handles.expect_handle::<ContactsServiceHandle>(); let dht = handles.expect_handle::<Dht>(); let store_and_forward_requester = dht.store_and_forward_requester(); @@ -280,14 +324,6 @@ where e })?; - // Persist the comms node address and features after it has been spawned to capture any modifications made - // during comms startup. In the case of a Tor Transport the public address could have been generated - wallet_database.set_node_address( - comms - .node_identity() - .first_public_address() - .ok_or(WalletError::PublicAddressNotSet)?, - )?; wallet_database.set_node_features(comms.node_identity().features())?; let identity_sig = comms.node_identity().identity_signature_read().as_ref().cloned(); if let Some(identity_sig) = identity_sig { @@ -336,10 +372,10 @@ where pub async fn set_base_node_peer( &mut self, public_key: CommsPublicKey, - address: Multiaddr, + address: Option<Multiaddr>, ) -> Result<(), WalletError> { info!( - "Wallet setting base node peer, public key: {}, net address: {}.", + "Wallet setting base node peer, public key: {}, net address: {:?}.", public_key, address ); @@ -354,16 +390,19 @@ where let mut connectivity = self.comms.connectivity(); if let Some(mut current_peer) = peer_manager.find_by_public_key(&public_key).await? { // Only invalidate the identity signature if addresses are different - if current_peer.addresses.contains(&address) { - info!( - target: LOG_TARGET, - "Address for base node differs from storage. Was {}, setting to {}", - current_peer.addresses, - address - ); - - current_peer.addresses.add_address(&address, &PeerAddressSource::Config); - peer_manager.add_peer(current_peer.clone()).await?; + if address.is_some() { + let add = address.unwrap(); + if !current_peer.addresses.contains(&add) { + info!( + target: LOG_TARGET, + "Address for base node differs from storage. 
Was {}, setting to {}", + current_peer.addresses, + add + ); + + current_peer.addresses.add_address(&add, &PeerAddressSource::Config); + peer_manager.add_peer(current_peer.clone()).await?; + } } connectivity .add_peer_to_allow_list(current_peer.node_id.clone()) @@ -371,10 +410,21 @@ where self.wallet_connectivity.set_base_node(current_peer); } else { let node_id = NodeId::from_key(&public_key); + if address.is_none() { + debug!( + target: LOG_TARGET, + "Trying to add new peer without an address", + ); + return Err(WalletError::ArgumentError { + argument: "set_base_node_peer, address".to_string(), + value: "{Missing}".to_string(), + message: "New peers need the address filled in".to_string(), + }); + } let peer = Peer::new( public_key, node_id, - MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config), + MultiaddressesWithStats::from_addresses_with_source(vec![address.unwrap()], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), @@ -714,6 +764,24 @@ pub fn read_or_create_master_seed( Ok(master_seed) } +pub fn read_or_create_wallet_type( + wallet_type: Option, + db: &WalletDatabase, +) -> Result { + let db_wallet_type = db.get_wallet_type()?; + + match (db_wallet_type, wallet_type) { + (None, None) => { + panic!("Something is very wrong, no wallet type was found in the DB, or provided (on first run)") + }, + (None, Some(t)) => { + db.set_wallet_type(t)?; + Ok(t) + }, + (Some(t), _) => Ok(t), + } +} + pub fn derive_comms_secret_key(master_seed: &CipherSeed) -> Result { let comms_key_manager = KeyManager::::from( master_seed.clone(), diff --git a/base_layer/wallet/tests/output_manager_service_tests/service.rs b/base_layer/wallet/tests/output_manager_service_tests/service.rs index 8230901d00..d879c86c1a 100644 --- a/base_layer/wallet/tests/output_manager_service_tests/service.rs +++ b/base_layer/wallet/tests/output_manager_service_tests/service.rs @@ -46,7 +46,7 @@ use rand::{rngs::OsRng, RngCore}; use tari_common::configuration::Network; use tari_common_types::{ transaction::TxId, - types::{ComAndPubSignature, PublicKey}, + types::{ComAndPubSignature, FixedHash, PublicKey}, }; use tari_comms::{ peer_manager::{NodeIdentity, PeerFeatures}, @@ -311,7 +311,7 @@ async fn fee_estimate() { .await; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let fee_calc = Fee::new(*create_consensus_constants(0).transaction_weight_params()); @@ -417,6 +417,7 @@ async fn test_utxo_selection_no_chain_metadata() { assert!(matches!(err, OutputManagerError::NotEnoughFunds)); // create 10 utxos with maturity at heights from 1 to 10 + let mut unspent = Vec::with_capacity(10); for i in 1..=10 { let uo = make_input_with_features( &mut OsRng.clone(), @@ -429,10 +430,9 @@ async fn test_utxo_selection_no_chain_metadata() { ) .await; oms.add_output(uo.clone(), None).await.unwrap(); - backend - .mark_output_as_unspent(uo.hash(&key_manager).await.unwrap()) - .unwrap(); + unspent.push((uo.hash(&key_manager).await.unwrap(), true)); } + backend.mark_outputs_as_unspent(unspent).unwrap(); // but we have no chain state so the lowest maturity should be used let stp = oms @@ -549,6 +549,7 @@ async fn test_utxo_selection_with_chain_metadata() { assert!(matches!(err, OutputManagerError::NotEnoughFunds)); // create 10 utxos with maturity at 
heights from 1 to 10 + let mut unspent = Vec::with_capacity(10); for i in 1..=10 { let uo = make_input_with_features( &mut OsRng.clone(), @@ -561,10 +562,9 @@ async fn test_utxo_selection_with_chain_metadata() { ) .await; oms.add_output(uo.clone(), None).await.unwrap(); - backend - .mark_output_as_unspent(uo.hash(&key_manager).await.unwrap()) - .unwrap(); + unspent.push((uo.hash(&key_manager).await.unwrap(), true)); } + backend.mark_outputs_as_unspent(unspent).unwrap(); let utxos = oms.get_unspent_outputs().await.unwrap(); assert_eq!(utxos.len(), 10); @@ -710,7 +710,7 @@ async fn test_utxo_selection_with_tx_priority() { .await .unwrap(); backend - .mark_output_as_unspent(uo_high.hash(&key_manager).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo_high.hash(&key_manager).await.unwrap(), true)]) .unwrap(); // Low priority let uo_low_2 = make_input_with_features( @@ -725,7 +725,7 @@ async fn test_utxo_selection_with_tx_priority() { .await; oms.add_output(uo_low_2.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo_low_2.hash(&key_manager).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo_low_2.hash(&key_manager).await.unwrap(), true)]) .unwrap(); let utxos = oms.get_unspent_outputs().await.unwrap(); @@ -769,7 +769,8 @@ async fn send_not_enough_funds() { let backend = OutputManagerSqliteDatabase::new(connection.clone()); let mut oms = setup_output_manager_service(backend.clone(), true).await; - let num_outputs = 20; + let num_outputs = 20usize; + let mut unspent: Vec<(FixedHash, bool)> = Vec::with_capacity(num_outputs); for _i in 0..num_outputs { let uo = make_input( &mut OsRng.clone(), @@ -779,16 +780,15 @@ async fn send_not_enough_funds() { ) .await; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); - backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) - .unwrap(); + unspent.push((uo.hash(&oms.key_manager_handle).await.unwrap(), true)); } + backend.mark_outputs_as_unspent(unspent).unwrap(); match oms .output_manager_handle .prepare_transaction_to_send( TxId::new_random(), - MicroMinotari::from(num_outputs * 2000), + MicroMinotari::from(num_outputs as u64 * 2000), UtxoSelectionCriteria::default(), OutputFeatures::default(), MicroMinotari::from(4), @@ -834,7 +834,7 @@ async fn send_no_change() { oms.output_manager_handle.add_output(uo_1.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo_1.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo_1.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let value2 = 8000; let uo_2 = create_wallet_output_with_data( @@ -848,7 +848,7 @@ async fn send_no_change() { .unwrap(); oms.output_manager_handle.add_output(uo_2.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo_2.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo_2.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let stp = oms @@ -900,7 +900,7 @@ async fn send_not_enough_for_change() { .unwrap(); oms.output_manager_handle.add_output(uo_1.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo_1.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo_1.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let value2 = MicroMinotari(800); let uo_2 = create_wallet_output_with_data( @@ -914,7 +914,7 @@ async fn send_not_enough_for_change() { .unwrap(); oms.output_manager_handle.add_output(uo_2.clone(), None).await.unwrap(); backend - 
.mark_output_as_unspent(uo_2.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo_2.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); match oms @@ -945,6 +945,7 @@ async fn cancel_transaction() { let mut oms = setup_output_manager_service(backend.clone(), true).await; let num_outputs = 20; + let mut unspent: Vec<(FixedHash, bool)> = Vec::with_capacity(num_outputs); for _i in 0..num_outputs { let uo = make_input( &mut OsRng.clone(), @@ -954,10 +955,9 @@ async fn cancel_transaction() { ) .await; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); - backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) - .unwrap(); + unspent.push((uo.hash(&oms.key_manager_handle).await.unwrap(), true)); } + backend.mark_outputs_as_unspent(unspent).unwrap(); let stp = oms .output_manager_handle .prepare_transaction_to_send( @@ -1046,7 +1046,7 @@ async fn test_get_balance() { total += uo.value; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo = make_input( @@ -1059,7 +1059,7 @@ async fn test_get_balance() { total += uo.value; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let send_value = MicroMinotari::from(1000); @@ -1114,7 +1114,7 @@ async fn sending_transaction_persisted_while_offline() { .await; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo = make_input( &mut OsRng.clone(), @@ -1125,7 +1125,7 @@ async fn sending_transaction_persisted_while_offline() { .await; oms.output_manager_handle.add_output(uo.clone(), None).await.unwrap(); backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let balance = oms.output_manager_handle.get_balance().await.unwrap(); @@ -1157,14 +1157,14 @@ async fn sending_transaction_persisted_while_offline() { assert_eq!(balance.pending_outgoing_balance, available_balance / 2); // This simulates an offline wallet with a queued transaction that has not been sent to the receiving wallet - // yet + // This should be cleared as the transaction will be dropped. 
drop(oms.output_manager_handle); let mut oms = setup_output_manager_service(backend.clone(), true).await; let balance = oms.output_manager_handle.get_balance().await.unwrap(); - assert_eq!(balance.available_balance, available_balance / 2); + assert_eq!(balance.available_balance, available_balance); assert_eq!(balance.time_locked_balance.unwrap(), MicroMinotari::from(0)); - assert_eq!(balance.pending_outgoing_balance, available_balance / 2); + assert_eq!(balance.pending_outgoing_balance, MicroMinotari::from(0)); // Check that if the pending tx is confirmed the encumbrance persists after restart let stp = oms .output_manager_handle .prepare_transaction_to_send( @@ -1193,9 +1193,9 @@ let mut oms = setup_output_manager_service(backend, true).await; let balance = oms.output_manager_handle.get_balance().await.unwrap(); - assert_eq!(balance.available_balance, MicroMinotari::from(0)); + assert_eq!(balance.available_balance, MicroMinotari::from(10000)); assert_eq!(balance.time_locked_balance.unwrap(), MicroMinotari::from(0)); - assert_eq!(balance.pending_outgoing_balance, available_balance); + assert_eq!(balance.pending_outgoing_balance, MicroMinotari::from(10000)); } #[tokio::test] @@ -1215,13 +1215,13 @@ async fn coin_split_with_change() { assert!(oms.output_manager_handle.add_output(uo3.clone(), None).await.is_ok()); // let's mark them as unspent so we can use them backend - .mark_output_as_unspent(uo1.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); backend - .mark_output_as_unspent(uo2.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo2.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); backend - .mark_output_as_unspent(uo3.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo3.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let fee_per_gram = MicroMinotari::from(5); @@ -1279,13 +1279,13 @@ async fn coin_split_no_change() { assert!(oms.output_manager_handle.add_output(uo3.clone(), None).await.is_ok()); // let's mark them as unspent so we can use them backend - .mark_output_as_unspent(uo1.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); backend - .mark_output_as_unspent(uo2.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo2.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); backend - .mark_output_as_unspent(uo3.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo3.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let (_tx_id, coin_split_tx, amount) = oms .output_manager_handle @@ -1309,7 +1309,7 @@ async fn it_handles_large_coin_splits() { assert!(oms.output_manager_handle.add_output(uo.clone(), None).await.is_ok()); // let's mark them as unspent so we can use them backend - .mark_output_as_unspent(uo.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let fee_per_gram = MicroMinotari::from(1); @@ -1355,7 +1355,7 @@ async fn test_txo_validation() { .await .unwrap(); oms_db - .mark_output_as_unspent(output1.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(output1.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let output2_value = 2_000_000; @@ -1373,7 +1373,7 @@ async fn 
test_txo_validation() { .await .unwrap(); oms_db - .mark_output_as_unspent(output2.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(output2.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let output3_value = 4_000_000; @@ -1391,7 +1391,7 @@ async fn test_txo_validation() { .unwrap(); oms_db - .mark_output_as_unspent(output3.hash(&oms.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(output3.hash(&oms.key_manager_handle).await.unwrap(), true)]) .unwrap(); let mut block1_header = BlockHeader::new(1); diff --git a/base_layer/wallet/tests/output_manager_service_tests/storage.rs b/base_layer/wallet/tests/output_manager_service_tests/storage.rs index 5b069b14ba..86653749cf 100644 --- a/base_layer/wallet/tests/output_manager_service_tests/storage.rs +++ b/base_layer/wallet/tests/output_manager_service_tests/storage.rs @@ -20,23 +20,31 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use std::convert::TryFrom; + use minotari_wallet::output_manager_service::{ error::OutputManagerStorageError, service::Balance, storage::{ database::{OutputManagerBackend, OutputManagerDatabase}, models::DbWalletOutput, - sqlite_db::OutputManagerSqliteDatabase, + sqlite_db::{OutputManagerSqliteDatabase, ReceivedOutputInfoForBatch, SpentOutputInfoForBatch}, OutputSource, + OutputStatus, }, }; use rand::{rngs::OsRng, RngCore}; -use tari_common_types::{transaction::TxId, types::FixedHash}; +use tari_common_types::{ + transaction::TxId, + types::{FixedHash, HashOutput, PrivateKey}, +}; use tari_core::transactions::{ key_manager::create_memory_db_key_manager, tari_amount::MicroMinotari, transaction_components::OutputFeatures, }; +use tari_crypto::keys::SecretKey; +use tari_utilities::{hex::Hex, ByteArray}; use crate::support::{data::get_temp_sqlite_database_connection, utils::make_input}; @@ -47,6 +55,7 @@ pub async fn test_db_backend(backend: T) { // Add some unspent outputs let mut unspent_outputs = Vec::new(); let key_manager = create_memory_db_key_manager(); + let mut unspent = Vec::with_capacity(5); for i in 0..5 { let uo = make_input( &mut OsRng, @@ -60,9 +69,10 @@ pub async fn test_db_backend(backend: T) { .unwrap(); kmo.wallet_output.features.maturity = i; db.add_unspent_output(kmo.clone()).unwrap(); - db.mark_output_as_unspent(kmo.hash).unwrap(); + unspent.push((kmo.hash, true)); unspent_outputs.push(kmo); } + db.mark_outputs_as_unspent(unspent).unwrap(); let time_locked_outputs = db.get_timelocked_outputs(3).unwrap(); assert_eq!(time_locked_outputs.len(), 1); @@ -111,7 +121,7 @@ pub async fn test_db_backend(backend: T) { .await .unwrap(); db.add_unspent_output(kmo.clone()).unwrap(); - db.mark_output_as_unspent(kmo.hash).unwrap(); + db.mark_outputs_as_unspent(vec![(kmo.hash, true)]).unwrap(); pending_tx.outputs_to_be_spent.push(kmo); } for _ in 0..2 { @@ -163,7 +173,7 @@ pub async fn test_db_backend(backend: T) { let balance = db.get_balance(Some(3)).unwrap(); assert_eq!(balance, Balance { - available_balance, + available_balance: available_balance - time_locked_balance, time_locked_balance: Some(time_locked_balance), pending_incoming_balance, pending_outgoing_balance @@ -182,13 +192,27 @@ pub async fn test_db_backend(backend: T) { }); // Set first pending tx to mined but unconfirmed + let mut updates = Vec::new(); for o in &pending_txs[0].outputs_to_be_received { - 
db.set_received_output_mined_height_and_status(o.hash, 2, FixedHash::zero(), false, 0) - .unwrap(); + updates.push(ReceivedOutputInfoForBatch { + commitment: o.commitment.clone(), + mined_height: 2, + mined_in_block: FixedHash::zero(), + confirmed: false, + mined_timestamp: 0, + }); } + db.set_received_outputs_mined_height_and_statuses(updates).unwrap(); + let mut spent = Vec::new(); for o in &pending_txs[0].outputs_to_be_spent { - db.mark_output_as_spent(o.hash, 3, FixedHash::zero(), false).unwrap(); + spent.push(SpentOutputInfoForBatch { + commitment: o.commitment.clone(), + confirmed: false, + mark_deleted_at_height: 3, + mark_deleted_in_block: FixedHash::zero(), + }); } + db.mark_outputs_as_spent(spent).unwrap(); // Balance shouldn't change let balance = db.get_balance(None).unwrap(); @@ -201,13 +225,27 @@ pub async fn test_db_backend(backend: T) { }); // Set second pending tx to mined and confirmed + let mut updates = Vec::new(); for o in &pending_txs[1].outputs_to_be_received { - db.set_received_output_mined_height_and_status(o.hash, 4, FixedHash::zero(), true, 0) - .unwrap(); + updates.push(ReceivedOutputInfoForBatch { + commitment: o.commitment.clone(), + mined_height: 4, + mined_in_block: FixedHash::zero(), + confirmed: true, + mined_timestamp: 0, + }); } + db.set_received_outputs_mined_height_and_statuses(updates).unwrap(); + let mut spent = Vec::new(); for o in &pending_txs[1].outputs_to_be_spent { - db.mark_output_as_spent(o.hash, 5, FixedHash::zero(), true).unwrap(); + spent.push(SpentOutputInfoForBatch { + commitment: o.commitment.clone(), + confirmed: true, + mark_deleted_at_height: 5, + mark_deleted_in_block: FixedHash::zero(), + }); } + db.mark_outputs_as_spent(spent).unwrap(); // Balance with confirmed second pending tx let mut available_balance = unspent_outputs @@ -288,12 +326,12 @@ pub async fn test_db_backend(backend: T) { assert_eq!(mined_unspent_outputs.len(), 4); // Spend a received and confirmed output - db.mark_output_as_spent( - pending_txs[1].outputs_to_be_received[0].hash, - 6, - FixedHash::zero(), - true, - ) + db.mark_outputs_as_spent(vec![SpentOutputInfoForBatch { + commitment: pending_txs[1].outputs_to_be_received[0].commitment.clone(), + confirmed: true, + mark_deleted_at_height: 6, + mark_deleted_in_block: FixedHash::zero(), + }]) .unwrap(); let mined_unspent_outputs = db.fetch_mined_unspent_outputs().unwrap(); @@ -330,11 +368,203 @@ pub async fn test_db_backend(backend: T) { #[tokio::test] pub async fn test_output_manager_sqlite_db() { + //` cargo test --release --test + //` wallet_integration_tests output_manager_service_tests::storage::test_output_manager_sqlite_db + //` > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + let (connection, _tempdir) = get_temp_sqlite_database_connection(); test_db_backend(OutputManagerSqliteDatabase::new(connection)).await; } +#[tokio::test] +#[allow(clippy::too_many_lines)] +pub async fn test_raw_custom_queries_regression() { + let (connection, _tempdir) = get_temp_sqlite_database_connection(); + let backend = OutputManagerSqliteDatabase::new(connection); + let db = OutputManagerDatabase::new(backend); + + // Add some unspent outputs + let mut unspent_outputs = Vec::new(); + let key_manager = create_memory_db_key_manager(); + let mut unspent = Vec::with_capacity(5); + for i in 0..5 { + let uo = make_input( + &mut OsRng, + MicroMinotari::from(100 + OsRng.next_u64() % 1000), + &OutputFeatures::default(), + &key_manager, + ) + .await; + let mut kmo = 
DbWalletOutput::from_wallet_output(uo, &key_manager, None, OutputSource::Standard, None, None)
+            .await
+            .unwrap();
+        kmo.wallet_output.features.maturity = i;
+        db.add_unspent_output(kmo.clone()).unwrap();
+        unspent.push((kmo.hash, true));
+        unspent_outputs.push(kmo);
+    }
+
+    let unknown = HashOutput::try_from(PrivateKey::random(&mut rand::thread_rng()).as_bytes()).unwrap();
+    let mut unspent_with_unknown = unspent.clone();
+    unspent_with_unknown.push((unknown, true));
+    assert!(db.mark_outputs_as_unspent(unspent_with_unknown).is_err());
+
+    db.mark_outputs_as_unspent(unspent).unwrap();
+
+    // Add some sent transactions with outputs to be spent and received
+    struct PendingTransactionOutputs {
+        tx_id: TxId,
+        outputs_to_be_spent: Vec<DbWalletOutput>,
+        outputs_to_be_received: Vec<DbWalletOutput>,
+    }
+
+    let mut pending_txs = Vec::new();
+    for _ in 0..3 {
+        let mut pending_tx = PendingTransactionOutputs {
+            tx_id: TxId::new_random(),
+            outputs_to_be_spent: vec![],
+            outputs_to_be_received: vec![],
+        };
+        for _ in 0..4 {
+            let kmo = make_input(
+                &mut OsRng,
+                MicroMinotari::from(100 + OsRng.next_u64() % 1000),
+                &OutputFeatures::default(),
+                &key_manager,
+            )
+            .await;
+            let kmo = DbWalletOutput::from_wallet_output(kmo, &key_manager, None, OutputSource::Standard, None, None)
+                .await
+                .unwrap();
+            db.add_unspent_output(kmo.clone()).unwrap();
+            db.mark_outputs_as_unspent(vec![(kmo.hash, true)]).unwrap();
+            pending_tx.outputs_to_be_spent.push(kmo);
+        }
+        for _ in 0..2 {
+            let uo = make_input(
+                &mut OsRng,
+                MicroMinotari::from(100 + OsRng.next_u64() % 1000),
+                &OutputFeatures::default(),
+                &key_manager,
+            )
+            .await;
+            let kmo = DbWalletOutput::from_wallet_output(uo, &key_manager, None, OutputSource::Standard, None, None)
+                .await
+                .unwrap();
+            pending_tx.outputs_to_be_received.push(kmo);
+        }
+        db.encumber_outputs(
+            pending_tx.tx_id,
+            pending_tx.outputs_to_be_spent.clone(),
+            pending_tx.outputs_to_be_received.clone(),
+        )
+        .unwrap();
+        pending_txs.push(pending_tx);
+    }
+    for v in &pending_txs {
+        db.confirm_encumbered_outputs(v.tx_id).unwrap();
+    }
+
+    // Custom query test section
+    // - `set_received_outputs_mined_height_and_statuses`
+
+    let mut updates_info = Vec::new();
+    let mut block_hashes = Vec::new();
+    for (i, to_be_received) in pending_txs[0].outputs_to_be_received.iter().enumerate() {
+        let k = PrivateKey::random(&mut OsRng);
+        let mined_in_block = FixedHash::from_hex(&k.to_hex()).unwrap();
+        block_hashes.push(mined_in_block);
+        updates_info.push(ReceivedOutputInfoForBatch {
+            commitment: to_be_received.commitment.clone(),
+            mined_height: (i + 2) as u64,
+            mined_in_block,
+            confirmed: i % 2 == 0,
+            mined_timestamp: 0,
+        });
+    }
+
+    let uo = make_input(
+        &mut OsRng,
+        MicroMinotari::from(100 + OsRng.next_u64() % 1000),
+        &OutputFeatures::default(),
+        &key_manager,
+    )
+    .await;
+    let unknown = DbWalletOutput::from_wallet_output(uo, &key_manager, None, OutputSource::Standard, None, None)
+        .await
+        .unwrap();
+    let mut updates_info_with_unknown = updates_info.clone();
+    updates_info_with_unknown.push(ReceivedOutputInfoForBatch {
+        commitment: unknown.commitment.clone(),
+        mined_height: 2,
+        mined_in_block: block_hashes[0],
+        confirmed: true,
+        mined_timestamp: 0,
+    });
+    assert!(db
+        .set_received_outputs_mined_height_and_statuses(updates_info_with_unknown)
+        .is_err());
+
+    db.set_received_outputs_mined_height_and_statuses(updates_info).unwrap();
+
+    for (i, to_be_received) in pending_txs[0].outputs_to_be_received.iter().enumerate() {
+        let unspent_output =
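+        // Verify the per-output effect of the batch update: entries submitted at
+        // even indices had `confirmed: true` and should come back as
+        // `OutputStatus::Unspent`, while odd indices remain
+        // `OutputStatus::UnspentMinedUnconfirmed`.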
db.fetch_by_commitment(to_be_received.commitment.clone()).unwrap(); + assert_eq!(unspent_output.mined_height.unwrap(), (i + 2) as u64); + assert_eq!(unspent_output.mined_in_block.unwrap(), block_hashes[i]); + assert_eq!( + unspent_output.status, + if i % 2 == 0 { + OutputStatus::Unspent + } else { + OutputStatus::UnspentMinedUnconfirmed + } + ); + } + + // - `mark_outputs_as_spent` + + let mut updates_info = Vec::new(); + let mut block_hashes = Vec::new(); + for (i, to_be_spent) in pending_txs[0].outputs_to_be_spent.iter().enumerate() { + let k = PrivateKey::random(&mut OsRng); + let mark_deleted_in_block = FixedHash::from_hex(&k.to_hex()).unwrap(); + block_hashes.push(mark_deleted_in_block); + updates_info.push(SpentOutputInfoForBatch { + commitment: to_be_spent.commitment.clone(), + confirmed: i % 2 == 0, + mark_deleted_at_height: (i + 3) as u64, + mark_deleted_in_block, + }); + } + + let mut updates_info_with_unknown = updates_info.clone(); + updates_info_with_unknown.push(SpentOutputInfoForBatch { + commitment: unknown.commitment, + confirmed: true, + mark_deleted_at_height: 4, + mark_deleted_in_block: block_hashes[0], + }); + assert!(db.mark_outputs_as_spent(updates_info_with_unknown).is_err()); + + db.mark_outputs_as_spent(updates_info).unwrap(); + + for (i, to_be_spent) in pending_txs[0].outputs_to_be_spent.iter().enumerate() { + let spent_output = db.fetch_by_commitment(to_be_spent.commitment.clone()).unwrap(); + assert_eq!(spent_output.marked_deleted_at_height.unwrap(), (i + 3) as u64); + assert_eq!(spent_output.marked_deleted_in_block.unwrap(), block_hashes[i]); + assert_eq!( + spent_output.status, + if i % 2 == 0 { + OutputStatus::Spent + } else { + OutputStatus::SpentMinedUnconfirmed + } + ); + } +} + #[tokio::test] pub async fn test_short_term_encumberance() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); @@ -356,7 +586,7 @@ pub async fn test_short_term_encumberance() { .unwrap(); kmo.wallet_output.features.maturity = i; db.add_unspent_output(kmo.clone()).unwrap(); - db.mark_output_as_unspent(kmo.hash).unwrap(); + db.mark_outputs_as_unspent(vec![(kmo.hash, true)]).unwrap(); unspent_outputs.push(kmo); } @@ -418,7 +648,13 @@ pub async fn test_no_duplicate_outputs() { // add it to the database let result = db.add_unspent_output(kmo.clone()); assert!(result.is_ok()); - let result = db.set_received_output_mined_height_and_status(kmo.hash, 1, FixedHash::zero(), true, 0); + let result = db.set_received_outputs_mined_height_and_statuses(vec![ReceivedOutputInfoForBatch { + commitment: kmo.commitment.clone(), + mined_height: 1, + mined_in_block: FixedHash::zero(), + confirmed: true, + mined_timestamp: 0, + }]); assert!(result.is_ok()); let outputs = db.fetch_mined_unspent_outputs().unwrap(); assert_eq!(outputs.len(), 1); @@ -459,14 +695,70 @@ pub async fn test_mark_as_unmined() { // add it to the database db.add_unspent_output(kmo.clone()).unwrap(); - db.set_received_output_mined_height_and_status(kmo.hash, 1, FixedHash::zero(), true, 0) - .unwrap(); + db.set_received_outputs_mined_height_and_statuses(vec![ReceivedOutputInfoForBatch { + commitment: kmo.commitment.clone(), + mined_height: 1, + mined_in_block: FixedHash::zero(), + confirmed: true, + mined_timestamp: 0, + }]) + .unwrap(); let o = db.get_last_mined_output().unwrap().unwrap(); assert_eq!(o.hash, kmo.hash); - db.set_output_to_unmined_and_invalid(kmo.hash).unwrap(); + db.set_outputs_to_unmined_and_invalid(vec![kmo.hash]).unwrap(); assert!(db.get_last_mined_output().unwrap().is_none()); let o = 
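+    // `set_outputs_to_unmined_and_invalid` is the batch form of the old
+    // `set_output_to_unmined_and_invalid`: a single output is handled by passing a
+    // one-element vector, e.g. `db.set_outputs_to_unmined_and_invalid(vec![kmo.hash])`.
+    // Afterwards the output loses its mined height and block and no longer shows up
+    // via `get_last_mined_output()`, as asserted here.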
db.get_invalid_outputs().unwrap().pop().unwrap();
     assert_eq!(o.hash, kmo.hash);
     assert!(o.mined_height.is_none());
     assert!(o.mined_in_block.is_none());
+
+    // Test batch mode operations
+    // - Add `batch_count` outputs and remember the hashes
+    let batch_count = 7usize;
+    let mut batch_hashes = Vec::with_capacity(batch_count);
+    let mut batch_outputs = Vec::with_capacity(batch_count);
+    let mut batch_info = Vec::with_capacity(batch_count);
+    for i in 0..batch_count {
+        let uo = make_input(
+            &mut OsRng,
+            MicroMinotari::from(1000),
+            &OutputFeatures::default(),
+            &key_manager,
+        )
+        .await;
+        let kmo = DbWalletOutput::from_wallet_output(uo, &key_manager, None, OutputSource::Standard, None, None)
+            .await
+            .unwrap();
+        db.add_unspent_output(kmo.clone()).unwrap();
+        batch_hashes.push(kmo.hash);
+        batch_info.push(ReceivedOutputInfoForBatch {
+            commitment: kmo.commitment.clone(),
+            mined_height: i as u64 + 1,
+            mined_in_block: FixedHash::zero(),
+            confirmed: true,
+            mined_timestamp: i as u64,
+        });
+        batch_outputs.push(kmo);
+    }
+
+    // - Perform batch mode operations
+    db.set_received_outputs_mined_height_and_statuses(batch_info).unwrap();
+
+    let last = db.get_last_mined_output().unwrap().unwrap();
+    assert_eq!(last.hash, batch_outputs.last().unwrap().hash);
+
+    db.set_outputs_to_unmined_and_invalid(batch_hashes).unwrap();
+    assert!(db.get_last_mined_output().unwrap().is_none());
+
+    let invalid_outputs = db.get_invalid_outputs().unwrap();
+    let mut batch_invalid_count = 0;
+    for invalid in invalid_outputs {
+        if let Some(kmo) = batch_outputs.iter().find(|wo| wo.hash == invalid.hash) {
+            assert_eq!(invalid.hash, kmo.hash);
+            assert!(invalid.mined_height.is_none());
+            assert!(invalid.mined_in_block.is_none());
+            batch_invalid_count += 1;
+        }
+    }
+    assert_eq!(batch_invalid_count, batch_count);
 }
diff --git a/base_layer/wallet/tests/support/base_node_service_mock.rs b/base_layer/wallet/tests/support/base_node_service_mock.rs
index 46f755d12d..ebfb37f9ef 100644
--- a/base_layer/wallet/tests/support/base_node_service_mock.rs
+++ b/base_layer/wallet/tests/support/base_node_service_mock.rs
@@ -79,7 +79,7 @@ impl MockBaseNodeService {
     pub fn set_base_node_state(&mut self, height: Option<u64>) {
         let (chain_metadata, is_synced) = match height {
             Some(height) => {
-                let metadata = ChainMetadata::new(height, FixedHash::zero(), 0, 0, 0.into(), 0);
+                let metadata = ChainMetadata::new(height, FixedHash::zero(), 0, 0, 1.into(), 0).unwrap();
                 (Some(metadata), Some(true))
             },
             None => (None, None),
@@ -95,7 +95,7 @@ impl MockBaseNodeService {
     }
 
     pub fn set_default_base_node_state(&mut self) {
-        let metadata = ChainMetadata::new(i64::MAX as u64, FixedHash::zero(), 0, 0, 0.into(), 0);
+        let metadata = ChainMetadata::new(i64::MAX as u64, FixedHash::zero(), 0, 0, 1.into(), 0).unwrap();
         self.state = BaseNodeState {
             node_id: None,
             chain_metadata: Some(metadata),
diff --git a/base_layer/wallet/tests/support/comms_and_services.rs b/base_layer/wallet/tests/support/comms_and_services.rs
index b6c7344f0e..cf53a469a3 100644
--- a/base_layer/wallet/tests/support/comms_and_services.rs
+++ b/base_layer/wallet/tests/support/comms_and_services.rs
@@ -58,11 +58,14 @@ pub async fn setup_comms_services(
         .await
         .unwrap();
 
-    let comms = comms.spawn_with_transport(MemoryTransport).await.unwrap();
+    let mut comms = comms.spawn_with_transport(MemoryTransport).await.unwrap();
+    let address = comms
+        .connection_manager_requester()
+        .wait_until_listening()
+        .await
+        .unwrap();
 
     // Set the public address for tests
-    comms
-        .node_identity()
-
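+    // Rationale for this change (as reflected in the new code): with the memory
+    // transport the final bind address is only known once the listener is up, so the
+    // tests now wait on `wait_until_listening()` and register `address.bind_address()`
+    // on the node identity instead of the pre-spawn `listening_address()`.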
.add_public_address(comms.listening_address().clone()); + comms.node_identity().add_public_address(address.bind_address().clone()); (comms, dht) } diff --git a/base_layer/wallet/tests/support/comms_rpc.rs b/base_layer/wallet/tests/support/comms_rpc.rs index 6d4a41e4b6..351c9c692b 100644 --- a/base_layer/wallet/tests/support/comms_rpc.rs +++ b/base_layer/wallet/tests/support/comms_rpc.rs @@ -134,23 +134,23 @@ impl BaseNodeWalletRpcMockState { })), transaction_query_response: Arc::new(Mutex::new(TxQueryResponse { location: TxLocation::InMempool, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, })), transaction_query_batch_response: Arc::new(Mutex::new(TxQueryBatchResponsesProto { responses: vec![], - tip_hash: FixedHash::zero().to_vec(), + best_block_hash: FixedHash::zero().to_vec(), is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, tip_mined_timestamp: EpochTime::now().as_u64(), })), tip_info_response: Arc::new(Mutex::new(TipInfoResponse { metadata: Some(ChainMetadataProto { - height_of_longest_chain: std::i64::MAX as u64, - best_block: FixedHash::zero().to_vec(), + best_block_height: std::i64::MAX as u64, + best_block_hash: FixedHash::zero().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: EpochTime::now().as_u64(), @@ -930,8 +930,8 @@ mod test { assert_eq!(calls.len(), 1); let chain_metadata = ChainMetadata { - height_of_longest_chain: 444, - best_block: vec![], + best_block_height: 444, + best_block_hash: vec![], accumulated_difficulty: vec![], pruned_height: 0, timestamp: EpochTime::now().as_u64(), @@ -943,6 +943,6 @@ mod test { let resp = client.get_tip_info().await.unwrap(); assert!(!resp.is_synced); - assert_eq!(resp.metadata.unwrap().height_of_longest_chain(), 444); + assert_eq!(resp.metadata.unwrap().best_block_height(), 444); } } diff --git a/base_layer/wallet/tests/support/output_manager_service_mock.rs b/base_layer/wallet/tests/support/output_manager_service_mock.rs index 87cc1b4b1c..9f9e6ea3fc 100644 --- a/base_layer/wallet/tests/support/output_manager_service_mock.rs +++ b/base_layer/wallet/tests/support/output_manager_service_mock.rs @@ -146,6 +146,7 @@ impl OutputManagerServiceMock { e }); }, + OutputManagerRequest::ValidateUtxos => {}, _ => panic!("Output Manager Service Mock does not support this call"), } } diff --git a/base_layer/wallet/tests/support/transaction_service_mock.rs b/base_layer/wallet/tests/support/transaction_service_mock.rs index 7365cf9935..59cdba9e91 100644 --- a/base_layer/wallet/tests/support/transaction_service_mock.rs +++ b/base_layer/wallet/tests/support/transaction_service_mock.rs @@ -110,6 +110,7 @@ impl TransactionServiceMock { e }); }, + TransactionServiceRequest::ValidateTransactions => {}, _ => panic!("Transaction Service Mock does not support this call"), } } diff --git a/base_layer/wallet/tests/support/utils.rs b/base_layer/wallet/tests/support/utils.rs index 704ff911f6..65da7d7baa 100644 --- a/base_layer/wallet/tests/support/utils.rs +++ b/base_layer/wallet/tests/support/utils.rs @@ -52,6 +52,17 @@ pub async fn make_input( .unwrap() } +pub async fn make_fake_input_from_copy( + wallet_output: &mut WalletOutput, + key_manager: &MemoryDbKeyManager, +) -> WalletOutput { + let (spend_key_id, _spend_key_pk, script_key_id, _script_key_pk) = + key_manager.get_next_spend_and_script_key_ids().await.unwrap(); + wallet_output.spending_key_id = spend_key_id; + wallet_output.script_key_id = 
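+    // `make_fake_input_from_copy` clones one reference output while rotating in
+    // freshly derived spend and script key ids, so the dust tests can mint thousands
+    // of distinct inputs without running the full `make_input` path each time.
+    // Typical usage, as in the dust tests below:
+    //     let uo = make_fake_input_from_copy(&mut uo_reference, &key_manager).await;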
script_key_id;
+    wallet_output.clone()
+}
+
 pub async fn create_wallet_output_from_sender_data(
     info: &TransactionSenderMessage,
     key_manager: &MemoryDbKeyManager,
diff --git a/base_layer/wallet/tests/transaction_service_tests/service.rs b/base_layer/wallet/tests/transaction_service_tests/service.rs
index 0a7925ac27..5a25872894 100644
--- a/base_layer/wallet/tests/transaction_service_tests/service.rs
+++ b/base_layer/wallet/tests/transaction_service_tests/service.rs
@@ -53,7 +53,7 @@ use minotari_wallet::{
         storage::{
             database::{OutputManagerBackend, OutputManagerDatabase},
             models::KnownOneSidedPaymentScript,
-            sqlite_db::OutputManagerSqliteDatabase,
+            sqlite_db::{OutputManagerSqliteDatabase, ReceivedOutputInfoForBatch},
         },
         OutputManagerServiceInitializer,
         UtxoSelectionCriteria,
@@ -63,7 +63,12 @@ use minotari_wallet::{
         sqlite_db::wallet::WalletSqliteDatabase,
         sqlite_utilities::{run_migration_and_create_sqlite_connection, WalletDbConnection},
     },
-    test_utils::{create_consensus_constants, make_wallet_database_connection, random_string},
+    test_utils::{
+        create_consensus_constants,
+        make_wallet_database_connection,
+        make_wallet_database_memory_connection,
+        random_string,
+    },
     transaction_service::{
         config::TransactionServiceConfig,
         error::TransactionServiceError,
@@ -85,12 +90,16 @@ use tari_common_types::{
     chain_metadata::ChainMetadata,
     tari_address::TariAddress,
     transaction::{ImportStatus, TransactionDirection, TransactionStatus, TxId},
-    types::{FixedHash, HashOutput, PrivateKey, PublicKey, Signature},
+    types::{FixedHash, PrivateKey, PublicKey, Signature},
+    wallet_types::WalletType,
 };
 use tari_comms::{
     message::EnvelopeBody,
     peer_manager::{NodeIdentity, PeerFeatures},
-    protocol::rpc::{mock::MockRpcServer, NamedProtocolService},
+    protocol::{
+        rpc,
+        rpc::{mock::MockRpcServer, NamedProtocolService},
+    },
     test_utils::node_identity::build_node_identity,
     types::CommsDHKE,
     CommsNode,
@@ -161,7 +170,7 @@ use crate::support::{
     base_node_service_mock::MockBaseNodeService,
     comms_and_services::{create_dummy_message, setup_comms_services},
     comms_rpc::{connect_rpc_client, BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState},
-    utils::{create_wallet_output_from_sender_data, make_input},
+    utils::{create_wallet_output_from_sender_data, make_fake_input_from_copy, make_input},
 };
 
 async fn setup_transaction_service<P: AsRef<Path>>(
     let passphrase = SafePassword::from("My lovely secret passphrase");
     let db = WalletDatabase::new(WalletSqliteDatabase::new(db_connection.clone(), passphrase).unwrap());
 
-    let metadata = ChainMetadata::new(std::i64::MAX as u64, FixedHash::zero(), 0, 0, 0.into(), 0);
+    let metadata = ChainMetadata::new(std::i64::MAX as u64, FixedHash::zero(), 0, 0, 1.into(), 0).unwrap();
 
     db.set_chain_metadata(metadata).unwrap();
@@ -233,6 +242,7 @@ async fn setup_transaction_service<P: AsRef<Path>>(
             kms_backend,
             cipher,
             factories.clone(),
+            WalletType::Software,
         ))
         .add_initializer(TransactionServiceInitializer::<_, _, MemoryDbKeyManager>::new(
            TransactionServiceConfig {
@@ -530,8 +540,8 @@ async fn manage_single_transaction() {
     );
     let temp_dir = tempdir().unwrap();
     let database_path = temp_dir.path().to_str().unwrap().to_string();
-    let (alice_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone()));
-    let (bob_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone()));
+    let alice_connection = make_wallet_database_memory_connection();
+    let bob_connection = make_wallet_database_memory_connection();
 
     let
shutdown = Shutdown::new();
     let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, alice_key_manager_handle, alice_db) =
@@ -595,7 +605,7 @@ async fn manage_single_transaction() {
 
     alice_oms.add_output(uo1.clone(), None).await.unwrap();
     alice_db
-        .mark_output_as_unspent(uo1.hash(&alice_key_manager_handle).await.unwrap())
+        .mark_outputs_as_unspent(vec![(uo1.hash(&alice_key_manager_handle).await.unwrap(), true)])
         .unwrap();
 
     let message = "TAKE MAH MONEYS!".to_string();
@@ -693,8 +703,8 @@ async fn large_interactive_transaction() {
     );
     let temp_dir = tempdir().unwrap();
     let database_path = temp_dir.path().to_str().unwrap().to_string();
-    let (alice_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone()));
-    let (bob_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone()));
+    let alice_connection = make_wallet_database_memory_connection();
+    let bob_connection = make_wallet_database_memory_connection();
 
     // Alice sets up her Transaction Service
     let shutdown = Shutdown::new();
@@ -737,8 +747,9 @@ async fn large_interactive_transaction() {
         .unwrap();
 
     // Alice prepares her large transaction
-    let outputs_count = 1250u64;
+    let outputs_count = 1250usize;
     let output_value = MicroMinotari(20000);
+    let mut unspent: Vec<(FixedHash, bool)> = Vec::with_capacity(outputs_count);
     for _ in 0..outputs_count {
         let uo = make_input(
             &mut OsRng,
@@ -748,11 +759,10 @@ async fn large_interactive_transaction() {
         )
         .await;
         alice_oms.add_output(uo.clone(), None).await.unwrap();
-        alice_db
-            .mark_output_as_unspent(uo.hash(&alice_key_manager_handle).await.unwrap())
-            .unwrap();
+        unspent.push((uo.hash(&alice_key_manager_handle).await.unwrap(), true));
     }
-    let transaction_value = output_value * (outputs_count - 1);
+    alice_db.mark_outputs_as_unspent(unspent).unwrap();
+    let transaction_value = output_value * (outputs_count as u64 - 1);
     let bob_address = TariAddress::new(bob_node_identity.public_key().clone(), network);
 
     let message = "TAKE MAH MONEYS!".to_string();
@@ -829,16 +839,391 @@ async fn large_interactive_transaction() {
         .get_completed_transaction(tx_id)
         .await
         .expect("Could not find tx");
-    assert_eq!(
-        bob_completed_tx.transaction.body.inputs().len(),
-        usize::try_from(outputs_count).unwrap()
-    );
+    assert_eq!(bob_completed_tx.transaction.body.inputs().len(), outputs_count);
     assert_eq!(
         bob_oms.get_balance().await.unwrap().pending_incoming_balance,
         transaction_value
     );
 }
 
+#[allow(clippy::cast_possible_truncation)]
+#[allow(clippy::too_many_lines)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_spend_dust_to_self_in_oversized_transaction() {
+    //` cargo test --release --test wallet_integration_tests
+    //` transaction_service_tests::service::test_spend_dust_to_self_in_oversized_transaction > .\target\output.txt
+    //` 2>&1
+    // env_logger::init(); // Set `$env:RUST_LOG = "trace"`
+
+    let network = Network::LocalNet;
+    let consensus_manager = ConsensusManager::builder(network).build().unwrap();
+    let factories = CryptoFactories::default();
+    let shutdown = Shutdown::new();
+
+    // Alice's wallet parameters
+    let alice_node_identity = Arc::new(NodeIdentity::random(
+        &mut OsRng,
+        get_next_memory_address(),
+        PeerFeatures::COMMUNICATION_NODE,
+    ));
+
+    let bob_node_identity = Arc::new(NodeIdentity::random(
+        &mut OsRng,
+        get_next_memory_address(),
+        PeerFeatures::COMMUNICATION_NODE,
+    ));
+
+    log::info!(
+        "test_spend_dust_to_self_in_oversized_transaction: Alice: '{}', Bob: '{}'",
+        alice_node_identity.node_id().short_str(),
+
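+    // How the oversized-transaction tests size their input sets: they divide
+    // `rpc::RPC_MAX_FRAME_SIZE` by an empirically measured serialized size per input
+    // (roughly 700 bytes for a self-spend and 1175 bytes for a two-party spend, per
+    // the `TransactionTooLarge` errors quoted in the comments below), then add 100
+    // extra inputs so the resulting transaction is guaranteed to overflow one RPC frame.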
bob_node_identity.node_id().short_str(),
+    );
+    let temp_dir = tempdir().unwrap();
+    let database_path = temp_dir.path().to_str().unwrap().to_string();
+    let alice_connection = make_wallet_database_memory_connection();
+
+    let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, alice_key_manager_handle, alice_db) =
+        setup_transaction_service(
+            alice_node_identity.clone(),
+            vec![],
+            consensus_manager.clone(),
+            factories.clone(),
+            alice_connection,
+            database_path.clone(),
+            Duration::from_secs(0),
+            shutdown.to_signal(),
+        )
+        .await;
+
+    // Alice creates dust
+
+    let amount_per_output = 10_000 * uT;
+    // This value was determined by running the test and evaluating the error message,
+    // e.g. `TransactionTooLarge { got: 3379097, expected: 3135488 }`
+    let max_number_of_outputs_in_frame = (rpc::RPC_MAX_FRAME_SIZE as f64 / 700.0f64).ceil() as usize;
+    let number_of_outputs = max_number_of_outputs_in_frame + 100;
+    let mut uo_reference = make_input(
+        &mut OsRng,
+        amount_per_output,
+        &OutputFeatures::default(),
+        &alice_key_manager_handle,
+    )
+    .await;
+    let mut unspent: Vec<(FixedHash, bool)> = Vec::with_capacity(number_of_outputs);
+    for _ in 0..number_of_outputs {
+        let uo = make_fake_input_from_copy(&mut uo_reference, &alice_key_manager_handle).await;
+
+        alice_oms.add_output(uo.clone(), None).await.unwrap();
+        unspent.push((uo.hash(&alice_key_manager_handle).await.unwrap(), true));
+    }
+    alice_db.mark_outputs_as_unspent(unspent).unwrap();
+
+    let balance = alice_oms.get_balance().await.unwrap();
+    let initial_available_balance = balance.available_balance;
+
+    // Alice tries to spend too much dust to self
+
+    let fee_per_gram = MicroMinotari::from(1);
+    let message = "TAKE MAH _OWN_ MONEYS!".to_string();
+    let value = balance.available_balance - amount_per_output * 10;
+    let alice_address = TariAddress::new(alice_node_identity.public_key().clone(), network);
+    assert!(alice_ts
+        .send_transaction(
+            alice_address,
+            value,
+            UtxoSelectionCriteria::default(),
+            OutputFeatures::default(),
+            fee_per_gram,
+            message.clone(),
+        )
+        .await
+        .is_err());
+    let balance = alice_oms.get_balance().await.unwrap();
+    // Encumbered outputs are reinstated
+    assert_eq!(balance.available_balance, initial_available_balance);
+}
+
+#[allow(clippy::cast_possible_truncation)]
+#[allow(clippy::too_many_lines)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_spend_dust_to_other_in_oversized_transaction() {
+    //` cargo test --release --test wallet_integration_tests
+    //` transaction_service_tests::service::test_spend_dust_to_other_in_oversized_transaction > .\target\output.txt
+    //` 2>&1
+    // env_logger::init(); // Set `$env:RUST_LOG = "trace"`
+
+    let network = Network::LocalNet;
+    let consensus_manager = ConsensusManager::builder(network).build().unwrap();
+    let factories = CryptoFactories::default();
+    let shutdown = Shutdown::new();
+
+    // Alice's wallet parameters
+    let alice_node_identity = Arc::new(NodeIdentity::random(
+        &mut OsRng,
+        get_next_memory_address(),
+        PeerFeatures::COMMUNICATION_NODE,
+    ));
+
+    let bob_node_identity = Arc::new(NodeIdentity::random(
+        &mut OsRng,
+        get_next_memory_address(),
+        PeerFeatures::COMMUNICATION_NODE,
+    ));
+
+    log::info!(
+        "test_spend_dust_to_other_in_oversized_transaction: Alice: '{}', Bob: '{}'",
+        alice_node_identity.node_id().short_str(),
+        bob_node_identity.node_id().short_str(),
+    );
+    let temp_dir = tempdir().unwrap();
+    let database_path = temp_dir.path().to_str().unwrap().to_string();
+    let alice_connection =
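+    // Most tests in this file now use `make_wallet_database_memory_connection()`, an
+    // in-memory SQLite database, in place of a file-backed connection; this keeps the
+    // large dust and batch tests fast. `manage_multiple_transactions` is the noted
+    // exception (see the TODO there) and stays on file-backed connections.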
make_wallet_database_memory_connection();
+
+    let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, alice_key_manager_handle, alice_db) =
+        setup_transaction_service(
+            alice_node_identity.clone(),
+            vec![],
+            consensus_manager.clone(),
+            factories.clone(),
+            alice_connection,
+            database_path.clone(),
+            Duration::from_secs(0),
+            shutdown.to_signal(),
+        )
+        .await;
+
+    // Alice creates dust
+
+    let amount_per_output = 10_000 * uT;
+    // This value was determined by running the test and evaluating the error message,
+    // e.g. `TransactionTooLarge { got: 3205068, expected: 3135488 }`
+    let max_number_of_outputs_in_frame = (rpc::RPC_MAX_FRAME_SIZE as f64 / 1175.0f64).ceil() as usize;
+    let number_of_outputs = max_number_of_outputs_in_frame + 100;
+    let mut uo_reference = make_input(
+        &mut OsRng,
+        amount_per_output,
+        &OutputFeatures::default(),
+        &alice_key_manager_handle,
+    )
+    .await;
+    let mut unspent: Vec<(FixedHash, bool)> = Vec::with_capacity(number_of_outputs);
+    for _ in 0..number_of_outputs {
+        let uo = make_fake_input_from_copy(&mut uo_reference, &alice_key_manager_handle).await;
+
+        alice_oms.add_output(uo.clone(), None).await.unwrap();
+        unspent.push((uo.hash(&alice_key_manager_handle).await.unwrap(), true));
+    }
+    alice_db.mark_outputs_as_unspent(unspent).unwrap();
+
+    let balance = alice_oms.get_balance().await.unwrap();
+    let initial_available_balance = balance.available_balance;
+
+    // Alice tries to spend too much dust to Bob
+
+    let fee_per_gram = MicroMinotari::from(1);
+    let message = "GIVE MAH _OWN_ MONEYS AWAY!".to_string();
+    let value = balance.available_balance - amount_per_output * 10;
+    let bob_address = TariAddress::new(bob_node_identity.public_key().clone(), network);
+    let tx_id = alice_ts
+        .send_transaction(
+            bob_address,
+            value,
+            UtxoSelectionCriteria::default(),
+            OutputFeatures::default(),
+            fee_per_gram,
+            message.clone(),
+        )
+        .await
+        .unwrap();
+    println!("tx_id: {}", tx_id);
+
+    let mut count = 0;
+    loop {
+        match alice_ts.get_any_transaction(tx_id).await {
+            Ok(None) => tokio::time::sleep(Duration::from_millis(100)).await,
+            Ok(Some(WalletTransaction::PendingOutbound(_))) => {
+                println!("waited {}ms to detect the transaction", count * 100);
+                break;
+            },
+            _ => {
+                panic!(
+                    "waited {}ms to detect the transaction, unexpected error/inbound/completed!",
+                    count * 100
+                );
+            },
+        }
+        count += 1;
+        if count > 20 * 10 {
+            panic!("waited {}ms but could not detect the transaction!", count * 100);
+        }
+    }
+    // Encumbered outputs are reinstated
+    assert_eq!(balance.available_balance, initial_available_balance);
+}
+
+#[allow(clippy::cast_possible_truncation)]
+#[allow(clippy::too_many_lines)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_spend_dust_happy_path() {
+    //` cargo test --release --test wallet_integration_tests
+    //` transaction_service_tests::service::test_spend_dust_happy_path > .\target\output.txt 2>&1
+    // env_logger::init(); // Set `$env:RUST_LOG = "trace"`
+
+    let network = Network::LocalNet;
+    let consensus_manager = ConsensusManager::builder(network).build().unwrap();
+    let factories = CryptoFactories::default();
+    let shutdown = Shutdown::new();
+
+    // Alice's wallet parameters
+    let alice_node_identity = Arc::new(NodeIdentity::random(
+        &mut OsRng,
+        get_next_memory_address(),
+        PeerFeatures::COMMUNICATION_NODE,
+    ));
+
+    let bob_node_identity = Arc::new(NodeIdentity::random(
+        &mut OsRng,
+        get_next_memory_address(),
+        PeerFeatures::COMMUNICATION_NODE,
+    ));
+
+    log::info!(
+
"manage_single_transaction: Alice: '{}', Bob: '{}'", + alice_node_identity.node_id().short_str(), + bob_node_identity.node_id().short_str(), + ); + let temp_dir = tempdir().unwrap(); + let database_path = temp_dir.path().to_str().unwrap().to_string(); + let alice_connection = make_wallet_database_memory_connection(); + + let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, alice_key_manager_handle, alice_db) = + setup_transaction_service( + alice_node_identity.clone(), + vec![], + consensus_manager.clone(), + factories.clone(), + alice_connection, + database_path.clone(), + Duration::from_secs(0), + shutdown.to_signal(), + ) + .await; + + // Alice create dust + + let amount_per_output = 10_000 * uT; + let number_of_outputs = 1000; + let fee_per_gram = MicroMinotari::from(1); + let mut uo_reference = make_input( + &mut OsRng, + amount_per_output, + &OutputFeatures::default(), + &alice_key_manager_handle, + ) + .await; + let mut unspent: Vec<(FixedHash, bool)> = Vec::with_capacity(number_of_outputs as usize); + for _ in 0..number_of_outputs { + let uo = make_fake_input_from_copy(&mut uo_reference, &alice_key_manager_handle).await; + + alice_oms.add_output(uo.clone(), None).await.unwrap(); + unspent.push((uo.hash(&alice_key_manager_handle).await.unwrap(), true)); + } + alice_db.mark_outputs_as_unspent(unspent).unwrap(); + + let balance = alice_oms.get_balance().await.unwrap(); + let initial_available_balance = balance.available_balance; + + // Alice try to spend a fair amount of dust to self [should succeed] (we just need to verify that the + // transaction is created and that the available balance is correct) + + let message = "TAKE MAH _OWN_ MONEYS!".to_string(); + let value_self = (number_of_outputs / 3) * amount_per_output; + let alice_address = TariAddress::new(alice_node_identity.public_key().clone(), network); + let tx_id = alice_ts + .send_transaction( + alice_address, + value_self, + UtxoSelectionCriteria::default(), + OutputFeatures::default(), + fee_per_gram, + message.clone(), + ) + .await + .unwrap(); + let mut count = 0; + let mut fees_self = loop { + match alice_ts.get_any_transaction(tx_id).await { + Ok(None) => tokio::time::sleep(Duration::from_millis(100)).await, + Ok(Some(WalletTransaction::Completed(tx))) => { + println!("waited {}ms to detect the transaction", count * 100); + break tx.fee; + }, + _ => { + panic!( + "waited {}ms to detect the transaction, unexpected error/inbound/outboubd!", + count * 100 + ); + }, + } + count += 1; + if count > 20 * 10 { + panic!("waited {}ms but could not detect the transaction!", count * 100); + } + }; + fees_self = (fees_self.0 as f64 / amount_per_output.0 as f64).ceil() as u64 * amount_per_output; + let balance = alice_oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + initial_available_balance - value_self - fees_self + ); + + // Alice try to spend a fair amount of dust to Bob [should succeed] (We do not need Bob to be present, + // we just need to verify that the transaction is created and that the available balance is correct) + + let message = "GIVE MAH _OWN_ MONEYS AWAY!".to_string(); + let value_bob = (number_of_outputs / 3) * amount_per_output; + let bob_address = TariAddress::new(bob_node_identity.public_key().clone(), network); + let tx_id = alice_ts + .send_transaction( + bob_address, + value_bob, + UtxoSelectionCriteria::default(), + OutputFeatures::default(), + fee_per_gram, + message.clone(), + ) + .await + .unwrap(); + println!("tx_id: {}", tx_id); + + let mut count = 0; + let 
mut fees_bob = loop { + match alice_ts.get_any_transaction(tx_id).await { + Ok(None) => tokio::time::sleep(Duration::from_millis(100)).await, + Ok(Some(WalletTransaction::PendingOutbound(tx))) => { + println!("waited {}ms to detect the transaction", count * 100); + break tx.fee; + }, + _ => { + panic!( + "waited {}ms to detect the transaction, unexpected error/inbound/completed!", + count * 100 + ); + }, + } + count += 1; + if count > 20 * 10 { + panic!("waited {}ms but could not detect the transaction!", count * 100); + } + }; + fees_bob = (fees_bob.0 as f64 / amount_per_output.0 as f64).ceil() as u64 * amount_per_output; + let balance = alice_oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + initial_available_balance - value_self - fees_self - value_bob - fees_bob + ); +} + #[tokio::test] async fn single_transaction_to_self() { let network = Network::LocalNet; @@ -866,7 +1251,7 @@ async fn single_transaction_to_self() { let temp_dir = tempdir().unwrap(); let database_path = temp_dir.path().to_str().unwrap().to_string(); - let (db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); + let db_connection = make_wallet_database_memory_connection(); let shutdown = Shutdown::new(); let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, key_manager_handle, alice_db) = @@ -893,7 +1278,7 @@ async fn single_transaction_to_self() { alice_oms.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); let message = "TAKE MAH _OWN_ MONEYS!".to_string(); let value = 10000.into(); @@ -950,7 +1335,7 @@ async fn large_coin_split_transaction() { let temp_dir = tempdir().unwrap(); let database_path = temp_dir.path().to_str().unwrap().to_string(); - let (db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); + let db_connection = make_wallet_database_memory_connection(); let shutdown = Shutdown::new(); let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, key_manager_handle, alice_db) = @@ -977,7 +1362,7 @@ async fn large_coin_split_transaction() { alice_oms.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); let fee_per_gram = MicroMinotari::from(1); @@ -1035,7 +1420,7 @@ async fn single_transaction_burn_tari() { let temp_dir = tempdir().unwrap(); let database_path = temp_dir.path().to_str().unwrap().to_string(); - let (db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); + let db_connection = make_wallet_database_memory_connection(); let shutdown = Shutdown::new(); let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, key_manager_handle, alice_db) = @@ -1064,7 +1449,7 @@ async fn single_transaction_burn_tari() { alice_oms.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); let message = "BURN MAH _OWN_ MONEYS!".to_string(); let burn_value = 10000.into(); @@ -1183,7 +1568,7 @@ async fn send_one_sided_transaction_to_other() { let temp_dir = tempdir().unwrap(); let database_path = temp_dir.path().to_str().unwrap().to_string(); - let 
(db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); + let db_connection = make_wallet_database_memory_connection(); let shutdown = Shutdown::new(); let (mut alice_ts, mut alice_oms, _alice_comms, _alice_connectivity, key_manager_handle, alice_db) = @@ -1212,7 +1597,7 @@ async fn send_one_sided_transaction_to_other() { let mut alice_oms_clone = alice_oms.clone(); alice_oms_clone.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); let message = "SEE IF YOU CAN CATCH THIS ONE..... SIDED TX!".to_string(); @@ -1301,8 +1686,8 @@ async fn recover_one_sided_transaction() { let database_path = temp_dir.path().to_str().unwrap().to_string(); let database_path2 = temp_dir2.path().to_str().unwrap().to_string(); - let (alice_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); - let (bob_connection, _tempdir) = make_wallet_database_connection(Some(database_path2.clone())); + let alice_connection = make_wallet_database_memory_connection(); + let bob_connection = make_wallet_database_memory_connection(); let shutdown = Shutdown::new(); let (mut alice_ts, alice_oms, _alice_comms, _alice_connectivity, alice_key_manager_handle, alice_db) = @@ -1355,7 +1740,7 @@ async fn recover_one_sided_transaction() { let mut alice_oms_clone = alice_oms; alice_oms_clone.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); let message = "".to_string(); @@ -1424,7 +1809,7 @@ async fn test_htlc_send_and_claim() { let bob_db_name = format!("{}.sqlite3", random::string(8).as_str()); let bob_db_path = format!("{}/{}", path_string, bob_db_name); - let (db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); + let db_connection = make_wallet_database_memory_connection(); let bob_connection = run_migration_and_create_sqlite_connection(&bob_db_path, 16).unwrap(); let shutdown = Shutdown::new(); @@ -1460,7 +1845,7 @@ async fn test_htlc_send_and_claim() { .await; alice_oms.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); let message = "".to_string(); @@ -1557,7 +1942,7 @@ async fn send_one_sided_transaction_to_self() { let temp_dir = tempdir().unwrap(); let database_path = temp_dir.path().to_str().unwrap().to_string(); - let (alice_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); + let alice_connection = make_wallet_database_memory_connection(); let shutdown = Shutdown::new(); let (alice_ts, alice_oms, _alice_comms, _alice_connectivity, key_manager_handle, alice_db) = @@ -1584,7 +1969,7 @@ async fn send_one_sided_transaction_to_self() { let mut alice_oms_clone = alice_oms; alice_oms_clone.add_output(uo1.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); let message = "SEE IF YOU CAN CATCH THIS ONE..... 
SIDED TX!".to_string(); @@ -1648,6 +2033,7 @@ async fn manage_multiple_transactions() { let database_path = temp_dir.path().to_str().unwrap().to_string(); + // TODO: When using a memory type db connection this test fails at `assert_eq!(tx_reply, 3, "Need 3 replies");` let (alice_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); let (bob_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); let (carol_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); @@ -1727,7 +2113,7 @@ async fn manage_multiple_transactions() { .await; bob_oms.add_output(uo2.clone(), None).await.unwrap(); bob_db - .mark_output_as_unspent(uo2.hash(&bob_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo2.hash(&bob_key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo3 = make_input( &mut OsRng, @@ -1738,7 +2124,7 @@ async fn manage_multiple_transactions() { .await; carol_oms.add_output(uo3.clone(), None).await.unwrap(); carol_db - .mark_output_as_unspent(uo3.hash(&key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo3.hash(&key_manager_handle).await.unwrap(), true)]) .unwrap(); // Add some funds to Alices wallet @@ -1751,7 +2137,7 @@ async fn manage_multiple_transactions() { .await; alice_oms.add_output(uo1a.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1a.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1a.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo1b = make_input( &mut OsRng, @@ -1762,7 +2148,7 @@ async fn manage_multiple_transactions() { .await; alice_oms.add_output(uo1b.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1b.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1b.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo1c = make_input( &mut OsRng, @@ -1773,7 +2159,7 @@ async fn manage_multiple_transactions() { .await; alice_oms.add_output(uo1c.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1c.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1c.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); // A series of interleaved transactions. 
First with Bob and Carol offline and then two with them online @@ -1958,7 +2344,10 @@ async fn test_accepting_unknown_tx_id_and_malformed_reply() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let bob_address = TariAddress::new(bob_node_identity.public_key().clone(), Network::LocalNet); @@ -2057,7 +2446,10 @@ async fn finalize_tx_with_incorrect_pubkey() { .unwrap(); bob_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&bob_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&bob_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let mut stp = bob_ts_interface .output_manager_service_handle @@ -2182,7 +2574,10 @@ async fn finalize_tx_with_missing_output() { .unwrap(); bob_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&bob_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&bob_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let mut stp = bob_ts_interface @@ -2318,7 +2713,7 @@ async fn discovery_async_return_test() { ); let mut shutdown = Shutdown::new(); - let (carol_connection, _temp_dir1) = make_wallet_database_connection(None); + let carol_connection = make_wallet_database_memory_connection(); let (_carol_ts, _carol_oms, carol_comms, _carol_connectivity, key_manager_handle, _carol_db) = setup_transaction_service( @@ -2333,7 +2728,7 @@ async fn discovery_async_return_test() { ) .await; - let (alice_connection, _temp_dir2) = make_wallet_database_connection(None); + let alice_connection = make_wallet_database_memory_connection(); let (mut alice_ts, mut alice_oms, alice_comms, _alice_connectivity, alice_key_manager_handle, alice_db) = setup_transaction_service( @@ -2358,7 +2753,7 @@ async fn discovery_async_return_test() { .await; alice_oms.add_output(uo1a.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1a.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1a.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo1b = make_input( &mut OsRng, @@ -2369,7 +2764,7 @@ async fn discovery_async_return_test() { .await; alice_oms.add_output(uo1b.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1b.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1b.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); let uo1c = make_input( &mut OsRng, @@ -2380,7 +2775,7 @@ async fn discovery_async_return_test() { .await; alice_oms.add_output(uo1c.clone(), None).await.unwrap(); alice_db - .mark_output_as_unspent(uo1c.hash(&alice_key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![(uo1c.hash(&alice_key_manager_handle).await.unwrap(), true)]) .unwrap(); let initial_balance = alice_oms.get_balance().await.unwrap(); @@ -2488,7 +2883,7 @@ async fn discovery_async_return_test() { #[tokio::test] async fn test_power_mode_updates() { let factories = CryptoFactories::default(); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; let tx_backend = alice_ts_interface.ts_db; @@ -2588,10 +2983,10 @@ async fn test_power_mode_updates() { 
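+        // Field renames applied consistently across these mocks and tests:
+        //     TxQueryResponse:       block_hash -> best_block_hash,
+        //                            height_of_longest_chain -> best_block_height
+        //     ChainMetadata (proto): best_block -> best_block_hash,
+        //                            height_of_longest_chain -> best_block_height
+        //     TxQueryBatchResponses: tip_hash -> best_block_hash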
.base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 10, + best_block_height: 10, mined_timestamp: None, }); @@ -2638,7 +3033,7 @@ async fn test_power_mode_updates() { async fn test_set_num_confirmations() { let factories = CryptoFactories::default(); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut ts_interface = setup_transaction_service_no_comms( factories, @@ -2684,7 +3079,7 @@ async fn test_transaction_cancellation() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -2713,7 +3108,10 @@ async fn test_transaction_cancellation() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 100000 * uT; @@ -3034,7 +3432,7 @@ async fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; @@ -3053,7 +3451,10 @@ async fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 100000 * uT; @@ -3094,7 +3495,7 @@ async fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { }, }; assert_eq!(tx_id, msg_tx_id); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); // Test sending the Reply to a receiver with Direct and then with SAF and never both let mut bob_ts_interface = setup_transaction_service_no_comms( @@ -3146,7 +3547,7 @@ async fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { 0, "Should be no more calls" ); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut bob2_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -3248,7 +3649,10 @@ async fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 20000 * uT; @@ -3347,7 +3751,7 @@ async fn test_tx_direct_send_behaviour() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = 
make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; let mut alice_event_stream = alice_ts_interface.transaction_service_handle.get_event_stream(); @@ -3366,7 +3770,10 @@ async fn test_tx_direct_send_behaviour() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let uo = make_input( &mut OsRng, @@ -3382,7 +3789,10 @@ async fn test_tx_direct_send_behaviour() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let uo = make_input( &mut OsRng, @@ -3398,7 +3808,10 @@ async fn test_tx_direct_send_behaviour() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let uo = make_input( &mut OsRng, @@ -3414,7 +3827,10 @@ async fn test_tx_direct_send_behaviour() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 100000 * uT; @@ -3616,13 +4032,13 @@ async fn test_tx_direct_send_behaviour() { async fn test_restarting_transaction_protocols() { let network = Network::LocalNet; let factories = CryptoFactories::default(); - let (alice_connection, _temp_dir) = make_wallet_database_connection(None); + let alice_connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), alice_connection, None).await; let alice_backend = alice_ts_interface.ts_db; - let (bob_connection, _temp_dir2) = make_wallet_database_connection(None); + let bob_connection = make_wallet_database_memory_connection(); let mut bob_ts_interface = setup_transaction_service_no_comms(factories.clone(), bob_connection, None).await; let bob_backend = bob_ts_interface.ts_db; @@ -3845,7 +4261,7 @@ async fn test_transaction_resending() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); // Setup Alice wallet with no comms stack - let (connection, _tempdir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -3874,7 +4290,10 @@ async fn test_transaction_resending() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 100000 * uT; @@ -3912,7 +4331,7 @@ async fn test_transaction_resending() { } // Setup Bob's wallet with no comms stack - let (connection, _tempdir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut bob_ts_interface = setup_transaction_service_no_comms( factories, @@ -4120,7 
+4539,7 @@ async fn test_resend_on_startup() { send_count: 1, last_send_timestamp: Some(Utc::now().naive_utc()), }; - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -4169,7 +4588,7 @@ async fn test_resend_on_startup() { outbound_tx.send_count = 1; outbound_tx.last_send_timestamp = Utc::now().naive_utc().checked_sub_signed(ChronoDuration::seconds(20)); - let (connection2, _temp_dir2) = make_wallet_database_connection(None); + let connection2 = make_wallet_database_memory_connection(); let mut alice2_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -4252,7 +4671,7 @@ async fn test_resend_on_startup() { send_count: 0, last_send_timestamp: Some(Utc::now().naive_utc()), }; - let (bob_connection, _temp_dir) = make_wallet_database_connection(None); + let bob_connection = make_wallet_database_memory_connection(); let mut bob_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -4300,7 +4719,7 @@ async fn test_resend_on_startup() { // Now we do it again with the timestamp prior to the cooldown and see that a message is sent inbound_tx.send_count = 1; inbound_tx.last_send_timestamp = Utc::now().naive_utc().checked_sub_signed(ChronoDuration::seconds(20)); - let (bob_connection2, _temp_dir2) = make_wallet_database_connection(None); + let bob_connection2 = make_wallet_database_memory_connection(); let mut bob2_ts_interface = setup_transaction_service_no_comms( factories, @@ -4359,7 +4778,7 @@ async fn test_replying_to_cancelled_tx() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); // Testing if a Tx Reply is received for a Cancelled Outbound Tx that a Cancelled message is sent back: - let (alice_connection, _tempdir) = make_wallet_database_connection(None); + let alice_connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -4389,7 +4808,10 @@ async fn test_replying_to_cancelled_tx() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 100000 * uT; let bob_address = TariAddress::new(bob_node_identity.public_key().clone(), Network::LocalNet); @@ -4426,7 +4848,7 @@ async fn test_replying_to_cancelled_tx() { .unwrap(); // Setup Bob's wallet with no comms stack - let (bob_connection, _tempdir) = make_wallet_database_connection(None); + let bob_connection = make_wallet_database_memory_connection(); let mut bob_ts_interface = setup_transaction_service_no_comms( factories, @@ -4491,7 +4913,7 @@ async fn test_transaction_timeout_cancellation() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); // Testing if a Tx Reply is received for a Cancelled Outbound Tx that a Cancelled message is sent back: - let (alice_connection, _tempdir) = make_wallet_database_connection(None); + let alice_connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -4521,7 +4943,10 @@ async fn test_transaction_timeout_cancellation() { .unwrap(); alice_ts_interface .oms_db - 
.mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent = 10000 * uT; @@ -4637,7 +5062,7 @@ async fn test_transaction_timeout_cancellation() { send_count: 1, last_send_timestamp: Some(Utc::now().naive_utc()), }; - let (bob_connection, _temp_dir) = make_wallet_database_connection(None); + let bob_connection = make_wallet_database_memory_connection(); let mut bob_ts_interface = setup_transaction_service_no_comms( factories.clone(), @@ -4687,7 +5112,7 @@ async fn test_transaction_timeout_cancellation() { let call = bob_ts_interface.outbound_service_mock_state.pop_call().await.unwrap(); let bob_cancelled_message = try_decode_transaction_cancelled_message(call.1.to_vec()).unwrap(); assert_eq!(bob_cancelled_message.tx_id, tx_id.as_u64()); - let (carol_connection, _temp) = make_wallet_database_connection(None); + let carol_connection = make_wallet_database_memory_connection(); // Now to do this for the Receiver let mut carol_ts_interface = setup_transaction_service_no_comms( @@ -4761,7 +5186,7 @@ async fn transaction_service_tx_broadcast() { let bob_node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; let mut alice_event_stream = alice_ts_interface.transaction_service_handle.get_event_stream(); @@ -4770,7 +5195,7 @@ async fn transaction_service_tx_broadcast() { .wallet_connectivity_service_mock .set_base_node(alice_ts_interface.base_node_identity.to_peer()); - let (connection2, _temp_dir2) = make_wallet_database_connection(None); + let connection2 = make_wallet_database_memory_connection(); let mut bob_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection2, None).await; let alice_output_value = MicroMinotari(250000); @@ -4789,7 +5214,10 @@ async fn transaction_service_tx_broadcast() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let uo2 = make_input( @@ -4806,7 +5234,10 @@ async fn transaction_service_tx_broadcast() { .unwrap(); alice_ts_interface .oms_db - .mark_output_as_unspent(uo2.hash(&alice_ts_interface.key_manager_handle).await.unwrap()) + .mark_outputs_as_unspent(vec![( + uo2.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]) .unwrap(); let amount_sent1 = 100000 * uT; @@ -4995,10 +5426,10 @@ async fn transaction_service_tx_broadcast() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: TransactionServiceConfig::default().num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -5064,10 +5495,10 @@ async fn transaction_service_tx_broadcast() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: TransactionServiceConfig::default().num_confirmations_required, is_synced: true, - 
height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -5127,7 +5558,7 @@ async fn transaction_service_tx_broadcast() { #[tokio::test] async fn broadcast_all_completed_transactions_on_startup() { let factories = CryptoFactories::default(); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; let db = alice_ts_interface.ts_db.clone(); @@ -5208,10 +5639,10 @@ async fn broadcast_all_completed_transactions_on_startup() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: TransactionServiceConfig::default().num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -5267,7 +5698,7 @@ async fn broadcast_all_completed_transactions_on_startup() { async fn test_update_faux_tx_on_oms_validation() { let factories = CryptoFactories::default(); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; let alice_address = TariAddress::new( @@ -5352,18 +5783,19 @@ async fn test_update_faux_tx_on_oms_validation() { .add_output_with_tx_id(tx_id, uo.clone(), None) .await .unwrap(); - let _result = alice_ts_interface - .oms_db - .mark_output_as_unspent(uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap()); + let _result = alice_ts_interface.oms_db.mark_outputs_as_unspent(vec![( + uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), + true, + )]); alice_ts_interface .oms_db - .set_received_output_mined_height_and_status( - uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), - 5, - HashOutput::zero(), - false, - 0, - ) + .set_received_outputs_mined_height_and_statuses(vec![ReceivedOutputInfoForBatch { + commitment: uo.commitment(&alice_ts_interface.key_manager_handle).await.unwrap(), + mined_height: 5, + mined_in_block: FixedHash::zero(), + confirmed: false, + mined_timestamp: 0, + }]) .unwrap(); } @@ -5441,7 +5873,7 @@ async fn test_update_faux_tx_on_oms_validation() { async fn test_update_coinbase_tx_on_oms_validation() { let factories = CryptoFactories::default(); - let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories.clone(), connection, None).await; let alice_address = TariAddress::new( @@ -5529,13 +5961,13 @@ async fn test_update_coinbase_tx_on_oms_validation() { if uo.value != MicroMinotari::from(30000) { alice_ts_interface .oms_db - .set_received_output_mined_height_and_status( - uo.hash(&alice_ts_interface.key_manager_handle).await.unwrap(), - 5, - HashOutput::zero(), - false, - 0, - ) + .set_received_outputs_mined_height_and_statuses(vec![ReceivedOutputInfoForBatch { + commitment: uo.commitment(&alice_ts_interface.key_manager_handle).await.unwrap(), + mined_height: 5, + mined_in_block: FixedHash::zero(), + confirmed: false, + mined_timestamp: 0, + }]) .unwrap(); } } @@ -5613,7 +6045,7 @@ async fn test_update_coinbase_tx_on_oms_validation() { #[tokio::test] async fn test_get_fee_per_gram_per_block_basic() { let factories = CryptoFactories::default(); - 
let (connection, _temp_dir) = make_wallet_database_connection(None); + let connection = make_wallet_database_memory_connection(); let mut alice_ts_interface = setup_transaction_service_no_comms(factories, connection, None).await; let stats = vec![base_node_proto::MempoolFeePerGramStat { order: 0, diff --git a/base_layer/wallet/tests/transaction_service_tests/storage.rs b/base_layer/wallet/tests/transaction_service_tests/storage.rs index aa92522581..bf32ac5ca4 100644 --- a/base_layer/wallet/tests/transaction_service_tests/storage.rs +++ b/base_layer/wallet/tests/transaction_service_tests/storage.rs @@ -115,8 +115,8 @@ pub async fn test_db_backend(backend: T) { let stp = builder.build().await.unwrap(); - let messages = vec!["Hey!".to_string(), "Yo!".to_string(), "Sup!".to_string()]; - let amounts = vec![ + let messages = ["Hey!".to_string(), "Yo!".to_string(), "Sup!".to_string()]; + let amounts = [ MicroMinotari::from(10_000), MicroMinotari::from(23_000), MicroMinotari::from(5_000), diff --git a/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs b/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs index 9e4772b65e..95ed908ad6 100644 --- a/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs +++ b/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs @@ -417,10 +417,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Set Base Node query response to be not stored, as if the base node does not have the tx in its pool rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -447,10 +447,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Set Base Node query response to be InMempool as if the base node does not have the tx in its pool rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::InMempool, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -469,10 +469,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Set base node response to mined and confirmed rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -526,10 +526,10 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { // Accepted in the mempool on submit but not query rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -629,10 +629,10 @@ async fn tx_broadcast_protocol_submit_already_mined() { // Set base node response to mined and confirmed rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 10, + best_block_height: 10, mined_timestamp: None, }); @@ -667,10 +667,10 @@ async fn 
tx_broadcast_protocol_submit_and_base_node_gets_changed() { rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 1, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -711,10 +711,10 @@ async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { // Set new Base Node response to be accepted new_rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::InMempool, - block_hash: None, + best_block_hash: None, confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -787,17 +787,17 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [1u8; 32].to_vec(), + best_block_hash: [1u8; 32].to_vec(), confirmations: 0, - block_height: 1, + best_block_height: 1, mined_timestamp: timestamp, }]; let mut batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [1u8; 32].to_vec(), - height_of_longest_chain: 1, + best_block_hash: [1u8; 32].to_vec(), + best_block_height: 1, tip_mined_timestamp: timestamp, }; @@ -859,17 +859,17 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [5u8; 32].to_vec(), + best_block_hash: [5u8; 32].to_vec(), confirmations: 4, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }]; let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [5u8; 32].to_vec(), - height_of_longest_chain: 5, + best_block_hash: [5u8; 32].to_vec(), + best_block_height: 5, tip_mined_timestamp: timestamp, }; @@ -940,17 +940,17 @@ async fn tx_revalidation() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [5u8; 32].to_vec(), + best_block_hash: [5u8; 32].to_vec(), confirmations: 4, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }]; let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [5u8; 32].to_vec(), - height_of_longest_chain: 5, + best_block_hash: [5u8; 32].to_vec(), + best_block_height: 5, tip_mined_timestamp: timestamp, }; @@ -981,17 +981,17 @@ async fn tx_revalidation() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [5u8; 32].to_vec(), + best_block_hash: [5u8; 32].to_vec(), confirmations: 8, - block_height: 10, + best_block_height: 10, mined_timestamp: timestamp, }]; let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [5u8; 32].to_vec(), - height_of_longest_chain: 10, + best_block_hash: [5u8; 32].to_vec(), + best_block_height: 10, tip_mined_timestamp: timestamp, }; @@ -1101,9 +1101,9 @@ async fn tx_validation_protocol_reorg() { tx1.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: 
TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&5).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&5).unwrap().hash().to_vec(), confirmations: 5, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1111,9 +1111,9 @@ async fn tx_validation_protocol_reorg() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&6).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&6).unwrap().hash().to_vec(), confirmations: 4, - block_height: 6, + best_block_height: 6, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1121,9 +1121,9 @@ async fn tx_validation_protocol_reorg() { tx3.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&7).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&7).unwrap().hash().to_vec(), confirmations: 3, - block_height: 7, + best_block_height: 7, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1131,9 +1131,9 @@ async fn tx_validation_protocol_reorg() { tx4.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), confirmations: 2, - block_height: 8, + best_block_height: 8, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1141,9 +1141,9 @@ async fn tx_validation_protocol_reorg() { tx5.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&9).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&9).unwrap().hash().to_vec(), confirmations: 1, - block_height: 9, + best_block_height: 9, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1151,9 +1151,9 @@ async fn tx_validation_protocol_reorg() { tx6.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), confirmations: 2, - block_height: 8, + best_block_height: 8, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1161,9 +1161,9 @@ async fn tx_validation_protocol_reorg() { tx7.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&9).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&9).unwrap().hash().to_vec(), confirmations: 1, - block_height: 9, + best_block_height: 9, mined_timestamp: timestamp, }, ]; @@ -1171,8 +1171,8 @@ async fn tx_validation_protocol_reorg() { let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: block_headers.get(&10).unwrap().hash().to_vec(), - height_of_longest_chain: 10, + best_block_hash: block_headers.get(&10).unwrap().hash().to_vec(), + best_block_height: 10, tip_mined_timestamp: timestamp, }; @@ -1220,9 +1220,9 @@ async fn tx_validation_protocol_reorg() { tx1.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&5).unwrap().hash().to_vec(), + 
best_block_hash: block_headers.get(&5).unwrap().hash().to_vec(), confirmations: 4, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1230,9 +1230,9 @@ async fn tx_validation_protocol_reorg() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&6).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&6).unwrap().hash().to_vec(), confirmations: 3, - block_height: 6, + best_block_height: 6, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1240,9 +1240,9 @@ async fn tx_validation_protocol_reorg() { tx3.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&7).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&7).unwrap().hash().to_vec(), confirmations: 2, - block_height: 7, + best_block_height: 7, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1250,9 +1250,9 @@ async fn tx_validation_protocol_reorg() { tx5.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), confirmations: 1, - block_height: 8, + best_block_height: 8, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1260,9 +1260,9 @@ async fn tx_validation_protocol_reorg() { tx6.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::NotStored) as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, - block_height: 0, + best_block_height: 0, mined_timestamp: 0, }, TxQueryBatchResponseProto { @@ -1270,9 +1270,9 @@ async fn tx_validation_protocol_reorg() { tx7.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::NotStored) as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, - block_height: 0, + best_block_height: 0, mined_timestamp: 0, }, ]; @@ -1280,8 +1280,8 @@ async fn tx_validation_protocol_reorg() { let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: block_headers.get(&8).unwrap().hash().to_vec(), - height_of_longest_chain: 8, + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_height: 8, tip_mined_timestamp: timestamp, }; diff --git a/base_layer/wallet/tests/utxo_scanner/mod.rs b/base_layer/wallet/tests/utxo_scanner/mod.rs index 414a94335e..ba46a85ec7 100644 --- a/base_layer/wallet/tests/utxo_scanner/mod.rs +++ b/base_layer/wallet/tests/utxo_scanner/mod.rs @@ -313,8 +313,8 @@ async fn test_utxo_scanner_recovery() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -412,8 +412,8 @@ async fn test_utxo_scanner_recovery_with_restart() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: 
block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -578,8 +578,8 @@ async fn test_utxo_scanner_recovery_with_restart_and_reorg() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -651,8 +651,8 @@ async fn test_utxo_scanner_recovery_with_restart_and_reorg() { .set_utxos_by_block(utxos_by_block.clone()); test_interface2.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: 9, - best_block: block_headers.get(&9).unwrap().clone().hash().to_vec(), + best_block_height: 9, + best_block_hash: block_headers.get(&9).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -776,8 +776,8 @@ async fn test_utxo_scanner_scanned_block_cache_clearing() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: 800 + NUM_BLOCKS - 1, - best_block: block_headers + best_block_height: 800 + NUM_BLOCKS - 1, + best_block_hash: block_headers .get(&(800 + NUM_BLOCKS - 1)) .unwrap() .clone() @@ -878,8 +878,8 @@ async fn test_utxo_scanner_one_sided_payments() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -998,8 +998,8 @@ async fn test_utxo_scanner_one_sided_payments() { .set_one_sided_payment_message("new one-sided message".to_string()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS, - best_block: block_headers.get(&(NUM_BLOCKS)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS, + best_block_hash: block_headers.get(&(NUM_BLOCKS)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -1014,7 +1014,7 @@ async fn test_utxo_scanner_one_sided_payments() { test_interface .base_node_service_event_publisher .send(Arc::new(BaseNodeEvent::NewBlockDetected( - chain_metadata.best_block.try_into().unwrap(), + chain_metadata.best_block_hash.try_into().unwrap(), 11, ))) .unwrap(); @@ -1085,8 +1085,8 @@ async fn test_birthday_timestamp_over_chain() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, diff --git a/base_layer/wallet_ffi/Cargo.toml b/base_layer/wallet_ffi/Cargo.toml index 5edb4b8c23..fbc0b61d3b 100644 
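For illustration of the storage-API change repeated throughout the wallet test diffs above: per-output `mark_output_as_unspent(hash)` calls become one batched `mark_outputs_as_unspent` call taking `(hash, flag)` pairs, so a loop of N database updates collapses into a single round-trip. A minimal self-contained Rust sketch of that shape follows; `MockOmsDb` is invented for the example, the real backend is the wallet's output manager database, and every call site in this diff passes `true` for the per-output flag.

// Hedged sketch only: the batched-update shape used by the updated tests.
use std::collections::HashMap;

type HashOutput = [u8; 32];

struct MockOmsDb {
    unspent: HashMap<HashOutput, bool>,
}

impl MockOmsDb {
    // One batched call replaces N single-output round-trips.
    fn mark_outputs_as_unspent(&mut self, updates: Vec<(HashOutput, bool)>) -> Result<(), String> {
        for (hash, flag) in updates {
            self.unspent.insert(hash, flag);
        }
        Ok(())
    }
}

fn main() {
    let mut db = MockOmsDb { unspent: HashMap::new() };
    db.mark_outputs_as_unspent(vec![([1u8; 32], true), ([2u8; 32], true)]).unwrap();
    assert_eq!(db.unspent.len(), 2);
}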
--- a/base_layer/wallet_ffi/Cargo.toml +++ b/base_layer/wallet_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "minotari_wallet_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet C FFI bindings" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] @@ -26,7 +26,7 @@ chrono = { version = "0.4.19", default-features = false, features = ["serde"] } futures = { version = "^0.3.1", features =["compat", "std"]} libc = "0.2.65" log = "0.4.6" -log4rs = { git = "https://github.com/tari-project/log4rs.git", features = ["console_appender", "file_appender", "yaml_format"] } +log4rs = { version = "1.3.0", features = ["console_appender", "file_appender", "yaml_format"] } rand = "0.8" thiserror = "1.0.26" tokio = "1.23" @@ -54,3 +54,4 @@ borsh = "1.2" [build-dependencies] cbindgen = "0.24.3" tari_common = { path = "../../common", features = ["build", "static-application-info"] } +tari_features = { path = "../../common/tari_features", version = "1.0.0-pre.11a" } diff --git a/base_layer/wallet_ffi/README.md b/base_layer/wallet_ffi/README.md index d0489ec810..f00bdf193a 100644 --- a/base_layer/wallet_ffi/README.md +++ b/base_layer/wallet_ffi/README.md @@ -132,8 +132,8 @@ Install [Rust](https://www.rust-lang.org/tools/install) Install the following tools and system images ```Shell Script -rustup toolchain add nightly-2023-06-04 -rustup default nightly-2023-06-04 +rustup toolchain add nightly-2024-02-04 +rustup default nightly-2024-02-04 rustup component add rustfmt --toolchain nightly rustup component add clippy rustup target add x86_64-apple-ios aarch64-apple-ios # iPhone and emulator cross compiling diff --git a/base_layer/wallet_ffi/build.rs b/base_layer/wallet_ffi/build.rs index 397735f78e..52774bb276 100644 --- a/base_layer/wallet_ffi/build.rs +++ b/base_layer/wallet_ffi/build.rs @@ -5,8 +5,10 @@ use std::{env, path::PathBuf}; use cbindgen::{Config, ExportConfig, Language, LineEndingStyle, ParseConfig, Style}; use tari_common::build::StaticApplicationInfo; +use tari_features::resolver::build_features; fn main() { + build_features(); let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); // generate version info diff --git a/base_layer/wallet_ffi/src/callback_handler.rs b/base_layer/wallet_ffi/src/callback_handler.rs index e6a170bcf1..cba4099123 100644 --- a/base_layer/wallet_ffi/src/callback_handler.rs +++ b/base_layer/wallet_ffi/src/callback_handler.rs @@ -660,8 +660,8 @@ where TBackend: TransactionBackend + 'static let state = match state.chain_metadata { None => TariBaseNodeState { node_id: state.node_id, - height_of_longest_chain: 0, - best_block: BlockHash::zero(), + best_block_height: 0, + best_block_hash: BlockHash::zero(), best_block_timestamp: 0, pruning_horizon: 0, pruned_height: 0, @@ -672,8 +672,8 @@ where TBackend: TransactionBackend + 'static Some(chain_metadata) => TariBaseNodeState { node_id: state.node_id, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), - best_block: *chain_metadata.best_block(), + best_block_height: chain_metadata.best_block_height(), + best_block_hash: *chain_metadata.best_block_hash(), best_block_timestamp: chain_metadata.timestamp(), pruning_horizon: chain_metadata.pruning_horizon(), pruned_height: chain_metadata.pruned_height(), diff --git a/base_layer/wallet_ffi/src/callback_handler_tests.rs b/base_layer/wallet_ffi/src/callback_handler_tests.rs index 184fe38b08..7ff55a7bc8 100644 --- a/base_layer/wallet_ffi/src/callback_handler_tests.rs +++ 
b/base_layer/wallet_ffi/src/callback_handler_tests.rs @@ -507,17 +507,15 @@ mod test { 0, 123.into(), ts_now.timestamp_millis() as u64, - ); + ) + .unwrap(); base_node_event_sender .send(Arc::new(BaseNodeEvent::BaseNodeStateChanged(BaseNodeState { node_id: Some(NodeId::new()), chain_metadata: Some(chain_metadata), is_synced: Some(true), - updated: Some(NaiveDateTime::from_timestamp_millis( - ts_now.timestamp_millis() - (60 * 1000), - )) - .unwrap(), + updated: NaiveDateTime::from_timestamp_millis(ts_now.timestamp_millis() - (60 * 1000)), latency: Some(Duration::from_micros(500)), }))) .unwrap(); diff --git a/base_layer/wallet_ffi/src/ffi_basenode_state.rs b/base_layer/wallet_ffi/src/ffi_basenode_state.rs index 4d2880900b..d4c32188bc 100644 --- a/base_layer/wallet_ffi/src/ffi_basenode_state.rs +++ b/base_layer/wallet_ffi/src/ffi_basenode_state.rs @@ -41,10 +41,10 @@ pub struct TariBaseNodeState { pub node_id: Option<NodeId>, /// The current chain height, or the block number of the longest valid chain, or zero if there is no chain - pub height_of_longest_chain: u64, + pub best_block_height: u64, /// The block hash of the current tip of the longest valid chain - pub best_block: BlockHash, + pub best_block_hash: BlockHash, /// Timestamp of the tip block in the longest valid chain pub best_block_timestamp: u64, @@ -56,7 +56,7 @@ pub struct TariBaseNodeState { pub pruning_horizon: u64, /// The height of the pruning horizon. This indicates from what height a full block can be provided - /// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be + /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be /// provided. Archival nodes will always have a `pruned_height` of zero. pub pruned_height: u64, @@ -124,7 +124,7 @@ pub unsafe extern "C" fn basenode_state_get_height_of_the_longest_chain( return 0; } - (*ptr).height_of_longest_chain + (*ptr).best_block_height } /// Extracts a best block hash [`FixedHash`] represented as a vector of bytes wrapped into a `ByteVector` @@ -154,7 +154,7 @@ pub unsafe extern "C" fn basenode_state_get_best_block( return ptr::null_mut(); } - Box::into_raw(Box::new(ByteVector((*ptr).best_block.to_vec()))) + Box::into_raw(Box::new(ByteVector((*ptr).best_block_hash.to_vec()))) } /// Extracts a timestamp of the best block @@ -227,7 +227,7 @@ pub unsafe extern "C" fn basenode_state_get_pruning_horizon( /// /// ## Returns /// `c_ulonglong` - The height of the pruning horizon. This indicates from what height a full block can be provided -/// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be +/// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be /// provided. Archival nodes will always have a `pruned_height` of zero.
/// /// # Safety @@ -345,8 +345,8 @@ mod tests { let boxed_state = Box::into_raw(Box::new(TariBaseNodeState { node_id: Some(original_node_id.clone()), - height_of_longest_chain: 123, - best_block: original_best_block, + best_block_height: 123, + best_block_hash: original_best_block, best_block_timestamp: 12345, pruning_horizon: 456, pruned_height: 789, diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 807c68ecf9..ef7cdff9de 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -114,16 +114,20 @@ use minotari_wallet::{ }; use num_traits::FromPrimitive; use rand::rngs::OsRng; -use tari_common::configuration::{MultiaddrList, StringList}; +use tari_common::{ + configuration::{MultiaddrList, StringList}, + network_check::set_network_if_choice_valid, +}; use tari_common_types::{ emoji::emoji_set, tari_address::{TariAddress, TariAddressError}, transaction::{TransactionDirection, TransactionStatus, TxId}, types::{ComAndPubSignature, Commitment, PublicKey, SignatureWithDomain}, + wallet_types::WalletType, }; use tari_comms::{ multiaddr::Multiaddr, - peer_manager::NodeIdentity, + peer_manager::{NodeIdentity, PeerQuery}, transports::MemoryTransport, types::CommsPublicKey, }; @@ -288,7 +292,9 @@ pub struct TariUtxo { pub value: u64, pub mined_height: u64, pub mined_timestamp: u64, + pub lock_height: u64, pub status: u8, + pub coinbase_extra: Vec<u8>, } impl From<DbWalletOutput> for TariUtxo { @@ -299,6 +305,7 @@ .into_raw(), value: x.wallet_output.value.as_u64(), mined_height: x.mined_height.unwrap_or(0), + lock_height: x.wallet_output.features.maturity, mined_timestamp: x .mined_timestamp .map(|ts| ts.timestamp_millis() as u64) @@ -316,6 +323,7 @@ OutputStatus::SpentMinedUnconfirmed => 9, OutputStatus::NotStored => 10, }, + coinbase_extra: x.wallet_output.features.coinbase_extra, } } } @@ -492,7 +500,7 @@ pub unsafe extern "C" fn create_tari_vector(tag: TariTypeTag) -> *mut TariVector tag, len: v.len(), cap: v.capacity(), - ptr: v.as_mut_ptr() as *mut c_void, + ptr: v.as_mut_ptr(), })) } @@ -1237,6 +1245,38 @@ pub unsafe extern "C" fn tari_address_to_emoji_id( CString::into_raw(result) } +/// Creates a char array from a TariWalletAddress's network +/// +/// ## Arguments +/// `address` - The pointer to a TariWalletAddress +/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions +/// as an out parameter. +/// +/// ## Returns +/// `*mut c_char` - Returns a pointer to a char array.
Note that it returns empty +/// if there was an error from TariWalletAddress +/// +/// # Safety +/// The ```string_destroy``` method must be called when finished with a string from rust to prevent a memory leak +#[no_mangle] +pub unsafe extern "C" fn tari_address_network(address: *mut TariWalletAddress, error_out: *mut c_int) -> *mut c_char { + let mut error = 0; + let mut result = CString::new("").expect("Blank CString will not fail."); + ptr::swap(error_out, &mut error as *mut c_int); + if address.is_null() { + error = LibWalletError::from(InterfaceError::NullError("address".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return CString::into_raw(result); + } + let network_string = address + .as_ref() + .expect("Address should not be empty") + .network() + .to_string(); + result = CString::new(network_string).expect("string will not fail."); + CString::into_raw(result) +} + /// Creates a TariWalletAddress from a char array in emoji format /// /// ## Arguments @@ -2467,8 +2507,8 @@ pub unsafe extern "C" fn seed_words_get_at( error = LibWalletError::from(InterfaceError::NullError("seed words".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); } else { - let len = (*seed_words).0.len() - 1; // clamp to length - if position > len as u32 { + let len = (*seed_words).0.len(); // clamp to length + if (*seed_words).0.is_empty() || position > (len - 1) as u32 { error = LibWalletError::from(InterfaceError::PositionInvalidError).code; ptr::swap(error_out, &mut error as *mut c_int); } else if let Ok(v) = CString::new( @@ -4884,6 +4924,7 @@ pub unsafe extern "C" fn comms_list_connected_public_keys( let mut connectivity = (*wallet).wallet.comms.connectivity(); let peer_manager = (*wallet).wallet.comms.peer_manager(); + #[allow(clippy::blocks_in_conditions)] match (*wallet).runtime.block_on(async move { let connections = connectivity.get_active_connections().await?; let mut public_keys = Vec::with_capacity(connections.len()); @@ -5225,6 +5266,8 @@ pub unsafe extern "C" fn wallet_create( passphrase: *const c_char, seed_words: *const TariSeedWords, network_str: *const c_char, + peer_seed_str: *const c_char, + dns_sec: bool, callback_received_transaction: unsafe extern "C" fn(*mut TariPendingInboundTransaction), callback_received_transaction_reply: unsafe extern "C" fn(*mut TariCompletedTransaction), @@ -5287,6 +5330,32 @@ pub unsafe extern "C" fn wallet_create( SafePassword::from(pf) }; + let peer_seed = if peer_seed_str.is_null() { + error = LibWalletError::from(InterfaceError::NullError("peer seed dns".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); + } else { + let peer_seed = CStr::from_ptr(peer_seed_str) + .to_str() + .expect("A non-null peer seed should be able to be converted to string"); + info!(target: LOG_TARGET, "peer seed dns {}", peer_seed); + peer_seed + }; + + let recovery_seed = if seed_words.is_null() { + None + } else { + match CipherSeed::from_mnemonic(&(*seed_words).0, None) { + Ok(seed) => Some(seed), + Err(e) => { + error!(target: LOG_TARGET, "Mnemonic Error for given seed words: {:?}", e); + error = LibWalletError::from(WalletError::KeyManagerError(e)).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); + }, + } + }; + let network = if network_str.is_null() { error = LibWalletError::from(InterfaceError::NullError("network".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); @@ -5306,19 +5375,12 @@ pub unsafe extern "C" fn wallet_create( }, } }; - - 
let recovery_seed = if seed_words.is_null() { - None - } else { - match CipherSeed::from_mnemonic(&(*seed_words).0, None) { - Ok(seed) => Some(seed), - Err(e) => { - error!(target: LOG_TARGET, "Mnemonic Error for given seed words: {:?}", e); - error = LibWalletError::from(WalletError::KeyManagerError(e)).code; - ptr::swap(error_out, &mut error as *mut c_int); - return ptr::null_mut(); - }, - } + // Set the static network variable according to the user chosen network (for use with + // `get_current_or_user_setting_or_default()`) - + if let Err(e) = set_network_if_choice_valid(network) { + error = LibWalletError::from(InterfaceError::InvalidArgument(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); }; let runtime = match Runtime::new() { @@ -5438,7 +5500,8 @@ pub unsafe extern "C" fn wallet_create( let peer_seeds = PeerSeedsConfig { dns_seeds_name_server: DEFAULT_DNS_NAME_SERVER.parse().unwrap(), - dns_seeds_use_dnssec: true, + dns_seeds_use_dnssec: dns_sec, + dns_seeds: StringList::from(vec![peer_seed.to_string()]), ..Default::default() }; @@ -5467,16 +5530,11 @@ pub unsafe extern "C" fn wallet_create( key_manager_backend, shutdown.to_signal(), master_seed, + WalletType::Software, )); match w { Ok(w) => { - // lets ensure the wallet tor_id is saved, this could have been changed during wallet startup - if let Some(hs) = w.comms.hidden_service() { - if let Err(e) = w.db.set_tor_identity(hs.tor_identity().clone()) { - warn!(target: LOG_TARGET, "Could not save tor identity to db: {:?}", e); - } - } let wallet_address = TariAddress::new(w.comms.node_identity().public_key().clone(), w.network.as_network()); // Start Callback Handler @@ -5512,16 +5570,6 @@ pub unsafe extern "C" fn wallet_create( runtime.spawn(callback_handler.start()); - let mut ts = w.transaction_service.clone(); - runtime.spawn(async move { - if let Err(e) = ts.restart_transaction_protocols().await { - warn!( - target: LOG_TARGET, - "Could not restart transaction negotiation protocols: {:?}", e - ); - } - }); - let tari_wallet = TariWallet { wallet: w, runtime, @@ -5721,7 +5769,7 @@ pub unsafe extern "C" fn wallet_get_utxos( }], }; - match (*wallet).wallet.output_db.fetch_outputs_by(q) { + match (*wallet).wallet.output_db.fetch_outputs_by_query(q) { Ok(outputs) => { ptr::replace(error_ptr, 0); Box::into_raw(Box::new(TariVector::from(outputs))) @@ -5791,7 +5839,7 @@ pub unsafe extern "C" fn wallet_get_all_utxos(wallet: *mut TariWallet, error_ptr sorting: vec![], }; - match (*wallet).wallet.output_db.fetch_outputs_by(q) { + match (*wallet).wallet.output_db.fetch_outputs_by_query(q) { Ok(outputs) => { ptr::replace(error_ptr, 0); Box::into_raw(Box::new(TariVector::from(outputs))) @@ -6291,7 +6339,7 @@ pub unsafe extern "C" fn wallet_verify_message_signature( /// # Safety /// None #[no_mangle] -pub unsafe extern "C" fn wallet_add_base_node_peer( +pub unsafe extern "C" fn wallet_set_base_node_peer( wallet: *mut TariWallet, public_key: *mut TariPublicKey, address: *const c_char, @@ -6311,23 +6359,18 @@ pub unsafe extern "C" fn wallet_add_base_node_peer( return false; } - let parsed_addr; - if address.is_null() { - error = LibWalletError::from(InterfaceError::NullError("address".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; + let parsed_addr = if address.is_null() { + None } else { match CStr::from_ptr(address).to_str() { - Ok(v) => { - parsed_addr = match Multiaddr::from_str(v) { - Ok(v) => v, - Err(_) => { - error = 
LibWalletError::from(InterfaceError::InvalidArgument("address is invalid".to_string())) - .code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - }, - } + Ok(v) => match Multiaddr::from_str(v) { + Ok(v) => Some(v), + Err(_) => { + error = + LibWalletError::from(InterfaceError::InvalidArgument("address is invalid".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return false; + }, }, _ => { error = LibWalletError::from(InterfaceError::PointerError("address".to_string())).code; @@ -6335,7 +6378,7 @@ pub unsafe extern "C" fn wallet_add_base_node_peer( return false; }, } - } + }; if let Err(e) = (*wallet) .runtime @@ -6347,6 +6390,46 @@ pub unsafe extern "C" fn wallet_add_base_node_peer( } true } +/// Gets all seed peers known by the wallet +/// +/// ## Arguments +/// `wallet` - The TariWallet pointer +/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions +/// as an out parameter. +/// +/// ## Returns +/// `TariPublicKeys` - Returns a list of all known public keys +/// +/// # Safety +/// None +#[no_mangle] +pub unsafe extern "C" fn wallet_get_seed_peers(wallet: *mut TariWallet, error_out: *mut c_int) -> *mut TariPublicKeys { + let mut error = 0; + ptr::swap(error_out, &mut error as *mut c_int); + if wallet.is_null() { + error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); + } + let peer_manager = (*wallet).wallet.comms.peer_manager(); + let query = PeerQuery::new().select_where(|p| p.is_seed()); + #[allow(clippy::blocks_in_conditions)] + match (*wallet).runtime.block_on(async move { + let peers = peer_manager.perform_query(query).await?; + let mut public_keys = Vec::with_capacity(peers.len()); + for peer in peers { + public_keys.push(peer.public_key); + } + Result::<_, WalletError>::Ok(public_keys) + }) { + Ok(public_keys) => Box::into_raw(Box::new(TariPublicKeys(public_keys))), + Err(e) => { + error = LibWalletError::from(e).code; + ptr::swap(error_out, &mut error as *mut c_int); + ptr::null_mut() + }, + } +} /// Upserts a TariContact to the TariWallet. If the contact does not exist it will be Inserted. If it does exist the /// Alias will be updated. 
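As a hedged illustration of calling the new `wallet_get_seed_peers` function above, in the style of the FFI tests later in this diff: `wallet_ptr` is assumed to come from a successful `wallet_create`, and `public_keys_destroy` is assumed to be the matching destructor for `TariPublicKeys` (check `wallet.h` for the exact cleanup call).

// Illustrative only; assumes wallet_ptr: *mut TariWallet from wallet_create.
unsafe {
    let mut error = 0;
    let error_ptr = &mut error as *mut c_int;
    let seed_peers = wallet_get_seed_peers(wallet_ptr, error_ptr);
    assert_eq!(error, 0);
    assert!(!seed_peers.is_null());
    // TariPublicKeys wraps the list of public keys built by perform_query above.
    public_keys_destroy(seed_peers); // assumed destructor; verify against wallet.h
}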
@@ -8175,7 +8258,7 @@ pub unsafe extern "C" fn wallet_set_one_sided_payment_message( pub unsafe extern "C" fn get_emoji_set() -> *mut EmojiSet { let current_emoji_set = emoji_set(); let mut emoji_set: Vec<ByteVector> = Vec::with_capacity(current_emoji_set.len()); - for emoji in current_emoji_set.iter() { + for emoji in &current_emoji_set { let mut b = [0; 4]; // emojis are 4 bytes, unicode character let emoji_char = ByteVector(emoji.encode_utf8(&mut b).as_bytes().to_vec()); emoji_set.push(emoji_char); @@ -8905,6 +8988,11 @@ mod test { // assert!(true); //optimized out by compiler } + #[cfg(tari_target_network_mainnet)] + const NETWORK_STRING: &str = "stagenet"; + #[cfg(tari_target_network_nextnet)] + const NETWORK_STRING: &str = "nextnet"; + #[cfg(not(any(tari_target_network_mainnet, tari_target_network_nextnet)))] const NETWORK_STRING: &str = "localnet"; #[test] @@ -9490,6 +9578,7 @@ mod test { let passphrase: *const c_char = CString::into_raw(CString::new("Hello from Alasca").unwrap()) as *const c_char; + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config, ptr::null(), @@ -9499,6 +9588,8 @@ passphrase, ptr::null(), alice_network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -9543,6 +9634,8 @@ passphrase, ptr::null(), alice_network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -9646,7 +9739,7 @@ mod test { let passphrase: *const c_char = CString::into_raw(CString::new("dolphis dancing in the coastal waters").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config, ptr::null(), @@ -9656,6 +9749,8 @@ passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -9870,7 +9965,7 @@ mod test { let passphrase: *const c_char = CString::into_raw(CString::new("a cat outside in Istanbul").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let wallet = wallet_create( config, ptr::null(), @@ -9880,6 +9975,8 @@ passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -9934,6 +10031,7 @@ mod test { let log_path: *const c_char = CString::into_raw(CString::new(temp_dir.path().join("asdf").to_str().unwrap()).unwrap()) as *const c_char; + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let recovered_wallet = wallet_create( config, log_path, @@ -9943,6 +10041,8 @@ passphrase, seed_words, network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -9979,7 +10079,6 @@ mod test { #[allow(clippy::too_many_lines)] fn test_wallet_get_utxos() { unsafe { - let key_manager = create_memory_db_key_manager(); let mut error = 0; let error_ptr = &mut error as *mut c_int; let mut recovery_in_progress = true; @@ -10010,7 +10109,7 @@ mod test { let passphrase: *const c_char = CString::into_raw(CString::new("Satoshi Nakamoto").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config,
ptr::null(), @@ -10020,6 +10119,8 @@ passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -10040,14 +10141,20 @@ recovery_in_progress_ptr, error_ptr, ); + let alice_wallet_runtime = &(*alice_wallet).runtime; + let key_manager = &(*alice_wallet).wallet.key_manager_service; assert_eq!(error, 0); - for i in 0..10 { - let uout = (*alice_wallet) - .runtime - .block_on(create_test_input((1000 * i).into(), 0, &key_manager)); - (*alice_wallet) - .runtime + let mut test_outputs = Vec::with_capacity(10); + for i in 0..10u8 { + let uout = alice_wallet_runtime.block_on(create_test_input( + (1000u64 * u64::from(i)).into(), + 0, + key_manager, + vec![i, i + 1, i + 2, i + 3, i + 4], + )); + test_outputs.push(uout.clone()); + alice_wallet_runtime .block_on((*alice_wallet).wallet.output_manager_service.add_output(uout, None)) .unwrap(); } @@ -10062,7 +10169,7 @@ 3000, error_ptr, ); - let utxos: &[TariUtxo] = slice::from_raw_parts_mut((*outputs).ptr as *mut TariUtxo, (*outputs).len); + let utxos: &[TariUtxo] = slice::from_raw_parts((*outputs).ptr as *mut TariUtxo, (*outputs).len); assert_eq!(error, 0); assert_eq!((*outputs).len, 6); assert_eq!(utxos.len(), 6); @@ -10073,6 +10180,22 @@ .fold((true, utxos[0].value), |acc, x| { (acc.0 && x.value > acc.1, x.value) }) .0 ); + for utxo in utxos { + let output = test_outputs + .iter() + .find(|val| { + alice_wallet_runtime + .block_on(val.commitment(key_manager)) + .unwrap() + .to_hex() == + CStr::from_ptr(utxo.commitment).to_str().unwrap() + }) + .unwrap(); + assert_eq!(output.value.as_u64(), utxo.value); + assert_eq!(output.features.maturity, utxo.lock_height); + assert_eq!(output.features.coinbase_extra, utxo.coinbase_extra); + } + println!(); destroy_tari_vector(outputs); // descending order @@ -10085,7 +10208,7 @@ 3000, error_ptr, ); - let utxos: &[TariUtxo] = slice::from_raw_parts_mut((*outputs).ptr as *mut TariUtxo, (*outputs).len); + let utxos: &[TariUtxo] = slice::from_raw_parts((*outputs).ptr as *mut TariUtxo, (*outputs).len); assert_eq!(error, 0); assert_eq!((*outputs).len, 6); assert_eq!(utxos.len(), 6); @@ -10159,7 +10282,7 @@ mod test { let passphrase: *const c_char = CString::into_raw(CString::new("J-bay open corona").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config, ptr::null(), @@ -10169,6 +10292,8 @@ passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -10196,6 +10321,7 @@ (1000 * i).into(), 0, &(*alice_wallet).wallet.key_manager_service, + vec![], )); (*alice_wallet) .runtime @@ -10209,12 +10335,13 @@ (*alice_wallet) .wallet .output_db - .mark_output_as_unspent( + .mark_outputs_as_unspent(vec![( (*alice_wallet) .runtime .block_on(uo.hash(&(*alice_wallet).wallet.key_manager_service)) .unwrap(), - ) + true, + )]) .unwrap(); } @@ -10235,7 +10362,7 @@ .map(|x| CStr::from_ptr(x.commitment).to_str().unwrap().to_owned()) .collect::<Vec<String>>(); - let commitments = Box::into_raw(Box::new(TariVector::from(payload))) as *mut TariVector; + let commitments = Box::into_raw(Box::new(TariVector::from(payload))); let result = wallet_coin_join(alice_wallet, commitments, 5, error_ptr); assert_eq!(error, 0); assert!(result > 0); @@ -10292,7 +10419,7 @@ mod test { let
passphrase: *const c_char = CString::into_raw(CString::new("The master and margarita").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config, ptr::null(), @@ -10302,6 +10429,8 @@ passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -10329,6 +10458,7 @@ (15000 * i).into(), 0, &(*alice_wallet).wallet.key_manager_service, + vec![], )); (*alice_wallet) .runtime @@ -10342,12 +10472,13 @@ (*alice_wallet) .wallet .output_db - .mark_output_as_unspent( + .mark_outputs_as_unspent(vec![( (*alice_wallet) .runtime .block_on(uo.hash(&(*alice_wallet).wallet.key_manager_service)) .unwrap(), - ) + true, + )]) .unwrap(); } @@ -10373,7 +10504,7 @@ .map(|x| CStr::from_ptr(x.commitment).to_str().unwrap().to_owned()) .collect::<Vec<String>>(); - let commitments = Box::into_raw(Box::new(TariVector::from(payload))) as *mut TariVector; + let commitments = Box::into_raw(Box::new(TariVector::from(payload))); let preview = wallet_preview_coin_join(alice_wallet, commitments, 5, error_ptr); assert_eq!(error, 0); @@ -10397,7 +10528,7 @@ .map(|x| CStr::from_ptr(x.commitment).to_str().unwrap().to_owned()) .collect::<Vec<String>>(); - let commitments = Box::into_raw(Box::new(TariVector::from(payload))) as *mut TariVector; + let commitments = Box::into_raw(Box::new(TariVector::from(payload))); let result = wallet_coin_join(alice_wallet, commitments, 5, error_ptr); assert_eq!(error, 0); assert!(result > 0); @@ -10405,7 +10536,7 @@ let unspent_outputs = (*alice_wallet) .wallet .output_db - .fetch_outputs_by(OutputBackendQuery { + .fetch_outputs_by_query(OutputBackendQuery { status: vec![OutputStatus::Unspent], ..Default::default() }) @@ -10417,7 +10548,7 @@ let new_pending_outputs = (*alice_wallet) .wallet .output_db - .fetch_outputs_by(OutputBackendQuery { + .fetch_outputs_by_query(OutputBackendQuery { status: vec![OutputStatus::EncumberedToBeReceived], ..Default::default() }) @@ -10505,7 +10636,7 @@ ); let passphrase: *const c_char = CString::into_raw(CString::new("niao").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config, ptr::null(), @@ -10515,6 +10646,8 @@ passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -10541,6 +10674,7 @@ (15000 * i).into(), 0, &(*alice_wallet).wallet.key_manager_service, + vec![], )); (*alice_wallet) .runtime @@ -10554,12 +10688,13 @@ (*alice_wallet) .wallet .output_db - .mark_output_as_unspent( + .mark_outputs_as_unspent(vec![( (*alice_wallet) .runtime .block_on(uo.hash(&(*alice_wallet).wallet.key_manager_service)) .unwrap(), - ) + true, + )]) .unwrap(); } @@ -10585,7 +10720,7 @@ .map(|x| CStr::from_ptr(x.commitment).to_str().unwrap().to_owned()) .collect::<Vec<String>>(); - let commitments = Box::into_raw(Box::new(TariVector::from(payload))) as *mut TariVector; + let commitments = Box::into_raw(Box::new(TariVector::from(payload))); let preview = wallet_preview_coin_split(alice_wallet, commitments, 3, 5, error_ptr); assert_eq!(error, 0); @@ -10611,7 +10746,7 @@ .map(|x| CStr::from_ptr(x.commitment).to_str().unwrap().to_owned()) .collect::<Vec<String>>(); - let commitments =
Box::into_raw(Box::new(TariVector::from(payload))) as *mut TariVector; + let commitments = Box::into_raw(Box::new(TariVector::from(payload))); let result = wallet_coin_split(alice_wallet, commitments, 3, 5, error_ptr); assert_eq!(error, 0); @@ -10620,7 +10755,7 @@ mod test { let unspent_outputs = (*alice_wallet) .wallet .output_db - .fetch_outputs_by(OutputBackendQuery { + .fetch_outputs_by_query(OutputBackendQuery { status: vec![OutputStatus::Unspent], ..Default::default() }) @@ -10632,7 +10767,7 @@ mod test { let new_pending_outputs = (*alice_wallet) .wallet .output_db - .fetch_outputs_by(OutputBackendQuery { + .fetch_outputs_by_query(OutputBackendQuery { status: vec![OutputStatus::EncumberedToBeReceived], ..Default::default() }) @@ -10726,7 +10861,7 @@ mod test { ); let passphrase: *const c_char = CString::into_raw(CString::new("niao").unwrap()) as *const c_char; - + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet = wallet_create( alice_config, ptr::null(), @@ -10736,6 +10871,8 @@ mod test { passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -10758,15 +10895,18 @@ mod test { ); assert_eq!(error, 0); - let key_manager = create_memory_db_key_manager(); + let key_manager = &(*alice_wallet).wallet.key_manager_service; for i in 1..=5 { (*alice_wallet) .runtime .block_on( (*alice_wallet).wallet.output_manager_service.add_output( - (*alice_wallet) - .runtime - .block_on(create_test_input((15000 * i).into(), 0, &key_manager)), + (*alice_wallet).runtime.block_on(create_test_input( + (15000 * i).into(), + 0, + key_manager, + vec![], + )), None, ), ) @@ -10805,7 +10945,7 @@ mod test { assert_eq!(error, 0); assert_eq!((*tv).tag, TariTypeTag::Text); assert_eq!((*tv).len, 1); - assert_eq!((*tv).cap, 1); + assert_eq!((*tv).cap, 12); tari_vector_push_string( tv, @@ -10815,7 +10955,7 @@ mod test { assert_eq!(error, 0); assert_eq!((*tv).tag, TariTypeTag::Text); assert_eq!((*tv).len, 2); - assert_eq!((*tv).cap, 2); + assert_eq!((*tv).cap, 12); tari_vector_push_string( tv, @@ -10825,7 +10965,7 @@ mod test { assert_eq!(error, 0); assert_eq!((*tv).tag, TariTypeTag::Text); assert_eq!((*tv).len, 3); - assert_eq!((*tv).cap, 3); + assert_eq!((*tv).cap, 12); destroy_tari_vector(tv); } @@ -10978,6 +11118,7 @@ mod test { error_ptr, ); let passphrase: *const c_char = CString::into_raw(CString::new("niao").unwrap()) as *const c_char; + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let wallet_ptr = wallet_create( config, ptr::null(), @@ -10987,6 +11128,8 @@ mod test { passphrase, ptr::null(), network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -11008,13 +11151,15 @@ mod test { error_ptr, ); assert_eq!(error, 0); + let key_manager = &(*wallet_ptr).wallet.key_manager_service; + let node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); let base_node_peer_public_key_ptr = Box::into_raw(Box::new(node_identity.public_key().clone())); let base_node_peer_address_ptr = CString::into_raw(CString::new(node_identity.first_public_address().unwrap().to_string()).unwrap()) as *const c_char; - wallet_add_base_node_peer( + wallet_set_base_node_peer( wallet_ptr, base_node_peer_public_key_ptr, base_node_peer_address_ptr, @@ -11022,14 +11167,13 @@ mod test { ); // Test the consistent features case - let 
key_manager = create_memory_db_key_manager(); let utxo_1 = runtime .block_on(create_wallet_output_with_data( script!(Nop), OutputFeatures::default(), - &runtime.block_on(TestParams::new(&key_manager)), + &runtime.block_on(TestParams::new(key_manager)), MicroMinotari(1234u64), - &key_manager, + key_manager, )) .unwrap(); let amount = utxo_1.value.as_u64(); @@ -11216,6 +11360,7 @@ mod test { error_ptr, ); let passphrase: *const c_char = CString::into_raw(CString::new("niao").unwrap()) as *const c_char; + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let alice_wallet_ptr = wallet_create( alice_config, ptr::null(), @@ -11225,6 +11370,8 @@ mod test { passphrase, ptr::null(), alice_network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -11276,6 +11423,7 @@ mod test { error_ptr, ); let passphrase: *const c_char = CString::into_raw(CString::new("niao").unwrap()) as *const c_char; + let dns_string: *const c_char = CString::into_raw(CString::new("").unwrap()) as *const c_char; let bob_wallet_ptr = wallet_create( bob_config, ptr::null(), @@ -11285,6 +11433,8 @@ mod test { passphrase, ptr::null(), bob_network_str, + dns_string, + false, received_tx_callback, received_tx_reply_callback, received_tx_finalized_callback, @@ -11322,7 +11472,7 @@ mod test { let bob_peer_address_ptr = CString::into_raw(CString::new(bob_node_identity.first_public_address().unwrap().to_string()).unwrap()) as *const c_char; - wallet_add_base_node_peer( + wallet_set_base_node_peer( alice_wallet_ptr, bob_peer_public_key_ptr, bob_peer_address_ptr, @@ -11337,7 +11487,7 @@ mod test { let alice_peer_address_ptr = CString::into_raw( CString::new(alice_node_identity.first_public_address().unwrap().to_string()).unwrap(), ) as *const c_char; - wallet_add_base_node_peer( + wallet_set_base_node_peer( bob_wallet_ptr, alice_peer_public_key_ptr, alice_peer_address_ptr, diff --git a/base_layer/wallet_ffi/wallet.h b/base_layer/wallet_ffi/wallet.h index 329e4fa086..4588864b98 100644 --- a/base_layer/wallet_ffi/wallet.h +++ b/base_layer/wallet_ffi/wallet.h @@ -196,6 +196,8 @@ struct TransportConfig; */ struct UnblindedOutput; +struct Vec_u8; + /** * -------------------------------- Vector ------------------------------------------------ /// */ @@ -345,7 +347,9 @@ struct TariUtxo { uint64_t value; uint64_t mined_height; uint64_t mined_timestamp; + uint64_t lock_height; uint8_t status; + struct Vec_u8 coinbase_extra; }; #ifdef __cplusplus @@ -768,6 +772,24 @@ TariWalletAddress *tari_address_from_hex(const char *address, char *tari_address_to_emoji_id(TariWalletAddress *address, int *error_out); +/** + * Creates a char array from a TariWalletAddress's network + * + * ## Arguments + * `address` - The pointer to a TariWalletAddress + * `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions + * as an out parameter. + * + * ## Returns + * `*mut c_char` - Returns a pointer to a char array. 
Note that it returns empty + * if there was an error from TariWalletAddress + * + * # Safety + * The ```string_destroy``` method must be called when finished with a string from rust to prevent a memory leak + */ +char *tari_address_network(TariWalletAddress *address, + int *error_out); + /** * Creates a TariWalletAddress from a char array in emoji format * @@ -2688,6 +2710,8 @@ struct TariWallet *wallet_create(TariCommsConfig *config, const char *passphrase, const struct TariSeedWords *seed_words, const char *network_str, + const char *peer_seed_str, + bool dns_sec, void (*callback_received_transaction)(TariPendingInboundTransaction*), void (*callback_received_transaction_reply)(TariCompletedTransaction*), void (*callback_received_finalized_transaction)(TariCompletedTransaction*), @@ -2975,11 +2999,28 @@ bool wallet_verify_message_signature(struct TariWallet *wallet, * # Safety * None */ -bool wallet_add_base_node_peer(struct TariWallet *wallet, +bool wallet_set_base_node_peer(struct TariWallet *wallet, TariPublicKey *public_key, const char *address, int *error_out); +/** + * Gets all seed peers known by the wallet + * + * ## Arguments + * `wallet` - The TariWallet pointer + * `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions + * as an out parameter. + * + * ## Returns + * `TariPublicKeys` - Returns a list of all known public keys + * + * # Safety + * None + */ +struct TariPublicKeys *wallet_get_seed_peers(struct TariWallet *wallet, + int *error_out); + /** * Upserts a TariContact to the TariWallet. If the contact does not exist it will be Inserted. If it does exist the * Alias will be updated. @@ -4058,7 +4099,7 @@ unsigned long long basenode_state_get_pruning_horizon(struct TariBaseNodeState * * * ## Returns * `c_ulonglong` - The height of the pruning horizon. This indicates from what height a full block can be provided - * (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be + * (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be * provided. Archival nodes will always have a `pruned_height` of zero. * * # Safety diff --git a/buildtools/docker/base_node.Dockerfile b/buildtools/docker/base_node.Dockerfile index 043f96d71c..e5aae31e56 100644 --- a/buildtools/docker/base_node.Dockerfile +++ b/buildtools/docker/base_node.Dockerfile @@ -1,13 +1,13 @@ # syntax=docker/dockerfile:1 #FROM rust:1.42.0 as builder -FROM quay.io/tarilabs/rust_tari-build-with-deps:nightly-2023-06-04 as builder +FROM quay.io/tarilabs/rust_tari-build-with-deps:nightly-2024-02-04 as builder # Copy the dependency lists #ADD Cargo.toml ./ ADD .
/minotari_node WORKDIR /minotari_node -# RUN rustup component add rustfmt --toolchain nightly-2023-06-04-x86_64-unknown-linux-gnu +# RUN rustup component add rustfmt --toolchain nightly-2024-02-04-x86_64-unknown-linux-gnu #ARG TBN_ARCH=native ARG TBN_ARCH=x86-64 #ARG TBN_FEATURES=avx2 diff --git a/buildtools/multinet_envs.sh b/buildtools/multinet_envs.sh index 10e2bd488c..54d0424dcd 100644 --- a/buildtools/multinet_envs.sh +++ b/buildtools/multinet_envs.sh @@ -7,21 +7,25 @@ case "$tagnet" in v*-pre.*) echo "esme" export TARI_NETWORK=esme + export TARI_TARGET_NETWORK=testnet export TARI_NETWORK_DIR=testnet ;; v*-rc.*) echo "nextnet" export TARI_NETWORK=nextnet + export TARI_TARGET_NETWORK=nextnet export TARI_NETWORK_DIR=nextnet ;; v*-dan.*) echo "dan" export TARI_NETWORK=igor + export TARI_TARGET_NETWORK=testnet export TARI_NETWORK_DIR=testnetdan ;; *) echo "mainnet" export TARI_NETWORK=mainnet + export TARI_TARGET_NETWORK=mainnet export TARI_NETWORK_DIR=mainnet ;; esac diff --git a/buildtools/windows_inno_installer.iss b/buildtools/windows_inno_installer.iss index ca91ec9d59..a7fd9fc0eb 100644 --- a/buildtools/windows_inno_installer.iss +++ b/buildtools/windows_inno_installer.iss @@ -69,7 +69,7 @@ OutputBaseFilename={#MinotariSuite}-{#MyAppVersion} SetupIconFile=.\tari_logo_black.ico Compression=lzma SolidCompression=yes -MinVersion=0,6.1 +MinVersion=0,6.1sp1 VersionInfoCompany=The Tari Developer Community VersionInfoProductName=minotari_suite InfoAfterFile="..\applications\minotari_node\windows\README.md" diff --git a/changelog-development.md b/changelog-development.md index fc64291b98..4cc803f3e6 100644 --- a/changelog-development.md +++ b/changelog-development.md @@ -2,6 +2,127 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
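The wallet FFI surface above gains two `wallet_create` parameters (`peer_seed_str` and `dns_sec`), renames `wallet_add_base_node_peer` to `wallet_set_base_node_peer`, and adds `tari_address_network`. A minimal Rust sketch of the calling side, mirroring the test code earlier in this diff; the opaque struct stand-ins, the `configure` wrapper and the placeholder peer address are illustrative only, and the `string_destroy` signature is assumed from the header's safety notes:

```rust
use std::ffi::{c_char, c_int, CStr, CString};

// Opaque stand-ins for the FFI types declared in wallet.h (illustrative only).
#[repr(C)] pub struct TariWallet { _private: [u8; 0] }
#[repr(C)] pub struct TariWalletAddress { _private: [u8; 0] }
#[repr(C)] pub struct TariPublicKey { _private: [u8; 0] }

extern "C" {
    // Signatures as declared in wallet.h above.
    fn tari_address_network(address: *mut TariWalletAddress, error_out: *mut c_int) -> *mut c_char;
    fn wallet_set_base_node_peer(
        wallet: *mut TariWallet,
        public_key: *mut TariPublicKey,
        address: *const c_char,
        error_out: *mut c_int,
    ) -> bool;
    // Assumed: the free function referenced by the `string_destroy` safety notes.
    fn string_destroy(s: *mut c_char);
}

unsafe fn configure(wallet: *mut TariWallet, address: *mut TariWalletAddress, peer_key: *mut TariPublicKey) {
    let mut error: c_int = 0;

    // Per the doc comment above, an empty string signals an error from TariWalletAddress.
    let network = tari_address_network(address, &mut error);
    if error == 0 {
        println!("address network: {}", CStr::from_ptr(network).to_string_lossy());
    }
    string_destroy(network); // every string returned from Rust must be destroyed

    // Renamed from wallet_add_base_node_peer: the wallet now *sets* its base node peer.
    let peer_addr = CString::new("/onion3/placeholder:18141").unwrap(); // placeholder address
    let ok = wallet_set_base_node_peer(wallet, peer_key, peer_addr.as_ptr(), &mut error);
    assert!(ok && error == 0);
}
```

The new `wallet_create` arguments slot in directly after `network_str`, as the updated test calls above show: a DNS seed string (an empty `CString` in the tests) followed by the `dns_sec` flag.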
+## [1.0.0-pre.11a](https://github.com/tari-project/tari/compare/v1.0.0-pre.11...v1.0.0-pre.11a) (2024-03-12) + + +### Bug Fixes + +* downgrade crossterm for windows compatibility ([#6204](https://github.com/tari-project/tari/issues/6204)) ([243243d](https://github.com/tari-project/tari/commit/243243dd7d7030010662f0d514097230d905a4cc)) + +## [1.0.0-pre.11](https://github.com/tari-project/tari/compare/v1.0.0-pre.10...v1.0.0-pre.11) (2024-03-11) + + +### ⚠ BREAKING CHANGES + +* New gen block + +## [1.0.0-pre.10](https://github.com/tari-project/tari/compare/v1.0.0-pre.9...v1.0.0-pre.10) (2024-03-11) + + +### ⚠ BREAKING CHANGES + +* change split to 50-50 (#6188) +* implement inflating tail emission (#6160) +* changes balance query (#6158) + +### Features + +* change split to 50-50 ([#6188](https://github.com/tari-project/tari/issues/6188)) ([3b7842a](https://github.com/tari-project/tari/commit/3b7842acb12cfea76652b48c400488e436418d0f)) +* expose extra_data field to wallet ffi ([#6191](https://github.com/tari-project/tari/issues/6191)) ([2f2b139](https://github.com/tari-project/tari/commit/2f2b1391284f4a6ffcacb7a6d5e880f6c51cc8a3)) +* implement inflating tail emission ([#6160](https://github.com/tari-project/tari/issues/6160)) ([63b1f68](https://github.com/tari-project/tari/commit/63b1f6864ef080f9eef9ba9d6a600ab86c8791c5)), closes [#6122](https://github.com/tari-project/tari/issues/6122) [#6131](https://github.com/tari-project/tari/issues/6131) +* lazily evaluate for new random_x template ([#6170](https://github.com/tari-project/tari/issues/6170)) ([d220643](https://github.com/tari-project/tari/commit/d220643b1596955c499bf39df2c58c3052d92724)) +* limit transaction size ([#6154](https://github.com/tari-project/tari/issues/6154)) ([abd64d8](https://github.com/tari-project/tari/commit/abd64d8725f7e94b80bbfcbd97c58d9988571087)) +* make the make_it_rain submission rate a float ([#6180](https://github.com/tari-project/tari/issues/6180)) ([75d773b](https://github.com/tari-project/tari/commit/75d773bba625bb513c7b7bcef0cd6e9b9dda6c83)) +* mining ffi add coinbase add ([#6183](https://github.com/tari-project/tari/issues/6183)) ([820e936](https://github.com/tari-project/tari/commit/820e93676555bc35183470db6bbf3a5fd99eda02)) +* multi-network ci ([#6162](https://github.com/tari-project/tari/issues/6162)) ([8990b57](https://github.com/tari-project/tari/commit/8990b575cd4df01c1a3e5e9385e13a9ce3b9ddd4)) +* wallet ffi use dns ([#6152](https://github.com/tari-project/tari/issues/6152)) ([464f2c3](https://github.com/tari-project/tari/commit/464f2c3bc8495bf4a08e7292829726e8f9e8c747)) + + +### Bug Fixes + +* add .h file to mining helper ([#6194](https://github.com/tari-project/tari/issues/6194)) ([237e6b9](https://github.com/tari-project/tari/commit/237e6b963edd3e4a8986ed4f9767a16f36aff05e)) +* avoid cloning range proofs during verification ([#6166](https://github.com/tari-project/tari/issues/6166)) ([19a824d](https://github.com/tari-project/tari/commit/19a824dea8971f15a7b263122b20e46286f89857)) +* changes balance query ([#6158](https://github.com/tari-project/tari/issues/6158)) ([9ccc615](https://github.com/tari-project/tari/commit/9ccc6153b0fedc1cf40bd547c6987143c23b1649)) +* fixed make-it-rain delay ([#6165](https://github.com/tari-project/tari/issues/6165)) ([5c5da46](https://github.com/tari-project/tari/commit/5c5da461690684e90ecc12565d674fbca06b5f53)) +* hide unmined coinbase ([#6159](https://github.com/tari-project/tari/issues/6159))
([2ccde17](https://github.com/tari-project/tari/commit/2ccde173834fbbfc617b87001c7364760b81590e)) +* horizon sync ([#6197](https://github.com/tari-project/tari/issues/6197)) ([c96be82](https://github.com/tari-project/tari/commit/c96be82efdbb24f448a5efef3076d0b1819ed07e)) +* oms validation ([#6161](https://github.com/tari-project/tari/issues/6161)) ([f3d1219](https://github.com/tari-project/tari/commit/f3d12196530f9bf7c266cba9eff014cba04cecbb)) +* remove extra range proof verifications ([#6190](https://github.com/tari-project/tari/issues/6190)) ([57330bf](https://github.com/tari-project/tari/commit/57330bf7e0be7d2d4f325e8009d3b10568f3acad)) +* rewind bug causing SMT to be broken ([#6172](https://github.com/tari-project/tari/issues/6172)) ([4cb61a3](https://github.com/tari-project/tari/commit/4cb61a33c60fe18706aae4700e301484abe62471)) +* wallet validation during reorgs ([#6173](https://github.com/tari-project/tari/issues/6173)) ([97fc7b3](https://github.com/tari-project/tari/commit/97fc7b382a078ed2178c650214cb9803daeea87f)) + +## [1.0.0-pre.9](https://github.com/tari-project/tari/compare/v1.0.0-pre.8...v1.0.0-pre.9) (2024-02-20) + + +### ⚠ BREAKING CHANGES + +* change proof of work to be dependant on target difficulty (#6156) + +### Features + +* add import tx method ([#6132](https://github.com/tari-project/tari/issues/6132)) ([f3d9121](https://github.com/tari-project/tari/commit/f3d91212e1e3a1e450b5f8e71ceacf2673cfc8c2)) +* allow ffi to see lock height ([#6140](https://github.com/tari-project/tari/issues/6140)) ([48af0b8](https://github.com/tari-project/tari/commit/48af0b8615c80019ab1cf38f995a422cb999459e)) +* change CLI get_block to search orphans ([#6153](https://github.com/tari-project/tari/issues/6153)) ([ae1e379](https://github.com/tari-project/tari/commit/ae1e3796d98e55ceb3642128d659c4e181108b85)) +* change proof of work to be dependant on target difficulty ([#6156](https://github.com/tari-project/tari/issues/6156)) ([feb634c](https://github.com/tari-project/tari/commit/feb634cd260a910228e0e9de45c9024b1990683f)) +* check chain metadata ([#6146](https://github.com/tari-project/tari/issues/6146)) ([8a16f7b](https://github.com/tari-project/tari/commit/8a16f7ba83fd200618814b2eaf66c88c5b1dfb79)) +* turn off node metrics by default ([#6073](https://github.com/tari-project/tari/issues/6073)) ([5ed661c](https://github.com/tari-project/tari/commit/5ed661c840795c3419369e865c8969ef7d49aacb)) + + +### Bug Fixes + +* balanced binary merkle tree merged proof ([#6144](https://github.com/tari-project/tari/issues/6144)) ([4d01653](https://github.com/tari-project/tari/commit/4d01653e6780241edfe732761d63d4218a2f742d)) +* wallet clear short term output ([#6151](https://github.com/tari-project/tari/issues/6151)) ([ac6997a](https://github.com/tari-project/tari/commit/ac6997af1a1d9828a93064e849df3dcc4ba019ee)) + +## [1.0.0-pre.8](https://github.com/tari-project/tari/compare/v1.0.0-pre.7...v1.0.0-pre.8) (2024-02-06) + + +### Bug Fixes + +* **comms:** correctly initialize hidden service ([#6124](https://github.com/tari-project/tari/issues/6124)) ([0584782](https://github.com/tari-project/tari/commit/058478255a93e7d50d95c8ac8c196069f76b994b)) +* **libtor:** prevent metrics port conflict ([#6125](https://github.com/tari-project/tari/issues/6125)) ([661af51](https://github.com/tari-project/tari/commit/661af5177863f37f0b01c9846dccc7d24f873fc5)) + +## [1.0.0-pre.7](https://github.com/tari-project/tari/compare/v1.0.0-pre.5...v1.0.0-pre.7) (2024-02-02) + + +### ⚠ BREAKING CHANGES + +* fix horizon sync after smt
upgrade (#6006) + +### Features + +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* do validation after adding utxos and txs ([#6114](https://github.com/tari-project/tari/issues/6114)) ([7d886e6](https://github.com/tari-project/tari/commit/7d886e6c85e463a4f7f4dacc5115e625bb1f37f5)) +* export transaction ([#6111](https://github.com/tari-project/tari/issues/6111)) ([70d5ad3](https://github.com/tari-project/tari/commit/70d5ad3b4f8a1b8efb83a868102b7c846f2bd50c)) +* fix horizon sync after smt upgrade ([#6006](https://github.com/tari-project/tari/issues/6006)) ([b6b80f6](https://github.com/tari-project/tari/commit/b6b80f6ee9b91255815bd2a66f51425c3a628dcf)) +* initial horizon sync from prune node ([#6109](https://github.com/tari-project/tari/issues/6109)) ([2987621](https://github.com/tari-project/tari/commit/2987621b2cef6d3b852ed9a1f4215f19b9838e0f)) +* new release ([#6105](https://github.com/tari-project/tari/issues/6105)) ([554a3b2](https://github.com/tari-project/tari/commit/554a3b23d887eac81be288b2b8651019a6097458)) +* prevent runtime error with compact error input ([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* smt verification ([#6115](https://github.com/tari-project/tari/issues/6115)) ([78a9348](https://github.com/tari-project/tari/commit/78a93480bc00235cbf221ff977f7d87f8008226a)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) +* wallet add restart validation to start ([#6113](https://github.com/tari-project/tari/issues/6113)) ([5c236ce](https://github.com/tari-project/tari/commit/5c236ce9928acd3aa212adab716c93f05e8cac9d)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) +* make monero extra data less strict ([#6117](https://github.com/tari-project/tari/issues/6117)) ([38b9113](https://github.com/tari-project/tari/commit/38b9113375bb90d667718f406e796f6a0e021861)) + +## [1.0.0-pre.6](https://github.com/tari-project/tari/compare/v1.0.0-pre.5...v1.0.0-pre.6) (2024-01-29) + + +### Features + +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* prevent runtime error with compact error input ([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) 
([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) + ## [1.0.0-pre.5](https://github.com/tari-project/tari/compare/v1.0.0-pre.4...v1.0.0-pre.5) (2024-01-18) diff --git a/changelog-nextnet.md b/changelog-nextnet.md index 00cae88b0e..9813a06969 100644 --- a/changelog-nextnet.md +++ b/changelog-nextnet.md @@ -2,6 +2,121 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +## [1.0.0-rc.6a](https://github.com/tari-project/tari/compare/v1.0.0-rc.6...v1.0.0-rc.6a) (2024-03-12) + + +### Bug Fixes + +* downgrade crossterm for windows compatibility ([#6204](https://github.com/tari-project/tari/issues/6204)) ([243243d](https://github.com/tari-project/tari/commit/243243dd7d7030010662f0d514097230d905a4cc)) + +## [1.0.0-rc.6](https://github.com/tari-project/tari/compare/v1.0.0-rc.5...v1.0.0-rc.6) (2024-03-11) + + +### ⚠ BREAKING CHANGES + +* change split to 50-50 (#6188) +* implement inflating tail emission (#6160) +* changes balance query (#6158) +* change proof of work to be dependant on target difficulty (#6156) + +### Features + +* change split to 50-50 ([#6188](https://github.com/tari-project/tari/issues/6188)) ([3b7842a](https://github.com/tari-project/tari/commit/3b7842acb12cfea76652b48c400488e436418d0f)) +* expose extra_data field to wallet ffi ([#6191](https://github.com/tari-project/tari/issues/6191)) ([2f2b139](https://github.com/tari-project/tari/commit/2f2b1391284f4a6ffcacb7a6d5e880f6c51cc8a3)) +* implement inflating tail emission ([#6160](https://github.com/tari-project/tari/issues/6160)) ([63b1f68](https://github.com/tari-project/tari/commit/63b1f6864ef080f9eef9ba9d6a600ab86c8791c5)), closes [#6122](https://github.com/tari-project/tari/issues/6122) [#6131](https://github.com/tari-project/tari/issues/6131) +* lazily evaluate for new random_x template ([#6170](https://github.com/tari-project/tari/issues/6170)) ([d220643](https://github.com/tari-project/tari/commit/d220643b1596955c499bf39df2c58c3052d92724)) +* limit transaction size ([#6154](https://github.com/tari-project/tari/issues/6154)) ([abd64d8](https://github.com/tari-project/tari/commit/abd64d8725f7e94b80bbfcbd97c58d9988571087)) +* make the make_it_rain submission rate a float ([#6180](https://github.com/tari-project/tari/issues/6180)) ([75d773b](https://github.com/tari-project/tari/commit/75d773bba625bb513c7b7bcef0cd6e9b9dda6c83)) +* mining ffi add coinbase add ([#6183](https://github.com/tari-project/tari/issues/6183)) ([820e936](https://github.com/tari-project/tari/commit/820e93676555bc35183470db6bbf3a5fd99eda02)) +* multi-network ci ([#6162](https://github.com/tari-project/tari/issues/6162)) ([8990b57](https://github.com/tari-project/tari/commit/8990b575cd4df01c1a3e5e9385e13a9ce3b9ddd4)) +* wallet ffi use dns ([#6152](https://github.com/tari-project/tari/issues/6152)) ([464f2c3](https://github.com/tari-project/tari/commit/464f2c3bc8495bf4a08e7292829726e8f9e8c747)) +* add import tx method ([#6132](https://github.com/tari-project/tari/issues/6132)) ([f3d9121](https://github.com/tari-project/tari/commit/f3d91212e1e3a1e450b5f8e71ceacf2673cfc8c2)) +* allow ffi to see lock height ([#6140](https://github.com/tari-project/tari/issues/6140)) ([48af0b8](https://github.com/tari-project/tari/commit/48af0b8615c80019ab1cf38f995a422cb999459e)) +* change CLI get_block to search orphans ([#6153](https://github.com/tari-project/tari/issues/6153))
([ae1e379](https://github.com/tari-project/tari/commit/ae1e3796d98e55ceb3642128d659c4e181108b85)) +* change proof of work to be dependant on target difficulty ([#6156](https://github.com/tari-project/tari/issues/6156)) ([feb634c](https://github.com/tari-project/tari/commit/feb634cd260a910228e0e9de45c9024b1990683f)) +* check chain metadata ([#6146](https://github.com/tari-project/tari/issues/6146)) ([8a16f7b](https://github.com/tari-project/tari/commit/8a16f7ba83fd200618814b2eaf66c88c5b1dfb79)) +* turn off node metrics by default ([#6073](https://github.com/tari-project/tari/issues/6073)) ([5ed661c](https://github.com/tari-project/tari/commit/5ed661c840795c3419369e865c8969ef7d49aacb)) + + +### Bug Fixes + +* add .h file to mining helper ([#6194](https://github.com/tari-project/tari/issues/6194)) ([237e6b9](https://github.com/tari-project/tari/commit/237e6b963edd3e4a8986ed4f9767a16f36aff05e)) +* avoid cloning range proofs during verification ([#6166](https://github.com/tari-project/tari/issues/6166)) ([19a824d](https://github.com/tari-project/tari/commit/19a824dea8971f15a7b263122b20e46286f89857)) +* changes balance query ([#6158](https://github.com/tari-project/tari/issues/6158)) ([9ccc615](https://github.com/tari-project/tari/commit/9ccc6153b0fedc1cf40bd547c6987143c23b1649)) +* fixed make-it-rain delay ([#6165](https://github.com/tari-project/tari/issues/6165)) ([5c5da46](https://github.com/tari-project/tari/commit/5c5da461690684e90ecc12565d674fbca06b5f53)) +* hide unmined coinbase ([#6159](https://github.com/tari-project/tari/issues/6159)) ([2ccde17](https://github.com/tari-project/tari/commit/2ccde173834fbbfc617b87001c7364760b81590e)) +* horizon sync ([#6197](https://github.com/tari-project/tari/issues/6197)) ([c96be82](https://github.com/tari-project/tari/commit/c96be82efdbb24f448a5efef3076d0b1819ed07e)) +* oms validation ([#6161](https://github.com/tari-project/tari/issues/6161)) ([f3d1219](https://github.com/tari-project/tari/commit/f3d12196530f9bf7c266cba9eff014cba04cecbb)) +* remove extra range proof verifications ([#6190](https://github.com/tari-project/tari/issues/6190)) ([57330bf](https://github.com/tari-project/tari/commit/57330bf7e0be7d2d4f325e8009d3b10568f3acad)) +* rewind bug causing SMT to be broken ([#6172](https://github.com/tari-project/tari/issues/6172)) ([4cb61a3](https://github.com/tari-project/tari/commit/4cb61a33c60fe18706aae4700e301484abe62471)) +* wallet validation during reorgs ([#6173](https://github.com/tari-project/tari/issues/6173)) ([97fc7b3](https://github.com/tari-project/tari/commit/97fc7b382a078ed2178c650214cb9803daeea87f)) +* balanced binary merkle tree merged proof ([#6144](https://github.com/tari-project/tari/issues/6144)) ([4d01653](https://github.com/tari-project/tari/commit/4d01653e6780241edfe732761d63d4218a2f742d)) +* wallet clear short term output ([#6151](https://github.com/tari-project/tari/issues/6151)) ([ac6997a](https://github.com/tari-project/tari/commit/ac6997af1a1d9828a93064e849df3dcc4ba019ee)) + + +## [1.0.0-rc.5](https://github.com/tari-project/tari/compare/v1.0.0-rc.4...v1.0.0-rc.5) (2024-02-06) + + +### Bug Fixes + +* **comms:** correctly initialize hidden service ([#6124](https://github.com/tari-project/tari/issues/6124)) ([0584782](https://github.com/tari-project/tari/commit/058478255a93e7d50d95c8ac8c196069f76b994b)) +* **libtor:** prevent metrics port conflict ([#6125](https://github.com/tari-project/tari/issues/6125)) ([661af51](https://github.com/tari-project/tari/commit/661af5177863f37f0b01c9846dccc7d24f873fc5)) + + +## 
[1.0.0-rc.4](https://github.com/tari-project/tari/compare/v1.0.0-rc.3...v1.0.0-rc.4) (2024-02-02) + + +### ⚠ BREAKING CHANGES + +* fix horizon sync after smt upgrade (#6006) + +### Features + +* do validation after adding utxos and txs ([#6114](https://github.com/tari-project/tari/issues/6114)) ([7d886e6](https://github.com/tari-project/tari/commit/7d886e6c85e463a4f7f4dacc5115e625bb1f37f5)) +* export transaction ([#6111](https://github.com/tari-project/tari/issues/6111)) ([70d5ad3](https://github.com/tari-project/tari/commit/70d5ad3b4f8a1b8efb83a868102b7c846f2bd50c)) +* fix horizon sync after smt upgrade ([#6006](https://github.com/tari-project/tari/issues/6006)) ([b6b80f6](https://github.com/tari-project/tari/commit/b6b80f6ee9b91255815bd2a66f51425c3a628dcf)) +* initial horizon sync from prune node ([#6109](https://github.com/tari-project/tari/issues/6109)) ([2987621](https://github.com/tari-project/tari/commit/2987621b2cef6d3b852ed9a1f4215f19b9838e0f)) +* smt verification ([#6115](https://github.com/tari-project/tari/issues/6115)) ([78a9348](https://github.com/tari-project/tari/commit/78a93480bc00235cbf221ff977f7d87f8008226a)) +* wallet add restart validation to start ([#6113](https://github.com/tari-project/tari/issues/6113)) ([5c236ce](https://github.com/tari-project/tari/commit/5c236ce9928acd3aa212adab716c93f05e8cac9d)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) +* make monero extra data less strict ([#6117](https://github.com/tari-project/tari/issues/6117)) ([38b9113](https://github.com/tari-project/tari/commit/38b9113375bb90d667718f406e796f6a0e021861)) + +## [1.0.0-rc.3](https://github.com/tari-project/tari/compare/v1.0.0-rc.2...v1.0.0-rc.3) (2024-01-29) + + +### Features + +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* prevent runtime error with compact error input ([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) + +## [1.0.0-rc.2](https://github.com/tari-project/tari/compare/v1.0.0-rc.1...v1.0.0-rc.2) (2024-01-18) + + +### Features + +* add tari address as valid string for discovering a peer ([#6075](https://github.com/tari-project/tari/issues/6075)) ([a4c5bc2](https://github.com/tari-project/tari/commit/a4c5bc2c6c08a5d09b58f13ed9acf561e55478fc)) +* make all apps non interactive ([#6049](https://github.com/tari-project/tari/issues/6049)) ([bafd7e7](https://github.com/tari-project/tari/commit/bafd7e7baadd0f8b82ca8205ec3f18342d74e92a)) +* make libtor on by default for nix builds ([#6060](https://github.com/tari-project/tari/issues/6060))
([b5e0d06](https://github.com/tari-project/tari/commit/b5e0d0639c540177373b7faa9c2fade64581e46d)) + + +### Bug Fixes + +* fix small error in config.toml ([#6052](https://github.com/tari-project/tari/issues/6052)) ([6518a60](https://github.com/tari-project/tari/commit/6518a60dce9a4b8ace6c5cc4b1ee79045e364e0e)) +* tms validation correctly updating ([#6079](https://github.com/tari-project/tari/issues/6079)) ([34222a8](https://github.com/tari-project/tari/commit/34222a88bd1746869e67ccde9c2f7529862f3b5d)) +* wallet coinbases not validated correctly ([#6074](https://github.com/tari-project/tari/issues/6074)) ([bb66df1](https://github.com/tari-project/tari/commit/bb66df13bcf3d00082e35f7305b1fde72d4ace2a)) + + ## [1.0.0-rc.1](https://github.com/tari-project/tari/compare/v1.0.0-rc.1...v1.0.0-rc.0) (2023-12-14) diff --git a/changelog-stagenet.md b/changelog-stagenet.md index a483b259cb..0983855c42 100644 --- a/changelog-stagenet.md +++ b/changelog-stagenet.md @@ -1,6 +1,401 @@ # Changelog All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +## [1.0.0-alpha.0a](https://github.com/tari-project/tari/compare/v1.0.0-alpha.0...v1.0.0-alpha.0a) (2024-03-12) + +### Bug Fixes + +* downgrade crossterm for windows compatibility ([#6204](https://github.com/tari-project/tari/issues/6204)) ([243243d](https://github.com/tari-project/tari/commit/243243dd7d7030010662f0d514097230d905a4cc)) + +## [1.0.0-alpha.0](https://github.com/tari-project/tari/compare/v0.49.0-rc.0...v1.0.0-alpha.0) (2024-03-11) + + +### ⚠ BREAKING CHANGES + +* change split to 50-50 (#6188) +* implement inflating tail emission (#6160) +* changes balance query (#6158) +* change proof of work to be dependant on target difficulty (#6156) +* fix horizon sync after smt upgrade (#6006) +* add paging to utxo stream request (#5302) +* add optional range proof types (#5372) +* hash domain consistency (#5556) ([64443c6f](https://github.com/tari-project/tari/commit/64443c6f428fa84f8ab3e4b86949be6faef35aeb)) +* consistent output/kernel versions between sender and receiver (#5553) ([74f9c35f](https://github.com/tari-project/tari/commit/74f9c35f6a34c1cf731274b7febb245734ae7032)) +* New Gen block (#5633) +* Validator mr included in mining hash (#5615) +* Monero merkle proof change (#5602) +* Merge mining hash has changed +* remove timestamp from header in proto files (#5667) +* **comms/dht:** limit number of peer claims and addresses for all sources (#5702) +* **comms:** use noise XX handshake pattern for improved privacy (#5696) +* update faucet for genesis block (#5633) +* limit monero hashes and force coinbase to be tx 0 (#5602) +* add validator mr to mining hash (#5615) +* replace utxo MMR with SMT (#5854) +* update key parsing (#5900) +* **proto:** remove proto timestamp wrapper types (#5833) +* **proto:** remove proto bytes for std bytes (#5835) +* upgrade bitflags crate (#5831) +* improve block add where many orphan chain tips existed (#5763) +* lmdb flag set wrong on database (#5916) +* add validator mmr size (#5873) +* completed transaction use bytes for transaction protocol (not hex string) in wallet database (#5906) +* new faucet for esmeralda (#6001) +* dont store entire monero coinbase transaction (#5991) +* ups the min difficulty (#5999) +* network specific domain hashers (#5980) +* add aux chain support for merge mining (#5976) +* disable console wallet grpc (#5988) +* add one-sided coinbase payments (#5967) +* fix opcode
signatures (#5966) +* remove mutable mmr (#5954) +* move kernel MMR position to `u64` (#5956) +* standardize gRPC authentication and mitigate DoS (#5936) +* fix difficulty overflow (#5935) +* update status (#6008) + +### Features + +* change split to 50-50 ([#6188](https://github.com/tari-project/tari/issues/6188)) ([3b7842a](https://github.com/tari-project/tari/commit/3b7842acb12cfea76652b48c400488e436418d0f)) +* expose extra_data field to wallet ffi ([#6191](https://github.com/tari-project/tari/issues/6191)) ([2f2b139](https://github.com/tari-project/tari/commit/2f2b1391284f4a6ffcacb7a6d5e880f6c51cc8a3)) +* implement inflating tail emission ([#6160](https://github.com/tari-project/tari/issues/6160)) ([63b1f68](https://github.com/tari-project/tari/commit/63b1f6864ef080f9eef9ba9d6a600ab86c8791c5)), closes [#6122](https://github.com/tari-project/tari/issues/6122) [#6131](https://github.com/tari-project/tari/issues/6131) +* lazily evaluate for new random_x template ([#6170](https://github.com/tari-project/tari/issues/6170)) ([d220643](https://github.com/tari-project/tari/commit/d220643b1596955c499bf39df2c58c3052d92724)) +* limit transaction size ([#6154](https://github.com/tari-project/tari/issues/6154)) ([abd64d8](https://github.com/tari-project/tari/commit/abd64d8725f7e94b80bbfcbd97c58d9988571087)) +* make the make_it_rain submission rate a float ([#6180](https://github.com/tari-project/tari/issues/6180)) ([75d773b](https://github.com/tari-project/tari/commit/75d773bba625bb513c7b7bcef0cd6e9b9dda6c83)) +* mining ffi add coinbase add ([#6183](https://github.com/tari-project/tari/issues/6183)) ([820e936](https://github.com/tari-project/tari/commit/820e93676555bc35183470db6bbf3a5fd99eda02)) +* multi-network ci ([#6162](https://github.com/tari-project/tari/issues/6162)) ([8990b57](https://github.com/tari-project/tari/commit/8990b575cd4df01c1a3e5e9385e13a9ce3b9ddd4)) +* wallet ffi use dns ([#6152](https://github.com/tari-project/tari/issues/6152)) ([464f2c3](https://github.com/tari-project/tari/commit/464f2c3bc8495bf4a08e7292829726e8f9e8c747)) +* add import tx method ([#6132](https://github.com/tari-project/tari/issues/6132)) ([f3d9121](https://github.com/tari-project/tari/commit/f3d91212e1e3a1e450b5f8e71ceacf2673cfc8c2)) +* allow ffi to see lock height ([#6140](https://github.com/tari-project/tari/issues/6140)) ([48af0b8](https://github.com/tari-project/tari/commit/48af0b8615c80019ab1cf38f995a422cb999459e)) +* change CLI get_block to search orphans ([#6153](https://github.com/tari-project/tari/issues/6153)) ([ae1e379](https://github.com/tari-project/tari/commit/ae1e3796d98e55ceb3642128d659c4e181108b85)) +* change proof of work to be dependant on target difficulty ([#6156](https://github.com/tari-project/tari/issues/6156)) ([feb634c](https://github.com/tari-project/tari/commit/feb634cd260a910228e0e9de45c9024b1990683f)) +* check chain metadata ([#6146](https://github.com/tari-project/tari/issues/6146)) ([8a16f7b](https://github.com/tari-project/tari/commit/8a16f7ba83fd200618814b2eaf66c88c5b1dfb79)) +* turn off node metrics by default ([#6073](https://github.com/tari-project/tari/issues/6073)) ([5ed661c](https://github.com/tari-project/tari/commit/5ed661c840795c3419369e865c8969ef7d49aacb)) +* do validation after adding utxos and txs ([#6114](https://github.com/tari-project/tari/issues/6114)) ([7d886e6](https://github.com/tari-project/tari/commit/7d886e6c85e463a4f7f4dacc5115e625bb1f37f5)) +* export transaction ([#6111](https://github.com/tari-project/tari/issues/6111)) 
([70d5ad3](https://github.com/tari-project/tari/commit/70d5ad3b4f8a1b8efb83a868102b7c846f2bd50c)) +* fix horizon sync after smt upgrade ([#6006](https://github.com/tari-project/tari/issues/6006)) ([b6b80f6](https://github.com/tari-project/tari/commit/b6b80f6ee9b91255815bd2a66f51425c3a628dcf)) +* initial horizon sync from prune node ([#6109](https://github.com/tari-project/tari/issues/6109)) ([2987621](https://github.com/tari-project/tari/commit/2987621b2cef6d3b852ed9a1f4215f19b9838e0f)) +* smt verification ([#6115](https://github.com/tari-project/tari/issues/6115)) ([78a9348](https://github.com/tari-project/tari/commit/78a93480bc00235cbf221ff977f7d87f8008226a)) +* wallet add restart validation to start ([#6113](https://github.com/tari-project/tari/issues/6113)) ([5c236ce](https://github.com/tari-project/tari/commit/5c236ce9928acd3aa212adab716c93f05e8cac9d)) +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* prevent runtime error with compact error input ([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) +* add tari address as valid string for discovering a peer ([#6075](https://github.com/tari-project/tari/issues/6075)) ([a4c5bc2](https://github.com/tari-project/tari/commit/a4c5bc2c6c08a5d09b58f13ed9acf561e55478fc)) +* make all apps non interactive ([#6049](https://github.com/tari-project/tari/issues/6049)) ([bafd7e7](https://github.com/tari-project/tari/commit/bafd7e7baadd0f8b82ca8205ec3f18342d74e92a)) +* make libtor on by default for nix builds ([#6060](https://github.com/tari-project/tari/issues/6060)) ([b5e0d06](https://github.com/tari-project/tari/commit/b5e0d0639c540177373b7faa9c2fade64581e46d)) +* fix windows installer ([#6043](https://github.com/tari-project/tari/issues/6043)) ([c37a0a8](https://github.com/tari-project/tari/commit/c37a0a89726eec765c9c10d3da0c990d339de9b9)) +* side load chat ([#6042](https://github.com/tari-project/tari/issues/6042)) ([d729c45](https://github.com/tari-project/tari/commit/d729c458b17406d9f5dbb8982a9bf5604f39c63c)) +* add miner timeout config option ([#5331](https://github.com/tari-project/tari/issues/5331)) ([aea14f6](https://github.com/tari-project/tari/commit/aea14f6bf302801c85efa9f304a8f442aaf9a3ff)) +* chat ffi ([#5349](https://github.com/tari-project/tari/issues/5349)) ([f7cece2](https://github.com/tari-project/tari/commit/f7cece27c02ae3b668e1ffbd6629828d0432debf)) +* chat scaffold ([#5244](https://github.com/tari-project/tari/issues/5244)) ([5b09f8e](https://github.com/tari-project/tari/commit/5b09f8e2b630685d9ff748eae772b9798954f6ff)) +* improve message encryption ([#5288](https://github.com/tari-project/tari/issues/5288)) ([7a80716](https://github.com/tari-project/tari/commit/7a80716c71987bae14d83994d7402f96c190242d)) +* **p2p:** allow listener bind to differ from the tor forward address ([#5357](https://github.com/tari-project/tari/issues/5357)) 
([857fb55](https://github.com/tari-project/tari/commit/857fb55520145ece48b4b5cca0aa5d7fd8f6c69e)) +* add extended mask recovery ([#5301](https://github.com/tari-project/tari/issues/5301)) ([23d882e](https://github.com/tari-project/tari/commit/23d882eb783f3d94efbfdd928b3d87b2907bf2d7)) +* add network name to data path and --network flag to the miners ([#5291](https://github.com/tari-project/tari/issues/5291)) ([1f04beb](https://github.com/tari-project/tari/commit/1f04bebd4f6d14432aab923baeab17d1d6cc39bf)) +* add other code template types ([#5242](https://github.com/tari-project/tari/issues/5242)) ([93e5e85](https://github.com/tari-project/tari/commit/93e5e85cbc13be33bea40c7b8289d0ff344df08c)) +* add paging to utxo stream request ([#5302](https://github.com/tari-project/tari/issues/5302)) ([3540309](https://github.com/tari-project/tari/commit/3540309e29d450fc8cb48bc714fb780c1c107b81)) +* add wallet daemon config ([#5311](https://github.com/tari-project/tari/issues/5311)) ([30419cf](https://github.com/tari-project/tari/commit/30419cfcf198fb923ef431316f2915cbc80f1e3b)) +* define different network defaults for bins ([#5307](https://github.com/tari-project/tari/issues/5307)) ([2f5d498](https://github.com/tari-project/tari/commit/2f5d498d2130b5358fbf126c96a917ed98016955)) +* feature gates ([#5287](https://github.com/tari-project/tari/issues/5287)) ([72c19dc](https://github.com/tari-project/tari/commit/72c19dc130b0c7652cca422c9c4c2e08e5b8e555)) +* fix rpc transaction conversion ([#5304](https://github.com/tari-project/tari/issues/5304)) ([344040a](https://github.com/tari-project/tari/commit/344040ac7322bae5604aa9db48d4194c1b3779fa)) +* add metadata signature check ([#5411](https://github.com/tari-project/tari/issues/5411)) ([9c2bf41](https://github.com/tari-project/tari/commit/9c2bf41ec8f649ffac824878256c09598bf52269)) +* add optional range proof types ([#5372](https://github.com/tari-project/tari/issues/5372)) ([f24784f](https://github.com/tari-project/tari/commit/f24784f3a2f3f574cd2ac4e2d9fe963078e4c524)) +* added burn feature to the console wallet ([#5322](https://github.com/tari-project/tari/issues/5322)) ([45685b9](https://github.com/tari-project/tari/commit/45685b9f3acceba483ec30021e8d4894dbf2861c)) +* improved base node monitoring ([#5390](https://github.com/tari-project/tari/issues/5390)) ([c704890](https://github.com/tari-project/tari/commit/c704890ca949bcfcd608e299175694b81cef0165)) +* refactor configuration for chat so ffi can create and accept a config file (#5426) ([9d0d8b52](https://github.com/tari-project/tari/commit/9d0d8b5277bd26e79b7fe5506edcaf197ba63eb7), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* ui for template registration in console wallet (#5444) ([701e3c23](https://github.com/tari-project/tari/commit/701e3c2341d1029c2711b81a66952f3bee7d8e42), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* sparse merkle trees (#5457) ([f536d219](https://github.com/tari-project/tari/commit/f536d21929e4eeb11cc185c013eef0b336def216)) +* proof of work audit part 2 (#5495) ([af32f96f](https://github.com/tari-project/tari/commit/af32f96f36a32235daf7e3b1d9694af7edcf5f8e)) +* improve recovery speed (#5489) ([d128f850](https://github.com/tari-project/tari/commit/d128f850356ff18bfd394f6c3bfe78f5bd0607e1)) +* add consistent ban reason for sync
([#5729](https://github.com/brianp/tari/issues/5729)) ([9564281](https://github.com/brianp/tari/commit/95642811b9df592eb9bddd9b71d10ee30987e59d)) +* add mempool min fee ([#5606](https://github.com/brianp/tari/issues/5606)) ([15c7e8f](https://github.com/brianp/tari/commit/15c7e8f9ca3d656850d6f0041d2f7fc07b4af80b)) +* ban peer unexpected response ([#5608](https://github.com/brianp/tari/issues/5608)) ([02494ae](https://github.com/brianp/tari/commit/02494aee0f97469b9deb9c339b4075b14b69ff6f)) +* change default script to PushPubKey ([#5653](https://github.com/brianp/tari/issues/5653)) ([f5b89ad](https://github.com/brianp/tari/commit/f5b89add6a04b935b9ae8dda0f694eb826ef6d9a)) +* chat ffi status callback ([#5583](https://github.com/brianp/tari/issues/5583)) ([f68b85f](https://github.com/brianp/tari/commit/f68b85f404e524d61d8b6153c13e8b2e6ab2a20b)) +* chat message fetching pagination ([#5594](https://github.com/brianp/tari/issues/5594)) ([2024357](https://github.com/brianp/tari/commit/202435742ed78b0eac80efcd19b357df96a6bbb9)) +* chat-ffi logging ([#5591](https://github.com/brianp/tari/issues/5591)) ([159959c](https://github.com/brianp/tari/commit/159959cc32c341e111a626729fb1bd9a2851e8a7)) +* cleanup errors ([#5655](https://github.com/brianp/tari/issues/5655)) ([c1737b9](https://github.com/brianp/tari/commit/c1737b9d872dbaf858dd46e6350c6febd7f43690)) +* fix formatting block ([#5630](https://github.com/brianp/tari/issues/5630)) ([49732f6](https://github.com/brianp/tari/commit/49732f65339f4c120afb49e9edb72eda8d17b737)) +* improve block sync error handling ([#5691](https://github.com/brianp/tari/issues/5691)) ([251f796](https://github.com/brianp/tari/commit/251f796dc023459338212a852d50059380399be2)) +* new message callback to chat-ffi ([#5592](https://github.com/brianp/tari/issues/5592)) ([bbd543e](https://github.com/brianp/tari/commit/bbd543ee35e4e5fc858d875cf30d6f24fa2e4d96)) +* peer sync limiter ([#5445](https://github.com/brianp/tari/issues/5445)) ([548643b](https://github.com/brianp/tari/commit/548643b723a548fea3e56f938a84db652d3ee630)) +* remove inherent iterator panic ([#5697](https://github.com/brianp/tari/issues/5697)) ([7f153e5](https://github.com/brianp/tari/commit/7f153e5dd613b3e38586b7f8f536035c6ac98dd8)) +* remove orphan validation and only validate on insertion ([#5601](https://github.com/brianp/tari/issues/5601)) ([41244a3](https://github.com/brianp/tari/commit/41244a3ea666f925648aa752c9ac476486702473)) +* remove unused wasm_key_manager ([#5622](https://github.com/brianp/tari/issues/5622)) ([508c971](https://github.com/brianp/tari/commit/508c97198617f116bb0ccd69c8e1eba1341b18ac)) +* update faucet for genesis block ([#5633](https://github.com/brianp/tari/issues/5633)) ([ffb987a](https://github.com/brianp/tari/commit/ffb987a757f2af721ca5772e28da31035fcf741f)) +* update genesis blocks ([#5698](https://github.com/brianp/tari/issues/5698)) ([b9145b3](https://github.com/brianp/tari/commit/b9145b3373319f0c2c25d0e5dd4d393115a4c0bd)) +* add (de)serialize to BalancedBinaryMerkleTree ([#5744](https://github.com/tari-project/tari/issues/5744)) ([c53ec06](https://github.com/tari-project/tari/commit/c53ec065b6f7893fe1a5d3a3ccde826fa09e438f)) +* add config for grpc server methods ([#5886](https://github.com/tari-project/tari/issues/5886)) ([a3d7cf7](https://github.com/tari-project/tari/commit/a3d7cf771663d2b3c3585796ef502ab00f569ba0)) +* add insert function to SMT ([#5776](https://github.com/tari-project/tari/issues/5776)) 
([5901b4a](https://github.com/tari-project/tari/commit/5901b4af9fe307cdc379979155961d34dcf8c098)) +* add overflow checks to change and fee calculations ([#5834](https://github.com/tari-project/tari/issues/5834)) ([9725fbd](https://github.com/tari-project/tari/commit/9725fbddf1ee7047d2e7698f4ee1975ce22aa605)) +* allow multiple initial sync peers ([#5890](https://github.com/tari-project/tari/issues/5890)) ([e1c504a](https://github.com/tari-project/tari/commit/e1c504a3d9b9affafb3221e46831d818cbdcc45a)) +* apply obscure_error_if_true consistenlty ([#5892](https://github.com/tari-project/tari/issues/5892)) ([1864203](https://github.com/tari-project/tari/commit/1864203c224611cdcac71adbae83e37161ce0a5c)) +* ban bad block-sync peers ([#5871](https://github.com/tari-project/tari/issues/5871)) ([5c2781e](https://github.com/tari-project/tari/commit/5c2781e86be8efacab52c93a0bc2ee662ca56ec8)) +* chat ffi verbose logging options ([#5789](https://github.com/tari-project/tari/issues/5789)) ([24b4324](https://github.com/tari-project/tari/commit/24b4324f3d5b4386a3df68952fb834d58fa5217d)) +* chatffi simpler callbacks and managed identity and db ([#5681](https://github.com/tari-project/tari/issues/5681)) ([79ab584](https://github.com/tari-project/tari/commit/79ab584100bc6899445fc3789d6e3312a06d21e8)) +* **chatffi:** better message metadata parsing ([#5820](https://github.com/tari-project/tari/issues/5820)) ([9a43eab](https://github.com/tari-project/tari/commit/9a43eab2e81aaaa0a5ad53b3dc5d9388b9d43452)) +* **chatffi:** get conversationalists ([#5849](https://github.com/tari-project/tari/issues/5849)) ([d9e8e22](https://github.com/tari-project/tari/commit/d9e8e22846cc0974abcfe19ab32b41299c0a500a)) +* **chatffi:** message metadata ([#5766](https://github.com/tari-project/tari/issues/5766)) ([a9b730a](https://github.com/tari-project/tari/commit/a9b730aaa2e44dbba7c546b0d78ad0fef4884d29)) +* **chatffi:** tor configuration ([#5752](https://github.com/tari-project/tari/issues/5752)) ([1eeb4a9](https://github.com/tari-project/tari/commit/1eeb4a9abbc29ec16593b1c6bec675b928e7b177)) +* **chat:** read receipt feature ([#5824](https://github.com/tari-project/tari/issues/5824)) ([d81fe7d](https://github.com/tari-project/tari/commit/d81fe7d39fdc120665b90e18163151bdb938beee)) +* cli add list of vns for next epoch ([#5743](https://github.com/tari-project/tari/issues/5743)) ([d2a0c8c](https://github.com/tari-project/tari/commit/d2a0c8cc935bb648460f8095c5f2f7125e642169)) +* **comms:** allow multiple messaging protocol instances ([#5748](https://github.com/tari-project/tari/issues/5748)) ([3fba04e](https://github.com/tari-project/tari/commit/3fba04ec862bf405e96e09b5cc38a5d572b77244)) +* consistent handling of edge cases for header sync ([#5837](https://github.com/tari-project/tari/issues/5837)) ([3e1ec1f](https://github.com/tari-project/tari/commit/3e1ec1f1fe70b82ed0f7517d91eb9f3f352cbe97)) +* enable multiple coinbase utxos ([#5879](https://github.com/tari-project/tari/issues/5879)) ([49e5c9c](https://github.com/tari-project/tari/commit/49e5c9c2fec823f0958a28e5c110cc3f34ba48d6)) +* failure of min difficulty should not add block to list of bad blocks ([#5805](https://github.com/tari-project/tari/issues/5805)) ([38dc014](https://github.com/tari-project/tari/commit/38dc014405eb6887210861bd533f2b1dd17f48c2)) +* improve block add where many orphan chain tips existed ([#5763](https://github.com/tari-project/tari/issues/5763)) ([19b3f21](https://github.com/tari-project/tari/commit/19b3f217aee6818678ed45082d910f1a2335a9ec)) +* make prc 
errors ban-able for sync ([#5884](https://github.com/tari-project/tari/issues/5884)) ([4ca664e](https://github.com/tari-project/tari/commit/4ca664e5933f2266f594ecccf545d0eec3b18b40)) +* prevent possible division by zero in difficulty calculation ([#5828](https://github.com/tari-project/tari/issues/5828)) ([f85a878](https://github.com/tari-project/tari/commit/f85a8785de49dda05b3dc54dfda4f5081424e06f)) +* print warning for wallets in direct send only ([#5883](https://github.com/tari-project/tari/issues/5883)) ([6d8686d](https://github.com/tari-project/tari/commit/6d8686dc40ef701fe980698c30347da5b690de07)) +* reduce timeouts and increase bans ([#5882](https://github.com/tari-project/tari/issues/5882)) ([df9bc9a](https://github.com/tari-project/tari/commit/df9bc9a912fe6e7c750e34a3dd7bd6796c6d758f)) +* replace utxo MMR with SMT ([#5854](https://github.com/tari-project/tari/issues/5854)) ([ca74c29](https://github.com/tari-project/tari/commit/ca74c29db7264413dc3e6542b599db9760993170)) +* up the timeout for comms ([#5758](https://github.com/tari-project/tari/issues/5758)) ([1054868](https://github.com/tari-project/tari/commit/1054868248342d0a07077d441151dc48adbfddf3)) +* update key parsing ([#5900](https://github.com/tari-project/tari/issues/5900)) ([59d7ceb](https://github.com/tari-project/tari/commit/59d7cebd22cc86ab5d3691aa5dc3d73b37032442)) +* update randomx ([#5894](https://github.com/tari-project/tari/issues/5894)) ([e445244](https://github.com/tari-project/tari/commit/e4452440bd9269402f1a5352e9c93cbfa6c72425)) +* adaptable min difficulty check ([#5896](https://github.com/tari-project/tari/issues/5896)) ([76f323c](https://github.com/tari-project/tari/commit/76f323c67ee3f46d772b85c410a1c49376348195)) +* add robustness to monero block extra field handling ([#5826](https://github.com/tari-project/tari/issues/5826)) ([597b9ef](https://github.com/tari-project/tari/commit/597b9ef7698ef705d550f6d3ecb1c27dbea79636)) +* add validator mmr size ([#5873](https://github.com/tari-project/tari/issues/5873)) ([fd51045](https://github.com/tari-project/tari/commit/fd510452c0bf9eefcc4117f378c6434aea7b9fd1)) +* completed transaction use bytes for transaction protocol (not hex string) in wallet database ([#5906](https://github.com/tari-project/tari/issues/5906)) ([61256cd](https://github.com/tari-project/tari/commit/61256cde3630f8d81e5648b1f5038ed6e847b9c2)) +* add aux chain support for merge mining ([#5976](https://github.com/tari-project/tari/issues/5976)) ([6723dc7](https://github.com/tari-project/tari/commit/6723dc7a88b2c1e40efe51259cb26e12638b9668)) +* add constant time comparison for grpc authentication ([#5902](https://github.com/tari-project/tari/issues/5902)) ([2fe44db](https://github.com/tari-project/tari/commit/2fe44db773bbf8ee7c4e306e08973ba25e6af10e)) +* add getheaderbyhash method to grpc-js ([#5942](https://github.com/tari-project/tari/issues/5942)) ([ebc4539](https://github.com/tari-project/tari/commit/ebc45398ea7f9eda7f08830cec93f2bf8d4a0e38)) +* add one-sided coinbase payments ([#5967](https://github.com/tari-project/tari/issues/5967)) ([89b19f6](https://github.com/tari-project/tari/commit/89b19f6de8f2acf28557ca37feda03af2657cf30)) +* bans for bad incoming blocks ([#5934](https://github.com/tari-project/tari/issues/5934)) ([7acc44d](https://github.com/tari-project/tari/commit/7acc44d3dce5d8c9085ae5246a8a0a7487d19516)) +* block endless peer stream ([#5951](https://github.com/tari-project/tari/issues/5951)) 
([16b325d](https://github.com/tari-project/tari/commit/16b325defc2f42b9b34d3e1fd05a4b6cd6bcf965)) +* block wallets from sending if BN connection stale ([#5949](https://github.com/tari-project/tari/issues/5949)) ([18d5f57](https://github.com/tari-project/tari/commit/18d5f57363fb085bfac080a7994cb5ced8c932ab)) +* compile out the metrics ([#5944](https://github.com/tari-project/tari/issues/5944)) ([fa2fb27](https://github.com/tari-project/tari/commit/fa2fb27a5834bd56fda62c82a825a7f6d8391fd3)) +* create min dust fee setting ([#5947](https://github.com/tari-project/tari/issues/5947)) ([8f5466c](https://github.com/tari-project/tari/commit/8f5466cb1d85518ba80190fa312281321aa721ff)) +* disable console wallet grpc ([#5988](https://github.com/tari-project/tari/issues/5988)) ([883de17](https://github.com/tari-project/tari/commit/883de175dadee58c4f49fff9a655cae1a2450b3d)) +* dont store entire monero coinbase transaction ([#5991](https://github.com/tari-project/tari/issues/5991)) ([23b10bf](https://github.com/tari-project/tari/commit/23b10bf2d3fdebd296a93eae0aaa5abcd4156de9)) +* enable revealed-value proofs ([#5983](https://github.com/tari-project/tari/issues/5983)) ([f3f5879](https://github.com/tari-project/tari/commit/f3f5879903c619a9219c27ce4e77450f4a1b247b)) +* fix difficulty overflow ([#5935](https://github.com/tari-project/tari/issues/5935)) ([55bbdf2](https://github.com/tari-project/tari/commit/55bbdf2481bb7522ede5cc3e37ca8cdeb323b4f7)) +* grpc over tls ([#5990](https://github.com/tari-project/tari/issues/5990)) ([b80f7e3](https://github.com/tari-project/tari/commit/b80f7e366b14e10b3fb0e9835fb76dd5596d0cf8)) +* limit max number of addresses ([#5960](https://github.com/tari-project/tari/issues/5960)) ([40fc940](https://github.com/tari-project/tari/commit/40fc9408161e404a9f4062362fe495de3c2e374f)) +* move kernel MMR position to `u64` ([#5956](https://github.com/tari-project/tari/issues/5956)) ([cdd8a31](https://github.com/tari-project/tari/commit/cdd8a3135765c3b5a87027f9a5e0103e737c709a)) +* network specific domain hashers ([#5980](https://github.com/tari-project/tari/issues/5980)) ([d7ab283](https://github.com/tari-project/tari/commit/d7ab2838cc08a7c12ccf443697c1560b1ea40b03)) +* **node grpc:** add grpc authentication to the node ([#5928](https://github.com/tari-project/tari/issues/5928)) ([3d95e8c](https://github.com/tari-project/tari/commit/3d95e8cb0543f5bdb284f2ea0771e2f03748b71a)) +* remove panics from applications ([#5943](https://github.com/tari-project/tari/issues/5943)) ([18c3d0b](https://github.com/tari-project/tari/commit/18c3d0be8123cdc362fdeaed66c45ad17c3e7dfa)) +* sender and receiver protocols use bytes (not hex string) in wallet database ([#5950](https://github.com/tari-project/tari/issues/5950)) ([4cbdfec](https://github.com/tari-project/tari/commit/4cbdfec945857c5b7a334962e137d2c8dc4d4c4a)) +* warnings for untrusted urls ([#5955](https://github.com/tari-project/tari/issues/5955)) ([e2e278c](https://github.com/tari-project/tari/commit/e2e278c9a4d09f8e0136e9b3ae2f93afc3e9ac4a)) +* hazop findings ([#6020](https://github.com/tari-project/tari/issues/6020)) ([a68d0dd](https://github.com/tari-project/tari/commit/a68d0dd2fb7719ae99bcd2b62980b5f37d66284a)) +* add miner input processing ([#6016](https://github.com/tari-project/tari/issues/6016)) ([26f5b60](https://github.com/tari-project/tari/commit/26f5b6044832f737c7019dab0e00d2234aac442f)) +* add wallet ffi shutdown tests ([#6007](https://github.com/tari-project/tari/issues/6007)) 
([3129ce8](https://github.com/tari-project/tari/commit/3129ce8dd066ea16900ee8add4e608c1890c6545)) +* fix hazop findings ([#6017](https://github.com/tari-project/tari/issues/6017)) ([0bc62b4](https://github.com/tari-project/tari/commit/0bc62b4a5b78893a226700226bac01590a543bb8)) +* make base node support 1 click mining ([#6019](https://github.com/tari-project/tari/issues/6019)) ([d377269](https://github.com/tari-project/tari/commit/d3772690c36e0dcb6476090fc428e5745298e398)) +* update faucets ([#6024](https://github.com/tari-project/tari/issues/6024)) ([394976c](https://github.com/tari-project/tari/commit/394976cc591f9551e1542f2730a8ec299b524229)) +* update status ([#6008](https://github.com/tari-project/tari/issues/6008)) ([e19ce15](https://github.com/tari-project/tari/commit/e19ce15549b138d462060997d40147bad39a1871)) +* console wallet use dns seeds ([#6034](https://github.com/tari-project/tari/issues/6034)) ([b194954](https://github.com/tari-project/tari/commit/b194954f489bd8ac234993e65463a24808dce8f2)) +* update tests and constants ([#6028](https://github.com/tari-project/tari/issues/6028)) ([d558206](https://github.com/tari-project/tari/commit/d558206ea62c12f3258ede8cfcbf9d44f139ccdd)) + + +### Bug Fixes + +* add .h file to mining helper ([#6194](https://github.com/tari-project/tari/issues/6194)) ([237e6b9](https://github.com/tari-project/tari/commit/237e6b963edd3e4a8986ed4f9767a16f36aff05e)) +* avoid cloning range proofs during verification ([#6166](https://github.com/tari-project/tari/issues/6166)) ([19a824d](https://github.com/tari-project/tari/commit/19a824dea8971f15a7b263122b20e46286f89857)) +* changes balance query ([#6158](https://github.com/tari-project/tari/issues/6158)) ([9ccc615](https://github.com/tari-project/tari/commit/9ccc6153b0fedc1cf40bd547c6987143c23b1649)) +* fixed make-it-rain delay ([#6165](https://github.com/tari-project/tari/issues/6165)) ([5c5da46](https://github.com/tari-project/tari/commit/5c5da461690684e90ecc12565d674fbca06b5f53)) +* hide unmined coinbase ([#6159](https://github.com/tari-project/tari/issues/6159)) ([2ccde17](https://github.com/tari-project/tari/commit/2ccde173834fbbfc617b87001c7364760b81590e)) +* horizon sync ([#6197](https://github.com/tari-project/tari/issues/6197)) ([c96be82](https://github.com/tari-project/tari/commit/c96be82efdbb24f448a5efef3076d0b1819ed07e)) +* oms validation ([#6161](https://github.com/tari-project/tari/issues/6161)) ([f3d1219](https://github.com/tari-project/tari/commit/f3d12196530f9bf7c266cba9eff014cba04cecbb)) +* remove extra range proof verifications ([#6190](https://github.com/tari-project/tari/issues/6190)) ([57330bf](https://github.com/tari-project/tari/commit/57330bf7e0be7d2d4f325e8009d3b10568f3acad)) +* rewind bug causing SMT to be broken ([#6172](https://github.com/tari-project/tari/issues/6172)) ([4cb61a3](https://github.com/tari-project/tari/commit/4cb61a33c60fe18706aae4700e301484abe62471)) +* wallet validation during reorgs ([#6173](https://github.com/tari-project/tari/issues/6173)) ([97fc7b3](https://github.com/tari-project/tari/commit/97fc7b382a078ed2178c650214cb9803daeea87f)) +* balanced binary merkle tree merged proof ([#6144](https://github.com/tari-project/tari/issues/6144)) ([4d01653](https://github.com/tari-project/tari/commit/4d01653e6780241edfe732761d63d4218a2f742d)) +* wallet clear short term output ([#6151](https://github.com/tari-project/tari/issues/6151)) ([ac6997a](https://github.com/tari-project/tari/commit/ac6997af1a1d9828a93064e849df3dcc4ba019ee)) +* added transaction revalidation to the 
wallet startup sequence [#5227](https://github.com/tari-project/tari/issues/5227) ([#5246](https://github.com/tari-project/tari/issues/5246)) ([7b4e2d2](https://github.com/tari-project/tari/commit/7b4e2d2cd41c3173c9471ed987a43ae0978afd57)) +* immediately fail to compile on 32-bit systems ([#5237](https://github.com/tari-project/tari/issues/5237)) ([76aeed7](https://github.com/tari-project/tari/commit/76aeed79ae0774bfb4cd94f9f27093394808bae1)) +* **wallet:** correct change checks in transaction builder ([#5235](https://github.com/tari-project/tari/issues/5235)) ([768a0cf](https://github.com/tari-project/tari/commit/768a0cf310aaf20cc5697eaea32c824f812bc233)) +* **wallet:** ensure burn shared keys and hashes match dan layer ([#5245](https://github.com/tari-project/tari/issues/5245)) ([024ce64](https://github.com/tari-project/tari/commit/024ce64843d282981efb366a3a1a5be36c0fb21d)) +* windows path format in log4rs files ([#5234](https://github.com/tari-project/tari/issues/5234)) ([acfecfb](https://github.com/tari-project/tari/commit/acfecfb0b52868bdfbee9accb4d03b8a4a59d90b)) +* ffi hot fix ([#5251](https://github.com/tari-project/tari/issues/5251)) ([9533e40](https://github.com/tari-project/tari/commit/9533e4017f1229f6de31966a9d5f19ea906117f3)) +* reduce warn log to debug in utxo scanner ([#5256](https://github.com/tari-project/tari/issues/5256)) ([3946641](https://github.com/tari-project/tari/commit/394664177dcbd05fdd43d54b3bd9f77bc52ecd88)) +* wallet sending local address out to network ([#5258](https://github.com/tari-project/tari/issues/5258)) ([6bfa6f9](https://github.com/tari-project/tari/commit/6bfa6f9fecdd594386ef07169d0e68777b3becd5)) +* ensures mutable MMR bitmaps are compressed ([#5278](https://github.com/tari-project/tari/issues/5278)) ([dfddc66](https://github.com/tari-project/tari/commit/dfddc669e3e1271b098c8b271e13f076ca79b039)) +* resize transaction tab windows ([#5290](https://github.com/tari-project/tari/issues/5290)) ([bd95a85](https://github.com/tari-project/tari/commit/bd95a853b2eb166a4aa8e32778ed72bb1f8172ad)), closes [#4942](https://github.com/tari-project/tari/issues/4942) [#5289](https://github.com/tari-project/tari/issues/5289) [#12365](https://github.com/tari-project/tari/issues/12365) +* **comms:** correctly initialize hidden service ([#6124](https://github.com/tari-project/tari/issues/6124)) ([0584782](https://github.com/tari-project/tari/commit/058478255a93e7d50d95c8ac8c196069f76b994b)) +* **libtor:** prevent metrics port conflict ([#6125](https://github.com/tari-project/tari/issues/6125)) ([661af51](https://github.com/tari-project/tari/commit/661af5177863f37f0b01c9846dccc7d24f873fc5)) +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) +* make monero extra data less strict ([#6117](https://github.com/tari-project/tari/issues/6117)) ([38b9113](https://github.com/tari-project/tari/commit/38b9113375bb90d667718f406e796f6a0e021861)) +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) +* fix small error in config.toml ([#6052](https://github.com/tari-project/tari/issues/6052)) ([6518a60](https://github.com/tari-project/tari/commit/6518a60dce9a4b8ace6c5cc4b1ee79045e364e0e)) +* tms validation correctly updating ([#6079](https://github.com/tari-project/tari/issues/6079)) 
([34222a8](https://github.com/tari-project/tari/commit/34222a88bd1746869e67ccde9c2f7529862f3b5d)) +* wallet coinbases not validated correctly ([#6074](https://github.com/tari-project/tari/issues/6074)) ([bb66df1](https://github.com/tari-project/tari/commit/bb66df13bcf3d00082e35f7305b1fde72d4ace2a)) +* add SECURITY.md Vulnerability Disclosure Policy ([#5351](https://github.com/tari-project/tari/issues/5351)) ([72daaf5](https://github.com/tari-project/tari/commit/72daaf5ef614ceb805f690db12c7fefc642d5453)) +* added missing log4rs features ([#5356](https://github.com/tari-project/tari/issues/5356)) ([b9031bb](https://github.com/tari-project/tari/commit/b9031bbbece1988c1de180cabbf4e3acfcb50836)) +* allow public addresses from command line ([#5303](https://github.com/tari-project/tari/issues/5303)) ([349ac89](https://github.com/tari-project/tari/commit/349ac8957bc513cd4110eaac69550ffa0816862b)) +* clippy issues with config ([#5334](https://github.com/tari-project/tari/issues/5334)) ([026f0d5](https://github.com/tari-project/tari/commit/026f0d5e33d524ad302e7edd0c82e108a17800b6)) +* default network selection ([#5333](https://github.com/tari-project/tari/issues/5333)) ([cf4b2c8](https://github.com/tari-project/tari/commit/cf4b2c8a4f5849ba51dab61595dfed1a9249c580)) +* make the first output optional in the wallet ([#5352](https://github.com/tari-project/tari/issues/5352)) ([bf16140](https://github.com/tari-project/tari/commit/bf16140ecd1ad0ae25f8a9b8cde9c3e4f1d12a02)) +* remove wallet panic ([#5338](https://github.com/tari-project/tari/issues/5338)) ([536d16d](https://github.com/tari-project/tari/commit/536d16d2feea283ac1b8f546f479b76465938c4b)) +* wallet .h file for lib wallets ([#5330](https://github.com/tari-project/tari/issues/5330)) ([22a3a17](https://github.com/tari-project/tari/commit/22a3a17db6ef8889cb3a73dfe2db081a0691a68c)) +* **comms:** only set final forward address if configured to port 0 ([#5406](https://github.com/tari-project/tari/issues/5406)) ([ff7fb6d](https://github.com/tari-project/tari/commit/ff7fb6d6b4ab4f77d108b2d9b7fd010c77e613c7)) +* deeplink to rfc spec ([#5342](https://github.com/tari-project/tari/issues/5342)) ([806d3b8](https://github.com/tari-project/tari/commit/806d3b8cc6668f23bb77ca7040833e080c173063)) +* don't use in memory datastores for chat client dht in integration tests ([#5399](https://github.com/tari-project/tari/issues/5399)) ([cbdca6f](https://github.com/tari-project/tari/commit/cbdca6fcc8ae61ed2dbfacca9da1a59c78945045)) +* fix panic when no public addresses ([#5367](https://github.com/tari-project/tari/issues/5367)) ([49be2a2](https://github.com/tari-project/tari/commit/49be2a27a8aead96c180cb988614e3696c338530)) +* loop on mismatched passphrase entry ([#5396](https://github.com/tari-project/tari/issues/5396)) ([ed120b2](https://github.com/tari-project/tari/commit/ed120b277371be7b9bd61c825aa7d61b104d3ac6)) +* use domain separation for wallet message signing ([#5400](https://github.com/tari-project/tari/issues/5400)) ([7d71f8b](https://github.com/tari-project/tari/commit/7d71f8bef94fddf1ffa345e6b599cf02ee6ab935)) +* use mined at timestamp in fauxconfirmation (#5443) ([f3833c9f](https://github.com/tari-project/tari/commit/f3833c9fc46d77fddaa7a23ef1d53ba9d860182a), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* fix custom wallet startup logic for console wallet (#5429) 
([0c1e5765](https://github.com/tari-project/tari/commit/0c1e5765676a9281b45fd66c8846b78ea4c76125), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **balanced_mp:** removes some panics, adds some checks and new tests (#5432) ([602f416f](https://github.com/tari-project/tari/commit/602f416f674b5e1835a634f3c8ab123001af600e), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **comms:** validate onion3 checksum (#5440) ([0dfdb3a4](https://github.com/tari-project/tari/commit/0dfdb3a4bef51952f0cecf6f6fcb00f6b2bfe302), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **wallet-ffi:** don't block on start (#5437) ([27fe8d9d](https://github.com/tari-project/tari/commit/27fe8d9d2fc3ea6468605ef5edea56efdcc8248f), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **mmr:** support zero sized balanced merkle proof (#5474) ([ef984823](https://github.com/tari-project/tari/commit/ef98482313c9b9480ac663709162ae62e9c26978) +* **wallet:** use correct output features for send to self (#5472) ([ce1f0686](https://github.com/tari-project/tari/commit/ce1f0686f56367ff094bf28cfd0388b2ea94a8c9) +* covenant nit picking (#5506) ([301ca495](https://github.com/tari-project/tari/commit/301ca49513948e84bc972e5d75e16f6882d8fb8b) +* overflow of target difficulty (#5493) ([822dac60](https://github.com/tari-project/tari/commit/822dac609a4d148227c1bac61d9d81bc1a5925ac) +* coinbase recovery (#5487) ([48dd157a](https://github.com/tari-project/tari/commit/48dd157a82c4459021a1a02d14f7a3e95e24ebd3)) +* **core:** + * minor audit improvements (#5486) ([8756e0b3](https://github.com/tari-project/tari/commit/8756e0b3c0030700a2409e7d29c4822f8e75aacb) + * remove implicit change in protocol for partial/full signatures (#5488) ([fef701ef](https://github.com/tari-project/tari/commit/fef701efbd07eb769dbe11b5a0cb74c807d7d88c) + * compile error in wallet/FFI (#5497) ([49610736](https://github.com/tari-project/tari/commit/49610736b839c1067820ad841d4730ae8032eb2b) +* **core/base_node:** safe `mmr_position` cast in horizon sync (#5503) ([fb3ac60b](https://github.com/tari-project/tari/commit/fb3ac60b163184f89b2d69b0b9ce3d9b2cfdeeee) +* **core/consensus:** include `coinbase_extra` max size into coinbase weight calculation (#5501) ([4554cc5f](https://github.com/tari-project/tari/commit/4554cc5f075bf9392c75fedb7576753612b374ee) +* **core/keymanager:** use tokio rwlock for keymanagers (#5494) ([229aee02](https://github.com/tari-project/tari/commit/229aee029dbb8d401feb74be51caa4f26dd93be1) +* **core/transactions:** resolve or remove TODOs (#5500) ([4a9f73c7](https://github.com/tari-project/tari/commit/4a9f73c79b98298e61115744b3e467622dd4945b) +* **core/weighting:** remove optional and define correct rounding for usize::MAX (#5490) ([38c399a2](https://github.com/tari-project/tari/commit/38c399a2e5ee28878e0238e2b8e13c15f658ffbc) +* **mempool:** remove TODOs and other minor changes (#5498) ([a1f24417](https://github.com/tari-project/tari/commit/a1f244179390d9a4845bce96e3c6a506a59e4b16) +* mempool should use the correct version of the consensus constant (#5549) 
([46ab3ef0](https://github.com/tari-project/tari/commit/46ab3ef07e41b091b869ef59376d0709a24e7437)) +* mempool fetch_highest_priority_txs (#5551) ([f7f749c4](https://github.com/tari-project/tari/commit/f7f749c4c476f489f9e30afb87461780d1996834) +* remove optional timestamp verification bypass (#5552) ([b5a5bed2](https://github.com/tari-project/tari/commit/b5a5bed2c23c273d3787afa1c845f62badec1a46)) +* update code coverage approach (#5540) ([7a9830ed](https://github.com/tari-project/tari/commit/7a9830edb66b6be3edc40b84ae8a1a9c3f4ef525) +* use correct TOML field for console wallet network address (#5531) ([70763dde](https://github.com/tari-project/tari/commit/70763dde25c1569013e489a0798540fd66dfa571) +* llvm-tools installed correctly (#5534) ([4ab4b965](https://github.com/tari-project/tari/commit/4ab4b965e5f0556d508ec071a152deb5ad8ea8cc)) +* push test coverage even if some tests fail (#5533) ([053c748d](https://github.com/tari-project/tari/commit/053c748d3d7aee674bada24609612bde9ba1420e) +* **console-wallet:** fix possible subtract underflow panic in list (#5535) ([8d5e8e6e](https://github.com/tari-project/tari/commit/8d5e8e6eac45b11867cee6104c207f6559851405) +* **core:** disable covenants for all networks except igor and localnet (#5505) ([308f5299](https://github.com/tari-project/tari/commit/308f5299007a67df8fb9fe73763809264005e35c) +* add a not before proof (#5560) ([11f42fb0](https://github.com/tari-project/tari/commit/11f42fb0942da3bd64db8ad203b75c364dbe0926) +* borsh sized serialization should be fallible (#5537) ([53058ce2](https://github.com/tari-project/tari/commit/53058ce299cb89f118017ccec5e98a991a7fcbcc) +* add documentation to covenant crate (#5524) ([442d75b0](https://github.com/tari-project/tari/commit/442d75b09f439e4bc81919fc42eaf43846b2c8ca) +* covenants audit (#5526) ([dbb59758](https://github.com/tari-project/tari/commit/dbb59758a92cdf4483574dc6e7c719efa94eedfd) +* add validator mr to mining hash ([#5615](https://github.com/brianp/tari/issues/5615)) ([91db6fb](https://github.com/brianp/tari/commit/91db6fb3b9ee1998d186fba3bbb57c970d8e4c5c)) +* add-peer also dials the peer ([#5727](https://github.com/brianp/tari/issues/5727)) ([cc8573a](https://github.com/brianp/tari/commit/cc8573ae3ec69d748d3793f02136fd6772983850)) +* addition overflow when coinbase + fees is too high ([#5706](https://github.com/brianp/tari/issues/5706)) ([13993f1](https://github.com/brianp/tari/commit/13993f1763eee84f566d6aea83661eb868e47eff)) +* adds bans for horizon sync ([#5661](https://github.com/brianp/tari/issues/5661)) ([826473d](https://github.com/brianp/tari/commit/826473d2a96fc6c978e5ccdce38c052919514a37)) +* ban peers if they send a bad protobuf message ([#5693](https://github.com/brianp/tari/issues/5693)) ([58cbfe6](https://github.com/brianp/tari/commit/58cbfe677f7328d4c9f9c98b1ada1acb369a47ac)) +* better timeout for lagging ([#5705](https://github.com/brianp/tari/issues/5705)) ([5e8a3ec](https://github.com/brianp/tari/commit/5e8a3ecbc9a00cee823260d4a5e33b3e3a60bc9c)) +* check bytes remaining on monero blocks ([#5610](https://github.com/brianp/tari/issues/5610)) ([1087fa9](https://github.com/brianp/tari/commit/1087fa9d7846b1bd11431475cc8ca3fd9def8ec6)) +* **comms/dht:** limit number of peer claims and addresses for all sources ([#5702](https://github.com/brianp/tari/issues/5702)) ([88ed293](https://github.com/brianp/tari/commit/88ed2935f5094e669470f2c015d055f9c3286941)) +* **comms:** check multiple addresses for inbound liveness check ([#5611](https://github.com/brianp/tari/issues/5611)) 
([3937ae4](https://github.com/brianp/tari/commit/3937ae422f57f936ad3d2ead8b92ce4fa5adf855)) +* **comms:** dont overwrite ban-reason in add_peer ([#5720](https://github.com/brianp/tari/issues/5720)) ([3b9890b](https://github.com/brianp/tari/commit/3b9890ba5857cc8767be77a024d01bf4826e3956)) +* **comms:** greatly reduce timeouts for first byte and noise handshake ([#5728](https://github.com/brianp/tari/issues/5728)) ([47a3196](https://github.com/brianp/tari/commit/47a319616dde78c243b4558a51a7d81efc8393e1)) +* **comms:** only permit a single inbound messaging substream per peer ([#5731](https://github.com/brianp/tari/issues/5731)) ([c91a35f](https://github.com/brianp/tari/commit/c91a35f82557afd39c9b83f643876630bb4275c5)) +* **comms:** timeout and ban for bad behaviour in protocol negotation ([#5679](https://github.com/brianp/tari/issues/5679)) ([d03d0b5](https://github.com/brianp/tari/commit/d03d0b5fc58d4e284b1f6ce4554830fdbbb78efe)) +* **comms:** use noise XX handshake pattern for improved privacy ([#5696](https://github.com/brianp/tari/issues/5696)) ([d0ea406](https://github.com/brianp/tari/commit/d0ea406e57b8bbb65196c2e880671da2e51f2b62)) +* **core:** always pass the correct timestamp window to header validatior ([#5624](https://github.com/brianp/tari/issues/5624)) ([29700c3](https://github.com/brianp/tari/commit/29700c3d9aa4698742c0c9cd5e313fd3d0727626)) +* **dht:** add SAF bans ([#5711](https://github.com/brianp/tari/issues/5711)) ([594e03e](https://github.com/brianp/tari/commit/594e03eada389c1a131d5877f42f8c43b85a9fbe)) +* **dht:** limit peer sync and ban on server-caused errors ([#5714](https://github.com/brianp/tari/issues/5714)) ([b3f2dca](https://github.com/brianp/tari/commit/b3f2dcae88740abd1bd4c64f64d89010a13a214b)) +* duplicate tari header in monero coinbase ([#5604](https://github.com/brianp/tari/issues/5604)) ([f466840](https://github.com/brianp/tari/commit/f466840a24cd678aac82ae4eaa2661dca2567675)) +* error out the stx protocol if the sender sends unsupported data ([#5572](https://github.com/brianp/tari/issues/5572)) ([8a085cd](https://github.com/brianp/tari/commit/8a085cded40b95fb5d3136743a97e50874ee2903)) +* handle out of sync errors when returning mempool transactions ([#5701](https://github.com/brianp/tari/issues/5701)) ([b0337cf](https://github.com/brianp/tari/commit/b0337cfaac92939db968231cc368b56836c2cf7e)) +* handle target difficulty conversion failure ([#5710](https://github.com/brianp/tari/issues/5710)) ([431c35a](https://github.com/brianp/tari/commit/431c35ac5006d5cd265484e98a224b7f7e75703f)) +* header sync ([#5647](https://github.com/brianp/tari/issues/5647)) ([4583eef](https://github.com/brianp/tari/commit/4583eef444f4f71d6d702a9997566dad42a9fce4)) +* horizon sync ([#5724](https://github.com/brianp/tari/issues/5724)) ([660a5c1](https://github.com/brianp/tari/commit/660a5c1119f76ce30386860b27ed21316d9ace55)) +* **horizon_sync:** check for leftover unpruned outputs ([#5704](https://github.com/brianp/tari/issues/5704)) ([dc5cfce](https://github.com/brianp/tari/commit/dc5cfced6b81b8c7c036db920f7cbbf36d601789)) +* **horizon_sync:** check max number of kernels/utxos from peer ([#5703](https://github.com/brianp/tari/issues/5703)) ([5e4f3c2](https://github.com/brianp/tari/commit/5e4f3c20f0de1d0d7c525cdcfbe86e56b9e909f3)) +* **horizon_sync:** try sync with next next peer if current one fails ([#5699](https://github.com/brianp/tari/issues/5699)) ([a58ec1f](https://github.com/brianp/tari/commit/a58ec1f40fbc57e147e6fb5c21c6b2b5151150df)) +* limit monero hashes and force 
coinbase to be tx 0 ([#5602](https://github.com/brianp/tari/issues/5602)) ([2af1198](https://github.com/brianp/tari/commit/2af119824e3b21294c4545b18b2fb6a86bb96ea4)) +* make sure all needed libs are required for chatffi ([#5659](https://github.com/brianp/tari/issues/5659)) ([241ca67](https://github.com/brianp/tari/commit/241ca673ee5b3503198f3e662383ad0f6387313c)) +* memory overflow panic ([#5658](https://github.com/brianp/tari/issues/5658)) ([304e40f](https://github.com/brianp/tari/commit/304e40fb44a3dd9765c10147e1ee85344769c55a)) +* miner delay attack ([#5582](https://github.com/brianp/tari/issues/5582)) ([bece2d0](https://github.com/brianp/tari/commit/bece2d0bf82c757808723dba6ec3456bb8e23b2e)) +* minor fixes for multiple address support ([#5617](https://github.com/brianp/tari/issues/5617)) ([efa36eb](https://github.com/brianp/tari/commit/efa36eb7dc92905cc085359c35255678136a15b1)) +* monero fork attack ([#5603](https://github.com/brianp/tari/issues/5603)) ([9c81b4d](https://github.com/brianp/tari/commit/9c81b4d875aa7794226a97a4a90c9c0b3d6d4585)) +* only allow a monero header if it serializes back to the same data ([#5716](https://github.com/brianp/tari/issues/5716)) ([e70c752](https://github.com/brianp/tari/commit/e70c752d6014f0dd9d1a7aeda9a39bbd6dabc21b)) +* peer connection to stale nodes ([#5579](https://github.com/brianp/tari/issues/5579)) ([eebda00](https://github.com/brianp/tari/commit/eebda00bd28aae70813c644ff2b63925cc934ced)) +* potential u64 overflow panic ([#5688](https://github.com/brianp/tari/issues/5688)) ([f261b79](https://github.com/brianp/tari/commit/f261b7900f879ad991de42073094f8cb4443b8d2)) +* prevent access violation when running multiple vms at the same time ([#5734](https://github.com/brianp/tari/issues/5734)) ([18aead2](https://github.com/brianp/tari/commit/18aead232c2da7f6ec4eda152f8ce53e2601a92d)) +* remove potential u64 overflow panic ([#5686](https://github.com/brianp/tari/issues/5686)) ([90a8a21](https://github.com/brianp/tari/commit/90a8a21765f2c1a6930775ed4cd95fe8766b02d8)) +* remove tari prefix and only allow one mergemining tag ([#5722](https://github.com/brianp/tari/issues/5722)) ([3a7c227](https://github.com/brianp/tari/commit/3a7c227002f8bfacde2ab8081c79bfac435484ce)) +* remove timestamp from header in proto files ([#5667](https://github.com/brianp/tari/issues/5667)) ([403b0c6](https://github.com/brianp/tari/commit/403b0c62af9ed2f2eefc48e0feb5025d8c853ecc)) +* save dial result on error ([#5717](https://github.com/brianp/tari/issues/5717)) ([c66af69](https://github.com/brianp/tari/commit/c66af69e5ccb31d2fcaf9a8fa29d2e0b5470eeba)) +* sorted edge case ([#5590](https://github.com/brianp/tari/issues/5590)) ([f7b2193](https://github.com/brianp/tari/commit/f7b21930c7841e7a8801f4c37d1ee0e8111162bb)) +* sparse Merkle tree key querying ([#5566](https://github.com/brianp/tari/issues/5566)) ([623839f](https://github.com/brianp/tari/commit/623839f58116c0828bc5406adbd1dd1b68e7bb3d)) +* syncing from prune node ([#5733](https://github.com/brianp/tari/issues/5733)) ([166f469](https://github.com/brianp/tari/commit/166f469cd1122676ec95b88163ee97058cc28fdf)) +* **sync:** remove mem::take in all syncs ([#5721](https://github.com/brianp/tari/issues/5721)) ([a48e430](https://github.com/brianp/tari/commit/a48e430b6b5bc21c5998009738be1436e479f7ec)) +* **sync:** unify ban logic in all sync processes ([#5713](https://github.com/brianp/tari/issues/5713)) ([4b2b28b](https://github.com/brianp/tari/commit/4b2b28bf2390c400d547cdaa801ff967eb92ac38)) +* update peers seed for esme 
([#5573](https://github.com/brianp/tari/issues/5573)) ([0f6b750](https://github.com/brianp/tari/commit/0f6b7504bbfc902ffab89f1904dee237270c690b)) +* add lock height and kernel features checks on default transactions ([#5836](https://github.com/tari-project/tari/issues/5836)) ([1f87226](https://github.com/tari-project/tari/commit/1f87226722b12750424ab2f4861fe0475a67dfd6)) +* ban peer if it sends bad liveness data ([#5844](https://github.com/tari-project/tari/issues/5844)) ([eb40fc4](https://github.com/tari-project/tari/commit/eb40fc44cfc0605545ba9e831c8d27209a4db51f)) +* change truncate_from_bits to from_bits ([#5773](https://github.com/tari-project/tari/issues/5773)) ([fb18078](https://github.com/tari-project/tari/commit/fb18078d888b7c65601e8261d66fca366ffff28b)) +* chat ffi seed peers ([#5786](https://github.com/tari-project/tari/issues/5786)) ([c04996f](https://github.com/tari-project/tari/commit/c04996f01f3e5627acc376a27e7abcb61d7dda5c)) +* **chatffi:** return and read from ptrs ([#5827](https://github.com/tari-project/tari/issues/5827)) ([dd2eddb](https://github.com/tari-project/tari/commit/dd2eddbe9280870485974edd611e224ae585b76a)) +* **comms+dht:** mark peers as online inbound connection,join ([#5741](https://github.com/tari-project/tari/issues/5741)) ([e8413ea](https://github.com/tari-project/tari/commit/e8413ea364c0a17785b475ac57d74244b62a7375)) +* **diagrams:** missing quotes for messaging diagram ([#5750](https://github.com/tari-project/tari/issues/5750)) ([a8f6eb5](https://github.com/tari-project/tari/commit/a8f6eb5e48e6e823b96919bec87843300311caae)) +* **diagrams:** missing quotes for protocol negotiation diagram ([#5751](https://github.com/tari-project/tari/issues/5751)) ([45c20a3](https://github.com/tari-project/tari/commit/45c20a30b849b92e1f6fe402d7e7e657ccf9f663)) +* don't ban a peer for sending a banned peer ([#5843](https://github.com/tari-project/tari/issues/5843)) ([12f8a75](https://github.com/tari-project/tari/commit/12f8a75060e1d15fbeac589c568f7ee9e04eb900)) +* fix erroneous warning message ([#5846](https://github.com/tari-project/tari/issues/5846)) ([8afcd8b](https://github.com/tari-project/tari/commit/8afcd8b5545a433c92d3a47b4f85b4e89a5408b8)) +* get rid of possible 'expect' ([#5794](https://github.com/tari-project/tari/issues/5794)) ([467a8d4](https://github.com/tari-project/tari/commit/467a8d4f4493814f1102d6863fc844896e94a8ec)) +* grpc request overflows ([#5812](https://github.com/tari-project/tari/issues/5812)) ([36d72e8](https://github.com/tari-project/tari/commit/36d72e8b2239870550060fc9e0c183131ee3c2fa)) +* handle possible underflow in smt ([#5769](https://github.com/tari-project/tari/issues/5769)) ([558e6f2](https://github.com/tari-project/tari/commit/558e6f2bf7d00fb2c7c506b7000237aba928238c)) +* listing mode is synced ([#5830](https://github.com/tari-project/tari/issues/5830)) ([ff5a5d8](https://github.com/tari-project/tari/commit/ff5a5d82e3ddbe191bda8b8132590c2afb3282f2)) +* mempool panic ([#5814](https://github.com/tari-project/tari/issues/5814)) ([754fb16](https://github.com/tari-project/tari/commit/754fb16e4ae79bb8d712419f0f6bf59efbaf0ce1)) +* **p2p:** enable auto join when online ([#5738](https://github.com/tari-project/tari/issues/5738)) ([eb74bbb](https://github.com/tari-project/tari/commit/eb74bbba3746b78c3fd8e0ee5066f1d4d987af3e)) +* panic overflow ([#5819](https://github.com/tari-project/tari/issues/5819)) ([af31ba1](https://github.com/tari-project/tari/commit/af31ba1e6deb64a68ec74eac090fdcfc9e8a52ca)) +* possible exception in request_context 
([#5784](https://github.com/tari-project/tari/issues/5784)) ([6c8e2d3](https://github.com/tari-project/tari/commit/6c8e2d395799757e5a946fe01226f739d0706741)) +* potential index out of bounds ([#5775](https://github.com/tari-project/tari/issues/5775)) ([f17ac6b](https://github.com/tari-project/tari/commit/f17ac6b61edfe47dacf091969382c6b17e7bf214)) +* potential overflow ([#5759](https://github.com/tari-project/tari/issues/5759)) ([5c93e35](https://github.com/tari-project/tari/commit/5c93e35c785a7a19f8e6c762e3f1df8f8207877e)) +* potential overflow ([#5778](https://github.com/tari-project/tari/issues/5778)) ([1d1332d](https://github.com/tari-project/tari/commit/1d1332d21ba0db18e9f3a3c253963fc1735b8193)) +* potential sync stuck ([#5760](https://github.com/tari-project/tari/issues/5760)) ([c5ed816](https://github.com/tari-project/tari/commit/c5ed816c80eae43348593e636e4b56da98d8af6b)) +* recovery passphrase flow ([#5877](https://github.com/tari-project/tari/issues/5877)) ([4159b76](https://github.com/tari-project/tari/commit/4159b766669e682bb9593c4e7cd3ddb298a56e0b)) +* remove peer ([#5757](https://github.com/tari-project/tari/issues/5757)) ([4c48a26](https://github.com/tari-project/tari/commit/4c48a26f20d800b2098c18b723dfb83cb878f0ad)) +* remove statement from sparse Merkle tree proofs ([#5768](https://github.com/tari-project/tari/issues/5768)) ([d630d11](https://github.com/tari-project/tari/commit/d630d114f1866f24e729cda0f8cf19f298e7bd50)) +* stuck on sync ([#5739](https://github.com/tari-project/tari/issues/5739)) ([33b37a8](https://github.com/tari-project/tari/commit/33b37a8c37f3e1883ef3ebf27a8e18d4dd63fc92)) +* unwraps in rpc client ([#5770](https://github.com/tari-project/tari/issues/5770)) ([6f0d20a](https://github.com/tari-project/tari/commit/6f0d20aa30d3dcc23630d3a9650802f8c1ce3a61)) +* **proto:** remove proto bytes for std bytes ([#5835](https://github.com/tari-project/tari/issues/5835)) ([491ed83](https://github.com/tari-project/tari/commit/491ed83aaea166a6e60d40e76b8574625b56cf98)) +* **proto:** remove proto timestamp wrapper types ([#5833](https://github.com/tari-project/tari/issues/5833)) ([43b994e](https://github.com/tari-project/tari/commit/43b994e62378a9ed241842fc18f01d69231f089f)) +* upgrade bitflags crate ([#5831](https://github.com/tari-project/tari/issues/5831)) ([dae7dd9](https://github.com/tari-project/tari/commit/dae7dd9d1f2277b6192dc0ed7bea26b7d2d946ac)) +* lmdb flag set wrong on database ([#5916](https://github.com/tari-project/tari/issues/5916)) ([60efd35](https://github.com/tari-project/tari/commit/60efd353973a87b1e0cebc7246649a38b5731051)) +* **tariscript:** protect compare and check height from underflows ([#5872](https://github.com/tari-project/tari/issues/5872)) ([aa2ae10](https://github.com/tari-project/tari/commit/aa2ae1066818c1776bd268932fbd3be09f21bf0e)) +* display ([#5982](https://github.com/tari-project/tari/issues/5982)) ([8cce48c](https://github.com/tari-project/tari/commit/8cce48cd8bd9b6f780376030918972e993fc1ab7)) +* fix opcode signatures ([#5966](https://github.com/tari-project/tari/issues/5966)) ([dc26ca6](https://github.com/tari-project/tari/commit/dc26ca6aeeb4196d0496f2977027ac63a4324043)) +* fix the windows installer ([#5938](https://github.com/tari-project/tari/issues/5938)) ([3e65a28](https://github.com/tari-project/tari/commit/3e65a28c5e3729024d70e2b7f55910c8c808495c)) +* fix the windows installer auto build ([#5939](https://github.com/tari-project/tari/issues/5939)) 
([a138b78](https://github.com/tari-project/tari/commit/a138b7892d4b41a460b8dd8b9466f34e90f65469)) +* **shutdown:** is_triggered returns up-to-date value without first polling ([#5997](https://github.com/tari-project/tari/issues/5997)) ([49f2053](https://github.com/tari-project/tari/commit/49f20534ec808427d059cde6892adc5597f33391)) +* standardize gRPC authentication and mitigate DoS ([#5936](https://github.com/tari-project/tari/issues/5936)) ([623f127](https://github.com/tari-project/tari/commit/623f12768daf8329731249cf7e4c644e338d9700)) +* **tariscript:** multisig ordered signatures and pubkeys ([#5961](https://github.com/tari-project/tari/issues/5961)) ([14e334a](https://github.com/tari-project/tari/commit/14e334aff346aae8a081599488135c905c2c1f84)) +* update `ToRistrettoPoint` handling ([#5973](https://github.com/tari-project/tari/issues/5973)) ([12e84f4](https://github.com/tari-project/tari/commit/12e84f42ee1842875f72716833e96d0b84460c78)) +* new faucet for esmeralda ([#6001](https://github.com/tari-project/tari/issues/6001)) ([4eccc39](https://github.com/tari-project/tari/commit/4eccc392394b03e974b36538096f640d2b98d25d)) +* remove mutable mmr ([#5954](https://github.com/tari-project/tari/issues/5954)) ([0855583](https://github.com/tari-project/tari/commit/0855583c9fb138f7d1633c1829a8cf3f23048c49)) +* ups the min difficulty ([#5999](https://github.com/tari-project/tari/issues/5999)) ([fc1e555](https://github.com/tari-project/tari/commit/fc1e555edc56c9d01d7e9cb4d2c7cd0421616034)) +* **chat:** chat client possible panics ([#6015](https://github.com/tari-project/tari/issues/6015)) ([cf66c51](https://github.com/tari-project/tari/commit/cf66c51483f4b2744221fb652f3b32340d2ee693)) +* chat build ([#6026](https://github.com/tari-project/tari/issues/6026)) ([15793b7](https://github.com/tari-project/tari/commit/15793b7e4dfdcaaad6ec90e357348daf42300eab)) +* remove duplicate config settings ([#6029](https://github.com/tari-project/tari/issues/6029)) ([662af28](https://github.com/tari-project/tari/commit/662af28bf811c771cf0fdf9b583c1296a2283188)) + +## [0.49.0-rc.0](https://github.com/tari-project/tari/compare/v0.48.0-rc.0...v0.49.0-rc.0) (2023-04-12) + + +### âš  BREAKING CHANGES + +* **wallet:** use ECDH shard secret for burn mask with claim pubkey (#5238) +* **wallet:** ensure burn shared keys and hashes match dan layer (#5245) +* add claim public key to OutputFeatures (#5239) +* change signature construction to allow better HW support (#5282) +* move key manager service to key_manager (#5284) +* add igor faucet (#5281) +* reset dates for networks (#5283) +* add paging to utxo stream request (#5302) + +### Features + +* add necessary trait bounds to balanced merkle tree ([#5232](https://github.com/tari-project/tari/issues/5232)) ([3b971a3](https://github.com/tari-project/tari/commit/3b971a3b0e39be774a1a21c477222d95a0e1b242)) +* update tari-crypto to v0.16.8 ([#5236](https://github.com/tari-project/tari/issues/5236)) ([c9d355b](https://github.com/tari-project/tari/commit/c9d355baeea2d6087f72df8c2c1645ef2c06ce88)) +* **wallet:** use ECDH shard secret for burn mask with claim pubkey ([#5238](https://github.com/tari-project/tari/issues/5238)) ([78838bf](https://github.com/tari-project/tari/commit/78838bfc64839be0ba79d1d668d0c6fb2e72e69e)) +* add claim public key to OutputFeatures ([#5239](https://github.com/tari-project/tari/issues/5239)) ([3e7d82c](https://github.com/tari-project/tari/commit/3e7d82c440b162cc5a7e3e97b1fb18acdc6dd681)) +* reset esmeralda 
([#5247](https://github.com/tari-project/tari/issues/5247)) ([aa2a3ad](https://github.com/tari-project/tari/commit/aa2a3ad5910312642c8652996942993cf6b9df52)) +* added FFI function `wallet_get_network_and_version` [#5252](https://github.com/tari-project/tari/issues/5252) ([#5263](https://github.com/tari-project/tari/issues/5263)) ([4b09b59](https://github.com/tari-project/tari/commit/4b09b59ce0cbc7e5c270c4c06a671c2fcff18bfc)) +* change signature construction to allow better HW support ([#5282](https://github.com/tari-project/tari/issues/5282)) ([82d2dcb](https://github.com/tari-project/tari/commit/82d2dcb04ced94f05a0801c5cb97bbebc41ca3e0)) +* improved passphrase flow ([#5279](https://github.com/tari-project/tari/issues/5279)) ([ac21da6](https://github.com/tari-project/tari/commit/ac21da60abec25db14e7201a5f82e15e4f7f2fe0)) +* added auxiliary callback to push base node state changes [#5109](https://github.com/tari-project/tari/issues/5109) ([#5257](https://github.com/tari-project/tari/issues/5257)) ([b7f7d31](https://github.com/tari-project/tari/commit/b7f7d31fb634804ecf2f8ba1c39094163944f584)) +* move key manager service to key_manager ([#5284](https://github.com/tari-project/tari/issues/5284)) ([d50ed02](https://github.com/tari-project/tari/commit/d50ed02675dbca9294882e5bbe522b8fda00fb2a)) +* reset dates for networks ([#5283](https://github.com/tari-project/tari/issues/5283)) ([d6342a4](https://github.com/tari-project/tari/commit/d6342a4200cb7de469575d67129f9214535cf237)) +* add extended mask recovery ([#5301](https://github.com/tari-project/tari/issues/5301)) ([23d882e](https://github.com/tari-project/tari/commit/23d882eb783f3d94efbfdd928b3d87b2907bf2d7)) +* add network name to data path and --network flag to the miners ([#5291](https://github.com/tari-project/tari/issues/5291)) ([1f04beb](https://github.com/tari-project/tari/commit/1f04bebd4f6d14432aab923baeab17d1d6cc39bf)) +* add other code template types ([#5242](https://github.com/tari-project/tari/issues/5242)) ([93e5e85](https://github.com/tari-project/tari/commit/93e5e85cbc13be33bea40c7b8289d0ff344df08c)) +* add paging to utxo stream request ([#5302](https://github.com/tari-project/tari/issues/5302)) ([3540309](https://github.com/tari-project/tari/commit/3540309e29d450fc8cb48bc714fb780c1c107b81)) +* add wallet daemon config ([#5311](https://github.com/tari-project/tari/issues/5311)) ([30419cf](https://github.com/tari-project/tari/commit/30419cfcf198fb923ef431316f2915cbc80f1e3b)) +* define different network defaults for bins ([#5307](https://github.com/tari-project/tari/issues/5307)) ([2f5d498](https://github.com/tari-project/tari/commit/2f5d498d2130b5358fbf126c96a917ed98016955)) +* feature gates ([#5287](https://github.com/tari-project/tari/issues/5287)) ([72c19dc](https://github.com/tari-project/tari/commit/72c19dc130b0c7652cca422c9c4c2e08e5b8e555)) +* fix rpc transaction conversion ([#5304](https://github.com/tari-project/tari/issues/5304)) ([344040a](https://github.com/tari-project/tari/commit/344040ac7322bae5604aa9db48d4194c1b3779fa)) + +### Bug Fixes + ## [0.48.0](https://github.com/tari-project/tari/compare/v0.45.0...v0.48.0) (2023-04-12) diff --git a/clients/ffi_client/index.js b/clients/ffi_client/index.js index 7df6d3d98f..bfb30fdea1 100644 --- a/clients/ffi_client/index.js +++ b/clients/ffi_client/index.js @@ -200,7 +200,7 @@ try { let publicKey = lib.public_key_create(publicKeyByteVector, err); console.log("Set base node peer...", publicKeyHex); - lib.wallet_add_base_node_peer( + lib.wallet_set_base_node_peer( wallet, 
publicKey, "/onion3/2m2xnylrsqbaozsndkbmfisxxbwh2vgvs6oyfak2qah4snnxykrf7zad:18141", diff --git a/clients/ffi_client/lib/index.js b/clients/ffi_client/lib/index.js index 7696653141..941f9ba732 100644 --- a/clients/ffi_client/lib/index.js +++ b/clients/ffi_client/lib/index.js @@ -44,7 +44,7 @@ const libWallet = ffi.Library("./libminotari_wallet_ffi.dylib", { transportRef, ["string", u8ArrayPtr, u16, "string", "string", errPtr], ], - wallet_add_base_node_peer: [bool, [walletRef, u8ArrayPtr, "string", errPtr]], + wallet_set_base_node_peer: [bool, [walletRef, u8ArrayPtr, "string", errPtr]], wallet_create: [ walletRef, [ diff --git a/clients/ffi_client/package-lock.json b/clients/ffi_client/package-lock.json index 86709b835e..bf06cfb527 100644 --- a/clients/ffi_client/package-lock.json +++ b/clients/ffi_client/package-lock.json @@ -46,13 +46,21 @@ } }, "es5-ext": { - "version": "0.10.53", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", - "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", + "version": "0.10.63", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.63.tgz", + "integrity": "sha512-hUCZd2Byj/mNKjfP9jXrdVZ62B8KuA/VoK7X8nUh5qT+AxDmcbvZz041oDVZdbIN1qW6XY9VDNwzkvKnZvK2TQ==", "requires": { - "es6-iterator": "~2.0.3", - "es6-symbol": "~3.1.3", - "next-tick": "~1.0.0" + "es6-iterator": "^2.0.3", + "es6-symbol": "^3.1.3", + "esniff": "^2.0.1", + "next-tick": "^1.1.0" + }, + "dependencies": { + "next-tick": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", + "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" + } } }, "es6-iterator": { @@ -74,6 +82,33 @@ "ext": "^1.1.2" } }, + "esniff": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esniff/-/esniff-2.0.1.tgz", + "integrity": "sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==", + "requires": { + "d": "^1.0.1", + "es5-ext": "^0.10.62", + "event-emitter": "^0.3.5", + "type": "^2.7.2" + }, + "dependencies": { + "type": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/type/-/type-2.7.2.tgz", + "integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==" + } + } + }, + "event-emitter": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", + "integrity": "sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==", + "requires": { + "d": "1", + "es5-ext": "~0.10.14" + } + }, "ext": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", @@ -120,11 +155,6 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, - "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" - }, "node-addon-api": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", @@ -189,60 +219,6 @@ } } }, - "ref-struct-napi": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz", - "integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==", - 
"requires": { - "debug": "2", - "ref-napi": "^1.4.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node-addon-api": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz", - "integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==" - }, - "ref-napi": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz", - "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==", - "requires": { - "debug": "^3.1.0", - "node-addon-api": "^2.0.0", - "node-gyp-build": "^4.2.1" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "requires": { - "ms": "^2.1.1" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - } - } - } - } - }, "type": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", diff --git a/clients/ffi_client/recovery.js b/clients/ffi_client/recovery.js index 199272d394..02875dea0a 100644 --- a/clients/ffi_client/recovery.js +++ b/clients/ffi_client/recovery.js @@ -186,7 +186,7 @@ try { let publicKey = lib.public_key_create(publicKeyByteVector, err); console.log("Set base node peer...", publicKeyHex); - lib.wallet_add_base_node_peer( + lib.wallet_set_base_node_peer( wallet, publicKey, "/onion3/2m2xnylrsqbaozsndkbmfisxxbwh2vgvs6oyfak2qah4snnxykrf7zad:18141", diff --git a/clients/nodejs/base_node_grpc_client/src/index.test.js b/clients/nodejs/base_node_grpc_client/src/index.test.js index 632d2bb7d3..36c82fadf3 100644 --- a/clients/nodejs/base_node_grpc_client/src/index.test.js +++ b/clients/nodejs/base_node_grpc_client/src/index.test.js @@ -20,5 +20,5 @@ test("getTipInfo", async () => { const response = await baseNode.getTipInfo(); expect(response.metadata).toBeDefined(); const metadata = response.metadata; - expect(metadata.height_of_longest_chain).toMatch(/\d+/); + expect(metadata.best_block_height).toMatch(/\d+/); }); diff --git a/common/Cargo.toml b/common/Cargo.toml index 200a87910e..71629ba30c 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [features] @@ -15,14 +15,15 @@ static-application-info = ["git2"] [dependencies] tari_crypto = { version = "0.20" } +tari_features = { path = "./tari_features", version = "1.0.0-pre.11a"} anyhow = "1.0.53" blake2 = "0.10" -config = { version = "0.13.0", default_features = false, features = ["toml"] } +config = { version = "0.14.0", default_features = false, features = ["toml"] } dirs-next = "1.0.2" git2 = { version = "0.18", default_features = false, optional = true } log = 
"0.4.8" -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format"] } +log4rs = { version = "1.3.0", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format"] } multiaddr = { version = "0.14.0" } path-clean = "0.1.0" prost-build = { version = "0.11.9", optional = true } @@ -40,4 +41,4 @@ tari_test_utils = { path = "../infrastructure/test_utils"} toml = "0.5.8" [build-dependencies] -tari_features = { path = "./tari_features"} +tari_features = { path = "./tari_features", version = "1.0.0-pre.11a"} diff --git a/common/config/presets/c_base_node_b_non_mining_deny_methods.toml b/common/config/presets/c_base_node_b_mining_allow_methods.toml similarity index 60% rename from common/config/presets/c_base_node_b_non_mining_deny_methods.toml rename to common/config/presets/c_base_node_b_mining_allow_methods.toml index 0dfcc8d0f6..085017b383 100644 --- a/common/config/presets/c_base_node_b_non_mining_deny_methods.toml +++ b/common/config/presets/c_base_node_b_mining_allow_methods.toml @@ -1,7 +1,8 @@ - [base_node] +#mining_enabled = false +#second_layer_grpc_enabled = false # Set to false to disable the base node GRPC server (default = true) -grpc_enabled = false +grpc_enabled = true # The socket to expose for the gRPC base node server (default = "/ip4/127.0.0.1/tcp/18142") #grpc_address = "/ip4/127.0.0.1/tcp/18142" @@ -12,26 +13,28 @@ grpc_enabled = false # Use gRPC over TLS (default = false) #grpc_tls_enabled = false -# Uncomment all gRPC server methods that should be denied default (only active when `grpc_enabled = true`) -grpc_server_deny_methods = [ +# Uncomment all gRPC server methods that should be allowed (only active when `grpc_enabled = true`) +grpc_server_allow_methods = [ "get_version", - "check_for_updates", - "get_sync_info", - "get_sync_progress", + #"check_for_updates", + #"get_sync_info", + #"get_sync_progress", "get_tip_info", - "identify", - "get_network_status", + #"identify", + #"get_network_status", "list_headers", - "get_header_by_hash", - "get_blocks", - "get_block_timing", - "get_constants", - "get_block_size", - "get_block_fees", + #"get_header_by_hash", + #"get_blocks", + #"get_block_timing", + #"get_constants", + #"get_block_size", + #"get_block_fees", "get_tokens_in_circulation", "get_network_difficulty", "get_new_block_template", "get_new_block", + "get_new_block_with_coinbases", + "get_new_block_template_with_coinbases", "get_new_block_blob", "submit_block", "submit_block_blob", @@ -39,8 +42,8 @@ grpc_server_deny_methods = [ "search_kernels", "search_utxos", "fetch_matching_utxos", - "get_peers", - "get_mempool_transactions", + #"get_peers", + #"get_mempool_transactions", "transaction_state", "list_connected_peers", "get_mempool_stats", diff --git a/common/config/presets/c_base_node_b_mining_deny_methods.toml b/common/config/presets/c_base_node_b_non_mining_allow_methods.toml similarity index 61% rename from common/config/presets/c_base_node_b_mining_deny_methods.toml rename to common/config/presets/c_base_node_b_non_mining_allow_methods.toml index f0f1d5fcf2..bf27e6186c 100644 --- a/common/config/presets/c_base_node_b_mining_deny_methods.toml +++ b/common/config/presets/c_base_node_b_non_mining_allow_methods.toml @@ -1,7 +1,8 @@ - [base_node] +#mining_enabled = false +#second_layer_grpc_enabled = false # Set to false to disable the base node GRPC server (default = true) -grpc_enabled = true +grpc_enabled = false # The socket to expose for the 
gRPC base node server (default = "/ip4/127.0.0.1/tcp/18142") #grpc_address = "/ip4/127.0.0.1/tcp/18142" @@ -12,25 +13,27 @@ grpc_enabled = true # Use gRPC over TLS (default = false) #grpc_tls_enabled = false -# Uncomment all gRPC server methods that should be denied default (only active when `grpc_enabled = true`) -grpc_server_deny_methods = [ +# Uncomment all gRPC server methods that should be allowed (only active when `grpc_enabled = true`) +grpc_server_allow_methods = [ "get_version", - "check_for_updates", - "get_sync_info", - "get_sync_progress", + #"check_for_updates", + #"get_sync_info", + #"get_sync_progress", #"get_tip_info", - "identify", - "get_network_status", + #"identify", + #"get_network_status", #"list_headers", - "get_header_by_hash", - "get_blocks", - "get_block_timing", - "get_constants", - "get_block_size", - "get_block_fees", + #"get_header_by_hash", + #"get_blocks", + #"get_block_timing", + #"get_constants", + #"get_block_size", + #"get_block_fees", #"get_tokens_in_circulation", #"get_network_difficulty", #"get_new_block_template", + #"get_new_block_with_coinbases", + #"get_new_block_template_with_coinbases", #"get_new_block", #"get_new_block_blob", #"submit_block", @@ -39,8 +42,8 @@ grpc_server_deny_methods = [ #"search_kernels", #"search_utxos", #"fetch_matching_utxos", - "get_peers", - "get_mempool_transactions", + #"get_peers", + #"get_mempool_transactions", #"transaction_state", #"list_connected_peers", #"get_mempool_stats", diff --git a/common/config/presets/d_console_wallet.toml b/common/config/presets/d_console_wallet.toml index 176cca2c85..d54d355e8d 100644 --- a/common/config/presets/d_console_wallet.toml +++ b/common/config/presets/d_console_wallet.toml @@ -95,6 +95,11 @@ # An example script is available here: applications/minotari_console_wallet/src/notifier/notify_example.sh #notify_file = "/path/to/script" +# The cool down period between balance enquiry checks in seconds; requests faster than this will be ignored. +# For specialized wallets processing many batch transactions this setting could be increased to 60 s to retain +# responsiveness of the wallet with slightly delayed balance updates (default = 5): +#balance_enquiry_cooldown_period = 5 + [wallet.transactions] # This is the timeout period that will be used for base node broadcast monitoring tasks (default = 30) broadcast_monitoring_timeout = 180 diff --git a/common/config/presets/g_miner.toml b/common/config/presets/g_miner.toml index 35b58500f9..45e473b9c9 100644 --- a/common/config/presets/g_miner.toml +++ b/common/config/presets/g_miner.toml @@ -42,11 +42,6 @@ # Base node reconnect timeout after any GRPC or miner error (default: 10 s) #wait_timeout_on_error = 10 -# The extra data to store in the coinbase, usually some data about the mining pool. -# Note that this data is publicly readable, but it is suggested you populate it so that -# pool dominance can be seen before any one party has more than 51%. (default = "minotari_miner") -#coinbase_extra = "minotari_miner" - # The Tari wallet address (valid address in hex) where the mining funds will be sent to - must be assigned # e.g. 
"78e724f466d202abdee0f23c261289074e4a2fc9eb61e83e0179eead76ce2d3f17" #wallet_payment_address = "YOUR_WALLET_TARI_ADDRESS" diff --git a/common/src/configuration/error.rs b/common/src/configuration/error.rs index 740698874c..b347477a0d 100644 --- a/common/src/configuration/error.rs +++ b/common/src/configuration/error.rs @@ -5,6 +5,8 @@ use std::fmt; use structopt::clap::Error as ClapError; +use crate::network_check::NetworkCheckError; + #[derive(Debug)] pub struct ConfigError { pub(crate) cause: &'static str, @@ -17,6 +19,15 @@ impl ConfigError { } } +impl From for ConfigError { + fn from(err: NetworkCheckError) -> Self { + Self { + cause: "Failed to set the network", + source: Some(err.to_string()), + } + } +} + impl std::error::Error for ConfigError {} impl fmt::Display for ConfigError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/common/src/configuration/mod.rs b/common/src/configuration/mod.rs index 44f30f90c8..bcb57ab324 100644 --- a/common/src/configuration/mod.rs +++ b/common/src/configuration/mod.rs @@ -69,13 +69,13 @@ pub fn socket_or_multi(addr: &str) -> Result { /// Implement this trait to specify custom configuration overrides for a network when loading the config pub trait ConfigOverrideProvider { - fn get_config_property_overrides(&self, default_network: Network) -> Vec<(String, String)>; + fn get_config_property_overrides(&self, network: &mut Network) -> Vec<(String, String)>; } pub struct NoConfigOverrides; impl ConfigOverrideProvider for NoConfigOverrides { - fn get_config_property_overrides(&self, _default_network: Network) -> Vec<(String, String)> { + fn get_config_property_overrides(&self, _network: &mut Network) -> Vec<(String, String)> { Vec::new() } } diff --git a/common/src/configuration/network.rs b/common/src/configuration/network.rs index a78aa7d4a1..5c40470fd1 100644 --- a/common/src/configuration/network.rs +++ b/common/src/configuration/network.rs @@ -49,10 +49,16 @@ pub enum Network { } impl Network { - pub fn get_current_or_default() -> Self { + pub fn get_current_or_user_setting_or_default() -> Self { match CURRENT_NETWORK.get() { Some(&network) => network, - None => Network::default(), + None => { + // Check to see if the network has been set by the environment, otherwise use the default + match std::env::var("TARI_NETWORK") { + Ok(network) => Network::from_str(network.as_str()).unwrap_or(Network::default()), + Err(_) => Network::default(), + } + }, } } @@ -80,17 +86,20 @@ impl Network { /// The default network for all applications impl Default for Network { - #[cfg(tari_network_mainnet)] + #[cfg(tari_target_network_mainnet)] fn default() -> Self { - Network::StageNet + match std::env::var("TARI_NETWORK") { + Ok(network) => Network::from_str(network.as_str()).unwrap_or(Network::StageNet), + Err(_) => Network::StageNet, + } } - #[cfg(tari_network_nextnet)] + #[cfg(tari_target_network_nextnet)] fn default() -> Self { Network::NextNet } - #[cfg(all(not(tari_network_mainnet), not(tari_network_nextnet)))] + #[cfg(not(any(tari_target_network_mainnet, tari_target_network_nextnet)))] fn default() -> Self { Network::Esmeralda } @@ -191,6 +200,11 @@ mod test { #[test] fn network_default() { let network = Network::default(); + #[cfg(tari_target_network_mainnet)] + assert!(matches!(network, Network::MainNet | Network::StageNet)); + #[cfg(tari_target_network_nextnet)] + assert_eq!(network, Network::NextNet); + #[cfg(not(any(tari_target_network_mainnet, tari_target_network_nextnet)))] assert_eq!(network, Network::Esmeralda); } diff --git 
diff --git a/common/src/configuration/utils.rs b/common/src/configuration/utils.rs
index 4985b23c6b..cacd4cc0a6 100644
--- a/common/src/configuration/utils.rs
+++ b/common/src/configuration/utils.rs
@@ -14,6 +14,7 @@ use serde::{
 use crate::{
     configuration::{bootstrap::prompt, ConfigOverrideProvider, Network},
+    network_check::set_network_if_choice_valid,
     ConfigError,
     LOG_TARGET,
 };
@@ -65,7 +66,7 @@ pub fn load_configuration_with_overrides<P: AsRef<Path>, TOverride: ConfigOverrideProvider>(
         .build()
         .map_err(|ce| ConfigError::new("Could not build config", Some(ce.to_string())))?;
 
-    let network = match cfg.get_string("network") {
+    let mut network = match cfg.get_string("network") {
         Ok(network) => {
             Network::from_str(&network).map_err(|e| ConfigError::new("Invalid network", Some(e.to_string())))?
         },
@@ -82,7 +83,11 @@
     };
     info!(target: LOG_TARGET, "Configuration file loaded.");
 
-    let overrides = overrides.get_config_property_overrides(network);
+    let overrides = overrides.get_config_property_overrides(&mut network);
+    // Set the static network variable according to the user chosen network (for use with
+    // `get_current_or_user_setting_or_default()`) -
+    set_network_if_choice_valid(network)?;
+
     if overrides.is_empty() {
         return Ok(cfg);
     }
@@ -114,10 +119,10 @@ pub fn prompt_default_config() -> [&'static str; 12] {
 /// Returns the default configuration file template in parts from the embedded presets. If use_mining_config is true,
 /// the base node configuration that enables mining is returned, otherwise the non-mining configuration is returned.
 pub fn get_default_config(use_mining_config: bool) -> [&'static str; 12] {
-    let base_node_deny_methods = if use_mining_config {
-        include_str!("../../config/presets/c_base_node_b_mining_deny_methods.toml")
+    let base_node_allow_methods = if use_mining_config {
+        include_str!("../../config/presets/c_base_node_b_mining_allow_methods.toml")
     } else {
-        include_str!("../../config/presets/c_base_node_b_non_mining_deny_methods.toml")
+        include_str!("../../config/presets/c_base_node_b_non_mining_allow_methods.toml")
     };
 
     let common = include_str!("../../config/presets/a_common.toml");
@@ -125,7 +130,7 @@
         common,
         include_str!("../../config/presets/b_peer_seeds.toml"),
         include_str!("../../config/presets/c_base_node_a.toml"),
-        base_node_deny_methods,
+        base_node_allow_methods,
         include_str!("../../config/presets/c_base_node_c.toml"),
         include_str!("../../config/presets/d_console_wallet.toml"),
         include_str!("../../config/presets/g_miner.toml"),
diff --git a/common/src/lib.rs b/common/src/lib.rs
index 9386df0a41..8df6bddc02 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -53,6 +53,7 @@
 #[cfg(any(feature = "build", feature = "static-application-info"))]
 pub mod build;
 pub mod exit_codes;
+pub mod network_check;
 #[macro_use]
 mod logging;
 pub mod configuration;
diff --git a/applications/minotari_app_utilities/src/network_check.rs b/common/src/network_check.rs
similarity index 96%
rename from applications/minotari_app_utilities/src/network_check.rs
rename to common/src/network_check.rs
index ab742bb7cd..a4465f7edb 100644
--- a/applications/minotari_app_utilities/src/network_check.rs
+++ b/common/src/network_check.rs
@@ -20,12 +20,13 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-use tari_common::{
+use tari_features::resolver::Target;
+use thiserror::Error;
+
+use crate::{
     configuration::Network,
     exit_codes::{ExitCode, ExitError},
 };
-use tari_features::resolver::Target;
-use thiserror::Error;
 
 #[derive(Debug, Error)]
 pub enum NetworkCheckError {
@@ -48,13 +49,13 @@ impl From<NetworkCheckError> for ExitError {
     }
 }
 
-#[cfg(tari_network_mainnet)]
+#[cfg(tari_target_network_mainnet)]
 pub const TARGET_NETWORK: Target = Target::MainNet;
-#[cfg(tari_network_nextnet)]
+#[cfg(tari_target_network_nextnet)]
 pub const TARGET_NETWORK: Target = Target::NextNet;
-#[cfg(all(not(tari_network_mainnet), not(tari_network_nextnet)))]
+#[cfg(all(not(tari_target_network_mainnet), not(tari_target_network_nextnet)))]
 pub const TARGET_NETWORK: Target = Target::TestNet;
 
 pub fn is_network_choice_valid(network: Network) -> Result {
diff --git a/common/tari_features/Cargo.toml b/common/tari_features/Cargo.toml
index e918ea5142..eed257cad4 100644
--- a/common/tari_features/Cargo.toml
+++ b/common/tari_features/Cargo.toml
@@ -6,11 +6,11 @@ repository = "https://github.com/tari-project/tari"
 homepage = "https://tari.com"
 readme = "README.md"
 license = "BSD-3-Clause"
-version = "1.0.0-pre.5"
+version = "1.0.0-pre.11a"
 edition = "2018"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 # So you're thinking about adding a dependency here?
 # This crate is utilized in the compilation of _most_ of our other crates. You're probably about
-# to create a cyclic depedency. Please think hard whether this change is actually required.
\ No newline at end of file
+# to create a cyclic dependency. Please think hard whether this change is actually required.
diff --git a/common/tari_features/README.md b/common/tari_features/README.md
new file mode 100644
index 0000000000..3b0c92dd3d
--- /dev/null
+++ b/common/tari_features/README.md
@@ -0,0 +1,107 @@
+# Feature gating
+
+## Step 1: define features
+
+In `build.rs`, we define our features.
+
+Features have a:
+* name - will also be the attribute name you use in code
+* description - a short description of the feature
+* issue tracking number on GitHub
+* the current status
+
+#### Status
+
+* New: New feature, may not even be working or compiling. Will be present on `testnet`
+* Testing: Feature is undergoing testing on `nextnet`. Does not exist on `mainnet` or `stagenet`.
+* Active: Feature is live and gate has been removed. Will be running on all networks.
+* Removed: Feature has been cancelled. Cannot be invoked anywhere.
+
+In `build.rs`, we maintain the list of feature flags. For example:
+
+```rust
+const FEATURE_LIST: [Feature; 4] = [
+    Feature::new("add_pair", "Allows you to add two numbers together", Some(123), Status::New),
+    Feature::new("about_to_launch", "Live in NextNet. If we stabilise, will go to mainnet", Some(123), Status::Testing),
+    Feature::new("live_feature", "This feature has been stabilised and is live!", Some(150), Status::Active),
+    Feature::new("old_feature", "We decided not to go with this feature", Some(145), Status::Removed),
+];
+```
+
+## Step 2: Demarcate feature flag code
+
+In your code, you can now use any of the flags as attributes to mark code belonging to a feature.
+
+Example
+
+```rust
+#[cfg(tari_feature_add_pair)]
+use add_pair::add_pair;
+
+fn main() {
+    println!("Hello world!");
+    #[cfg(tari_feature_add_pair)]
+    println!("40 + 2 = {}", add_pair(40, 2));
+    println!("foo={}", foo());
+    println!("Bye, world!");
+}
+
+#[cfg(tari_feature_add_pair)]
+fn foo() -> usize {
+    1
+}
+
+#[cfg(not(tari_feature_add_pair))]
+fn foo() -> usize {
+    2
+}
+```
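As an aside (not part of the README added above): the same `tari_feature_*` flags also work in expression position via the `cfg!` macro, which avoids duplicating whole functions when only a boolean is needed. The `pair_enabled` helper below is purely illustrative:

```rust
// cfg! evaluates the custom flag to a compile-time boolean, so simple
// branches don't need two #[cfg(...)]-gated copies of a function.
fn pair_enabled() -> bool {
    cfg!(tari_feature_add_pair)
}
```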
+
+## Step 3: Specify the target network when building
+
+This PoC uses the `TARI_NETWORK` envar to specify the target network, but in principle, we can also read the `Cargo.toml`
+file, or compiler flags.
+
+`$ TARI_NETWORK=dibbler cargo run -vv`
+
+Filtered output:
+
+```text
+Building for Dibbler
+These features are ACTIVE on mainnet (no special code handling is done)
+live_feature. This feature has been stabilised and is live!. Tracking issue: https://github.com/tari-project/tari/issues/150
+
+These features are DEPRECATED and will never be compiled
+old_feature. We decided not to go with this feature. Tracking issue: https://github.com/tari-project/tari/issues/145
+
+** Activating add_pair. Allows you to add two numbers together. Tracking issue: https://github.com/tari-project/tari/issues/123 **
+** Activating about_to_launch. Live in NextNet. If we stabilise, will go to mainnet. Tracking issue: https://github.com/tari-project/tari/issues/123 **
+
+Finished dev [unoptimized + debuginfo] target(s) in 7.44s
+     Running `target/debug/feature_gates`
+Hello world!
+40 + 2 = 42
+foo=1
+Bye, world!
+```
+
+Or compiling for MainNet:
+
+`$ TARI_NETWORK=mainnet cargo run -vv`
+
+Filtered output:
+
+```text
+Building for MainNet
+
+These features are ACTIVE on mainnet (no special code handling is done)
+live_feature. This feature has been stabilised and is live!. Tracking issue: https://github.com/tari-project/tari/issues/150
+
+These features are DEPRECATED and will never be compiled
+old_feature. We decided not to go with this feature. Tracking issue: https://github.com/tari-project/tari/issues/145
+    Finished dev [unoptimized + debuginfo] target(s) in 6.15s
+     Running `target/debug/feature_gates`
+Hello world!
+foo=2
+Bye, world!
+```
diff --git a/common/tari_features/src/resolver.rs b/common/tari_features/src/resolver.rs
index 3edb0edfc7..ba48fc916c 100644
--- a/common/tari_features/src/resolver.rs
+++ b/common/tari_features/src/resolver.rs
@@ -64,10 +64,10 @@ impl Display for Target {
 // Identify the target network by
 // 1. Checking whether --config tari-network=xxx was passed in as a config flag to cargo (or from Cargo.toml)
 // 2. Checking the environment variable TARI_NETWORK is set
-// 3. default to mainnet
+// 3.
default to testnet (should be mainnet after launch) pub fn identify_target() -> Target { - check_envar("CARGO_CFG_TARI_NETWORK") - .or_else(|| check_envar("TARI_NETWORK")) + check_envar("CARGO_CFG_TARI_TARGET_NETWORK") + .or_else(|| check_envar("TARI_TARGET_NETWORK")) .unwrap_or(Target::TestNet) } @@ -79,19 +79,21 @@ pub fn check_envar(envar: &str) -> Option<Target> { } pub fn list_active_features() { - println!("These features are ACTIVE on mainnet (no special code handling is done)"); + println!("tari_feature:These features are ACTIVE on mainnet (no special code handling is done)"); FEATURE_LIST .iter() .filter(|f| f.is_active()) - .for_each(|f| println!("{}", f)); + .for_each(|f| println!("tari_feature:{f}")); + println!("tari_feature:End of ACTIVE feature list"); } pub fn list_removed_features() { - println!("These features are DEPRECATED and will never be compiled"); + println!("tari_feature:These features are DEPRECATED and will never be compiled"); FEATURE_LIST .iter() .filter(|f| f.was_removed()) - .for_each(|f| println!("{}", f)); + .for_each(|f| println!("tari_feature:{f}")); + println!("tari_feature:End of DEPRECATED feature list"); } pub fn resolve_features(target: Target) -> Result<(), String> { @@ -110,17 +112,17 @@ pub fn resolve_features(target: Target) -> Result<(), String> { } pub fn activate_feature(feature: &Feature) { - println!("** Activating {} **", feature); + println!("tari_feature:** Activating {feature} **"); println!("cargo:rustc-cfg={}", feature.attr_name()); } pub fn build_features() { // Make sure to rebuild when the network changes - println!("cargo:rerun-if-env-changed=TARI_NETWORK"); + println!("cargo:rerun-if-env-changed=TARI_TARGET_NETWORK"); let target = identify_target(); - println!("cargo:rustc-cfg=tari_network_{}", target.as_key_str()); - println!("Building for {}", target); + println!("cargo:rustc-cfg=tari_target_network_{}", target.as_key_str()); + println!("tari_feature:Building for {target}"); list_active_features(); list_removed_features(); if let Err(e) = resolve_features(target) { diff --git a/common_sqlite/Cargo.toml b/common_sqlite/Cargo.toml index 7dfa030010..7f441ee3e2 100644 --- a/common_sqlite/Cargo.toml +++ b/common_sqlite/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_common_sqlite" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet library" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -16,7 +16,7 @@ diesel_migrations = "2.0.0" log = "0.4.6" serde = "1.0.90" thiserror = "1.0.26" -tokio = { version = "1.23", features = ["sync", "macros"] } +tokio = { version = "1.36", features = ["sync", "macros", "rt"] } [dev-dependencies] tari_test_utils = { path = "../infrastructure/test_utils" } diff --git a/common_sqlite/README.md b/common_sqlite/README.md new file mode 100644 index 0000000000..878acbacb7 --- /dev/null +++ b/common_sqlite/README.md @@ -0,0 +1,5 @@ +# tari_common_sqlite + +Implementation of Tari's SQLite wrapper + +This crate is part of the [Tari Cryptocurrency](https://tari.com) project. 
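The resolver above is consumed from each crate's build script: `build_features()` re-runs when `TARI_TARGET_NETWORK` changes, emits a `cargo:rustc-cfg=tari_target_network_<net>` line for the resolved target, and one cfg per activated feature. A minimal sketch of a consuming `build.rs`, using only the `tari_features` API shown in this diff:

```rust
// build.rs (sketch): resolve the target network and activate feature cfgs.
use tari_features::resolver::build_features;

fn main() {
    // Prints `cargo:rerun-if-env-changed=TARI_TARGET_NETWORK`, then
    // `cargo:rustc-cfg=tari_target_network_<net>` for the resolved target and a
    // `cargo:rustc-cfg=tari_feature_<name>` line for each activated feature.
    build_features();
}
```

Code that gated on the old `tari_network_*` cfgs must move to the `tari_target_network_*` form, as `network_check.rs` does above.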
diff --git a/comms/core/Cargo.toml b/comms/core/Cargo.toml index 4eb7c1dcbf..2f729659ac 100644 --- a/comms/core/Cargo.toml +++ b/comms/core/Cargo.toml @@ -6,14 +6,14 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] tari_crypto = { version = "0.20" } -tari_metrics = { path = "../../infrastructure/metrics", optional = true } -tari_storage = { path = "../../infrastructure/storage" } -tari_shutdown = { path = "../../infrastructure/shutdown" } +tari_metrics = { path = "../../infrastructure/metrics", optional = true, version = "1.0.0-pre.11a" } +tari_storage = { path = "../../infrastructure/storage", version = "1.0.0-pre.11a" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } tari_utilities = { version = "0.7" } anyhow = "1.0.53" @@ -39,9 +39,9 @@ rand = "0.8" serde = "1.0.119" serde_derive = "1.0.119" sha3 = "0.10" -snow = { version = "0.9.4", features = ["default-resolver"] } +snow = { version = "0.9.5", features = ["default-resolver"] } thiserror = "1.0.26" -tokio = { version = "1.23", features = ["rt-multi-thread", "time", "sync", "signal", "net", "macros", "io-util"] } +tokio = { version = "1.36", features = ["rt-multi-thread", "time", "sync", "signal", "net", "macros", "io-util"] } tokio-stream = { version = "0.1.9", features = ["sync"] } tokio-util = { version = "0.6.7", features = ["codec", "compat"] } tower = {version = "0.4", features = ["util"]} @@ -58,7 +58,7 @@ serde_json = "1.0.39" tempfile = "3.1.0" [build-dependencies] -tari_common = { path = "../../common", features = ["build"] } +tari_common = { path = "../../common", features = ["build"], version = "1.0.0-pre.11a" } [features] c_integration = [] diff --git a/comms/core/examples/stress/service.rs b/comms/core/examples/stress/service.rs index 7880c07519..2199638f4b 100644 --- a/comms/core/examples/stress/service.rs +++ b/comms/core/examples/stress/service.rs @@ -63,10 +63,9 @@ pub fn start_service( let (request_tx, request_rx) = mpsc::channel(1); println!( - "Node credentials are {}::{:?} (local_listening_addr='{}')", + "Node credentials are {}::{:?}", node_identity.public_key().to_hex(), node_identity.public_addresses(), - comms_node.listening_address(), ); let service = StressTestService::new( diff --git a/comms/core/examples/stress_test.rs b/comms/core/examples/stress_test.rs index a101198b9e..b39cc07d1a 100644 --- a/comms/core/examples/stress_test.rs +++ b/comms/core/examples/stress_test.rs @@ -95,7 +95,7 @@ async fn run() -> Result<(), Error> { temp_dir.as_ref(), public_ip, port, - tor_identity, + tor_identity.clone(), is_tcp, shutdown.to_signal(), ) @@ -105,7 +105,7 @@ async fn run() -> Result<(), Error> { } if !is_tcp { if let Some(tor_identity_path) = tor_identity_path.as_ref() { - save_json(comms_node.hidden_service().unwrap().tor_identity(), tor_identity_path)?; + save_json(&tor_identity.unwrap(), tor_identity_path)?; } } diff --git a/comms/core/examples/tor.rs b/comms/core/examples/tor.rs index ac33ee50c7..cf3b6ef1d9 100644 --- a/comms/core/examples/tor.rs +++ b/comms/core/examples/tor.rs @@ -87,16 +87,14 @@ async fn run() -> Result<(), Error> { println!("Comms nodes started!"); println!( - "Node 1 is '{}' with address '{:?}' (local_listening_addr='{}')", + "Node 1 is '{}' with address '{:?}'", node_identity1.node_id().short_str(), node_identity1.public_addresses(), - comms_node1.listening_address(), ); println!( - 
"Node 2 is '{}' with address '{:?}' (local_listening_addr='{}')", + "Node 2 is '{}' with address '{:?}')", node_identity2.node_id().short_str(), node_identity2.public_addresses(), - comms_node2.listening_address(), ); // Let's add node 2 as a peer to node 1 diff --git a/comms/core/src/builder/comms_node.rs b/comms/core/src/builder/comms_node.rs index 649497c2c7..b9bd002a98 100644 --- a/comms/core/src/builder/comms_node.rs +++ b/comms/core/src/builder/comms_node.rs @@ -23,7 +23,6 @@ use std::{iter, sync::Arc, time::Duration}; use log::*; -use multiaddr::{multiaddr, Protocol}; use tari_shutdown::ShutdownSignal; use tokio::{ io::{AsyncRead, AsyncWrite}, @@ -37,7 +36,6 @@ use crate::{ ConnectionManagerEvent, ConnectionManagerRequest, ConnectionManagerRequester, - ListenerInfo, LivenessCheck, LivenessStatus, }, @@ -143,7 +141,7 @@ impl UnspawnedCommsNode { let UnspawnedCommsNode { builder, connection_manager_request_rx, - mut connection_manager_requester, + connection_manager_requester, connectivity_requester, connectivity_rx, node_identity, @@ -155,7 +153,6 @@ impl UnspawnedCommsNode { let CommsBuilder { dial_backoff, - hidden_service_ctl, connection_manager_config, connectivity_config, .. @@ -217,29 +214,6 @@ impl UnspawnedCommsNode { "Your node's network ID is '{}'", node_identity.node_id() ); - - let listening_info = connection_manager_requester.wait_until_listening().await?; - - // Final setup of the hidden service. - let mut hidden_service = None; - if let Some(mut ctl) = hidden_service_ctl { - // Only set the address to the bind address it is set to TCP port 0 - let mut proxied_addr = ctl.proxied_address(); - if proxied_addr.ends_with(&multiaddr!(Tcp(0u16))) { - // Remove the TCP port 0 address and replace it with the actual listener port - if let Some(Protocol::Tcp(port)) = listening_info.bind_address().iter().last() { - proxied_addr.pop(); - proxied_addr.push(Protocol::Tcp(port)); - ctl.set_proxied_addr(&proxied_addr); - } - } - let hs = ctl.create_hidden_service().await?; - let onion_addr = hs.get_onion_address(); - if !node_identity.public_addresses().contains(&onion_addr) { - node_identity.add_public_address(onion_addr); - } - hidden_service = Some(hs); - } info!( target: LOG_TARGET, "Your node's public addresses are '{}'", @@ -266,11 +240,9 @@ impl UnspawnedCommsNode { shutdown_signal, connection_manager_requester, connectivity_requester, - listening_info, node_identity, peer_manager, liveness_watch, - hidden_service, complete_signals: ext_context.drain_complete_signals(), }) } @@ -312,12 +284,8 @@ pub struct CommsNode { node_identity: Arc, /// Shared PeerManager instance peer_manager: Arc, - /// The bind addresses of the listener(s) - listening_info: ListenerInfo, /// Current liveness status liveness_watch: watch::Receiver, - /// `Some` if the comms node is configured to run via a hidden service, otherwise `None` - hidden_service: Option, /// The 'reciprocal' shutdown signals for each comms service complete_signals: Vec, } @@ -328,6 +296,10 @@ impl CommsNode { self.connection_manager_requester.get_event_subscription() } + pub fn connection_manager_requester(&mut self) -> &mut ConnectionManagerRequester { + &mut self.connection_manager_requester + } + /// Get a subscription to `ConnectivityEvent`s pub fn subscribe_connectivity_events(&self) -> ConnectivityEventRx { self.connectivity_requester.get_event_subscription() @@ -348,26 +320,11 @@ impl CommsNode { &self.node_identity } - /// Return the Ip/Tcp address that this node is listening on - pub fn listening_address(&self) -> 
&Multiaddr { - self.listening_info.bind_address() - } - - /// Return [ListenerInfo] - pub fn listening_info(&self) -> &ListenerInfo { - &self.listening_info - } - /// Returns the current liveness status pub fn liveness_status(&self) -> LivenessStatus { *self.liveness_watch.borrow() } - /// Return the Ip/Tcp address that this node is listening on - pub fn hidden_service(&self) -> Option<&tor::HiddenService> { - self.hidden_service.as_ref() - } - /// Return a handle that is used to call the connectivity service. pub fn connectivity(&self) -> ConnectivityRequester { self.connectivity_requester.clone() diff --git a/comms/core/src/builder/tests.rs b/comms/core/src/builder/tests.rs index a4d8a0ae9c..02626c75e7 100644 --- a/comms/core/src/builder/tests.rs +++ b/comms/core/src/builder/tests.rs @@ -88,7 +88,7 @@ async fn spawn_node( .unwrap(); let (messaging_events_sender, _) = broadcast::channel(100); - let comms_node = comms_node + let mut comms_node = comms_node .add_protocol_extensions(protocols.into()) .add_protocol_extension( MessagingProtocolExtension::new( @@ -107,8 +107,12 @@ async fn spawn_node( .spawn_with_transport(MemoryTransport) .await .unwrap(); - - unpack_enum!(Protocol::Memory(_port) = comms_node.listening_address().iter().next().unwrap()); + let address = comms_node + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); + unpack_enum!(Protocol::Memory(_port) = address.bind_address().iter().next().unwrap()); (comms_node, inbound_rx, outbound_tx, messaging_events_sender) } diff --git a/comms/core/src/connection_manager/dialer.rs b/comms/core/src/connection_manager/dialer.rs index b5bc59565c..8226eab9d5 100644 --- a/comms/core/src/connection_manager/dialer.rs +++ b/comms/core/src/connection_manager/dialer.rs @@ -324,7 +324,7 @@ where "Dial to peer '{}' already pending - adding to wait queue", peer.node_id ); if let Some(reply_tx) = reply_tx { - let entry = self.pending_dial_requests.entry(peer.node_id).or_insert_with(Vec::new); + let entry = self.pending_dial_requests.entry(peer.node_id).or_default(); entry.push(reply_tx); } return; @@ -519,7 +519,7 @@ where // Inflight dial was cancelled (state, Err(ConnectionManagerError::DialCancelled)) => break (state, Err(ConnectionManagerError::DialCancelled)), (state, Err(err)) => { - warn!(target: LOG_TARGET, "Failed to dial peer {} | Attempt {} | Error: {}", state.peer().node_id.short_str(), state.num_attempts(), err); + debug!(target: LOG_TARGET, "Failed to dial peer {} | Attempt {} | Error: {}", state.peer().node_id.short_str(), state.num_attempts(), err); if state.num_attempts() >= config.max_dial_attempts { break (state, Err(ConnectionManagerError::ConnectFailedMaximumAttemptsReached)); } diff --git a/comms/core/src/connectivity/manager.rs b/comms/core/src/connectivity/manager.rs index 5efae47525..1e9b7d18e3 100644 --- a/comms/core/src/connectivity/manager.rs +++ b/comms/core/src/connectivity/manager.rs @@ -781,6 +781,7 @@ impl ConnectivityManagerActor { #[cfg(not(feature = "metrics"))] fn update_connectivity_metrics(&mut self) {} + #[allow(clippy::cast_possible_wrap)] #[cfg(feature = "metrics")] fn update_connectivity_metrics(&mut self) { use std::convert::TryFrom; diff --git a/comms/core/src/multiplexing/mod.rs b/comms/core/src/multiplexing/mod.rs index 378fdaab9e..5b53a07e9c 100644 --- a/comms/core/src/multiplexing/mod.rs +++ b/comms/core/src/multiplexing/mod.rs @@ -26,4 +26,4 @@ mod metrics; mod yamux; -pub use self::yamux::{ConnectionError, Control, IncomingSubstreams, Substream, Yamux}; +pub use 
self::yamux::{Control, IncomingSubstreams, Substream, Yamux}; diff --git a/comms/core/src/peer_manager/manager.rs b/comms/core/src/peer_manager/manager.rs index f455d456e2..a383fdf437 100644 --- a/comms/core/src/peer_manager/manager.rs +++ b/comms/core/src/peer_manager/manager.rs @@ -80,6 +80,7 @@ impl PeerManager { #[cfg(feature = "metrics")] { let count = lock.count(); + #[allow(clippy::cast_possible_wrap)] metrics::peer_list_size().set(count as i64); } Ok(peer_id) @@ -92,6 +93,7 @@ impl PeerManager { #[cfg(feature = "metrics")] { let count = lock.count(); + #[allow(clippy::cast_possible_wrap)] metrics::peer_list_size().set(count as i64); } Ok(()) @@ -389,7 +391,7 @@ mod test { // Create 1 to 4 random addresses for _i in 1..=rand::thread_rng().gen_range(1..4) { - let n = vec![ + let n = [ rand::thread_rng().gen_range(1..9), rand::thread_rng().gen_range(1..9), rand::thread_rng().gen_range(1..9), diff --git a/comms/core/src/peer_manager/migrations.rs b/comms/core/src/peer_manager/migrations.rs index 8672bfe967..e933c3bc54 100644 --- a/comms/core/src/peer_manager/migrations.rs +++ b/comms/core/src/peer_manager/migrations.rs @@ -31,7 +31,7 @@ pub(super) const MIGRATION_VERSION_KEY: u64 = u64::MAX; pub fn migrate(database: &LMDBDatabase) -> Result<(), LMDBError> { // Add migrations here in version order - let migrations = vec![v7::Migration.boxed()]; + let migrations = [v7::Migration.boxed()]; if migrations.is_empty() { return Ok(()); } diff --git a/comms/core/src/peer_manager/node_distance.rs b/comms/core/src/peer_manager/node_distance.rs index f2c98a47ba..bcba3d8ccb 100644 --- a/comms/core/src/peer_manager/node_distance.rs +++ b/comms/core/src/peer_manager/node_distance.rs @@ -205,7 +205,7 @@ mod test { let (_, pk) = CommsPublicKey::random_keypair(&mut OsRng); let b = NodeId::from_public_key(&pk); let dist = NodeDistance::from_node_ids(&a, &b); - let i = u32::try_from(dist.get_bucket_index()).unwrap(); + let i = u32::from(dist.get_bucket_index()); let dist = dist.as_u128(); assert!(2u128.pow(i) <= dist, "Failed for {}, i = {}", dist, i); assert!(dist < 2u128.pow(i + 1), "Failed for {}, i = {}", dist, i,); diff --git a/comms/core/src/peer_manager/node_id.rs b/comms/core/src/peer_manager/node_id.rs index 49b4623754..f6eca4fc68 100644 --- a/comms/core/src/peer_manager/node_id.rs +++ b/comms/core/src/peer_manager/node_id.rs @@ -173,7 +173,7 @@ impl PartialEq for NodeId { impl PartialOrd for NodeId { fn partial_cmp(&self, other: &NodeId) -> Option<Ordering> { - self.0.partial_cmp(&other.0) + Some(self.cmp(other)) } } diff --git a/comms/core/src/peer_manager/peer_storage.rs b/comms/core/src/peer_manager/peer_storage.rs index 9a13381337..760d823cf3 100644 --- a/comms/core/src/peer_manager/peer_storage.rs +++ b/comms/core/src/peer_manager/peer_storage.rs @@ -516,6 +516,15 @@ impl Into for PeerStorage { } } +fn is_active_peer(peer: &Peer, features: Option<PeerFeatures>, excluded_peers: &[NodeId]) -> bool { + features.map(|f| peer.features == f).unwrap_or(true) && + !excluded_peers.contains(&peer.node_id) && + !peer.is_banned() && + peer.deleted_at.is_none() && + peer.last_seen_since().is_some() && + peer.last_seen_since().expect("Last seen to exist") <= Duration::from_secs(PEER_ACTIVE_WITHIN_DURATION) +} + #[cfg(test)] mod test { use std::{borrow::BorrowMut, iter::repeat_with}; @@ -773,7 +782,7 @@ mod test { // Create 1 to 4 random addresses for _i in 1..=rand::thread_rng().gen_range(1..4) { - let n = vec![ + let n = [ rand::thread_rng().gen_range(1..9), rand::thread_rng().gen_range(1..9), 
rand::thread_rng().gen_range(1..9), @@ -878,12 +887,3 @@ mod test { ); } } - -fn is_active_peer(peer: &Peer, features: Option<PeerFeatures>, excluded_peers: &[NodeId]) -> bool { - features.map(|f| peer.features == f).unwrap_or(true) && - !excluded_peers.contains(&peer.node_id) && - !peer.is_banned() && - peer.deleted_at.is_none() && - peer.last_seen_since().is_some() && - peer.last_seen_since().expect("Last seen to exist") <= Duration::from_secs(PEER_ACTIVE_WITHIN_DURATION) -} diff --git a/comms/core/src/protocol/rpc/body.rs b/comms/core/src/protocol/rpc/body.rs index ae0f749b68..fcf1060d42 100644 --- a/comms/core/src/protocol/rpc/body.rs +++ b/comms/core/src/protocol/rpc/body.rs @@ -165,7 +165,7 @@ impl BodyBytes { } pub fn into_bytes_mut(self) -> BytesMut { - self.0.map(|v| v.into_iter().collect()).unwrap_or_else(BytesMut::new) + self.0.map(|v| v.into_iter().collect()).unwrap_or_default() } pub fn len(&self) -> usize { @@ -177,7 +177,7 @@ impl BodyBytes { } pub fn into_vec(self) -> Vec<u8> { - self.0.map(|bytes| bytes.into()).unwrap_or_else(Vec::new) + self.0.map(|bytes| bytes.into()).unwrap_or_default() } pub fn into_bytes(self) -> Option<Bytes> { @@ -188,7 +188,7 @@ impl BodyBytes { #[allow(clippy::from_over_into)] impl Into<Bytes> for BodyBytes { fn into(self) -> Bytes { - self.0.map(Bytes::from).unwrap_or_else(Bytes::new) + self.0.map(Bytes::from).unwrap_or_default() } } diff --git a/comms/core/src/protocol/rpc/test/smoke.rs b/comms/core/src/protocol/rpc/test/smoke.rs index 5957715355..f2c17e1e59 100644 --- a/comms/core/src/protocol/rpc/test/smoke.rs +++ b/comms/core/src/protocol/rpc/test/smoke.rs @@ -186,7 +186,7 @@ async fn request_response_errors_and_streaming() { let stream = client.streaming_error2().await.unwrap(); let results = stream.collect::<Vec<_>>().await; assert_eq!(results.len(), 2); - let first_reply = results.get(0).unwrap().as_ref().unwrap(); + let first_reply = results.first().unwrap().as_ref().unwrap(); assert_eq!(first_reply, "This is ok"); let second_reply = results.get(1).unwrap().as_ref().unwrap_err(); diff --git a/comms/core/src/test_utils/mocks/connection_manager.rs b/comms/core/src/test_utils/mocks/connection_manager.rs index 66b8cc41e3..a84a2a65f6 100644 --- a/comms/core/src/test_utils/mocks/connection_manager.rs +++ b/comms/core/src/test_utils/mocks/connection_manager.rs @@ -139,7 +139,7 @@ impl ConnectionManagerMock { .lock() .await .get(&node_id) - .map(Clone::clone) + .cloned() .ok_or(ConnectionManagerError::DialConnectFailedAllAddresses); let _result = reply_tx.take().map(|tx| tx.send(result)); }, diff --git a/comms/core/src/tor/control_client/client.rs b/comms/core/src/tor/control_client/client.rs index 7c4fc63d9a..f9ce4f0e29 100644 --- a/comms/core/src/tor/control_client/client.rs +++ b/comms/core/src/tor/control_client/client.rs @@ -125,9 +125,6 @@ impl TorControlPortClient { pub async fn get_info(&mut self, key_name: &'static str) -> Result<Vec<Cow<'static, str>>, TorClientError> { let command = commands::get_info(key_name); let response = self.request_response(command).await?; - if response.is_empty() { - return Err(TorClientError::ServerNoResponse); - } Ok(response) } @@ -202,7 +199,6 @@ impl TorControlPortClient { let cmd_str = command.to_command_string().map_err(Into::into)?; self.send_line(cmd_str).await?; let responses = self.recv_next_responses().await?; - trace!(target: LOG_TARGET, "Response from tor: {:?}", responses); if responses.is_empty() { return Err(TorClientError::ServerNoResponse); } diff --git a/comms/core/src/tor/control_client/commands/mod.rs 
b/comms/core/src/tor/control_client/commands/mod.rs index 71c3fbcd77..fe95608e15 100644 --- a/comms/core/src/tor/control_client/commands/mod.rs +++ b/comms/core/src/tor/control_client/commands/mod.rs @@ -29,7 +29,7 @@ mod protocol_info; pub use add_onion::{AddOnion, AddOnionFlag, AddOnionResponse}; pub use del_onion::DelOnion; -pub use key_value::{get_conf, get_info, set_events, KeyValueCommand}; +pub use key_value::{get_conf, get_info, set_events}; pub use protocol_info::{ProtocolInfo, ProtocolInfoResponse}; pub trait TorCommand { diff --git a/comms/core/src/tor/control_client/monitor.rs b/comms/core/src/tor/control_client/monitor.rs index 91cf700545..4185f85824 100644 --- a/comms/core/src/tor/control_client/monitor.rs +++ b/comms/core/src/tor/control_client/monitor.rs @@ -53,7 +53,7 @@ where match either { // Received a command to send to the control server Either::Left(Some(line)) => { - trace!(target: LOG_TARGET, "Writing command of length '{}'", line.len()); + trace!(target: LOG_TARGET, "Tor send: {}", line); if let Err(err) = sink.send(line).await { error!( target: LOG_TARGET, @@ -64,7 +64,7 @@ where }, // Command stream ended Either::Left(None) => { - debug!( + warn!( target: LOG_TARGET, "Tor control server command receiver closed. Monitor is exiting." ); @@ -73,7 +73,7 @@ where // Received a line from the control server Either::Right(Some(Ok(line))) => { - trace!(target: LOG_TARGET, "Read line of length '{}'", line.len()); + trace!(target: LOG_TARGET, "Tor recv: {}", line); match parsers::response_line(&line) { Ok(mut line) => { if line.is_multiline { @@ -116,7 +116,7 @@ where // The control server disconnected Either::Right(None) => { cmd_rx.close(); - debug!( + warn!( target: LOG_TARGET, "Connection to tor control port closed. Monitor is exiting." ); diff --git a/comms/core/src/tor/hidden_service/controller.rs b/comms/core/src/tor/hidden_service/controller.rs index a706da54df..66a26fb4e6 100644 --- a/comms/core/src/tor/hidden_service/controller.rs +++ b/comms/core/src/tor/hidden_service/controller.rs @@ -83,7 +83,7 @@ pub struct HiddenServiceController { proxied_port_mapping: PortMapping, socks_address_override: Option<Multiaddr>, socks_auth: socks::Authentication, - identity: Option<TorIdentity>, + pub identity: Option<TorIdentity>, hs_flags: HsFlags, is_authenticated: bool, proxy_opts: TorProxyOpts, @@ -125,6 +125,7 @@ impl HiddenServiceController { pub async fn initialize_transport(&mut self) -> Result<SocksTransport, HiddenServiceControllerError> { self.connect_and_auth().await?; + let socks_addr = self.get_socks_address().await?; Ok(SocksTransport::new(SocksConfig { proxy_address: socks_addr, @@ -364,7 +365,7 @@ impl HiddenServiceController { }, }; - let identity = self.identity.as_ref().map(Clone::clone).expect("already checked"); + let identity = self.identity.clone().expect("already checked"); debug!( target: LOG_TARGET, "Added hidden service with service id '{}' on port '{}'", identity.service_id, identity.onion_port diff --git a/comms/core/src/transports/hidden_service_transport.rs b/comms/core/src/transports/hidden_service_transport.rs new file mode 100644 index 0000000000..04ae542a7e --- /dev/null +++ b/comms/core/src/transports/hidden_service_transport.rs @@ -0,0 +1,141 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::{io, io::ErrorKind, sync::Arc}; + +use log::*; +use multiaddr::{multiaddr, Multiaddr, Protocol}; +use tokio::sync::RwLock; + +use crate::{ + tor::{HiddenServiceController, TorIdentity}, + transports::{tcp::TcpInbound, SocksTransport, Transport}, +}; + +const LOG_TARGET: &str = "comms::transports::hidden_service_transport"; + +#[derive(thiserror::Error, Debug)] +pub enum HiddenServiceTransportError { + #[error("Tor hidden service transport error: `{0}`")] + HiddenServiceControllerError(#[from] crate::tor::HiddenServiceControllerError), + #[error("Tor hidden service socks error: `{0}`")] + SocksTransportError(#[from] io::Error), +} + +struct HiddenServiceTransportInner { + socks_transport: Option<SocksTransport>, + hidden_service_ctl: Option<HiddenServiceController>, +} + +#[derive(Clone)] +pub struct HiddenServiceTransport<F> { + inner: Arc<RwLock<HiddenServiceTransportInner>>, + after_init: F, +} + +impl<F: Fn(TorIdentity)> HiddenServiceTransport<F> { + pub fn new(hidden_service_ctl: HiddenServiceController, after_init: F) -> Self { + Self { + inner: Arc::new(RwLock::new(HiddenServiceTransportInner { + socks_transport: None, + hidden_service_ctl: Some(hidden_service_ctl), + })), + after_init, + } + } + + async fn is_initialized(&self) -> bool { + self.inner.read().await.socks_transport.is_some() + } + + async fn initialize(&self, listen_addr: &Multiaddr) -> Result<(TcpInbound, Multiaddr), io::Error> { + let mut inner_mut = self.inner.write().await; + let mut hs_ctl = inner_mut.hidden_service_ctl.take().ok_or(io::Error::new( + ErrorKind::Other, + "BUG: Hidden service controller not set in transport".to_string(), + ))?; + + let transport = hs_ctl.initialize_transport().await.map_err(|e| { + error!( + target: LOG_TARGET, + "Error initializing hidden service transport stack: {}", + e + ); + io::Error::new(ErrorKind::Other, e.to_string()) + })?; + let (inbound, listen_addr) = transport.listen(listen_addr).await?; + inner_mut.socks_transport = Some(transport); + + // Set the proxied address to the port we just listened on + let mut proxied_addr = hs_ctl.proxied_address(); + if proxied_addr.ends_with(&multiaddr!(Tcp(0u16))) { + if let Some(Protocol::Tcp(port)) = listen_addr.iter().last() { + proxied_addr.pop(); + proxied_addr.push(Protocol::Tcp(port)); + } + hs_ctl.set_proxied_addr(&proxied_addr); + } + + let hidden_service = hs_ctl.create_hidden_service().await.map_err(|err| { + error!( + target: LOG_TARGET, + 
"Error creating hidden service: {}", + err + ); + io::Error::new(ErrorKind::Other, err.to_string()) + })?; + + (self.after_init)(hidden_service.tor_identity().clone()); + Ok((inbound, listen_addr)) + } +} +#[crate::async_trait] +impl Transport for HiddenServiceTransport { + type Error = ::Error; + type Listener = ::Listener; + type Output = ::Output; + + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { + if self.is_initialized().await { + // For now, we only can listen on a single Tor hidden service. This behaviour is not technically correct as + // per the Transport trait, but we only ever call listen once in practice. The fix for this is to + // improve the tor client implementation to allow for multiple hidden services. + return Err(io::Error::new( + ErrorKind::Other, + "BUG: Hidden service transport already initialized".to_string(), + )); + } + let (listener, addr) = self.initialize(addr).await?; + Ok((listener, addr)) + } + + async fn dial(&self, addr: &Multiaddr) -> Result { + let inner = self.inner.read().await; + let transport = inner.socks_transport.as_ref().ok_or_else(|| { + io::Error::new( + ErrorKind::Other, + "BUG: Hidden service transport not initialized before dialling".to_string(), + ) + })?; + transport.dial(addr).await + } +} diff --git a/comms/core/src/transports/mod.rs b/comms/core/src/transports/mod.rs index 45050f540d..1c4d40dd1b 100644 --- a/comms/core/src/transports/mod.rs +++ b/comms/core/src/transports/mod.rs @@ -47,7 +47,9 @@ pub use socks::{SocksConfig, SocksTransport}; mod tcp; pub use tcp::TcpTransport; +mod hidden_service_transport; mod tcp_with_tor; +pub use hidden_service_transport::HiddenServiceTransport; pub use tcp_with_tor::TcpWithTorTransport; /// Defines an abstraction for implementations that can dial and listen for connections over a provided address. 
diff --git a/comms/core/tests/tests/rpc.rs index d97a0596d4..d4845d226f 100644 --- a/comms/core/tests/tests/rpc.rs +++ b/comms/core/tests/tests/rpc.rs @@ -44,15 +44,20 @@ async fn spawn_node(signal: ShutdownSignal) -> (CommsNode, RpcServerHandle) { .add_service(GreetingServer::new(GreetingService::default())); let rpc_server_hnd = rpc_server.get_handle(); - let comms = create_comms(signal) + let mut comms = create_comms(signal) .add_rpc_server(rpc_server) .spawn_with_transport(TcpTransport::new()) .await .unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); comms .node_identity() - .set_public_addresses(vec![comms.listening_address().clone()]); + .set_public_addresses(vec![address.bind_address().clone()]); (comms, rpc_server_hnd) } diff --git a/comms/core/tests/tests/rpc_stress.rs b/comms/core/tests/tests/rpc_stress.rs index 0e27fa38f9..9a445e8f14 100644 --- a/comms/core/tests/tests/rpc_stress.rs +++ b/comms/core/tests/tests/rpc_stress.rs @@ -46,15 +46,20 @@ async fn spawn_node(signal: ShutdownSignal) -> CommsNode { .finish() .add_service(GreetingServer::new(GreetingService::default())); - let comms = create_comms(signal) + let mut comms = create_comms(signal) .add_rpc_server(rpc_server) .spawn_with_transport(TcpTransport::new()) .await .unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); comms .node_identity() - .set_public_addresses(vec![comms.listening_address().clone()]); + .set_public_addresses(vec![address.bind_address().clone()]); comms } diff --git a/comms/core/tests/tests/substream_stress.rs b/comms/core/tests/tests/substream_stress.rs index d36a26d673..488ec9064c 100644 --- a/comms/core/tests/tests/substream_stress.rs +++ b/comms/core/tests/tests/substream_stress.rs @@ -41,15 +41,20 @@ const PROTOCOL_NAME: &[u8] = b"test/dummy/protocol"; pub async fn spawn_node(signal: ShutdownSignal) -> (CommsNode, ProtocolNotificationRx<Substream>) { let (notif_tx, notif_rx) = mpsc::channel(1); - let comms = create_comms(signal) + let mut comms = create_comms(signal) .add_protocol(&[ProtocolId::from_static(PROTOCOL_NAME)], &notif_tx) .spawn_with_transport(TcpTransport::new()) .await .unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); comms .node_identity() - .set_public_addresses(vec![comms.listening_address().clone()]); + .set_public_addresses(vec![address.bind_address().clone()]); (comms, notif_rx) } diff --git a/comms/dht/Cargo.toml b/comms/dht/Cargo.toml index 89afdfbff3..1764989c3c 100644 --- a/comms/dht/Cargo.toml +++ b/comms/dht/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_comms_dht" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" authors = ["The Tari Development Community"] description = "Tari comms DHT module" repository = "https://github.com/tari-project/tari" @@ -10,14 +10,14 @@ license = "BSD-3-Clause" edition = "2018" [dependencies] -tari_comms = { path = "../core", features = ["rpc"] } -tari_common = { path = "../../common" } -tari_comms_rpc_macros = { path = "../rpc_macros" } +tari_comms = { path = "../core", features = ["rpc"], version = "1.0.0-pre.11a" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } +tari_comms_rpc_macros = { path = "../rpc_macros", version = "1.0.0-pre.11a" } tari_crypto = { version = "0.20" } tari_utilities = { version = "0.7" } -tari_shutdown = { path = "../../infrastructure/shutdown" } -tari_storage = { path = 
"../../infrastructure/storage" } -tari_common_sqlite = { path = "../../common_sqlite" } +tari_shutdown = { path = "../../infrastructure/shutdown", version = "1.0.0-pre.11a" } +tari_storage = { path = "../../infrastructure/storage", version = "1.0.0-pre.11a" } +tari_common_sqlite = { path = "../../common_sqlite", version = "1.0.0-pre.11a" } anyhow = "1.0.53" bitflags = { version = "2.4", features = ["serde"] } @@ -40,9 +40,9 @@ zeroize = "1" # Uncomment for tokio tracing via tokio-console (needs "tracing" features) #console-subscriber = "0.1.3" -#tokio = { version = "1.20", features = ["rt", "macros", "tracing"] } +#tokio = { version = "1.36", features = ["rt", "macros", "tracing"] } # Uncomment for normal use (non tokio-console tracing) -tokio = { version = "1.23", features = ["rt", "macros"] } +tokio = { version = "1.36", features = ["rt", "macros"] } # tower-filter dependencies pin-project = "0.4" @@ -62,7 +62,7 @@ clap = "3.2" [build-dependencies] -tari_common = { path = "../../common" } +tari_common = { path = "../../common", version = "1.0.0-pre.11a" } [features] test-mocks = [] diff --git a/comms/dht/examples/memory_net/utilities.rs b/comms/dht/examples/memory_net/utilities.rs index e65a6a43dc..3d07a3edd5 100644 --- a/comms/dht/examples/memory_net/utilities.rs +++ b/comms/dht/examples/memory_net/utilities.rs @@ -306,7 +306,6 @@ pub async fn do_network_wide_propagation(nodes: &mut [TestNode], origin_node_ind let mut connectivity = node.comms.connectivity(); let mut ims_rx = node.ims_rx.take().unwrap(); let start = Instant::now(); - let start_global = start_global; let node_name = node.name.clone(); task::spawn(async move { diff --git a/comms/dht/src/actor.rs b/comms/dht/src/actor.rs index 194acd6993..819cf7075b 100644 --- a/comms/dht/src/actor.rs +++ b/comms/dht/src/actor.rs @@ -336,7 +336,7 @@ impl DhtActor { "DhtActor started. 
{}", offline_ts .map(|dt| format!("Dht has been offline since '{}'", dt)) - .unwrap_or_else(String::new) + .unwrap_or_default() ); let mut pending_jobs = FuturesUnordered::new(); diff --git a/comms/dht/src/connectivity/mod.rs b/comms/dht/src/connectivity/mod.rs index f3188a0c64..eb2b3dbdc0 100644 --- a/comms/dht/src/connectivity/mod.rs +++ b/comms/dht/src/connectivity/mod.rs @@ -253,7 +253,7 @@ impl DhtConnectivity { .high_failure_rate_cooldown .saturating_sub(ts.elapsed()) )) - .unwrap_or_else(String::new), + .unwrap_or_default(), self.neighbours.len(), self.config.num_neighbouring_nodes, neighbour_connected.len(), diff --git a/comms/dht/src/dedup/dedup_cache.rs b/comms/dht/src/dedup/dedup_cache.rs index 9f03cb591c..c18034af09 100644 --- a/comms/dht/src/dedup/dedup_cache.rs +++ b/comms/dht/src/dedup/dedup_cache.rs @@ -85,6 +85,7 @@ impl DedupCacheDatabase { /// Trims the dedup cache to the configured limit by removing the oldest entries pub fn trim_entries(&self) -> Result { + #[allow(clippy::cast_possible_wrap)] let capacity = self.capacity as i64; let mut num_removed = 0; let mut conn = self.connection.get_pooled_connection()?; diff --git a/comms/dht/src/envelope.rs b/comms/dht/src/envelope.rs index f06e63b544..2cb1df8454 100644 --- a/comms/dht/src/envelope.rs +++ b/comms/dht/src/envelope.rs @@ -251,7 +251,7 @@ impl From for DhtHeader { .ephemeral_public_key .as_ref() .map(ByteArray::to_vec) - .unwrap_or_else(Vec::new), + .unwrap_or_default(), message_signature: header.message_signature, destination: Some(header.destination.into()), message_type: header.message_type as i32, diff --git a/comms/dht/src/network_discovery/state_machine.rs b/comms/dht/src/network_discovery/state_machine.rs index feacb54872..78281d56b9 100644 --- a/comms/dht/src/network_discovery/state_machine.rs +++ b/comms/dht/src/network_discovery/state_machine.rs @@ -22,7 +22,7 @@ use std::{ fmt, - fmt::Display, + fmt::{Display, Write}, future::Future, sync::{ atomic::{AtomicUsize, Ordering}, @@ -304,7 +304,10 @@ impl Display for DiscoveryParams { f, "DiscoveryParams({} peer(s) ({}), num_peers_to_request = {})", self.peers.len(), - self.peers.iter().map(|p| format!("{}, ", p)).collect::(), + self.peers.iter().fold(String::new(), |mut peers, p| { + let _ = write!(peers, "{p}, "); + peers + }), self.num_peers_to_request ) } diff --git a/comms/dht/src/outbound/broadcast.rs b/comms/dht/src/outbound/broadcast.rs index f82f4e0b8c..5a06922866 100644 --- a/comms/dht/src/outbound/broadcast.rs +++ b/comms/dht/src/outbound/broadcast.rs @@ -431,7 +431,7 @@ where S: Service // Construct a DhtOutboundMessage for each recipient let messages = selected_peers.into_iter().map(|node_id| { let (reply_tx, reply_rx) = oneshot::channel(); - let tag = tag.unwrap_or_else(MessageTag::new); + let tag = tag.unwrap_or_default(); let send_state = MessageSendState::new(tag, reply_rx); ( DhtOutboundMessage { diff --git a/comms/dht/src/storage/mod.rs b/comms/dht/src/storage/mod.rs index fc793f7ecd..b413cdc116 100644 --- a/comms/dht/src/storage/mod.rs +++ b/comms/dht/src/storage/mod.rs @@ -29,7 +29,7 @@ mod error; pub use error::StorageError; mod dht_setting_entry; -pub use dht_setting_entry::{DhtMetadataEntry, DhtMetadataKey}; +pub use dht_setting_entry::DhtMetadataKey; mod database; pub use database::DhtDatabase; diff --git a/comms/dht/src/store_forward/service.rs b/comms/dht/src/store_forward/service.rs index d39132ff76..f3b09b3643 100644 --- a/comms/dht/src/store_forward/service.rs +++ b/comms/dht/src/store_forward/service.rs @@ -20,11 +20,7 @@ // 
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, - time::Duration, -}; +use std::{convert::TryInto, sync::Arc, time::Duration}; use chrono::{DateTime, NaiveDateTime, Utc}; use log::*; @@ -485,9 +481,10 @@ impl StoreAndForwardService { fn handle_fetch_message_query(&self, query: &FetchStoredMessageQuery) -> SafResult<Vec<StoredMessage>> { use SafResponseType::{Anonymous, Discovery, ForMe, Join}; + #[allow(clippy::cast_possible_wrap)] let limit = query .limit - .and_then(|v| i64::try_from(v).ok()) + .map(i64::from) .unwrap_or(self.config.max_returned_messages as i64); let db = &self.database; let messages = match query.response_type { diff --git a/comms/dht/src/test_utils/dht_actor_mock.rs b/comms/dht/src/test_utils/dht_actor_mock.rs index b8714ffa36..7ccbd2fd09 100644 --- a/comms/dht/src/test_utils/dht_actor_mock.rs +++ b/comms/dht/src/test_utils/dht_actor_mock.rs @@ -72,7 +72,7 @@ impl DhtMockState { } pub fn get_setting(&self, key: DhtMetadataKey) -> Option<Vec<u8>> { - self.settings.read().unwrap().get(&key.to_string()).map(Clone::clone) + self.settings.read().unwrap().get(&key.to_string()).cloned() } } @@ -124,13 +124,7 @@ impl DhtActorMock { .unwrap(); }, GetMetadata(key, reply_tx) => { - let _result = reply_tx.send(Ok(self - .state - .settings - .read() - .unwrap() - .get(&key.to_string()) - .map(Clone::clone))); + let _result = reply_tx.send(Ok(self.state.settings.read().unwrap().get(&key.to_string()).cloned())); }, SetMetadata(key, value, reply_tx) => { self.state.settings.write().unwrap().insert(key.to_string(), value); diff --git a/comms/dht/src/test_utils/mod.rs b/comms/dht/src/test_utils/mod.rs index 39e8fff377..be9c47cdad 100644 --- a/comms/dht/src/test_utils/mod.rs +++ b/comms/dht/src/test_utils/mod.rs @@ -54,7 +54,7 @@ mod service; pub use service::service_spy; mod store_and_forward_mock; -pub use store_and_forward_mock::{create_store_and_forward_mock, StoreAndForwardMockState}; +pub use store_and_forward_mock::create_store_and_forward_mock; pub fn assert_send_static_service(_: &S) where diff --git a/comms/dht/tests/dht.rs b/comms/dht/tests/dht.rs index 2eb654af18..09ed64aa60 100644 --- a/comms/dht/tests/dht.rs +++ b/comms/dht/tests/dht.rs @@ -244,7 +244,7 @@ async fn test_dht_store_forward() { .unwrap(); // Wait for node C to and receive a response from the SAF request let event = collect_try_recv!(node_C_msg_events, take = 1, timeout = Duration::from_secs(20)); - unpack_enum!(MessagingEvent::MessageReceived(_node_id, _msg) = event.get(0).unwrap()); + unpack_enum!(MessagingEvent::MessageReceived(_node_id, _msg) = event.first().unwrap()); let msg = node_C.next_inbound_message(Duration::from_secs(5)).await.unwrap(); assert_eq!( @@ -273,7 +273,7 @@ async fn test_dht_store_forward() { // Check that Node C emitted the StoreAndForwardMessagesReceived event when it went Online let event = collect_try_recv!(node_C_dht_events, take = 1, timeout = Duration::from_secs(20)); - unpack_enum!(DhtEvent::StoreAndForwardMessagesReceived = event.get(0).unwrap().as_ref()); + unpack_enum!(DhtEvent::StoreAndForwardMessagesReceived = event.first().unwrap().as_ref()); node_A.shutdown().await; node_B.shutdown().await; @@ -926,7 +926,7 @@ fn count_messages_received(events: &[MessagingEvent], node_ids: &[&NodeId]) -> usize } async fn wait_for_connectivity(nodes: &[&TestNode]) { - for node in nodes.iter() { + for node in nodes { node.comms 
.connectivity() .wait_for_connectivity(Duration::from_secs(10)) diff --git a/comms/rpc_macros/Cargo.toml b/comms/rpc_macros/Cargo.toml index 4159e2e200..f2904d1499 100644 --- a/comms/rpc_macros/Cargo.toml +++ b/comms/rpc_macros/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [lib] @@ -16,7 +16,7 @@ proc-macro = true proc-macro2 = "1.0.24" quote = "1.0.7" -syn = { version = "1.0.38", features = ["fold"] } +syn = { version = "1.0.38", features = ["fold", "full", "extra-traits"] } [dev-dependencies] tari_comms = { path = "../core", features = ["rpc"] } diff --git a/comms/rpc_macros/README.md b/comms/rpc_macros/README.md new file mode 100644 index 0000000000..f2454fe245 --- /dev/null +++ b/comms/rpc_macros/README.md @@ -0,0 +1,5 @@ +# tari_comms_rpc_macros + +Implementation of `tari_comms_rpc_macros` macros for Tari. + +This crate is part of the [Tari Cryptocurrency](https://tari.com) project. diff --git a/hash_domains/Cargo.toml b/hash_domains/Cargo.toml deleted file mode 100644 index 28b7a1f3d8..0000000000 --- a/hash_domains/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "tari_hash_domains" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tari_crypto = "0.20.0" diff --git a/hashing/Cargo.toml b/hashing/Cargo.toml new file mode 100644 index 0000000000..2d440c5d6f --- /dev/null +++ b/hashing/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "tari_hashing" +version = "1.0.0-pre.11a" +edition = "2021" +description = "Tari hash domains" +authors = ["The Tari Development Community"] +repository = "https://github.com/tari-project/tari" +homepage = "https://tari.com" +readme = "README.md" +license = "BSD-3-Clause" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tari_crypto = "0.20.0" +digest = "0.10" +borsh = "1.2" + +[dev-dependencies] +blake2 = "0.10" \ No newline at end of file diff --git a/hashing/README.md b/hashing/README.md new file mode 100644 index 0000000000..0fb57977b5 --- /dev/null +++ b/hashing/README.md @@ -0,0 +1,5 @@ +# tari_hashing + +Common hash domains and hashers for Tari. + +This crate is part of the [Tari Cryptocurrency](https://tari.com) project. diff --git a/hashing/src/borsh_hasher.rs b/hashing/src/borsh_hasher.rs new file mode 100644 index 0000000000..47c876fbe8 --- /dev/null +++ b/hashing/src/borsh_hasher.rs @@ -0,0 +1,160 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::{io, io::Write, marker::PhantomData}; + +use borsh::BorshSerialize; +use digest::Digest; +use tari_crypto::hashing::DomainSeparation; + +/// Domain separated borsh-encoding hasher. +pub struct DomainSeparatedBorshHasher<M, D> { + writer: WriteHashWrapper<D>, + _m: PhantomData<M>, +} + +impl<M: DomainSeparation, D: Digest + Default> DomainSeparatedBorshHasher<M, D> { + #[allow(clippy::new_ret_no_self)] + pub fn new_with_label(label: &str) -> Self { + let mut digest = D::default(); + M::add_domain_separation_tag(&mut digest, label); + Self { + writer: WriteHashWrapper(digest), + _m: PhantomData, + } + } + + pub fn finalize(self) -> digest::Output<D> { + self.writer.0.finalize() + } + + pub fn update_consensus_encode<T: BorshSerialize>(&mut self, data: &T) { + BorshSerialize::serialize(data, &mut self.writer) + .expect("Incorrect implementation of BorshSerialize encountered. Implementations MUST be infallible."); + } + + pub fn chain<T: BorshSerialize>(mut self, data: &T) -> Self { + self.update_consensus_encode(data); + self + } +} + +/// This private struct wraps a Digest and implements the Write trait to satisfy the consensus encoding trait. +/// Do not use the DomainSeparatedHasher with this. +#[derive(Clone)] +struct WriteHashWrapper<D>(D); + +impl<D: Digest> Write for WriteHashWrapper<D> { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.0.update(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use blake2::Blake2b; + use digest::consts::U32; + use tari_crypto::hash_domain; + + use super::*; + + #[derive(Debug, BorshSerialize)] + pub struct TestStruct { + pub a: u64, + pub b: u64, + } + + hash_domain!(TestHashDomain, "com.tari.test.test_hash", 0); + + #[test] + fn label_yields_distinct_hash() { + let input = [1u8; 32]; + + let hash_label1 = DomainSeparatedBorshHasher::<TestHashDomain, Blake2b<U32>>::new_with_label("label1") + .chain(&input) + .finalize(); + + let hash_label2 = DomainSeparatedBorshHasher::<TestHashDomain, Blake2b<U32>>::new_with_label("label2") + .chain(&input) + .finalize(); + + // They should be distinct + assert_ne!(hash_label1, hash_label2); + } + + #[test] + fn it_hashes_using_the_domain_hasher() { + // A u64 borsh-encodes as a single 8-byte little-endian write, so 255u64 matches the chunk below + let mut hasher = Blake2b::<U32>::default(); + TestHashDomain::add_domain_separation_tag(&mut hasher, "foo"); + + let expected_hash = hasher.chain_update(b"\xff\x00\x00\x00\x00\x00\x00\x00").finalize(); + let hash = DomainSeparatedBorshHasher::<TestHashDomain, Blake2b<U32>>::new_with_label("foo") + .chain(&255u64) + .finalize(); + + assert_eq!(hash, expected_hash); + } + + #[test] + fn it_adds_to_hash_challenge_in_complete_chunks() { + // The borsh implementation contains 2 writes, 1 per field. See the macro expansion for details. 
+ let test_subject1 = TestStruct { a: 1, b: 2 }; + let test_subject2 = TestStruct { a: 3, b: 4 }; + let mut hasher = Blake2b::<U32>::default(); + TestHashDomain::add_domain_separation_tag(&mut hasher, "foo"); + + let mut buf = Vec::new(); + BorshSerialize::serialize(&test_subject1, &mut buf).unwrap(); + BorshSerialize::serialize(&test_subject2, &mut buf).unwrap(); + + // Write to the test hasher as one chunk + let expected_hash = hasher.chain_update(&buf).finalize(); + + // The domain-separated one must do the same + let hash = DomainSeparatedBorshHasher::<TestHashDomain, Blake2b<U32>>::new_with_label("foo") + .chain(&test_subject1) + .chain(&test_subject2) + .finalize(); + + assert_eq!(hash, expected_hash); + } + + #[test] + fn default_consensus_hash_is_not_blake_default_hash() { + let blake_hasher = Blake2b::<U32>::default(); + let blake_hash = blake_hasher.chain_update(b"").finalize(); + + let default_consensus_hasher = DomainSeparatedBorshHasher::<TestHashDomain, Blake2b<U32>>::new_with_label(""); + let default_consensus_hash = default_consensus_hasher.chain(b"").finalize(); + + assert_ne!(blake_hash.as_slice(), default_consensus_hash.as_slice()); + } +} diff --git a/hashing/src/domains.rs b/hashing/src/domains.rs new file mode 100644 index 0000000000..cf400cc38e --- /dev/null +++ b/hashing/src/domains.rs @@ -0,0 +1,30 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use tari_crypto::hash_domain; + +// These are the hash domains that are also used in tari-dan. + +hash_domain!(ConfidentialOutputHashDomain, "com.tari.dan.confidential_output", 1); +hash_domain!(TariEngineHashDomain, "com.tari.dan.engine", 0); + +// Hash domain used to derive the final AEAD encryption key for encrypted data in UTXOs +hash_domain!( + TransactionSecureNonceKdfDomain, + "com.tari.base_layer.core.transactions.secure_nonce_kdf", + 0 +); +hash_domain!( + ValidatorNodeBmtHashDomain, + "com.tari.base_layer.core.validator_node_mmr", + 1 +); +hash_domain!( + WalletOutputEncryptionKeysDomain, + "com.tari.base_layer.wallet.output_encryption_keys", + 1 +); + +// Hash domain for all transaction-related hashes, including the script signature challenge, transaction hash and kernel +// signature challenge +hash_domain!(TransactionHashDomain, "com.tari.base_layer.core.transactions", 0); diff --git a/hash_domains/src/lib.rs b/hashing/src/lib.rs similarity index 69% rename from hash_domains/src/lib.rs rename to hashing/src/lib.rs index a88f9b4519..8993831cc4 100644 --- a/hash_domains/src/lib.rs +++ b/hashing/src/lib.rs @@ -20,26 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use tari_crypto::hash_domain; +mod domains; +pub use domains::*; -// These are the hash domains that are also used in tari-dan. 
- -hash_domain!(ConfidentialOutputHashDomain, "com.tari.dan.confidential_output", 1); -hash_domain!(TariEngineHashDomain, "com.tari.dan.engine", 0); - -// Hash domain used to derive the final AEAD encryption key for encrypted data in UTXOs -hash_domain!( - TransactionSecureNonceKdfDomain, - "com.tari.base_layer.core.transactions.secure_nonce_kdf", - 0 -); -hash_domain!( - ValidatorNodeBmtHashDomain, - "com.tari.base_layer.core.validator_node_mmr", - 1 -); -hash_domain!( - WalletOutputEncryptionKeysDomain, - "com.tari.base_layer.wallet.output_encryption_keys", - 1 -); +mod borsh_hasher; +pub use borsh_hasher::*; diff --git a/infrastructure/derive/Cargo.toml b/infrastructure/derive/Cargo.toml index 071727ba71..62040fd737 100644 --- a/infrastructure/derive/Cargo.toml +++ b/infrastructure/derive/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [lib] @@ -15,4 +15,4 @@ proc-macro = true [dependencies] quote = "0.6.11" syn = "0.15.29" -proc-macro2 = "0.4.27" +proc-macro2 = "0.4.27" diff --git a/infrastructure/libtor/Cargo.toml b/infrastructure/libtor/Cargo.toml index 0828186d30..f3844bbd4f 100644 --- a/infrastructure/libtor/Cargo.toml +++ b/infrastructure/libtor/Cargo.toml @@ -1,13 +1,12 @@ [package] name = "tari_libtor" -version = "0.24.0" +version = "1.0.0-pre.11a" edition = "2021" license = "BSD-3-Clause" [dependencies] tari_common = { path = "../../common" } tari_p2p = { path = "../../base_layer/p2p" } -tari_shutdown = { path = "../shutdown"} derivative = "2.2.0" log = "0.4.8" @@ -16,7 +15,7 @@ tempfile = "3.1.0" tor-hash-passwd = "1.0.1" [target.'cfg(unix)'.dependencies] -libtor = { version="46.9.0"} +libtor = { version = "46.9.0" } openssl = { version = "0.10.61", features = ["vendored"] } [package.metadata.cargo-machete] diff --git a/infrastructure/libtor/src/tor.rs b/infrastructure/libtor/src/tor.rs index e9b6387194..218e6f1928 100644 --- a/infrastructure/libtor/src/tor.rs +++ b/infrastructure/libtor/src/tor.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{fmt, io, net::TcpListener}; +use std::{fmt, io, net::TcpListener, path::PathBuf, thread}; use derivative::Derivative; use libtor::{LogDestination, LogLevel, TorFlag}; @@ -28,7 +28,6 @@ use log::*; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use tari_common::exit_codes::{ExitCode, ExitError}; use tari_p2p::{TorControlAuthentication, TransportConfig, TransportType}; -use tari_shutdown::ShutdownSignal; use tempfile::{tempdir, NamedTempFile, TempDir, TempPath}; use tor_hash_passwd::EncryptedKey; @@ -46,7 +45,7 @@ impl fmt::Debug for TorPassword { #[derivative(Debug)] pub struct Tor { control_port: u16, - data_dir: String, + data_dir: PathBuf, log_destination: String, log_level: LogLevel, #[derivative(Debug = "ignore")] @@ -59,12 +58,12 @@ pub struct Tor { impl Default for Tor { fn default() -> Tor { Tor { - control_port: 19_051, + control_port: 0, data_dir: "/tmp/tor-data".into(), log_destination: "/tmp/tor.log".into(), log_level: LogLevel::Err, passphrase: TorPassword(None), - socks_port: 19_050, + socks_port: 0, temp_dir: None, temp_file: None, } @@ -83,6 +82,7 @@ impl Tor { // check for unused ports to assign let (socks_port, control_port) = get_available_ports()?; + debug!(target: LOG_TARGET, "Using socks port {socks_port} and control port {control_port}"); instance.socks_port = socks_port; instance.control_port = control_port; @@ -96,9 +96,8 @@ impl Tor { // data dir let temp = tempdir()?; - let dir = temp.path().to_string_lossy().to_string(); + instance.data_dir = temp.path().to_path_buf(); instance.temp_dir = Some(temp); - instance.data_dir = dir; // log destination let temp = NamedTempFile::new()?.into_temp_path(); @@ -128,8 +127,8 @@ impl Tor { } } - /// Run the Tor instance until the shutdown signal is received - pub async fn run(self, mut shutdown_signal: ShutdownSignal) -> Result<(), ExitError> { + /// Run the Tor instance in the background and return a handle to the thread. + pub fn run_background(self) -> thread::JoinHandle<Result<u8, libtor::Error>> { info!(target: LOG_TARGET, "Starting Tor instance"); let Tor { @@ -144,23 +143,35 @@ impl Tor { let mut tor = libtor::Tor::new(); - tor.flag(TorFlag::DataDirectory(data_dir.clone())) - .flag(TorFlag::SocksPort(socks_port)) - .flag(TorFlag::ControlPort(control_port)) + tor.flag(TorFlag::DataDirectory(data_dir.to_string_lossy().to_string())) + // Disable signal handlers so that ctrl+c can be handled by our application + // https://github.com/torproject/torspec/blob/8961bb4d83fccb2b987f9899ca83aa430f84ab0c/control-spec.txt#L3946 + .flag(TorFlag::Custom("__DisableSignalHandlers 1".to_string())) + // Prevent conflicts with multiple instances using the same listener port for Prometheus metrics + .flag(TorFlag::Custom("MetricsPort 0".to_string())) + // Write the final control port to a file. This could be used to configure the node to use this port when auto is set. 
+ .flag(TorFlag::ControlPortWriteToFile(data_dir.join("control_port").to_string_lossy().to_string())) .flag(TorFlag::Hush()) .flag(TorFlag::LogTo(log_level, LogDestination::File(log_destination))); + if socks_port == 0 { + tor.flag(TorFlag::SocksPortAuto); + } else { + tor.flag(TorFlag::SocksPort(socks_port)); + } + + if control_port == 0 { + tor.flag(TorFlag::ControlPortAuto); + } else { + tor.flag(TorFlag::ControlPort(control_port)); + } + if let Some(secret) = passphrase.0 { let hash = EncryptedKey::hash_password(&secret).to_string(); tor.flag(TorFlag::HashedControlPassword(hash)); } - tor.start_background(); - - shutdown_signal.wait().await; - info!(target: LOG_TARGET, "Shutting down Tor instance"); - - Ok(()) + tor.start_background() } } diff --git a/infrastructure/metrics/Cargo.toml b/infrastructure/metrics/Cargo.toml index 2ccbf6959f..8def9a2867 100644 --- a/infrastructure/metrics/Cargo.toml +++ b/infrastructure/metrics/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "tari_metrics" description = "Tari metrics" -version = "0.1.0" +version = "1.0.0-pre.11a" edition = "2021" authors = ["The Tari Development Community"] repository = "https://github.com/tari-project/tari" diff --git a/infrastructure/metrics/README.md b/infrastructure/metrics/README.md new file mode 100644 index 0000000000..4505d37fe9 --- /dev/null +++ b/infrastructure/metrics/README.md @@ -0,0 +1,4 @@ +# Tari metrics +Implementation of `Tari metrics` for Tari. + +This crate is part of the [Tari Cryptocurrency](https://tari.com) project. diff --git a/infrastructure/shutdown/Cargo.toml b/infrastructure/shutdown/Cargo.toml index 8036c2601b..76b9417218 100644 --- a/infrastructure/shutdown/Cargo.toml +++ b/infrastructure/shutdown/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/infrastructure/storage/Cargo.toml b/infrastructure/storage/Cargo.toml index d881c20832..90f603109b 100644 --- a/infrastructure/storage/Cargo.toml +++ b/infrastructure/storage/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" edition = "2018" [dependencies] @@ -14,7 +14,7 @@ bincode = "1.1" log = "0.4.0" lmdb-zero = "0.4.4" thiserror = "1.0.26" -serde = "1.0.80" +serde = { version = "1.0.80", features = ["derive"] } [dev-dependencies] rand = "0.8" diff --git a/infrastructure/storage/src/lmdb_store/mod.rs b/infrastructure/storage/src/lmdb_store/mod.rs index 2833649193..2e2cf94c86 100644 --- a/infrastructure/storage/src/lmdb_store/mod.rs +++ b/infrastructure/storage/src/lmdb_store/mod.rs @@ -28,4 +28,4 @@ pub use lmdb_zero::{ db, traits::{AsLmdbBytes, FromLmdbBytes}, }; -pub use store::{DatabaseRef, LMDBBuilder, LMDBConfig, LMDBDatabase, LMDBStore}; +pub use store::{DatabaseRef, LMDBBuilder, LMDBConfig, LMDBDatabase, LMDBStore, BYTES_PER_MB}; diff --git a/infrastructure/storage/src/lmdb_store/store.rs b/infrastructure/storage/src/lmdb_store/store.rs index 0756a9cc52..bf2cc33fc3 100644 --- a/infrastructure/storage/src/lmdb_store/store.rs +++ b/infrastructure/storage/src/lmdb_store/store.rs @@ -9,6 +9,7 @@ use std::{ convert::TryInto, path::{Path, PathBuf}, sync::Arc, + time::Instant, }; use lmdb_zero::{ @@ -41,7 +42,7 @@ use 
crate::{ }; const LOG_TARGET: &str = "lmdb"; -const BYTES_PER_MB: usize = 1024 * 1024; +pub const BYTES_PER_MB: usize = 1024 * 1024; /// An atomic pointer to an LMDB database instance pub type DatabaseRef = Arc<Database<'static>>; @@ -92,7 +93,8 @@ impl LMDBConfig { impl Default for LMDBConfig { fn default() -> Self { - Self::new_from_mb(16, 16, 4) + // Do not choose these values too small, as the entire SMT is replaced for every new block + Self::new_from_mb(128, 128, 64) } } @@ -186,7 +188,7 @@ impl LMDBBuilder { let flags = self.env_flags | open::NOTLS; let env = builder.open(&path, flags, 0o600)?; // SAFETY: no transactions can be open at this point - LMDBStore::resize_if_required(&env, &self.env_config)?; + LMDBStore::resize_if_required(&env, &self.env_config, None)?; Arc::new(env) }; @@ -346,16 +348,15 @@ pub struct LMDBStore { } impl LMDBStore { - /// Close all databases and close the environment. You cannot be guaranteed that the dbs will be closed after - /// calling this function because there still may be threads accessing / writing to a database that will block - /// this call. However, in that case `shutdown` returns an error. + /// Force flush the data buffers to disk. pub fn flush(&self) -> Result<(), lmdb_zero::error::Error> { - trace!(target: LOG_TARGET, "Forcing flush of buffers to disk"); + let start = Instant::now(); self.env.sync(true)?; - debug!(target: LOG_TARGET, "LMDB Buffers have been flushed"); + trace!(target: LOG_TARGET, "LMDB buffers flushed in {:.2?}", start.elapsed()); Ok(()) } + /// Logs information about the LMDB environment and its databases. pub fn log_info(&self) { match self.env.info() { Err(e) => warn!( @@ -406,10 +407,12 @@ impl LMDBStore { self.databases.get(db_name).cloned() } + /// Returns the LMDB environment configuration pub fn env_config(&self) -> LMDBConfig { self.env_config.clone() } + /// Returns a handle to the LMDB environment pub fn env(&self) -> Arc<Environment> { self.env.clone() } @@ -421,30 +424,39 @@ impl LMDBStore { /// not check for this condition, the caller must ensure it explicitly. /// /// - pub unsafe fn resize_if_required(env: &Environment, config: &LMDBConfig) -> Result<(), LMDBError> { - let env_info = env.info()?; - let stat = env.stat()?; - let size_used_bytes = stat.psize as usize * env_info.last_pgno; - let size_left_bytes = env_info.mapsize - size_used_bytes; - debug!( - target: LOG_TARGET, - "Resize check: Used bytes: {}, Remaining bytes: {}", size_used_bytes, size_left_bytes - ); - - if size_left_bytes <= config.resize_threshold_bytes { - Self::resize(env, config)?; + pub unsafe fn resize_if_required( + env: &Environment, + config: &LMDBConfig, + increase_threshold_by: Option<usize>, + ) -> Result<(), LMDBError> { + let (mapsize, size_used_bytes, size_left_bytes) = LMDBStore::get_stats(env)?; + if size_left_bytes <= config.resize_threshold_bytes + increase_threshold_by.unwrap_or_default() { debug!( target: LOG_TARGET, - "({}) LMDB size used {:?} MB, environment space left {:?} MB, increased by {:?} MB", - env.path()?.to_str()?, + "Resize required: mapsize: {} MB, used: {} MB, remaining: {} MB", + mapsize / BYTES_PER_MB, size_used_bytes / BYTES_PER_MB, - size_left_bytes / BYTES_PER_MB, - config.grow_size_bytes / BYTES_PER_MB, + size_left_bytes / BYTES_PER_MB ); + Self::resize(env, config, Some(increase_threshold_by.unwrap_or_default()))?; } Ok(()) } + /// Returns the LMDB environment statistics.
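The reworked check reduces to simple arithmetic over the environment statistics: bytes in use are `psize * last_pgno`, and a grow is due once the remaining map space falls inside the configured threshold, widened by the size of an incoming write when that is known. A self-contained sketch with made-up figures:

```rust
const BYTES_PER_MB: usize = 1024 * 1024;

// Standalone model of the resize decision in `resize_if_required`:
// `psize * last_pgno` approximates the bytes in use, and a resize is due
// once the remaining map space falls within the threshold, widened by the
// size of the incoming write when that is known.
fn needs_resize(mapsize: usize, psize: usize, last_pgno: usize, threshold: usize, incoming: usize) -> bool {
    let used = psize * last_pgno;   // bytes already occupied by pages
    let remaining = mapsize - used; // bytes still available in the map
    remaining <= threshold + incoming
}

fn main() {
    // Made-up figures: 128 MB map, 4 KiB pages, 30_000 pages in use
    // (~117 MB), a 16 MB threshold and a 1 KiB value about to be written.
    let resize = needs_resize(128 * BYTES_PER_MB, 4096, 30_000, 16 * BYTES_PER_MB, 1024);
    assert!(resize); // ~10.8 MB remaining <= 16 MB threshold + 1 KiB
    println!("resize required: {resize}");
}
```

The same idea drives `insert_safely` further down: on `MAP_FULL` the write is retried after growing the map by `grow_size_bytes` plus the rejected value's length, up to roughly 1 GB of accumulated growth.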
+ /// Note: + /// On Windows and Ubuntu, this function does not always return the actual used size of the + /// database on disk when the database has grown large (> 700MB); the reason is unknown (not tested + /// on macOS). + pub fn get_stats(env: &Environment) -> Result<(usize, usize, usize), LMDBError> { + let env_info = env.info()?; + let stat = env.stat()?; + let size_used_bytes = stat.psize as usize * env_info.last_pgno; + let size_left_bytes = env_info.mapsize - size_used_bytes; + + Ok((env_info.mapsize, size_used_bytes, size_left_bytes)) + } + /// Grows the LMDB environment by the configured amount /// /// # Safety /// /// not check for this condition, the caller must ensure it explicitly. /// /// - pub unsafe fn resize(env: &Environment, config: &LMDBConfig) -> Result<(), LMDBError> { + pub unsafe fn resize( + env: &Environment, + config: &LMDBConfig, + increase_threshold_by: Option<usize>, + ) -> Result<(), LMDBError> { + let start = Instant::now(); let env_info = env.info()?; let current_mapsize = env_info.mapsize; - env.set_mapsize(current_mapsize + config.grow_size_bytes)?; + env.set_mapsize(current_mapsize + config.grow_size_bytes + increase_threshold_by.unwrap_or_default())?; let env_info = env.info()?; let new_mapsize = env_info.mapsize; debug!( target: LOG_TARGET, - "({}) LMDB MB, mapsize was grown from {:?} MB to {:?} MB, increased by {:?} MB", + "({}) LMDB MB, mapsize was grown from {} MB to {} MB, increased by {} MB, in {:.2?}", env.path()?.to_str()?, current_mapsize / BYTES_PER_MB, new_mapsize / BYTES_PER_MB, - config.grow_size_bytes / BYTES_PER_MB, + (config.grow_size_bytes + increase_threshold_by.unwrap_or_default()) / BYTES_PER_MB, + start.elapsed() ); Ok(()) @@ -487,18 +505,20 @@ impl LMDBDatabase { K: AsLmdbBytes + ?Sized, V: Serialize, { - const MAX_RESIZES: usize = 5; + // Resize this many times before assuming something is not right (up to 1 GB) + let max_resizes = 1024 * BYTES_PER_MB / self.env_config.grow_size_bytes(); let value = LMDBWriteTransaction::convert_value(value)?; - for _ in 0..MAX_RESIZES { + for i in 0..max_resizes { match self.write(key, &value) { Ok(txn) => return Ok(txn), Err(error::Error::Code(error::MAP_FULL)) => { info!( target: LOG_TARGET, - "Failed to obtain write transaction because the database needs to be resized" + "Database resize required (resized {} time(s) in this transaction)", + i + 1 ); unsafe { - LMDBStore::resize(&self.env, &self.env_config)?; + LMDBStore::resize(&self.env, &self.env_config, Some(value.len()))?; } }, Err(e) => return Err(e.into()), diff --git a/infrastructure/storage/tests/lmdb.rs b/infrastructure/storage/tests/lmdb.rs index 45740521c7..ce14dd3386 100644 --- a/infrastructure/storage/tests/lmdb.rs +++ b/infrastructure/storage/tests/lmdb.rs @@ -222,9 +222,9 @@ fn test_multi_thread_writes() { #[test] fn test_multi_writes() { { - let env = init("multi-writes").unwrap(); + let store = init("multi-writes").unwrap(); for i in 0..2 { - let db = env.get_handle("users").unwrap(); + let db = store.get_handle("users").unwrap(); let res = db.with_write_transaction(|mut txn| { for j in 0..1000 { let v = i * 1000 + j; @@ -235,7 +235,7 @@ }); assert!(res.is_ok()); } - env.flush().unwrap(); + store.flush().unwrap(); } clean_up("multi-writes"); // In Windows file handles must be released before files can be deleted } @@ -277,7 +277,7 @@ fn test_lmdb_resize_on_create() { let db_name = "test"; { // Create db with large preset environment size - let env = LMDBBuilder::new() + let store
= LMDBBuilder::new() .set_path(&path) .set_env_config(LMDBConfig::new( 100 * PRESET_SIZE * 1024 * 1024, @@ -289,17 +289,17 @@ .build() .unwrap(); // Add some data that is `>= 2 * (PRESET_SIZE * 1024 * 1024)` - let db = env.get_handle(db_name).unwrap(); + let db = store.get_handle(db_name).unwrap(); let users = load_users(); for i in 0..100 { db.insert(&i, &users).unwrap(); } // Ensure enough data is loaded - let env_info = env.env().info().unwrap(); - let env_stat = env.env().stat().unwrap(); + let env_info = store.env().info().unwrap(); + let env_stat = store.env().stat().unwrap(); size_used_round_1 = env_stat.psize as usize * env_info.last_pgno; assert!(size_used_round_1 >= 2 * (PRESET_SIZE * 1024 * 1024)); - env.flush().unwrap(); + store.flush().unwrap(); } { diff --git a/infrastructure/tari_script/Cargo.toml b/infrastructure/tari_script/Cargo.toml index 31f1777dae..07cec2bcea 100644 --- a/infrastructure/tari_script/Cargo.toml +++ b/infrastructure/tari_script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_script" -version = "0.12.0" +version = "1.0.0-pre.11a" edition = "2021" description = "Tari script library" authors = ["The Tari Development Community"] diff --git a/infrastructure/tari_script/README.md b/infrastructure/tari_script/README.md new file mode 100644 index 0000000000..e73a0f547e --- /dev/null +++ b/infrastructure/tari_script/README.md @@ -0,0 +1,14 @@ +# Tari Script +Implementation of `Tari Script` for Tari. + +This crate is part of the [Tari Cryptocurrency](https://tari.com) project. + +For more details see: + +[TariScript for dummies](https://tlu.tarilabs.com/tari/TariScript_for_dummies) explains how Tari Script works. + +[Tari script RFC](https://rfc.tari.com/RFC-0201_TariScript.html) + +[Tari Script Opcodes](https://rfc.tari.com/RFC-0202_TariScriptOpcodes.html) + + diff --git a/infrastructure/test_utils/Cargo.toml b/infrastructure/test_utils/Cargo.toml index 7fae60c892..8170c06f4f 100644 --- a/infrastructure/test_utils/Cargo.toml +++ b/infrastructure/test_utils/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "tari_test_utils" description = "Utility functions used in Tari test functions" -version = "1.0.0-pre.5" +version = "1.0.0-pre.11a" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" @@ -9,12 +9,12 @@ license = "BSD-3-Clause" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tari_shutdown = { path = "../shutdown" } -tari_comms = { path = "../../comms/core" } +tari_shutdown = { path = "../shutdown", version = "1.0.0-pre.11a" } +tari_comms = { path = "../../comms/core", version = "1.0.0-pre.11a" } futures = { version = "^0.3.1" } rand = "0.8" -tokio = { version = "1.23", features = ["rt-multi-thread", "time", "sync"] } +tokio = { version = "1.36", features = ["rt-multi-thread", "time", "sync"] } tempfile = "3.1.0" [dev-dependencies] diff --git a/infrastructure/test_utils/src/futures/mod.rs b/infrastructure/test_utils/src/futures/mod.rs index cf4165bfa0..e63fcfa8a8 100644 --- a/infrastructure/test_utils/src/futures/mod.rs +++ b/infrastructure/test_utils/src/futures/mod.rs @@ -105,7 +105,7 @@ mod test { #[should_panic] fn panic_context() { let mut my_fut = future::poll_fn::<(), _>(|cx: &mut Context<'_>| { - cx.waker().clone().wake(); + cx.waker().wake_by_ref(); Poll::Pending }); panic_context!(cx); diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index c9ce21a052..95022046d5 100644 --- a/integration_tests/Cargo.toml
+++ b/integration_tests/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] minotari_app_grpc = { path = "../applications/minotari_app_grpc" } minotari_app_utilities = { path = "../applications/minotari_app_utilities" } -minotari_node = { path = "../applications/minotari_node" } +minotari_node = { path = "../applications/minotari_node", features = ["metrics"] } minotari_node_grpc_client = { path = "../clients/rust/base_node_grpc_client" } tari_chat_client = { path = "../base_layer/contacts/src/chat_client" } minotari_chat_ffi = { path = "../base_layer/chat_ffi" } @@ -35,7 +35,7 @@ tari_key_manager = { path = "../base_layer/key_manager" } anyhow = "1.0.53" async-trait = "0.1.50" chrono = { version = "0.4.22", default-features = false } -config = "0.13.0" +config = "0.14.0" csv = "1.1" cucumber = { version = "0.20.0", features = ["default", "libtest", "output-junit"] } futures = { version = "^0.3.1" } @@ -48,7 +48,7 @@ serde_json = "1.0.64" tempfile = "3.3.0" thiserror = "^1.0.20" time = "0.3.15" -tokio = { version = "1.10", features = ["macros", "time", "sync", "rt-multi-thread"] } +tokio = { version = "1.36", features = ["macros", "time", "sync", "rt-multi-thread"] } tonic = "0.8.3" [package.metadata.cargo-machete] diff --git a/integration_tests/src/base_node_process.rs b/integration_tests/src/base_node_process.rs index cefe6b9af9..675bee11ff 100644 --- a/integration_tests/src/base_node_process.rs +++ b/integration_tests/src/base_node_process.rs @@ -30,10 +30,13 @@ use std::{ }; use minotari_app_utilities::identity_management::save_as_json; -use minotari_node::{run_base_node, BaseNodeConfig, MetricsConfig}; +use minotari_node::{config::GrpcMethod, run_base_node, BaseNodeConfig, MetricsConfig}; use minotari_node_grpc_client::BaseNodeGrpcClient; use rand::rngs::OsRng; -use tari_common::configuration::{CommonConfig, MultiaddrList}; +use tari_common::{ + configuration::{CommonConfig, MultiaddrList}, + network_check::set_network_if_choice_valid, +}; use tari_comms::{multiaddr::Multiaddr, peer_manager::PeerFeatures, NodeIdentity}; use tari_comms_dht::{DbConnectionUrl, DhtConfig}; use tari_p2p::{auto_update::AutoUpdateConfig, Network, PeerSeedsConfig, TransportType}; @@ -88,6 +91,9 @@ pub async fn spawn_base_node_with_config( peers: Vec<String>, mut base_node_config: BaseNodeConfig, ) { + std::env::set_var("TARI_NETWORK", "localnet"); + set_network_if_choice_valid(Network::LocalNet).unwrap(); + let port: u64; let grpc_port: u64; let temp_dir_path: PathBuf; @@ -184,8 +190,44 @@ pub async fn spawn_base_node_with_config( if base_node_config.base_node.storage.pruning_horizon != 0 { base_node_config.base_node.storage.pruning_interval = 1; }; - - base_node_config.base_node.grpc_server_deny_methods = vec![]; + base_node_config.base_node.grpc_server_allow_methods = vec![ + GrpcMethod::ListHeaders, + GrpcMethod::GetHeaderByHash, + GrpcMethod::GetBlocks, + GrpcMethod::GetBlockTiming, + GrpcMethod::GetConstants, + GrpcMethod::GetBlockSize, + GrpcMethod::GetBlockFees, + GrpcMethod::GetVersion, + GrpcMethod::CheckForUpdates, + GrpcMethod::GetTokensInCirculation, + GrpcMethod::GetNetworkDifficulty, + GrpcMethod::GetNewBlockTemplate, + GrpcMethod::GetNewBlock, + GrpcMethod::GetNewBlockWithCoinbases, + GrpcMethod::GetNewBlockTemplateWithCoinbases, + GrpcMethod::GetNewBlockBlob, + GrpcMethod::SubmitBlock, + GrpcMethod::SubmitBlockBlob, + GrpcMethod::SubmitTransaction, + GrpcMethod::GetSyncInfo, + GrpcMethod::GetSyncProgress, + GrpcMethod::GetTipInfo, + GrpcMethod::SearchKernels, + GrpcMethod::SearchUtxos, +
GrpcMethod::FetchMatchingUtxos, + GrpcMethod::GetPeers, + GrpcMethod::GetMempoolTransactions, + GrpcMethod::TransactionState, + GrpcMethod::Identify, + GrpcMethod::GetNetworkStatus, + GrpcMethod::ListConnectedPeers, + GrpcMethod::GetMempoolStats, + GrpcMethod::GetActiveValidatorNodes, + GrpcMethod::GetShardKey, + GrpcMethod::GetTemplateRegistrations, + GrpcMethod::GetSideChainUtxos, + ]; // Hierarchically set the base path for all configs base_node_config.base_node.set_base_path(temp_dir_path.clone()); diff --git a/integration_tests/src/ffi/ffi_import.rs b/integration_tests/src/ffi/ffi_import.rs index 5e88c0e3d4..01f7405477 100644 --- a/integration_tests/src/ffi/ffi_import.rs +++ b/integration_tests/src/ffi/ffi_import.rs @@ -386,6 +386,8 @@ extern "C" { passphrase: *const c_char, seed_words: *const TariSeedWords, network_str: *const c_char, + peer_seed_str: *const c_char, + dns_sec: bool, callback_received_transaction: unsafe extern "C" fn(*mut TariPendingInboundTransaction), callback_received_transaction_reply: unsafe extern "C" fn(*mut TariCompletedTransaction), callback_received_finalized_transaction: unsafe extern "C" fn(*mut TariCompletedTransaction), @@ -451,7 +453,7 @@ extern "C" { msg: *const c_char, error_out: *mut c_int, ) -> bool; - pub fn wallet_add_base_node_peer( + pub fn wallet_set_base_node_peer( wallet: *mut TariWallet, public_key: *mut TariPublicKey, address: *const c_char, diff --git a/integration_tests/src/ffi/wallet.rs b/integration_tests/src/ffi/wallet.rs index 5a9ec92f05..45f28c51a4 100644 --- a/integration_tests/src/ffi/wallet.rs +++ b/integration_tests/src/ffi/wallet.rs @@ -178,6 +178,8 @@ impl Wallet { CString::new("kensentme").unwrap().into_raw(), seed_words_ptr, CString::new("localnet").unwrap().into_raw(), + CString::new("").unwrap().into_raw(), + false, callback_received_transaction, callback_received_transaction_reply, callback_received_finalized_transaction, @@ -202,6 +204,7 @@ impl Wallet { println!("wallet_create error {}", error); } } + #[allow(clippy::arc_with_non_send_sync)] let wallet = Arc::new(Mutex::new(Self { ptr, liveness_data: Default::default(), @@ -235,14 +238,14 @@ impl Wallet { let mut error = 0; let success; unsafe { - success = ffi_import::wallet_add_base_node_peer( + success = ffi_import::wallet_set_base_node_peer( self.ptr, base_node.get_ptr(), CString::new(address).unwrap().into_raw(), &mut error, ); if error > 0 { - println!("wallet_add_base_node_peer error {}", error); + println!("wallet_set_base_node_peer error {}", error); } } success diff --git a/integration_tests/src/merge_mining_proxy.rs b/integration_tests/src/merge_mining_proxy.rs index 4beb8a8fb6..87e7efa4e8 100644 --- a/integration_tests/src/merge_mining_proxy.rs +++ b/integration_tests/src/merge_mining_proxy.rs @@ -27,7 +27,7 @@ use minotari_app_utilities::common_cli_args::CommonCliArgs; use minotari_merge_mining_proxy::{merge_miner, Cli}; use minotari_wallet_grpc_client::WalletGrpcClient; use serde_json::{json, Value}; -use tari_common::configuration::Network; +use tari_common::{configuration::Network, network_check::set_network_if_choice_valid}; use tari_common_types::{tari_address::TariAddress, types::PublicKey}; use tari_utilities::ByteArray; use tempfile::tempdir; @@ -74,6 +74,9 @@ pub async fn register_merge_mining_proxy_process( impl MergeMiningProxyProcess { pub async fn start(&self, world: &mut TariWorld) { + std::env::set_var("TARI_NETWORK", "localnet"); + set_network_if_choice_valid(Network::LocalNet).unwrap(); + let temp_dir = tempdir().unwrap(); let data_dir =
temp_dir.path().join("data/miner"); let data_dir_str = data_dir.clone().into_os_string().into_string().unwrap(); @@ -112,7 +115,7 @@ impl MergeMiningProxyProcess { ), ( "merge_mining_proxy.monerod_url".to_string(), - vec![ + [ "http://stagenet.xmr-tw.org:38081", "http://stagenet.community.xmr.to:38081", "http://monero-stagenet.exan.tech:38081", diff --git a/integration_tests/src/miner.rs b/integration_tests/src/miner.rs index cf03553175..840107b3a2 100644 --- a/integration_tests/src/miner.rs +++ b/integration_tests/src/miner.rs @@ -35,7 +35,7 @@ use minotari_app_utilities::common_cli_args::CommonCliArgs; use minotari_miner::{run_miner, Cli}; use minotari_node_grpc_client::BaseNodeGrpcClient; use minotari_wallet_grpc_client::WalletGrpcClient; -use tari_common::configuration::Network; +use tari_common::{configuration::Network, network_check::set_network_if_choice_valid}; use tari_common_types::{tari_address::TariAddress, types::PublicKey}; use tari_core::{ consensus::ConsensusManager, @@ -88,6 +88,9 @@ impl MinerProcess { miner_min_diff: Option<u64>, miner_max_diff: Option<u64>, ) { + std::env::set_var("TARI_NETWORK", "localnet"); + set_network_if_choice_valid(Network::LocalNet).unwrap(); + let mut wallet_client = create_wallet_client(world, self.wallet_name.clone()) .await .expect("wallet grpc client"); diff --git a/integration_tests/src/wallet_ffi.rs b/integration_tests/src/wallet_ffi.rs index 601f4b3485..f1834ebf28 100644 --- a/integration_tests/src/wallet_ffi.rs +++ b/integration_tests/src/wallet_ffi.rs @@ -145,8 +145,7 @@ impl WalletFFI { } pub fn get_counters(&self) -> &mut Callbacks { - let callback = Callbacks::instance(); - callback + Callbacks::instance() } pub fn start_txo_validation(&self) -> u64 { diff --git a/integration_tests/src/wallet_process.rs b/integration_tests/src/wallet_process.rs index 555d31fb4d..f79d3ab5b4 100644 --- a/integration_tests/src/wallet_process.rs +++ b/integration_tests/src/wallet_process.rs @@ -27,7 +27,10 @@ use minotari_app_utilities::common_cli_args::CommonCliArgs; use minotari_console_wallet::{run_wallet_with_cli, Cli}; use minotari_wallet::{transaction_service::config::TransactionRoutingMechanism, WalletConfig}; use minotari_wallet_grpc_client::WalletGrpcClient; -use tari_common::configuration::{CommonConfig, MultiaddrList}; +use tari_common::{ + configuration::{CommonConfig, MultiaddrList}, + network_check::set_network_if_choice_valid, +}; use tari_comms::multiaddr::Multiaddr; use tari_comms_dht::{DbConnectionUrl, DhtConfig}; use tari_p2p::{auto_update::AutoUpdateConfig, Network, PeerSeedsConfig, TransportType}; @@ -62,6 +65,9 @@ pub async fn spawn_wallet( routing_mechanism: Option<TransactionRoutingMechanism>, cli: Option<Cli>, ) { + std::env::set_var("TARI_NETWORK", "localnet"); + set_network_if_choice_valid(Network::LocalNet).unwrap(); + let port: u64; let grpc_port: u64; let temp_dir_path: PathBuf; diff --git a/integration_tests/tests/features/BlockTemplate.feature b/integration_tests/tests/features/BlockTemplate.feature index c7854f2dd1..12b4533c7f 100644 --- a/integration_tests/tests/features/BlockTemplate.feature +++ b/integration_tests/tests/features/BlockTemplate.feature @@ -9,3 +9,10 @@ Scenario: Verify UTXO and kernel MMR size in header Given I have a seed node SEED_A When I have 1 base nodes connected to all seed nodes Then meddling with block template data from node SEED_A is not allowed + + @critical + Scenario: Verify grpc can create block with more than 1 coinbase + Given I have a seed node SEED_A + When I have 1 base nodes connected to all seed nodes + Then generate a block
with 2 coinbases from node SEED_A + Then generate a block with 2 coinbases as a single request from node SEED_A \ No newline at end of file diff --git a/integration_tests/tests/features/StressTest.feature b/integration_tests/tests/features/StressTest.feature index 8f57c391f5..19257ad98f 100644 --- a/integration_tests/tests/features/StressTest.feature +++ b/integration_tests/tests/features/StressTest.feature @@ -22,7 +22,7 @@ Feature: Stress Test # When mining node MINER mines 3 blocks # When mining node MINER mines blocks # Then all nodes are on the same chain tip - # Then wallet WALLET_A detects all transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A detects all transactions as Mined_or_OneSidedConfirmed # When I send transactions of 1111 uT each from wallet WALLET_A to wallet WALLET_B at fee_per_gram 4 # # Mine enough blocks for the first block of transactions to be confirmed. # When mining node MINER mines 4 blocks @@ -30,9 +30,9 @@ Feature: Stress Test # # Now wait until all transactions are detected as confirmed in WALLET_A, continue to mine blocks if transactions # # are not found to be confirmed as sometimes the previous mining occurs faster than transactions are submitted # # to the mempool - # Then while mining via SHA3 miner MINER all transactions in wallet WALLET_A are found to be Mined_or_Faux_Confirmed - # # Then wallet WALLET_B detects all transactions as Mined_or_Faux_Confirmed - # Then while mining via node NODE1 all transactions in wallet WALLET_B are found to be Mined_or_Faux_Confirmed + # Then while mining via SHA3 miner MINER all transactions in wallet WALLET_A are found to be Mined_or_OneSidedConfirmed + # # Then wallet WALLET_B detects all transactions as Mined_or_OneSidedConfirmed + # Then while mining via node NODE1 all transactions in wallet WALLET_B are found to be Mined_or_OneSidedConfirmed # @flaky # Examples: @@ -71,7 +71,7 @@ Feature: Stress Test # When mining node MINER mines 8 blocks # Then all nodes are on the same chain tip - # Then wallet WALLET_A detects all transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A detects all transactions as Mined_or_OneSidedConfirmed # When I send 2000 transactions of 1111 uT each from wallet WALLET_A to wallet WALLET_B at fee_per_gram 4 # # Mine enough blocks for the first block of transactions to be confirmed. 
# When mining node MINER mines 4 blocks @@ -79,6 +79,6 @@ Feature: Stress Test # # Now wait until all transactions are detected as confirmed in WALLET_A, continue to mine blocks if transactions # # are not found to be confirmed as sometimes the previous mining occurs faster than transactions are submitted # # to the mempool - # Then while mining via SHA3 miner MINER all transactions in wallet WALLET_A are found to be Mined_or_Faux_Confirmed - # # Then wallet WALLET_B detects all transactions as Mined_or_Faux_Confirmed - # Then while mining via node NODE1 all transactions in wallet WALLET_B are found to be Mined_or_Faux_Confirmed + # Then while mining via SHA3 miner MINER all transactions in wallet WALLET_A are found to be Mined_or_OneSidedConfirmed + # # Then wallet WALLET_B detects all transactions as Mined_or_OneSidedConfirmed + # Then while mining via node NODE1 all transactions in wallet WALLET_B are found to be Mined_or_OneSidedConfirmed diff --git a/integration_tests/tests/features/TransactionInfo.feature b/integration_tests/tests/features/TransactionInfo.feature index 0093e6e000..74b8dfcc92 100644 --- a/integration_tests/tests/features/TransactionInfo.feature +++ b/integration_tests/tests/features/TransactionInfo.feature @@ -26,11 +26,11 @@ Scenario: Get Transaction Info Then wallet WALLET_B detects all transactions are at least Broadcast When mining node MINER2 mines 1 blocks Then all nodes are at height 5 - Then wallet WALLET_A detects all transactions are at least Mined_or_Faux_Unconfirmed - Then wallet WALLET_B detects all transactions are at least Mined_or_Faux_Unconfirmed + Then wallet WALLET_A detects all transactions are at least Mined_or_OneSidedUnconfirmed + Then wallet WALLET_B detects all transactions are at least Mined_or_OneSidedUnconfirmed When mining node MINER2 mines 10 blocks Then all nodes are at height 15 - Then wallet WALLET_A detects all transactions as Mined_or_Faux_Confirmed - Then wallet WALLET_B detects all transactions as Mined_or_Faux_Confirmed + Then wallet WALLET_A detects all transactions as Mined_or_OneSidedConfirmed + Then wallet WALLET_B detects all transactions as Mined_or_OneSidedConfirmed # This wait is needed to stop base nodes from shutting down When I wait 1 seconds diff --git a/integration_tests/tests/features/WalletCli.feature b/integration_tests/tests/features/WalletCli.feature index 61d8cc682a..a3d7d76080 100644 --- a/integration_tests/tests/features/WalletCli.feature +++ b/integration_tests/tests/features/WalletCli.feature @@ -82,9 +82,8 @@ Feature: Wallet CLI When I have mining node MINE connected to base node BASE and wallet SENDER When mining node MINE mines 15 blocks Then wallets SENDER should have AT_LEAST 12 spendable coinbase outputs - When I wait 30 seconds Then I stop wallet SENDER - When I make it rain from wallet SENDER 1 tx per sec 10 sec 8000 uT 100 increment to RECEIVER via command line + When I make-it-rain from SENDER rate 10 txns_per_sec duration 1 sec value 8000 uT increment 100 uT to RECEIVER via command line Then wallet SENDER has at least 10 transactions that are all TRANSACTION_STATUS_BROADCAST and not cancelled Then wallet RECEIVER has at least 10 transactions that are all TRANSACTION_STATUS_BROADCAST and not cancelled When mining node MINE mines 5 blocks diff --git a/integration_tests/tests/features/WalletFFI.feature b/integration_tests/tests/features/WalletFFI.feature index 702c0f5f6a..9ff6705fc9 100644 --- a/integration_tests/tests/features/WalletFFI.feature +++ 
b/integration_tests/tests/features/WalletFFI.feature @@ -92,7 +92,7 @@ Feature: Wallet FFI Then I wait for ffi wallet FFI_WALLET to have at least 2 contacts to be Online And I stop ffi wallet FFI_WALLET - @critical @brokenFFI @broken + @critical Scenario: As a client I want to retrieve a list of transactions I have made and received Given I have a seed node SEED When I have a base node BASE1 connected to all seed nodes @@ -171,7 +171,7 @@ Feature: Wallet FFI Then I wait for ffi wallet FFI_WALLET to have at least 3000000 uT And I stop ffi wallet FFI_WALLET - @critical @brokenFFI @broken + @critical Scenario: As a client I want to send a one-sided transaction Given I have a seed node SEED When I have a base node BASE1 connected to all seed nodes @@ -202,13 +202,13 @@ Feature: Wallet FFI Then ffi wallet FFI_WALLET detects AT_LEAST 3 ffi transactions to be TRANSACTION_STATUS_BROADCAST When mining node MINER mines 2 blocks Then all nodes are at height 22 - Then wallet RECEIVER has at least 1 transactions that are all TRANSACTION_STATUS_FAUX_UNCONFIRMED and not cancelled + Then wallet RECEIVER has at least 1 transactions that are all TRANSACTION_STATUS_ONE_SIDED_UNCONFIRMED and not cancelled When mining node MINER mines 5 blocks Then all nodes are at height 27 - Then wallet RECEIVER has at least 1 transactions that are all TRANSACTION_STATUS_FAUX_CONFIRMED and not cancelled + Then wallet RECEIVER has at least 1 transactions that are all TRANSACTION_STATUS_ONE_SIDED_CONFIRMED and not cancelled And I stop ffi wallet FFI_WALLET - @critical @brokenFFI @broken + @critical Scenario: As a client I want to receive a one-sided transaction Given I have a seed node SEED When I have a base node BASE1 connected to all seed nodes @@ -221,12 +221,12 @@ Feature: Wallet FFI Then I send a one-sided transaction of 1000000 uT from SENDER to FFI_RECEIVER at fee 20 When mining node MINER mines 2 blocks Then all nodes are at height 12 - Then ffi wallet FFI_RECEIVER detects AT_LEAST 1 ffi transactions to be TRANSACTION_STATUS_FAUX_UNCONFIRMED + Then ffi wallet FFI_RECEIVER detects AT_LEAST 1 ffi transactions to be TRANSACTION_STATUS_ONE_SIDED_UNCONFIRMED And I send 1000000 uT from wallet SENDER to wallet FFI_RECEIVER at fee 20 Then ffi wallet FFI_RECEIVER detects AT_LEAST 1 ffi transactions to be TRANSACTION_STATUS_BROADCAST When mining node MINER mines 5 blocks Then all nodes are at height 17 - Then ffi wallet FFI_RECEIVER detects AT_LEAST 1 ffi transactions to be TRANSACTION_STATUS_FAUX_CONFIRMED + Then ffi wallet FFI_RECEIVER detects AT_LEAST 1 ffi transactions to be TRANSACTION_STATUS_ONE_SIDED_CONFIRMED And I stop ffi wallet FFI_RECEIVER Scenario: As a client I want to get fee per gram stats diff --git a/integration_tests/tests/features/WalletMonitoring.feature b/integration_tests/tests/features/WalletMonitoring.feature index 6a967dfbea..4d9fc2ac78 100644 --- a/integration_tests/tests/features/WalletMonitoring.feature +++ b/integration_tests/tests/features/WalletMonitoring.feature @@ -21,7 +21,7 @@ Feature: Wallet Monitoring # And I list all COINBASE transactions for wallet WALLET_A1 # Then wallet WALLET_A1 has 10 coinbase transactions # Then all COINBASE transactions for wallet WALLET_A1 are valid - # Then wallet WALLET_A1 detects at least 7 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A1 detects at least 7 coinbase transactions as CoinbaseConfirmed # # # # Chain 2: # # Collects 10 coinbases into one wallet @@ -36,7 +36,7 @@ Feature: Wallet Monitoring # And I list all COINBASE 
transactions for wallet WALLET_B1 # Then wallet WALLET_B1 has 10 coinbase transactions # Then all COINBASE transactions for wallet WALLET_B1 are valid - # Then wallet WALLET_B1 detects at least 7 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_B1 detects at least 7 coinbase transactions as CoinbaseConfirmed # # # # Connect Chain 1 and 2 # # @@ -66,15 +66,15 @@ Feature: Wallet Monitoring # When mining node MINING_A mines 10 blocks with min difficulty 20 and max difficulty 9999999999 # Then node SEED_A is at height 10 # Then node NODE_A1 is at height 10 - # Then wallet WALLET_A1 detects exactly 7 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A1 detects exactly 7 coinbase transactions as CoinbaseConfirmed # # Use 7 of the 10 coinbase UTXOs in transactions (others require 3 confirmations) # And I multi-send 7 transactions of 1000000 uT from wallet WALLET_A1 to wallet WALLET_A2 at fee 100 # When mining node MINING_A mines 10 blocks with min difficulty 20 and max difficulty 9999999999 # Then node SEED_A is at height 20 # Then node NODE_A1 is at height 20 - # Then wallet WALLET_A2 detects all transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A2 detects all transactions as Mined_or_OneSidedConfirmed # Then all NORMAL transactions for wallet WALLET_A1 are valid - # Then wallet WALLET_A1 detects exactly 17 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A1 detects exactly 17 coinbase transactions as CoinbaseConfirmed # # # # Chain 2: # # Collects 10 coinbases into one wallet, send 7 transactions @@ -88,15 +88,15 @@ Feature: Wallet Monitoring # When mining node MINING_B mines 10 blocks with min difficulty 1 and max difficulty 2 # Then node SEED_B is at height 10 # Then node NODE_B1 is at height 10 - # Then wallet WALLET_B1 detects exactly 7 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_B1 detects exactly 7 coinbase transactions as CoinbaseConfirmed # # Use 7 of the 10 coinbase UTXOs in transactions (others require 3 confirmations) # And I multi-send 7 transactions of 1000000 uT from wallet WALLET_B1 to wallet WALLET_B2 at fee 100 # When mining node MINING_B mines 10 blocks with min difficulty 1 and max difficulty 2 # Then node SEED_B is at height 20 # Then node NODE_B1 is at height 20 - # Then wallet WALLET_B2 detects all transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_B2 detects all transactions as Mined_or_OneSidedConfirmed # Then all NORMAL transactions for wallet WALLET_B1 are valid - # Then wallet WALLET_B1 detects exactly 17 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_B1 detects exactly 17 coinbase transactions as CoinbaseConfirmed # # # # Connect Chain 1 and 2 # # @@ -105,8 +105,8 @@ Feature: Wallet Monitoring # # When tip advances past required confirmations, invalid coinbases still being monitored will be cancelled. 
# And mining node NODE_C mines 6 blocks # Then all nodes are at height 26 - # Then wallet WALLET_A1 detects exactly 20 coinbase transactions as Mined_or_Faux_Confirmed - # Then wallet WALLET_B1 detects exactly 17 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A1 detects exactly 20 coinbase transactions as CoinbaseConfirmed + # Then wallet WALLET_B1 detects exactly 17 coinbase transactions as CoinbaseConfirmed # And I list all NORMAL transactions for wallet WALLET_A1 # And I list all NORMAL transactions for wallet WALLET_B1 # # Uncomment this step when wallets can handle reorg diff --git a/integration_tests/tests/features/WalletQuery.feature b/integration_tests/tests/features/WalletQuery.feature index 09e6fe575d..f97709ac59 100644 --- a/integration_tests/tests/features/WalletQuery.feature +++ b/integration_tests/tests/features/WalletQuery.feature @@ -12,7 +12,7 @@ Feature: Wallet Querying When mining node MINER mines 5 blocks Then all nodes are at height 5 When I mine 5 blocks on NODE - Then all wallets detect all transactions as Mined_or_Faux_Confirmed + Then all wallets detect all transactions as Mined_or_OneSidedConfirmed @critical Scenario: As a wallet I want to submit a transaction @@ -23,5 +23,5 @@ Feature: Wallet Querying When I wait 5 seconds When I transfer 5T from WALLET_A to WALLET_B When I mine 5 blocks on NODE - Then all wallets detect all transactions as Mined_or_Faux_Confirmed + Then all wallets detect all transactions as Mined_or_OneSidedConfirmed diff --git a/integration_tests/tests/features/WalletRoutingMechanism.feature b/integration_tests/tests/features/WalletRoutingMechanism.feature index 3f7a006aaa..bb4e9ef69e 100644 --- a/integration_tests/tests/features/WalletRoutingMechanism.feature +++ b/integration_tests/tests/features/WalletRoutingMechanism.feature @@ -23,12 +23,12 @@ Feature: Wallet Routing Mechanism When I wait 1 seconds # And mining node MINER mines 1 blocks # Then all nodes are at height 21 - # Then all wallets detect all transactions as Mined_or_Faux_Unconfirmed + # Then all wallets detect all transactions as Mined_or_OneSidedUnconfirmed # # This wait is needed to stop next merge mining task from continuing When I wait 1 seconds # And mining node MINER mines 11 blocks # Then all nodes are at height 32 - # Then all wallets detect all transactions as Mined_or_Faux_Confirmed + # Then all wallets detect all transactions as Mined_or_OneSidedConfirmed # This wait is needed to stop base nodes from shutting down When I wait 1 seconds # @long-running diff --git a/integration_tests/tests/features/WalletTransactions.feature b/integration_tests/tests/features/WalletTransactions.feature index 50d138ba1f..1e2409d8d4 100644 --- a/integration_tests/tests/features/WalletTransactions.feature +++ b/integration_tests/tests/features/WalletTransactions.feature @@ -226,7 +226,7 @@ Feature: Wallet Transactions # Then node SEED_A is at height 7 # Then node NODE_A1 is at height 7 # When I mine 3 blocks on SEED_A - # Then wallet WALLET_A1 detects at least 7 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A1 detects at least 7 coinbase transactions as CoinbaseConfirmed # Then node SEED_A is at height 10 # Then node NODE_A1 is at height 10 # When I multi-send 7 transactions of 1000000 uT from wallet WALLET_A1 to wallet WALLET_A2 at fee 100 @@ -244,7 +244,7 @@ Feature: Wallet Transactions # Then node SEED_B is at height 7 # Then node NODE_B1 is at height 7 # When I mine 5 blocks on SEED_B - # Then wallet WALLET_B1 detects at least 7 
coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_B1 detects at least 7 coinbase transactions as CoinbaseConfirmed # Then node SEED_B is at height 12 # Then node NODE_B1 is at height 12 # When I multi-send 7 transactions of 1000000 uT from wallet WALLET_B1 to wallet WALLET_B2 at fee 100 @@ -306,7 +306,7 @@ Feature: Wallet Transactions # Then node SEED_A is at height 1 # Then node NODE_A1 is at height 1 # When I mine 3 blocks on SEED_A - # Then wallet WALLET_A1 detects at least 1 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_A1 detects at least 1 coinbase transactions as CoinbaseConfirmed # Then node SEED_A is at height 4 # Then node NODE_A1 is at height 4 # When I multi-send 1 transactions of 10000 uT from wallet WALLET_A1 to wallet WALLET_A2 at fee 20 @@ -324,7 +324,7 @@ Feature: Wallet Transactions # Then node SEED_B is at height 2 # Then node NODE_B1 is at height 2 # When I mine 3 blocks on SEED_B - # Then wallet WALLET_B1 detects at least 2 coinbase transactions as Mined_or_Faux_Confirmed + # Then wallet WALLET_B1 detects at least 2 coinbase transactions as CoinbaseConfirmed # Then node SEED_B is at height 5 # Then node NODE_B1 is at height 5 # When I multi-send 2 transactions of 10000 uT from wallet WALLET_B1 to wallet WALLET_B2 at fee 20 @@ -380,7 +380,7 @@ Feature: Wallet Transactions # When I wait 30 seconds # When mining node MINER mines 5 blocks # Then all nodes are at height 15 - # When wallet WALLET_SENDER detects all transactions as Mined_or_Faux_Confirmed + # When wallet WALLET_SENDER detects all transactions as Mined_or_OneSidedConfirmed # When I start wallet WALLET_RECV # When I wait 5 seconds # Then I restart wallet WALLET_RECV @@ -422,5 +422,5 @@ Feature: Wallet Transactions When I create a burn transaction of 201552500000 uT from WALLET_A at fee 100 When mining node MINER_B mines 5 blocks Then all nodes are at height 20 - Then wallet WALLET_A detects all transactions as Mined_or_Faux_Confirmed + Then wallet WALLET_A detects all transactions as Mined_or_OneSidedConfirmed When I wait for wallet WALLET_A to have at least 20000000000 uT diff --git a/integration_tests/tests/features/WalletTransfer.feature b/integration_tests/tests/features/WalletTransfer.feature index df45336c93..210cada8ad 100644 --- a/integration_tests/tests/features/WalletTransfer.feature +++ b/integration_tests/tests/features/WalletTransfer.feature @@ -40,7 +40,7 @@ Feature: Wallet Transfer When I transfer 50000 uT from WALLET_A to WALLET_B and WALLET_C at fee 20 When mining node MINER mines 10 blocks Then all nodes are at height 20 - Then all wallets detect all transactions as Mined_or_Faux_Confirmed + Then all wallets detect all transactions as Mined_or_OneSidedConfirmed Scenario: As a wallet I want to submit transfers to myself @@ -55,7 +55,7 @@ Feature: Wallet Transfer When I transfer 50000 uT to self from wallet WALLET_A at fee 25 When I mine 5 blocks on NODE Then all nodes are at height 15 - Then all wallets detect all transactions as Mined_or_Faux_Confirmed + Then all wallets detect all transactions as Mined_or_OneSidedConfirmed Scenario: As a wallet I want to create a HTLC transaction Given I have a seed node NODE diff --git a/integration_tests/tests/steps/mining_steps.rs b/integration_tests/tests/steps/mining_steps.rs index a9dab35920..29362e216b 100644 --- a/integration_tests/tests/steps/mining_steps.rs +++ b/integration_tests/tests/steps/mining_steps.rs @@ -132,7 +132,7 @@ async fn sha3_miner_connected_to_base_node(world: &mut TariWorld, miner: 
String, #[then( expr = "while mining via SHA3 miner {word} all transactions in wallet {word} are found to be \ - Mined_or_Faux_Confirmed" + Mined_or_OneSidedConfirmed" )] async fn while_mining_all_txs_in_wallet_are_mined_confirmed(world: &mut TariWorld, miner: String, wallet: String) { let mut wallet_client = create_wallet_client(world, wallet.clone()).await.unwrap(); @@ -146,7 +146,7 @@ async fn while_mining_all_txs_in_wallet_are_mined_confirmed(world: &mut TariWorl let miner_ps = world.miners.get(&miner).unwrap(); let num_retries = 100; println!( - "Detecting {} Mined_or_Faux_Confirmed transactions for wallet {}", + "Detecting {} Mined_or_OneSidedConfirmed transactions for wallet {}", wallet_tx_ids.len(), wallet ); @@ -158,10 +158,11 @@ async fn while_mining_all_txs_in_wallet_are_mined_confirmed(world: &mut TariWorl }; let res = wallet_client.get_transaction_info(req).await.unwrap().into_inner(); let tx_status = res.transactions.first().unwrap().status; - // TRANSACTION_STATUS_MINED_CONFIRMED code is currently 6 - if tx_status == 6 { + if tx_status == grpc::TransactionStatus::MinedConfirmed as i32 || + tx_status == grpc::TransactionStatus::OneSidedConfirmed as i32 + { println!( - "Wallet transaction with id {} has been detected with status Mined_or_Faux_Confirmed", + "Wallet transaction with id {} has been detected with status Mined_or_OneSidedConfirmed", tx_id ); break 'inner; @@ -169,13 +170,13 @@ async fn while_mining_all_txs_in_wallet_are_mined_confirmed(world: &mut TariWorl if retry == num_retries { panic!( - "Unable to have wallet transaction with tx_id = {} with status Mined_or_Faux_Confirmed", + "Unable to have wallet transaction with tx_id = {} with status Mined_or_OneSidedConfirmed", tx_id ); } println!( - "Mine a block for tx_id {} to have status Mined_or_Faux_Confirmed", + "Mine a block for tx_id {} to have status Mined_or_OneSidedConfirmed", tx_id ); miner_ps.mine(world, Some(1), None, None).await; @@ -185,7 +186,9 @@ async fn while_mining_all_txs_in_wallet_are_mined_confirmed(world: &mut TariWorl } } -#[then(expr = "while mining via node {word} all transactions in wallet {word} are found to be Mined_or_Faux_Confirmed")] +#[then( + expr = "while mining via node {word} all transactions in wallet {word} are found to be Mined_or_OneSidedConfirmed" +)] async fn while_mining_in_node_all_txs_in_wallet_are_mined_confirmed( world: &mut TariWorld, node: String, @@ -205,13 +208,13 @@ async fn while_mining_in_node_all_txs_in_wallet_are_mined_confirmed( let mut mined_status_flag = false; println!( - "Detecting transactions on wallet {}, while mining on node {}, to be Mined_or_Faux_Confirmed", + "Detecting transactions on wallet {}, while mining on node {}, to be Mined_or_OneSidedConfirmed", &wallet, &node ); for tx_id in wallet_tx_ids { println!( - "Waiting for transaction with id {} to have status Mined_or_Faux_Confirmed, while mining on node {}", + "Waiting for transaction with id {} to have status Mined_or_OneSidedConfirmed, while mining on node {}", tx_id, &node ); @@ -221,15 +224,16 @@ async fn while_mining_in_node_all_txs_in_wallet_are_mined_confirmed( }; let res = wallet_client.get_transaction_info(req).await.unwrap().into_inner(); let tx_status = res.transactions.first().unwrap().status; - // TRANSACTION_STATUS_MINED_CONFIRMED code is currently 6 - if tx_status == 6 { - println!("Transaction with id {} has been Mined_or_Faux_Confirmed", tx_id); + if tx_status == grpc::TransactionStatus::MinedConfirmed as i32 || + tx_status == grpc::TransactionStatus::OneSidedConfirmed as i32 + { 
+ println!("Transaction with id {} has been Mined_or_OneSidedConfirmed", tx_id); mined_status_flag = true; break 'inner; } println!( - "Mine a block for tx_id {} to have status Mined_or_Faux_Confirmed", + "Mine a block for tx_id {} to have status Mined_or_OneSidedConfirmed", tx_id ); mine_block( @@ -248,14 +252,14 @@ async fn while_mining_in_node_all_txs_in_wallet_are_mined_confirmed( if !mined_status_flag { panic!( "Failed to have transaction with id {} on wallet {}, while mining on node {}, to be \ - Mined_or_Faux_Confirmed", + Mined_or_OneSidedConfirmed", tx_id, &wallet, &node ); } } println!( - "Wallet {} has all transactions Mined_or_Faux_Confirmed, while mining on node {}", + "Wallet {} has all transactions Mined_or_OneSidedConfirmed, while mining on node {}", &wallet, &node ); } diff --git a/integration_tests/tests/steps/node_steps.rs b/integration_tests/tests/steps/node_steps.rs index c86016fbb9..f6c1049ab6 100644 --- a/integration_tests/tests/steps/node_steps.rs +++ b/integration_tests/tests/steps/node_steps.rs @@ -20,15 +20,29 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, time::Duration}; +use std::{ + convert::{TryFrom, TryInto}, + time::Duration, +}; use cucumber::{given, then, when}; use futures::StreamExt; use indexmap::IndexMap; -use minotari_app_grpc::tari_rpc::{self as grpc, GetBlocksRequest, ListHeadersRequest}; +use minotari_app_grpc::tari_rpc::{ + self as grpc, + pow_algo::PowAlgos, + GetBlocksRequest, + GetNewBlockTemplateWithCoinbasesRequest, + GetNewBlockWithCoinbasesRequest, + ListHeadersRequest, + NewBlockCoinbase, + NewBlockTemplateRequest, + PowAlgo, +}; use minotari_node::BaseNodeConfig; use minotari_wallet_grpc_client::grpc::{Empty, GetIdentityRequest}; -use tari_core::blocks::Block; +use tari_common_types::tari_address::TariAddress; +use tari_core::{blocks::Block, transactions::aggregated_body::AggregateBody}; use tari_integration_tests::{ base_node_process::{spawn_base_node, spawn_base_node_with_config}, get_peer_addresses, @@ -129,7 +143,7 @@ async fn wait_for_node_have_x_connections(world: &mut TariWorld, node: String, n async fn all_nodes_on_same_chain_at_height(world: &mut TariWorld, height: u64) { let mut nodes_at_height: IndexMap<&String, (u64, Vec<u8>)> = IndexMap::new(); - for (name, _) in world.base_nodes.iter() { + for (name, _) in &world.base_nodes { nodes_at_height.insert(name, (0, vec![])); } @@ -144,7 +158,7 @@ async fn all_nodes_on_same_chain_at_height(world: &mut TariWorld, height: u64) { let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); let metadata = chain_tip.metadata.unwrap(); - nodes_at_height.insert(name, (metadata.height_of_longest_chain, metadata.best_block)); + nodes_at_height.insert(name, (metadata.best_block_height, metadata.best_block_hash)); } if nodes_at_height @@ -168,7 +182,7 @@ async fn all_nodes_on_same_chain_at_height(world: &mut TariWorld, height: u64) { async fn all_nodes_are_at_height(world: &mut TariWorld, height: u64) { let mut nodes_at_height: IndexMap<&String, u64> = IndexMap::new(); - for (name, _) in world.base_nodes.iter() { + for (name, _) in &world.base_nodes { nodes_at_height.insert(name, 0); } @@ -182,7 +196,7 @@ async fn all_nodes_are_at_height(world: &mut TariWorld, height: u64) { let mut client = world.get_node_client(name).await.unwrap(); let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner();
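Both renamed mining steps above gate on the same predicate: a transaction now counts as confirmed when its gRPC status is either `MinedConfirmed` or `OneSidedConfirmed`, rather than matching the magic code `6` alone. A reduced, runnable sketch of that check (the numeric codes mirror the status mapping in `wallet_steps.rs` below):

```rust
// Reduced model of the confirmation predicate used by the renamed steps.
// The numeric codes mirror the status mapping in wallet_steps.rs (6 and 9).
#[repr(i32)]
enum TransactionStatus {
    MinedConfirmed = 6,
    OneSidedConfirmed = 9,
}

// A transaction passes for either confirmation path.
fn is_confirmed(tx_status: i32) -> bool {
    tx_status == TransactionStatus::MinedConfirmed as i32 ||
        tx_status == TransactionStatus::OneSidedConfirmed as i32
}

fn main() {
    assert!(is_confirmed(6)); // MinedConfirmed
    assert!(is_confirmed(9)); // OneSidedConfirmed
    assert!(!is_confirmed(1)); // e.g. a still-pending status code
}
```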
- let chain_hgt = chain_tip.metadata.unwrap().height_of_longest_chain; + let chain_hgt = chain_tip.metadata.unwrap().best_block_height; nodes_at_height.insert(name, chain_hgt); } @@ -208,7 +222,7 @@ async fn node_is_at_height(world: &mut TariWorld, base_node: String, height: u64 for _ in 0..=(TWO_MINUTES_WITH_HALF_SECOND_SLEEP) { let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - chain_hgt = chain_tip.metadata.unwrap().height_of_longest_chain; + chain_hgt = chain_tip.metadata.unwrap().best_block_height; if chain_hgt >= height { return; @@ -506,7 +520,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; let mut base_node_client = world.get_node_client(&base_node).await.unwrap(); let mut current_height = 0; @@ -521,7 +535,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; if current_height >= expected_height { break 'inner; } @@ -536,7 +550,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; current_height = base_node_client .get_tip_info(req.clone()) .await .unwrap() .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; if current_height == expected_height { break 'outer; @@ -644,7 +658,7 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { // No meddling let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let current_height = chain_tip.metadata.unwrap().height_of_longest_chain; + let current_height = chain_tip.metadata.unwrap().best_block_height; let script_key_id = &world.script_key_id().await; let block = mine_block_before_submit( &mut client, @@ -658,7 +672,7 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { let _submit_res = client.submit_block(block).await.unwrap(); let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let new_height = chain_tip.metadata.unwrap().height_of_longest_chain; + let new_height = chain_tip.metadata.unwrap().best_block_height; assert_eq!( current_height + 1, new_height, @@ -687,7 +701,7 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { Ok(_) => panic!("The block should not have been valid"), Err(e) => assert_eq!( "Chain storage error: Validation error: Block validation error: MMR size for Kernel does not match. \ - Expected: 3, received: 4" + Expected: 2, received: 3" .to_string(), e.message() ), @@ -712,13 +726,150 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { Ok(_) => panic!("The block should not have been valid"), Err(e) => assert_eq!( "Chain storage error: Validation error: Block validation error: MMR size for UTXO does not match.
\ - Expected: 102, received: 103" + Expected: 2, received: 3" .to_string(), e.message() ), } } +#[then(expr = "generate a block with 2 coinbases from node {word}")] +async fn generate_block_with_2_coinbases(world: &mut TariWorld, node: String) { + let mut client = world.get_node_client(&node).await.unwrap(); + + let template_req = NewBlockTemplateRequest { + algo: Some(PowAlgo { + pow_algo: PowAlgos::Sha3x.into(), + }), + max_weight: 0, + }; + + let template_response = client.get_new_block_template(template_req).await.unwrap().into_inner(); + + let block_template = template_response.new_block_template.clone().unwrap(); + let miner_data = template_response.miner_data.clone().unwrap(); + let amount = miner_data.reward + miner_data.total_fees; + let request = GetNewBlockWithCoinbasesRequest { + new_template: Some(block_template), + coinbases: vec![ + NewBlockCoinbase { + address: TariAddress::from_hex("30a815df7b8d7f653ce3252f08a21d570b1ac44958cb4d7af0e0ef124f89b11943") + .unwrap() + .to_hex(), + value: amount - 1000, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + NewBlockCoinbase { + address: TariAddress::from_hex("3e596f98f6904f0fc1c8685e2274bd8b2c445d5dac284a9398d09a0e9a760436d0") + .unwrap() + .to_hex(), + value: 1000, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + ], + }; + + let new_block = client.get_new_block_with_coinbases(request).await.unwrap().into_inner(); + + let new_block = new_block.block.unwrap(); + let mut coinbase_kernel_count = 0; + let mut coinbase_utxo_count = 0; + let body: AggregateBody = new_block.body.clone().unwrap().try_into().unwrap(); + for kernel in body.kernels() { + if kernel.is_coinbase() { + coinbase_kernel_count += 1; + } + } + for utxo in body.outputs() { + if utxo.is_coinbase() { + coinbase_utxo_count += 1; + } + } + assert_eq!(coinbase_kernel_count, 1); + assert_eq!(coinbase_utxo_count, 2); + + match client.submit_block(new_block).await { + Ok(_) => (), + Err(e) => panic!("The block should have been valid, {}", e), + } +} + +#[then(expr = "generate a block with 2 coinbases as a single request from node {word}")] +async fn generate_block_with_2_as_single_request_coinbases(world: &mut TariWorld, node: String) { + let mut client = world.get_node_client(&node).await.unwrap(); + + let template_req = GetNewBlockTemplateWithCoinbasesRequest { + algo: Some(PowAlgo { + pow_algo: PowAlgos::Sha3x.into(), + }), + max_weight: 0, + coinbases: vec![ + NewBlockCoinbase { + address: TariAddress::from_hex("30a815df7b8d7f653ce3252f08a21d570b1ac44958cb4d7af0e0ef124f89b11943") + .unwrap() + .to_hex(), + value: 1, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + NewBlockCoinbase { + address: TariAddress::from_hex("3e596f98f6904f0fc1c8685e2274bd8b2c445d5dac284a9398d09a0e9a760436d0") + .unwrap() + .to_hex(), + value: 2, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + ], + }; + let new_block = client + .get_new_block_template_with_coinbases(template_req) + .await + .unwrap() + .into_inner(); + + let new_block = new_block.block.unwrap(); + let mut coinbase_kernel_count = 0; + let mut coinbase_utxo_count = 0; + let body: AggregateBody = new_block.body.clone().unwrap().try_into().unwrap(); + for kernel in body.kernels() { + if kernel.is_coinbase() { + coinbase_kernel_count += 1; + } + } + println!("{}", body); + for utxo in body.outputs() { + if utxo.is_coinbase() { + coinbase_utxo_count += 1; + } + } + 
assert_eq!(coinbase_kernel_count, 1); + assert_eq!(coinbase_utxo_count, 2); + let mut num_6154266700 = 0; + let mut num_12308533398 = 0; + for output in body.outputs() { + if output.minimum_value_promise.as_u64() == 6154266700 { + num_6154266700 += 1; + } + if output.minimum_value_promise.as_u64() == 12308533398 { + num_12308533398 += 1; + } + } + assert_eq!(num_6154266700, 1); + assert_eq!(num_12308533398, 1); + + match client.submit_block(new_block).await { + Ok(_) => (), + Err(e) => panic!("The block should have been valid, {}", e), + } +} + #[when(expr = "I have a lagging delayed node {word} connected to node {word} with \ blocks_behind_before_considered_lagging {int}")] async fn lagging_delayed_node(world: &mut TariWorld, delayed_node: String, node: String, delay: u64) { @@ -736,7 +887,7 @@ async fn node_reached_sync(world: &mut TariWorld, node: String) { for _ in 0..(TWO_MINUTES_WITH_HALF_SECOND_SLEEP * 11) { let tip_info = client.get_tip_info(Empty {}).await.unwrap().into_inner(); let metadata = tip_info.metadata.unwrap(); - longest_chain = metadata.height_of_longest_chain; + longest_chain = metadata.best_block_height; if tip_info.initial_sync_achieved { return; diff --git a/integration_tests/tests/steps/wallet_cli_steps.rs b/integration_tests/tests/steps/wallet_cli_steps.rs index 2ff0be5da9..83b7c889a0 100644 --- a/integration_tests/tests/steps/wallet_cli_steps.rs +++ b/integration_tests/tests/steps/wallet_cli_steps.rs @@ -217,13 +217,13 @@ async fn send_one_sided_tx_via_cli(world: &mut TariWorld, amount: u64, wallet_a: } #[when( - expr = "I make it rain from wallet {word} {int} tx per sec {int} sec {int} uT {int} increment to {word} via \ - command line" + expr = "I make-it-rain from {word} rate {int} txns_per_sec duration {int} sec value {int} uT increment {int} uT \ + to {word} via command line" )] async fn make_it_rain( world: &mut TariWorld, wallet_a: String, - txs_per_second: u64, + txs_per_second: u32, duration: u64, start_amount: u64, increment_amount: u64, @@ -248,7 +248,7 @@ async fn make_it_rain( let args = MakeItRainArgs { start_amount: MicroMinotari(start_amount), - transactions_per_second: u32::try_from(txs_per_second).unwrap(), + transactions_per_second: f64::from(txs_per_second), duration: Duration::from_secs(duration), message: format!( "Make it raing amount {} from {} to {}", diff --git a/integration_tests/tests/steps/wallet_ffi_steps.rs b/integration_tests/tests/steps/wallet_ffi_steps.rs index 2a9fd450ca..a9dd1a360e 100644 --- a/integration_tests/tests/steps/wallet_ffi_steps.rs +++ b/integration_tests/tests/steps/wallet_ffi_steps.rs @@ -262,7 +262,7 @@ async fn ffi_check_number_of_outbound_transactions(world: &mut TariWorld, wallet #[then(expr = "I wait for ffi wallet {word} to have at least {int} contacts to be {word}")] async fn ffi_check_contacts(world: &mut TariWorld, wallet: String, cnt: u64, status: String) { assert!( - vec!["Online", "Offline", "NeverSeen"].contains(&status.as_str()), + ["Online", "Offline", "NeverSeen"].contains(&status.as_str()), "Unknown status: {}", status ); @@ -412,12 +412,12 @@ async fn ffi_detects_transaction( status: String, ) { let ffi_wallet = world.get_ffi_wallet(&wallet).unwrap(); - assert!(vec![ + assert!([ "TRANSACTION_STATUS_BROADCAST", "TRANSACTION_STATUS_MINED_UNCONFIRMED", "TRANSACTION_STATUS_MINED", - "TRANSACTION_STATUS_FAUX_UNCONFIRMED", - "TRANSACTION_STATUS_FAUX_CONFIRMED" + "TRANSACTION_STATUS_ONE_SIDED_UNCONFIRMED", + "TRANSACTION_STATUS_ONE_SIDED_CONFIRMED" ] .contains(&status.as_str())); println!( @@ -430,8 
+430,8 @@ async fn ffi_detects_transaction( "TRANSACTION_STATUS_BROADCAST" => ffi_wallet.get_counters().get_transaction_broadcast(), "TRANSACTION_STATUS_MINED_UNCONFIRMED" => ffi_wallet.get_counters().get_transaction_mined_unconfirmed(), "TRANSACTION_STATUS_MINED" => ffi_wallet.get_counters().get_transaction_mined(), - "TRANSACTION_STATUS_FAUX_UNCONFIRMED" => ffi_wallet.get_counters().get_transaction_faux_unconfirmed(), - "TRANSACTION_STATUS_FAUX_CONFIRMED" => ffi_wallet.get_counters().get_transaction_faux_confirmed(), + "TRANSACTION_STATUS_ONE_SIDED_UNCONFIRMED" => ffi_wallet.get_counters().get_transaction_faux_unconfirmed(), + "TRANSACTION_STATUS_ONE_SIDED_CONFIRMED" => ffi_wallet.get_counters().get_transaction_faux_confirmed(), _ => unreachable!(), }; if found_count >= count { diff --git a/integration_tests/tests/steps/wallet_steps.rs b/integration_tests/tests/steps/wallet_steps.rs index 7dff333a31..47568e3a2c 100644 --- a/integration_tests/tests/steps/wallet_steps.rs +++ b/integration_tests/tests/steps/wallet_steps.rs @@ -39,7 +39,7 @@ use grpc::{ TransferRequest, ValidateRequest, }; -use minotari_app_grpc::tari_rpc::{self as grpc}; +use minotari_app_grpc::tari_rpc::{self as grpc, TransactionStatus}; use minotari_console_wallet::{CliCommands, ExportUtxosArgs}; use minotari_wallet::transaction_service::config::TransactionRoutingMechanism; use tari_common_types::types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey}; @@ -219,7 +219,7 @@ async fn wallet_detects_all_txs_as_mined_status(world: &mut TariWorld, wallet_na }, _ => (), }, - "Mined_or_Faux_Unconfirmed" => match tx_info.status() { + "Mined_or_OneSidedUnconfirmed" => match tx_info.status() { grpc::TransactionStatus::MinedUnconfirmed | grpc::TransactionStatus::MinedConfirmed | grpc::TransactionStatus::OneSidedUnconfirmed | @@ -230,7 +230,7 @@ async fn wallet_detects_all_txs_as_mined_status(world: &mut TariWorld, wallet_na }, _ => (), }, - "Mined_or_Faux_Confirmed" => match tx_info.status() { + "Mined_or_OneSidedConfirmed" => match tx_info.status() { grpc::TransactionStatus::MinedConfirmed | grpc::TransactionStatus::OneSidedConfirmed | grpc::TransactionStatus::CoinbaseConfirmed => { @@ -322,7 +322,7 @@ async fn wallet_detects_all_txs_are_at_least_in_some_status( }, _ => (), }, - "Mined_or_Faux_Unconfirmed" => match tx_info.status() { + "Mined_or_OneSidedUnconfirmed" => match tx_info.status() { grpc::TransactionStatus::MinedUnconfirmed | grpc::TransactionStatus::MinedConfirmed | grpc::TransactionStatus::OneSidedUnconfirmed | @@ -523,10 +523,13 @@ async fn wallet_has_at_least_num_txs(world: &mut TariWorld, wallet: String, num_ "TRANSACTION_STATUS_COINBASE" => 5, "TRANSACTION_STATUS_MINED_CONFIRMED" => 6, "TRANSACTION_STATUS_REJECTED" => 7, - "TRANSACTION_STATUS_FAUX_UNCONFIRMED" => 8, - "TRANSACTION_STATUS_FAUX_CONFIRMED" => 9, + "TRANSACTION_STATUS_ONE_SIDED_UNCONFIRMED" => 8, + "TRANSACTION_STATUS_ONE_SIDED_CONFIRMED" => 9, "TRANSACTION_STATUS_QUEUED" => 10, "TRANSACTION_STATUS_NOT_FOUND" => 11, + "TRANSACTION_STATUS_COINBASE_UNCONFIRMED" => 12, + "TRANSACTION_STATUS_COINBASE_CONFIRMED" => 13, + "TRANSACTION_STATUS_COINBASE_NOT_IN_BLOCK_CHAIN" => 14, _ => panic!("Invalid transaction status {}", transaction_status), }; @@ -935,7 +938,7 @@ async fn send_amount_from_wallet_to_wallet_at_fee( ); } -#[then(expr = "wallet {word} detects at least {int} coinbase transactions as Mined_or_Faux_Confirmed")] +#[then(expr = "wallet {word} detects at least {int} coinbase transactions as CoinbaseConfirmed")] async fn 
wallet_detects_at_least_coinbase_transactions(world: &mut TariWorld, wallet_name: String, coinbases: u64) { let mut client = create_wallet_client(world, wallet_name.clone()).await.unwrap(); let mut completed_tx_res = client @@ -948,7 +951,7 @@ async fn wallet_detects_at_least_coinbase_transactions(world: &mut TariWorld, wa let mut total_mined_confirmed_coinbases = 0; 'outer: for _ in 0..num_retries { - println!("Detecting mined confirmed coinbase transactions"); + println!("Detecting coinbase confirmed transactions"); 'inner: while let Some(tx_info) = completed_tx_res.next().await { let tx_id = tx_info.unwrap().transaction.unwrap().tx_id; let request = GetTransactionInfoRequest { @@ -957,7 +960,7 @@ async fn wallet_detects_at_least_coinbase_transactions(world: &mut TariWorld, wa let tx_info = client.get_transaction_info(request).await.unwrap().into_inner(); let tx_info = tx_info.transactions.first().unwrap(); match tx_info.status() { - grpc::TransactionStatus::MinedConfirmed => { + grpc::TransactionStatus::CoinbaseConfirmed => { total_mined_confirmed_coinbases += 1; if total_mined_confirmed_coinbases >= coinbases { break 'outer; @@ -976,19 +979,23 @@ async fn wallet_detects_at_least_coinbase_transactions(world: &mut TariWorld, wa if total_mined_confirmed_coinbases >= coinbases { println!( - "Wallet {} detected at least {} coinbase transactions as Mined_or_Faux_Confirmed", + "Wallet {} detected at least {} coinbase transactions as CoinbaseConfirmed", &wallet_name, coinbases ); } else { panic!( - "Wallet {} failed to detect at least {} coinbase transactions as Mined_or_Faux_Confirmed", + "Wallet {} failed to detect at least {} coinbase transactions as CoinbaseConfirmed", wallet_name, coinbases ); } } -#[then(expr = "wallet {word} detects at least {int} coinbase transactions as Mined_or_Faux_Unconfirmed")] -async fn wallet_detects_at_least_unmined_transactions(world: &mut TariWorld, wallet_name: String, coinbases: u64) { +#[then(expr = "wallet {word} detects at least {int} coinbase transactions as CoinbaseUnconfirmed")] +async fn wallet_detects_at_least_coinbase_unconfirmed_transactions( + world: &mut TariWorld, + wallet_name: String, + coinbases: u64, +) { let mut client = create_wallet_client(world, wallet_name.clone()).await.unwrap(); let mut completed_tx_res = client .get_completed_transactions(GetCompletedTransactionsRequest {}) @@ -1000,7 +1007,7 @@ async fn wallet_detects_at_least_unmined_transactions(world: &mut TariWorld, wal let mut total_mined_unconfirmed_coinbases = 0; 'outer: for _ in 0..num_retries { - println!("Detecting mined unconfirmed coinbase transactions"); + println!("Detecting coinbase unconfirmed transactions"); 'inner: while let Some(tx_info) = completed_tx_res.next().await { let tx_id = tx_info.unwrap().transaction.unwrap().tx_id; let request = GetTransactionInfoRequest { @@ -1009,7 +1016,7 @@ async fn wallet_detects_at_least_unmined_transactions(world: &mut TariWorld, wal let tx_info = client.get_transaction_info(request).await.unwrap().into_inner(); let tx_info = tx_info.transactions.first().unwrap(); match tx_info.status() { - grpc::TransactionStatus::MinedUnconfirmed => { + grpc::TransactionStatus::CoinbaseUnconfirmed | grpc::TransactionStatus::CoinbaseNotInBlockChain => { total_mined_unconfirmed_coinbases += 1; if total_mined_unconfirmed_coinbases >= coinbases { break 'outer; @@ -1028,18 +1035,18 @@ async fn wallet_detects_at_least_unmined_transactions(world: &mut TariWorld, wal if total_mined_unconfirmed_coinbases >= coinbases { println!( - "Wallet {} detected 
at least {} coinbase transactions as Mined_or_Faux_Unconfirmed", + "Wallet {} detected at least {} coinbase transactions as CoinbaseUnconfirmed", &wallet_name, coinbases ); } else { panic!( - "Wallet {} failed to detect at least {} coinbase transactions as Mined_or_Faux_Unconfirmed", + "Wallet {} failed to detect at least {} coinbase transactions as CoinbaseUnconfirmed", wallet_name, coinbases ); } } -#[then(expr = "wallet {word} detects exactly {int} coinbase transactions as Mined_or_Faux_Confirmed")] +#[then(expr = "wallet {word} detects exactly {int} coinbase transactions as CoinbaseConfirmed")] async fn wallet_detects_exactly_coinbase_transactions(world: &mut TariWorld, wallet_name: String, coinbases: u64) { let mut client = create_wallet_client(world, wallet_name.clone()).await.unwrap(); let wallet_address = world.get_wallet_address(&wallet_name).await.unwrap(); @@ -1049,7 +1056,7 @@ async fn wallet_detects_exactly_coinbase_transactions(world: &mut TariWorld, wal let mut total_mined_confirmed_coinbases = 0; 'outer: for _ in 0..num_retries { - println!("Detecting mined confirmed coinbase transactions"); + println!("Detecting coinbase confirmed transactions"); 'inner: for tx_id in tx_ids { let request = GetTransactionInfoRequest { transaction_ids: vec![*tx_id], @@ -1057,7 +1064,7 @@ async fn wallet_detects_exactly_coinbase_transactions(world: &mut TariWorld, wal let tx_info = client.get_transaction_info(request).await.unwrap().into_inner(); let tx_info = tx_info.transactions.first().unwrap(); match tx_info.status() { - grpc::TransactionStatus::MinedConfirmed => total_mined_confirmed_coinbases += 1, + grpc::TransactionStatus::CoinbaseConfirmed => total_mined_confirmed_coinbases += 1, _ => continue 'inner, } } @@ -1073,12 +1080,12 @@ async fn wallet_detects_exactly_coinbase_transactions(world: &mut TariWorld, wal if total_mined_confirmed_coinbases == coinbases { println!( - "Wallet {} detected exactly {} coinbase transactions as Mined_or_Faux_Confirmed", + "Wallet {} detected exactly {} coinbase transactions as CoinbaseConfirmed", &wallet_name, coinbases ); } else { panic!( - "Wallet {} failed to detect exactly {} coinbase transactions as Mined_or_Faux_Confirmed", + "Wallet {} failed to detect exactly {} coinbase transactions as CoinbaseConfirmed", wallet_name, coinbases ); } @@ -1125,7 +1132,7 @@ async fn start_wallet_without_node(world: &mut TariWorld, wallet: String) { } } -#[then(expr = "all wallets detect all transactions as Mined_or_Faux_Confirmed")] +#[then(expr = "all wallets detect all transactions as Mined_or_OneSidedConfirmed")] async fn all_wallets_detect_all_txs_as_mined_confirmed(world: &mut TariWorld) { for wallet in world.wallets.keys() { let mut wallet_client = create_wallet_client(world, wallet.clone()).await.unwrap(); @@ -1153,10 +1160,11 @@ async fn all_wallets_detect_all_txs_as_mined_confirmed(world: &mut TariWorld) { let res = wallet_client.get_transaction_info(req).await.unwrap().into_inner(); let tx_status = res.transactions.first().unwrap().status; - // TRANSACTION_STATUS_MINED_CONFIRMED code is currently 6 - if tx_status == 6 { + if tx_status == TransactionStatus::MinedConfirmed as i32 || + tx_status == TransactionStatus::OneSidedConfirmed as i32 + { println!( - "Wallet {} has detected transaction with id {} as Mined_or_Faux_Confirmed", + "Wallet {} has detected transaction with id {} as Mined_or_OneSidedConfirmed", &wallet, tx_id ); break 'inner; @@ -1164,7 +1172,7 @@ async fn all_wallets_detect_all_txs_as_mined_confirmed(world: &mut TariWorld) { if retry ==
num_retries { panic!( - "Transaction with id {} does not have status as Mined_or_Faux_Confirmed, on wallet {}", + "Transaction with id {} does not have status as Mined_or_OneSidedConfirmed, on wallet {}", tx_id, &wallet ); } @@ -1196,7 +1204,7 @@ async fn wallets_should_have_at_least_num_spendable_coinbase_outs( } let num_retries = 100; - let mut coinbase_count = 0; + let mut unspendable_coinbase_count = 0; let mut spendable_coinbase_count = 0; for ind in 0..wallets_clients.len() { @@ -1211,28 +1219,35 @@ async fn wallets_should_have_at_least_num_spendable_coinbase_outs( .into_inner(); while let Some(completed_tx) = stream.next().await { let tx_info = completed_tx.unwrap().transaction.unwrap(); - - if tx_info.message.contains("Coinbase Transaction for Block ") && tx_info.fee == 0 { - let tx_id = tx_info.tx_id; - coinbase_count += 1; - - println!("Found coinbase transaction with id {} for wallet {}", tx_id, &wallet); - - // MINED_CONFIRMED status = 6 - if tx_info.status == 6 { - println!( - "Coinbase transaction with id {} for wallet {} is Mined_or_Faux_Confirmed", - tx_id, &wallet - ); - spendable_coinbase_count += 1; - } + if tx_info.status == grpc::TransactionStatus::CoinbaseUnconfirmed as i32 { + unspendable_coinbase_count += 1; + println!( + "Found coinbase transaction with id {} for wallet '{}' as 'CoinbaseUnconfirmed'", + tx_info.tx_id, &wallet + ); + } + if tx_info.status == grpc::TransactionStatus::CoinbaseNotInBlockChain as i32 { + unspendable_coinbase_count += 1; + println!( + "Found coinbase transaction with id {} for wallet '{}' as 'CoinbaseNotInBlockChain'", + tx_info.tx_id, &wallet + ); + } + if tx_info.status == grpc::TransactionStatus::CoinbaseConfirmed as i32 { + spendable_coinbase_count += 1; + println!( + "Found coinbase transaction with id {} for wallet '{}' as 'CoinbaseConfirmed'", + tx_info.tx_id, &wallet + ); } } if spendable_coinbase_count >= amount_of_coinbases { println!( - "Wallet {} has found at least {} within total {} coinbase transaction", - &wallet, amount_of_coinbases, coinbase_count + "Wallet '{}' has found at least {} spendable coinbases within a total of {} coinbase transactions", + &wallet, + amount_of_coinbases, + spendable_coinbase_count + unspendable_coinbase_count ); break 'inner; } @@ -1545,7 +1560,7 @@ async fn wallet_with_tari_connected_to_base_node( let mut base_node_client = world.get_node_client(&base_node).await.unwrap(); let tip_info_res = base_node_client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let mut current_height = tip_info_res.metadata.unwrap().height_of_longest_chain; + let mut current_height = tip_info_res.metadata.unwrap().best_block_height; let mut num_blocks = 0; let mut reward = 0; diff --git a/lints.toml b/lints.toml index a7a7465c02..cb44026f34 100644 --- a/lints.toml +++ b/lints.toml @@ -69,4 +69,5 @@ allow = [ 'clippy::too_many_arguments', # `assert!(!foo(bar))` is misread the majority of the time, while `assert_eq!(foo(bar), false)` is crystal clear 'clippy::bool-assert-comparison', + 'clippy::blocks_in_conditions', ] diff --git a/meta/crates.io/update_owners.sh b/meta/crates.io/update_owners.sh new file mode 100755 index 0000000000..b0e68c50a3 --- /dev/null +++ b/meta/crates.io/update_owners.sh @@ -0,0 +1,170 @@ +#!/bin/bash + +CHECK_ONLY=0 +# Check if the first command-line argument is '-c' +if [[ $1 == "-c" ]]; then + CHECK_ONLY=1 +fi + +# Declare associative arrays +declare -A package_group_map +declare -A group_user_map + +# Populate group_user_map +group_user_map["ignore"]="CjS77 stringhandler SWvheerden" 
+group_user_map["leads"]="CjS77 stringhandler SWvheerden" +group_user_map["dan"]="CjS77 stringhandler sdbondi" + +# Minotari crates and libraries + +package_group_map["minotari_app_grpc"]="leads" +package_group_map["minotari_app_utilities"]="leads" +package_group_map["minotari_chat_ffi"]="leads" +package_group_map["minotari_console_wallet"]="leads" +package_group_map["minotari_merge_mining_proxy"]="leads" +package_group_map["minotari_miner"]="leads" +package_group_map["minotari_mining_helper_ffi"]="leads" +package_group_map["minotari_wallet"]="leads" +package_group_map["minotari_wallet_ffi"]="leads" +package_group_map["minotari_node"]="leads" +package_group_map["tari_crypto"]="leads" +package_group_map["tari_common"]="leads" +package_group_map["tari_utilities"]="leads" +package_group_map["tari_bulletproofs_plus"]="leads" +package_group_map["tari_comms_dht"]="leads" +package_group_map["tari_core"]="leads" +package_group_map["tari_common_types"]="leads" +package_group_map["tari_comms"]="leads" +package_group_map["tari_key_manager"]="leads" +package_group_map["tari_p2p"]="leads" +package_group_map["tari_protobuf_build"]="leads" +package_group_map["tari_script"]="leads" +package_group_map["tari_features"]="leads" +package_group_map["tari_comms_rpc_macros"]="leads" +package_group_map["tari_contacts"]="leads" +package_group_map["tari_service_framework"]="leads" + +# Tari/DAN crates and libraries +package_group_map["tari_template_lib"]="dan" +package_group_map["tari_dan_app_utilities"]="dan" +package_group_map["tari_dan_common_types"]="dan" +package_group_map["tari_dan_engine"]="dan" +package_group_map["tari_dan_p2p"]="dan" +package_group_map["tari_dan_storage"]="dan" +package_group_map["tari_dan_storage_lmdb"]="dan" +package_group_map["tari_dan_storage_sqlite"]="dan" +package_group_map["tari_dan_wallet_cli"]="dan" +package_group_map["tari_dan_wallet_daemon"]="dan" +package_group_map["tari_engine_types"]="dan" +package_group_map["tari_epoch_manager"]="dan" +package_group_map["tari_state_store_sqlite"]="dan" +package_group_map["tari_template_abi"]="dan" +package_group_map["tari_template_builtin"]="dan" +package_group_map["tari_template_macros"]="dan" +package_group_map["tari_template_test_tooling"]="dan" +package_group_map["tari_transaction"]="dan" +package_group_map["tari_transaction_manifest"]="dan" +package_group_map["tari_indexer"]="dan" +package_group_map["tari_indexer_client"]="dan" +package_group_map["tari_indexer_lib"]="dan" + +# Deprecated, unused, or unclassified packages. 
+package_group_map["tari_signaling_server"]="ignore" +package_group_map["tari_bor"]="ignore" +package_group_map["tari_comms_logging"]="ignore" +package_group_map["tari_comms_rpc_state_sync"]="ignore" +package_group_map["tari_consensus"]="ignore" +package_group_map["tari_wallet_ffi"]="ignore" +package_group_map["tari_storage"]="ignore" +package_group_map["tari_wallet"]="ignore" +package_group_map["tari_comms_middleware"]="ignore" +package_group_map["tari_infra_derive"]="ignore" +package_group_map["tari-curve25519-dalek"]="ignore" +package_group_map["tari_shutdown"]="ignore" +package_group_map["tari_mmr"]="ignore" +package_group_map["tari_base_node"]="ignore" +package_group_map["tari_base_node_client"]="ignore" +package_group_map["tari_broadcast_channel"]="ignore" +package_group_map["tari_bulletproofs"]="ignore" +package_group_map["tari_validator_node"]="ignore" +package_group_map["tari_validator_node_cli"]="ignore" +package_group_map["tari_validator_node_client"]="ignore" +package_group_map["tari_validator_node_rpc"]="ignore" +package_group_map["tari_wallet_daemon_client"]="ignore" +package_group_map["tari_transactions"]="ignore" +package_group_map["tari_mining"]="ignore" +package_group_map["tari_mmr_integration_tests"]="ignore" +package_group_map["tari_pubsub"]="ignore" +package_group_map["tari_test_utils"]="ignore" +package_group_map["tari_libtor"]="ignore" +package_group_map["tari_metrics"]="ignore" +package_group_map["tari_scaffolder"]="ignore" + +########################## Owner management functions ########################## +remove_owner() { + echo "Removing $1 as owner of $package" + cargo owner -q --remove $1 $package + sleep 3 +} + +verify_owner() { + # No-op + : +} + +add_owner() { + echo "Adding $1 to $package" + cargo owner -q --add $1 $package + sleep 3 +} + +################################## Main script ################################## + +# Iterate over packages +for package in "${!package_group_map[@]}"; do + echo "" + echo "Processing $package..." + # Get the expected owners + group=${package_group_map[$package]} + # If group is 'ignore', skip this iteration + if [[ $group == "ignore" ]]; then + echo "Ignoring $package" + continue + fi + expected_owners=(${group_user_map[$group]}) + + # Get the current owners + current_owners=($(cargo owner -q --list $package | awk '{print $1}')) + + # Convert the arrays to space-separated strings for comparison + current_owners_str=" ${current_owners[*]} " + expected_owners_str=" ${expected_owners[*]} " + + echo "Current owners vs: $current_owners_str" + echo "Expected owners : $expected_owners_str" + + if [[ $CHECK_ONLY == 1 ]]; then + continue + fi + + # Iterate over the current owners + for user in "${current_owners[@]}"; do + if [[ $expected_owners_str == *" $user "* ]]; then + # User is in both current and expected owners + verify_owner $user + else + # User is in current owners but not in expected owners + remove_owner $user + fi + done + + # Iterate over the expected owners + for user in "${expected_owners[@]}"; do + if [[ $current_owners_str != *" $user "* ]]; then + # User is in expected owners but not in current owners + add_owner $user + fi + done + echo "... 
Done processing $package" + echo "" +done diff --git a/meta/gpg_keys/cifko.asc b/meta/gpg_keys/cifko.asc new file mode 100644 index 0000000000..5a57f84f73 --- /dev/null +++ b/meta/gpg_keys/cifko.asc @@ -0,0 +1,41 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQGNBGCwydUBDADDjVNeEwq9FKYuuydsd/ImHs6jDcu0k4Bq6dgcM3wDc2SVAg+a +7SpnLnSi/pl1jNYHSxBB3hK/pfJE/6Wf9vsnkzHYaFiVA5ikpWGhgeIU9tD1Z+4K +R/lCUyl4m3SLhVbHtW95+XD0KyLFqurkt18lcV1O9QZL7fCX43ENojUQynlJL4L0 +XAT+bfS9Ydpzu1UBQeUloFQxxNJ5YoH4tzjuw/rO67bTEOW/ktP59KK5M/VkaDjF +Mq2OmoWAzl7bAoMCF1LumJe9A3GFhFZ9m8NR32TDIJQFr1+9h3x6kmbTgu65IEoo +gUJNRn7zAUyblDqMBw7UvDN9WUr/duqp6ix2G8R7B3McLXWApBL+XU2igONklAlZ +FViL1aR3Bq6qOiG9PfrwA2Oc1LsL+N8tbqzZBx7cbICgqHwMHTxZsiCk3rs3MGyO +L4TUvmN/ApuFn6yLTaKab26ZadXsAZysFFJOP26f3QVCdpGdtHOkwlBwnPbWzLfG +O3Ae7i37FNQJz+8AEQEAAbQYQ2lma28gPGdjaWZrb0BnbWFpbC5jb20+iQHOBBMB +CAA4FiEEoeEccxHuEUSgd5h31x5mpTvWu7oFAmCwydUCGwMFCwkIBwIGFQoJCAsC +BBYCAwECHgECF4AACgkQ1x5mpTvWu7pNpwv9EchQJ2R9yDvxwLLWlWwUOh994JwZ +hZiJs44mRM7+sFVfq1OruOg846m8FdkSMlXeLznzbodtdKc4ssfitulx8vgcy49d +pyIvWbbLxjz+39LEK9rYQJXd8TLLRmMUu/eagYNRhEAAE/ecHc/7wtOEWE+a2ydb +crNqwJwUG6XOeoh+duSPjms+xLRhnoh13oHfktubU/pkE7DqMSWTCBMdZ1U0g+ju +8icOWi2DzQtsndgLJvRw7yaMozQsg4rYk8tCEBxrgLd8T3nJuiruYtCPYgGgkMva +nMcdO6pbaAL8MuAxVyaylW+IDhXGBZhZA/Mjy6rfX6eOZiftM0wioNKCt/YDxOAr +PD76eGB8ZVlQYZpCGa/ryGBh/kpkcRAidPZ8qxWVtikZQWOcQA/wnJRrKA2+jtEw +nhb3Lw+jjUyM8FO1fSWqJWlW2Hond883FTA7/JvW4MqO86jnzBdV3Ca5tLeNaH5m +T8x1a4uudkCahFWcZ1ps9Levqdbc2Kie36gVuQGNBGCwydUBDACuCFU2YXSHEJsP +wt5YTvRq1s79IwDyJrqvUSKC0MUeTO1I9pzZ+c+SCvSzlV15ruDVXrC8mCFzMhyS +rrKvDxGf+LoeykRS2r5TGnXVqIpZaWzn+ZJo3ML63URySS4kBQPMJ4iykeU0Ze9V +mxtSAx4+fVNJ+IyXzKfut48w2NLVSpxGW9QC43tMf5TCPWBOa6JmG1CVvX7yzP/y +EH0MH9oUx8KObUdkcRygUJfoUYFaP+16Bfllf2zWWzs5Njk4Rfjl06sweMCHRCdQ +WmKP7JB/P8e+YwFvamD4EWePjz2TLDG4Voej9IU+Iex4rBWRBbbAVjstCW8oGPuD +2EoTAGzgXVdOGXSvqm1Xb4gVlnllqD7pxouchoiRzEx2blp+d6id0PeQ26vOPZSN +GvDRseB9/zoD+HjoNbMUinu0wEqDIeeqOKd+HPKq4CAFtMoVa6F47gl1g88L9YWu +ATzQgucnB2G9ZsL6jyM1JjKo/Ij4i7U328JlXwG/zXAjiaMywnMAEQEAAYkBtgQY +AQgAIBYhBKHhHHMR7hFEoHeYd9ceZqU71ru6BQJgsMnVAhsMAAoJENceZqU71ru6 +Z/UMAIn6jq+K5M+NoLUDDYBfMyyFnovRe+ZDQbsnuM7uDdaCHeTpcRs6HR9IM5Qr +4R+PeJRTyMQBsBBa536qzFVGceoYRtA1jXh/vB86C2m0Bjvqc5mq+S0K0ywxEVfI +pjgm4xtibJZsRkvx6dVueBQzs7PcnKRyONSaxcOyzfzijOF68wXjZlxkJugAgmHb +iNuvY7X3lerpCk66KRCDjf+lr5DmcQ6er9SUVnrVvhb6VIQMMLlpr9MS19FiWehe +p4mCckhZgXUPXzgKnkrqEO4q3dmd0mRnIZxQHzkrDPyPpEhk74m5HMV+9fZWuNL0 +d2GwMshpfKVGwubBlr+XONtRO6a65O6Rgh3oXVIVjLGsL7a8sMQgintiiYVUNZUA +0B/kB1G4B4/ovPEAFV6uexe9gv+mfxiEHmLQnwIM9iJI7FW50rIrYzDkBhq5QVQk +3ikt7Gj6rzSwZB2dhMwTFWfLc1rt+jzr9RSYmayJwhRPfOukfomaMEMOZ67fcwI1 +oboBkg== +=ErbU +-----END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/delta1.asc b/meta/gpg_keys/delta1.asc deleted file mode 100644 index 1c8d4b7a7b..0000000000 --- a/meta/gpg_keys/delta1.asc +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBF8yonQBEAC6l4ZPIBi7wTi1Id+eCKVRK4fhwgXzQUXfpGCF1lvGnHlQZbr9 -vhi/v/2uiCdzcqjNpPaSAKwTqjvthOlFIcmyk2ZtkfRR9n9T/3rbhgWb2sSBr9da -jgGKK0QdaMYj7o3754VknCCVZtZfqn7WdM8/c6++m32K3gpYTjKhz3ZRD5O0qrpf -2RMIiy8vdQLfKYx8Ro+ihpC1dnOnQCg/BJEcXQ+rjnKsIlZmZ04wX4nCdMzgu0BR -lQvvvkppyf2qeMRN2KITregpOygxVdoqsImeMfv0b0CamrlT6Hg47MYXvdYuwJgG -ex3iU0jCYOJOFtiFrc7bTRWKPT/+X9BM1X60wjFMldXJ5gElw3bfh6zkGA3cnSMz -UpTtqd3mGe6EZ2YTzU5mttEW2VwpeqC89pXljdA7K4aCQ1S6CZYpy3/nrvuFMr1r -dDPJaE4fVslFGKFSGfKdo7ngZXsxS2Lbd6iHfPg1GnIvzC9zzC/yB3mK9gdmUZTU -vs0B4xxi5ixo5aBqnf1LPowruO+e6/56FX3uFl7OemfewrQjUAHtVeu67y5JS65T 
-g7s/DT6yWKo7fQbuMz4Y4BxOeCVNIPya+4Zgov5QkcPKXig7ZqEsDNu/j75e7B8A -Tpv8t46cw0AMYKEedxGWjp2eWn1C0IHPq8OsUSakPoc25wvpvxjw+UHWxQARAQAB -tB5CeXJvbiBIYW1ibHkgPGJpenpsZUB0YXJpLmNvbT6JAk4EEwEIADgWIQR6OYEg -67Z9l4onCwt6Zpt0lpUiXgUCXzKidAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIX -gAAKCRB6Zpt0lpUiXtfPD/98oD8vKl9E51wDt/GorEK6Ze/sx6HG4RDERk+cKN0k -RnyuOVm24eSkMwlgAs+eFU5k0KMs9MwszQh/zIH98XAjzQsSW+WQCmMNHLtVCn7u -wWG5gYi3OirvluG8tOQLbqs2yPa7edrN1ynA+7L1uXgvVQbcOosFskDftoYTqWCf -xJOlmHZfe7W+Y/4PhS8cATwijywFPjeyuWTjYTny3aT8e4Js2vQHB3cGEwJbBUos -kCY0F5AZ4YoZZ9V6s+Xs7fXDCiw1SERwmSbcDyriEjGcukt6xwXKSo3SjDQUBeWi -6H/3nhHCoKF26yw3cZzDTQDXBIey+RPcZMMxC5PK6kUNAKT/bo3YZl7kOad5OqIe -3pl0O9h0hWSEa0O4frhpmgaNpGBTjl0JXzkdXSzC4SgaLREN9sCM2ugy3Z5K2vKs -txqkicLX+zZpNUecmKi2ILQpmUom3ZfQHLS2ycWlgRvY5tEIdXE5CxvqnKaogYFv -H4t6HDgTLmryNRBStLpVNki9Ezxe4YAGengeLSjXP6FYGzDSeAIvFICSMVr8GZUa -rNEfpj2rLiE8DPhArqbhXr9+h9DqmYHf2xATQRhVwGmlH9WZy0EpfXCWAbLsHecV -qlI1hxwbkb61e8W94mUtoC/EpszeGl6YMi0GJSw6Pcl0isNEIiTh+zsHYnOWQPsZ -77kCDQRfMqJ0ARAA47fAmlcD2U0UnwS0ZXnggsMWOMMPeG+x7KvVvKWnirwnrE2A -Gw2xHn02pZLtKRnKlf9KU2s0wyFk/nf5NoPBepgNWkX9c2yLlfAFVbT+WJUss/D1 -K3d5CDPbc06LvC5c0js1YrnEj4/8jQe0YxZu6hodxkcgZyeEkCjBN+g7E/ejCAjp -Wfp+Bfls8+yI/nVQJgH1q2FYhqFqN5kBp1hHhA5s0m0oBKofO1xECnhPobuPfVxA -+NY1Jr+tvP9DIVaMeyCMu0EzmghouVZfcvhxEOC/roTGK0zyWqCEjQ3xgCU5OUXQ -RXmUFm2uaCVO5sAtpX3KmkS5QbtDBb24N0CbfS8fwExt93zxwnL9ZbyxCMKVoCsb -P31fOvNtQyt/cD34gjnRemjSC6HInrHft3fod73I1NfvM2ovF5cm1JBU5rankImh -iQ04KA9OJlT3BSLf9CerN2D79u/egOSCHk29V8JvMnZA5zBE/2j7ZiYR/k2n55xQ -fSVqqvxoqSadNCs57SKiSTVT+3o4DKptZvDny8SrZPlMnGUuzobrrDL7K/3c9I9l -RC/cRtNHEVE+d7FUWDXG1N1DS+5e6Oh+nX2GvmHRCQptTEVnOA1ikt3thSPTYIwu -OIvJeQ3Ox/6W7LLLFWlYLoXrHR5/LUqo/+tKT/NHLhKigZ+Hrjk7KmHs2rkAEQEA -AYkCNgQYAQgAIBYhBHo5gSDrtn2XiicLC3pmm3SWlSJeBQJfMqJ0AhsMAAoJEHpm -m3SWlSJeTFgP/iQwdijzHWq1HzPDWRB2g5VaAVWmn2bVA5s3eRq9POYeDecr1aXK -AwnbTkp2470iOPokjY6dPuWIJreDSsPnW0Sv4OdM5W/3GS8h5RYEkteS5/JQ/KeT -ZFcApfvYI8530esAx0W3qxDMkK3crwXI/6p+PW0836pXdoFCVzKaO00eEIXZwLvi -LArYb6BmI+d1zXgBFluZQJOD2XU6lNMD4f2vsn/NUhnukvMygcTOWQ/wVgHpFGEl -/5pBjgOhZwIHzu97FfGl6pQ/hREZ9TQL+NCwyd/Sbp5rWJIa8CCJf+boev1iBgzo -VaNHNtgCB9wUkSm3Xiyt9qVMt/R4cG4BMb9g5x+ukxFJDiK20O18exqDcsZrAqus -lPzWdrziBpLMj4SHIIIpzZ0oArq60zPFu0MIm56QsQ2gERJaBFr8+COSDr1wzmIX -60rYlmzF9DIKOWMtRtKOYoZ3es7Mof03+ROEOGZUqRYBZPWkNuDji8E/60CNGNFX -cJU3TgRUDB1X+8zKftQ40d7uHHD957bQRGVp7o2qsJO25U7PI8Ohf5lJTAjDl8xM -HEokWCXSWnH5raxSrguWf6JY262+4Mpj5/2jQjrP/sTjHjb18JSGzAMYJbQbW5/o -xFHHHnirHQnSjr7+WDcO0SW+X2FWYNcunU26u9i/3oAIiOrtF7K0P1jJ -=KZWW ------END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/meta/gpg_keys/hansieodendaal.asc b/meta/gpg_keys/hansieodendaal.asc index baad1345f6..15522c394c 100644 --- a/meta/gpg_keys/hansieodendaal.asc +++ b/meta/gpg_keys/hansieodendaal.asc @@ -2,12 +2,50 @@ mDMEXFAOHRYJKwYBBAHaRw8BAQdA5Pr1oWCXuk4KOLww8nzozn1R/TvL5AvIdScU 9R7W2D+0JEhhbnNpZSBPZGVuZGFhbCBFQ0MgPHBsdXRvQHRhcmkuY29tPoiWBBMW -CAA+FiEEiTBA/hklxU/poczD00Haf8YJhicFAlxQDh0CGyMFCQlnaQMFCwkIBwIG -FQoJCAsCBBYCAwECHgECF4AACgkQ00Haf8YJhid3AQD/elT+/dn+IdfEGf4Veu4L -VdE0uAmg+3JKcxSFAzuJD1sA/23QkxDAqDnBVxX1vqLzMx4WmB30w8Qqjd6NSuqF -WD0AuDgEXFAOHRIKKwYBBAGXVQEFAQEHQJDBWycKABkLFhl+/3wq9jXFQNKu2fhl -bG8o6qkFgdIHAwEIB4h+BBgWCAAmFiEEiTBA/hklxU/poczD00Haf8YJhicFAlxQ -Dh0CGwwFCQlnaQMACgkQ00Haf8YJhifHDQEA8wC6rEJ0q3hzi7gPhwv/J1waspjd -ic5qhFtth9VSH64A/3nwUCTxOfSN4O2BJbEyIbTvNKrqfU0xfIPb5fjLz40H -=mR5Q +CAA+AhsjBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEEiTBA/hklxU/poczD00Ha +f8YJhicFAmW4lxAFCR5fHEEACgkQ00Haf8YJhicyVQD/RrWwkz5z8e5HgrinBVFH 
+hfAoCmPBQkI5+zBndtnnYo8A/jcj/AIH4h3lsxPuC2DG3mMoAsr2/MVxP1f+WHtg +XvkCiQIzBBABCAAdFiEEriHzWWFX1DYW7Df67QUeLkD7PZ0FAlxaoR4ACgkQ7QUe +LkD7PZ1GVhAAiQR/aoYL13Sj0Xh1CgVYMoYCGm+ulpi/s5U42WxiMw4SOFVqdI7q +0tHCS0IyxomBXQrW7UsNne4ErfmL6D2j2CS5pkBaG+6/gzCFk18zXdRUC87no8rE ++DIjh6XIPNvxxJmxzPJBN1JLiI9xsQGcFGOJE/MSL1PxpXZ/n/zYoxQqvz1eqNy0 +nHiuHSTwuTc+/4Quu48U2115ttqcqe8QZJ224oDjQRtCNIzzQUwSmw05OySsq2Zx +8q2PwWBeElKDIVT8sIdl/VSKU41j+Tt8Bvqi3Av/Egu94d+VhdXDMlBctl3ana23 +WYyjxm8U0onu5Z2K6CD3SSZoXsTOgY2eYR5pWigT3JoceI5nHhRxXxpN5yCKZ4ap +tfjzEuUjLGtL30b9QsK7cwQK9lC1nmvyvzcu/BkkewAh+fmYnckuhNtTv/XRtgto +TIaESKQ+vOxBa2qxbvDKKSkEbchPLp/zqjcB7APtl3bGei5fMDb3LNJvKlmUCySi +lghBOJHjXCJGr3d/oGWz+Lr9Dk57KRRkUxpspN2hMmISDib1J2Pmwh/ZbBsy7gpU +mISAkq+yIShBsddAxUVZ2JIwd2Lqe+K0dXmvgFvSPTRtRZaPiWWyqKkDGw/fmN7I +oX2ZqgmSLZBPkmcyJHwp2OYpkgwZvOdV9RNEafG/p5RD/LDQKoDOp7+JAjMEEAEI +AB0WIQSaNRgoWBvbb8MsADdrUDZi9rVc4gUCXFqsFwAKCRBrUDZi9rVc4lcQD/4p +YST4cg8ZaurMMqPd4zbWJsj5inW/2MaiWt6fgm0P+OCSy84i3WF2cgWGAq9z6aM+ +IMKLNJh+diu6lqbDTvANqQPCOB+GIQHsQMVU00mcpAbgZATTanZjp6DaK/6s7qRs +3sBrcBdOphMZl4wfF09LAAFuWDEVNNY/DRCywMBEkKXBBOpYFPaah40i6cSogegs +ZmclthXqfTfRs1zLBCPGLqEbiJ1nOgAdWM4vsNZw7I4VlTAN5w+1RaaOOMP9Sy83 +H5ZgJ5i3Jfjx7qb7nrIRwLZe7d3unkt6FGboRODJkoUc0Q4M9QNTEhTJgJEHub2L +UKemJfPwLFp++MCsXgC9pI1SsAc1+KA5HFMh1PKbNIh6JCZX2BN2ZjAY3GSaczwM +BXLJ4ouDX/iDztEV16RjpEVWCF/s+gUdHTT9dzSuf5svYdJSSdBRH0jMqCXhhQbS +Xh/9ZAh1iqjoJf8dKSpVQphbE0B2iZIyZXJghFoZb/CF/y/idvXCFQ95lVuSXy/v +jBd3RjWGZcBATpCl9ie8OZP4jyCQQt4YzJy1oyWMVu6fWk3v6dK2g86N21Ci2Tkw +EspYV2Q4RMfh0LThulLhNgZYAlMwCRPq1Pyl63eYOwb5U4ClTIiRJNyamDiUMZkP +IAfLZmuX8dZKey3xRTN6XY5KkPFSdkUyql0pgWk7Q4h1BBAWCAAdFiEEXEnXYm0J +2S2WpzH4BPPYqOZYQswFAlxapDkACgkQBPPYqOZYQsy33AD6AzO52ZpS2pz89sV2 +A2ZE7Lmh7qbj7OZl+r36KvnS9gUA/A/nOown4pzNfMnh6lLBIpNkLp752D5LVCM7 +zgpRGN4GiQIzBBABCAAdFiEE6dgmhm4UIrfqKEWccigxfjhEf0oFAlxiudQACgkQ +cigxfjhEf0rV4g//blrFre5rDJCmlTepzBcZ59Bj3840j1gnbI7IRaQOlIu4l1G0 +QFiamzRZcg2jovCPMWIVoDbg8ejH3kB+PPHx635FlLKGqlBL0uvE2lxyj2FBzVH1 ++VnHlxFObMdg6CmOixLrEjYV64/bmqOQ4scM+MQfwsJgBIjtrazpndoKDNActP/q +S77UnNA85OdSUa16odz/miLj34ov4I1wBFv4xWEJGvyvtMgN+qjJfUGB5nHW7Ya7 +Ythzr0Y244tCbnkRsiNqJCw6N6PgG3HBJqL7W9P5zI7o/JL6XSPPajIdc/qz4B9K +EObiNJyBujGPHiYOgsQESwx7qi8131bdxXCLDYTRJ+8SsyjCcVghSZ4fVJTJzHCK +wJ0J4MqJQvqOxpYg3xbgjkIOUoqtjFQSjL42RTgv8siafsYTFJMaoA0hjbWYnlza +I8noZlryxlzkEpKMDCwqjVVRoR17Gr2cXKCWvk9XN3sUsY7Y2AD5GC5W+w92dOk2 ++1duFfwIcSUmuWDeDNDTSPlALbqC1C+L2rxIzqQk0JJ6WAolMA60TmwUOGWus1Ot +3KRWoOjSK9bCKDnRXoRjwYlVkJqm6ksCisUOVbOZVJSSoIuvK9f1ePAbE/UryZDA +rTnmt5vUuNT/y/4vLUc0hACk5oh40x1+AtWd4uCzXA/wOtPVglqif53sXcO4OARc +UA4dEgorBgEEAZdVAQUBAQdAkMFbJwoAGQsWGX7/fCr2NcVA0q7Z+GVsbyjqqQWB +0gcDAQgHiH4EGBYIACYWIQSJMED+GSXFT+mhzMPTQdp/xgmGJwUCXFAOHQIbDAUJ +CWdpAwAKCRDTQdp/xgmGJ8cNAQDzALqsQnSreHOLuA+HC/8nXBqymN2JzmqEW22H +1VIfrgD/efBQJPE59I3g7YElsTIhtO80qup9TTF8g9vl+MvPjQc= +=UPcF -----END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/neonknight64.asc b/meta/gpg_keys/neonknight64.asc deleted file mode 100644 index 1868b833d0..0000000000 --- a/meta/gpg_keys/neonknight64.asc +++ /dev/null @@ -1,86 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBFyHmAwBEACWiQp6Qpz7berVB54ozWC9Vp1Ax8K4V0LOeynVLb67ukVTjJcG -Xoz1c/wWBF68hmOiePZbY1vYZt15D27LpiUJa7KGK+vLILvcbxDPLxrbIo2Gq+cj -aV8HICYSigczg6qEfrsjNrYo9yth93XhERugyHjXwiawW4s8vepAsqIXsl0B05Uy -AEstfd43SFt2Jl9kbJiH/X5yi0bySvIZMjYixYCnMvyunXOmvEwDPrDqyiRcvFIC -d5BnSnSAs3zoywVPWY56tKAYNFS7/+1HCNffGUhe/dWme06SMWcB8OL0jeisIqfJ -Lr1ZQ2JKBdyfGZOfZ4GCDLkCz2gjNb+8HkZ5DPo+jQZl6yHMXvz3rtaa8+gOSr7D 
-5JYTx0u5pPYVumTS5L/9jjdY3a+arRgwRBNWD+8er5y8YA7arLi4u6q27uSCgXpb -Mf2dw1qjoKmO8Ry2zjcZoCVdYs58E+lEilVbtnQ60OXcPKSa81v3Rf72E4PBXKuG -53bSdSnsAPGKKzFWRrgMEgxSoEvY32+xPe5mgpdhRIrhSEAg8j3up0IA1Yxm+aGI -zfCpxr4QQH+RSki8zQjwthEnFqUEdyOf5zqjp0FbG4dCLgiIuE7lGoHEltb5nGjn -y73OGkUDXyPWs+pXjrajrU1WNcqLBoMcbREH/Qt10QYhvnXYlz/djiueAwARAQAB -tCBZdWtvIFJvb2R0IDxuZW9ua25pZ2h0QHRhcmkuY29tPokCVAQTAQgAPhYhBJZH -lqe7PuQHyD91iMTrHaDUniKrBQJch5gMAhsDBQkHhh+ABQsJCAcCBhUKCQgLAgQW -AgMBAh4BAheAAAoJEMTrHaDUniKrxikP/AqPmuDhXrACjJLDV2pwvfLg83AKmrui -sg1sWw44FpknXjdQGBf4llqyGTe1/yUs8NKgA+NSAAIm4qZPRjZk+3ErMUMXjbDI -Pucb6krZs80S5KDNjzq1BCb30PutYKWFpGKAZG3/kevlsTsOK/ryJkwIN3MB27XS -H5dTsdfyfjoAX4OYeuziHqcGF5WI//Gi2pn3PyvoxBmy5U2b5egm7vuio4Ej3GSK -Cu6ASxF0PW6+pY5BK9/ZNzuRple9m8syGSEaGDpv+ckSUq1zArt5qvwIO68sMPh4 -XOy7f0vO8PPLNmegFJukZS0IOjhlnbQLhYFpNfSpBFKj7SPTrxfjtiEWXh34YFO9 -ULElXAsOo8Zv7WD+0Vw42WF/C7N6FQ6MvnkTvAoFl8Xd8j5RKl3fo3DgqXLMl+Ow -ojht+C4qe8L7CqO406DYwwYtdyxQeBkEYwZf8X3x3m0WquAAouje3Oz+62rjaku0 -naD4Z1MCS5Kz2/y4+L7zoL0mpR8pZUqKvpHU+/pJXv9oKvbpqkd1gpadq1eCCxcX -XV79TQtcBaW9x/s+QAy0DbvO7ABIw/cvrm4gDJzM97p/TrZLnvHgAoZAURukNTlq -PyLnAtU7aB3wCdid0SZtjvtb0XK7Bru+ntH/ZPLSqRdl0IKU4VWa9IvacWAdA6V2 -O7YqHtQyOOsCiQIzBBABCAAdFiEEOiCYC0os/2DIx9KmA1npIchdiNoFAlyIkHQA -CgkQA1npIchdiNpjYA//SE4ry9VwHkhKEA653sX8iIBRI61WqN0g2Q553YyXgN4f -e4yUqLcmmk/dPlJIg/NAks9ut0dTixB/V3+wla0Le5PfuziqxDmC6SUW0QkpVVpd -hy4ueDVdsxuyOsJPYW6Rf8Q/9SsZqjKKhzTZEX2hbVKCrPJKuRIkACbgWJMwzbGT -nF4CrRJSW4b+Bv+02tVGXuWQrb7Tln6hIz/+Kk4HwU4EZL+kPevJhv+TI9hSnfVz -eFmL7kuKCOzcdlH/eSAz1cZLUEJQF2HPrlJmkYiql/6YDrm1fZ3JIpuLAWducG92 -p9Apk1BtfJSCqusolvpZt6ytTC7NjhkS+xT6GBAEvCAF83vCQJXtp3+gmkCJruxD -cJs/c7TXJ14c82AkEkJ6isATgowORO3YU2A2slDNtO4J9iB6km/U0HjVh+oF0Eu4 -XOVyyrQ/XaXBk3miLouu86pSSKrje+v5QT6f/23FXPAB8tNv8L8o9kS5CmV/Lpli -CoELOOCk3N4TMvBZ3S7CVNjbVkv3cvy1mc8iSx1/XqJEUCiUcWMFCyaqmAl7PEpt -nHJT+SyE7HmRsbBB3tvN6TIaCqLemt73OyUyzSeqIxYuUWyUBlm34ECb+LeH7puR -uzOPzeLJ0nwmKziRBlnM5XXVs+NXgEC4/yAFih5+9l7Zahn534B69RpptPltPLG5 -Ag0EXIeYDAEQANRAcNUrMaq+Eos2NbM3robwfjDDThFuvYgc/AZ7iOYi30rsHGn9 -OILDfVS61CFHe+uYdTfk1XCyPIU6M+lK9E5Ifkxwkg/r4tTtQEplfy6IMUs2tyzp -S+HgxjFLqdCAboc5wKWPRiispq77t3hIkjDv8eTKtuDVjhBFMls823/uAdAXwYcv -prlF4ktPs7dJORHEUf0AUu3TfXFcyeE7utYL+jROTOyX4xeIlgYFKlt5qRBEywWT -72ZqK+0hPjE6eKKQnUpWHLlw0vmbUTptg1uhaKfnacX4iyBAopXCR3g+TxOfZ947 -VMNrPMEx4j4Ig8nBl2ejtWLtNBU3tW8zOlc9FiJ7+HvCyLVjaiZTGRZ+ah/gxl3v -BIeOU36162E0rb/62b4tdHYeTzRRG2Nsxr6MVFLu1Etss3kJnurEX1OmRcGem1ZX -iM6gLTksd6O8d1f6uQe6I+THHJPcI2tBGzvjwQ2ajO/oswwSP8HxXBnwCv7Z9/zg -yCBoEtwo7TG6w5F+2AATrVIWSTLCGkw2KQDHqdjHDZ7Zfw3Li+7Pk7KgAQwRMpLu -LX0gFQmR+PJp6vmsKIzDpZbv1T950TQ2jWZCMrauIY4wz61crzv+xDloVXSO/2eD -i7G8p1+PzAgiPwFCVlD98J1BzTBlagVKiPK7ZbU177nTZ5JeBRCnrmmFABEBAAGJ -AjwEGAEIACYWIQSWR5anuz7kB8g/dYjE6x2g1J4iqwUCXIeYDAIbDAUJB4YfgAAK -CRDE6x2g1J4iq15kD/9SpMyv/UiP5qZSTsiPj+P2g1JCBpjTPudMi+g2uP4Uc8mu -EAtMAjsN2M6r3Qp3PCmZVTbe1trjrlcCj4rC4pXjPdvpUJG3q08DO/Tc8NkexyYr -bivhzkt/CjvnL8QtpQNcTLbr9k8toFUp5poW1MfBeQzs/bVP+ga58hNkaeVW++ZX -OR9+s8igLQzX6n7kztwzcOIP0EAUOTgAiB6Fo7fLrIZnjycOU2ucVcz6s+u/MIpW -qtYNKXu7iTEoJMIANgQOA6wOHM5yI2rwaoDm8mxm805O8FYEWmKzentm0Nh575qU -hlwlyoItr/2YPOTaOzRIj4xO420yeMlQ7MpU7F6W33mhiMPhYMgNDEAbUNrckMng -izu1+KZS/HfpSwz7Z131S7qvuZHV1PsbzWUsG443U5Mm/nRidol4+eFexiKAvKAT -0N1UY6HdCIsezcLwDqW963QJl9L91C4UyDnEHvhx/SZjGAHziJnr6Pnba4PPYWp/ -PlWMAFkxxuZHDbZjaWNe5fvGJePaFmKB8VOLIrlfX9TWiBdJfDjB9QfA1/tx0wLo -aPtEYxm6er7o5ANk7w6HdDUrXnZ2KO+ShMAaQrdY6BzGT5kehU1039InJjtZQpP2 -nRng/vJw3Od7UG/wexfCbu44TsQK+ilvbfWx/QdtPoF49Qwa9wW034Xt0sgNrrkC 
-DQRch5y9ARAAu3UADX+IvLbeueCQ7iWxslY/m4fcnTO/yvMHxXpJ6KL/FTPMsG9a -4z7bYGD4mHePBNPuozGlQlbL20J+GW9+AHEab+LsApNl1eDWjpf6E1IN0jGtkBEd -SqoAiyPlgP5D/JSANUhOspzgJlOR/HmG7UxlFN3Ne0iKwOPZfX2Gt2wPBdhzeRiY -o2UPEdkKjIFyOgNolG/Q6+K0xf6A6QGdGpGd4zdm1t1wsNKFJ27G7jjCTvBJqQY0 -yV1FbySSA19jo3m/z7FGecTiVSEjIwLtektD/MmsFwkn8J4r/gBSlFS61NCRfNP2 -0M07slAMuvO7MbvDd5mpNWl7c3+6ybQzrY/7RtVK0iCR9ianmzCYmc2aJ0e0aFBc -2uotdMwCA0vrguFvgRTb31bYxttVnVcpT3b24pIFJWes521lHydeaTrF8Qy8+lJu -3kqNIF5JYan3FgRf11YySHb2VZWg8GFKw2gSyNOOtWqajxGYYjIUvKl4t1w/WoRh -EDorBon2lx2djgoAXOXYFezhsMlbbo1sExIGYRYf+LqO9yqpvm6EXLdUHhwDTzY3 -ZkS/wV95jDGsU4NyRBREok/1DSQ+ydHHEqcbrVXlZFTXKNkC8ZDvXocyOWvT7Z8B -qKeNAKCc/QZOY3NQG64CcAp6nX6R1ihcguYvjLpLOWiVIUIK7RDfyWMAEQEAAYkC -PAQYAQgAJhYhBJZHlqe7PuQHyD91iMTrHaDUniKrBQJch5y9AhsMBQkHhh+AAAoJ -EMTrHaDUniKrlWAP/3I4BiQBi9BjeJpvyMuHz/sHYQXT/pccD3nzTuEftsVQo/Y6 -Z0xUarsJiNMWNz0N2UKQdz8r5QQ0mEo0LB5X3Gi7TI15ITX+zKF7qEcMumPmhJLm -Oup97j3f9awT4YINr9RCsXKZnfAKz99Iizn5H5Dn42TJtaqPReECOiGtZBSooy1h -y+Ht37u10jH8NhYGtuJvjgEH80Ziy4CbdkUJd4NPyXbN9D0yGDz/7J7T3M0YWOAc -uJHQwMI2hOwKOuWIBgmDvqO7kKSCDjQor+9Fiqu15ASKLn2ZGU9hP3jNoTwzSfMg -zal3HR9LvmibxNbEOeIWGwcX64Dpl5Sk5TyVViZTJJaZWT+7MZOhZEjgxWjBCUMq -GJxn4hsBpjoAEldepXFabvd7FT6xX89nxwOTjow5D48LaLev1XxM/PfJORrbM6Zq -oDrzby6UA3Y+u4p21S7NI5DHqR6v84bhZdN0Deqxlc53eNgvM62MOlXVv3itCFI+ -zDZntvrHZbUgmpH3SLxSZUq35DN7Uk1GQTBK1YQvUd50l2JXIrxwFhEkXNr7Pp0q -xJo9WrnRc/xgEoghQPZNoMCalY/mRk53XVDob4Y5F1URVfOkiQ7/dT5r/G12iSam -qfFa2tG0zKYecoPdASyOB1uGhJaXgYgk5hbNKVA0OoTDqwgRb/ih1K/Lu0KO -=HEiN ------END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/meta/gpg_keys/philipr-za.asc b/meta/gpg_keys/philipr-za.asc deleted file mode 100644 index 90bb54b8e3..0000000000 --- a/meta/gpg_keys/philipr-za.asc +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBFxZfxgBEAC93PEokxzD2Shr/QaacwC82FXux1XApxJ2AdFongmYGhM/IYVW -qQL7r5TkTeSAwHrYu8cTbIcWt9GuYVcPf/Mk4II7ILXkuEoQYencnlYiBx4JnVGt -/64tt3+hjoRUnanesYB5T2eFUHsQFeS6821TMDwTOTm02AsRJY09rZIUtkTjlpx8 -yPou6GiCnDrOs455BRq9wGN1RvVCWWLV13ATlWQAydY1ltY8uC3MpyOmZvgohg7v -Y5semvIMKKBgTJoYriBFAnvH807NEUU9F9stXoNtvipwmA0xOESgNAQCJXlhbU4j -cDHdo0Dq01rfo0eP8Btxr925hwVYQ4ccnAzH42At016cJQmL7zav7+mkrM0966ZS -8zc7bEH28x/0wOh+rL5hqS8/5oIad2o9xhmOxn+9mit5jS9rHy6prY9px/zgGqeU -BrSm/q1l5YSrhhAy6XQ2BRQ7PTf/nCben2RGCSccbj0TVF7Fl9lospAJaIhMk/V5 -jVMcWx2aFUZhGqWtfW1e2N5QXqHWLJEKqYvvmsgorKObx1XzTc8Ij7DDRbtNkEA+ -o7mawLR8SpN4uR0ffUsChEC1yg3uTXdA125xp8pxnG2LdGdgYw3/dlEBWo0Ram0X -M5BaRDDSeRA8B/oW31g4Xqv2TImC4qmxZkjaofpH3zp8RrreYouv346hkwARAQAB -tCFQaGlsaXAgUm9iaW5zb24gPHNpbWlhbkB0YXJpLmNvbT6JAlQEEwEIAD4WIQSu -IfNZYVfUNhbsN/rtBR4uQPs9nQUCXFl/GAIbAwUJB4YfgAULCQgHAgYVCgkICwIE -FgIDAQIeAQIXgAAKCRDtBR4uQPs9nYhLD/95XwGo5rJHFl7LefSzvZpUeSVTjYEG -tjX3dnc84aOExoOeFQ5qX+kNZQxEPdPPAHSm62zgOfG1hl6NkO5N+6it+2fzDd0F -6P7EAWeI090fse3Mf+fKiLFkRVQ/f+eQ40BlT2op53gla5NwdQMxOOicQeKPEZp+ -3h4VkVHwU55yqg7uVLwAqRELq4RPA36btk30GUsG3USoJ3y4/7Sz6D5aloMMe4Gf -HgKH8kynVjaQGvYlh+/DtK/i9Qv+5vsrfDoemNl2ZC1Inaaxm6YHv9t9VtK1xUEj -ZZvzRL/S13+LC1dzRLcwK8ubkcrdFf9sgCo7YBJ8Hodz8bRrsDzkMS+UdSxaK5oA -jGbRqgXRjn4IQyGtO1tWpcinzLPIxwD9rh349RlkqpY6Ii6GWgNNyLftHMeVEJ/e -hUCn+fBvM5jB6n6iSH3nMEFyjKF9e3XosqCs9o1zMVEf8O3usz/7GfVEeTmOFJYC -M5zakmBYVOdIwtlTsh8SctyMMOq4VkKMrM46qBcUH9x64BQb2b1igQ3fV1lfr0jX -wnHJP2iXAc5tWcxNqfS7iiTnRd6oMgop7JTt7X3zWGBRLKcGmxj4C901hVOfjFky -4q+7zvlaJdMV2HgMUMWNIMaTsUSs3n+eUuDz0WipdEQ3aX47qzVF5APZLyqkiexW -glD31H3iUwE+2LkCDQRcWX8YARAA6GK9AaK4Qx0sToPqAEoDdTn1RI8NFAGP9h3A -MhDHMOQ7hiO0wDj+KZxoqtaLSgUp+5Kfi1A6IYQgJ4u13mlxF2fA2EE/GWAOLskn 
-4+TNd/wksEfahoglllMt1OibtFs1SUCw4yBJj9TeEF9bY4UTEKgG50QS0q0bxKhC -Woc9W5m8mkTCQDni6N2Z+yw1Kl/ZXnwN5PDoWXGe8k4gv/3Ppuz58ePKjpDvwpPJ -Ic7EWAhjslhCeqL5rraKRzd/W9bWDAS2QEtHLHWzdGLFajas3qzP/ViR+twvIjRM -voKGv91tZf7vQlw7Y4xofUwZTuJ3NbNqKdQwK0xZpTNwBjiMj0b55de6uPeOl2ri -0mXWv+mjoPYC5TyavrjTo4zl0RJnXsBCmjNwNTAsafbp/1nTaeuVTeS4GkbKGrgM -8L71Go5cN44LFaflExNHJxeRYFFdf1cy8b+B+4qyxVn6/g3FW0mu8j6bpXNnsfXw -o5TUqzYgSb0DsZA1f/kO3+ZHEZEqyTb2T9PfEzUXOCMtvqwzv0on1XHXzBRhFD4X -r1qz+oZKgvKrxYOI2UNZVrRDImE4C7I6x4pLfoadhMY6ksOnIlijZgoFdGAa8OkK -Lyw6Lar8HJbcTFJJUaP+einLj5iNxvRBPkcGSUcjdJwLC0cxoqKwEUW5SEUmlqyL -hL1/DBsAEQEAAYkCPAQYAQgAJhYhBK4h81lhV9Q2Fuw3+u0FHi5A+z2dBQJcWX8Y -AhsMBQkHhh+AAAoJEO0FHi5A+z2d5/YQAJFvXY6b3+Nop9hEhQ45nT7IfX4DPmEs -X+DLzf2JAe/q5Jxeu9KWWrgAVZwsWnyCYcWaFS6/dLHIvyes2cf9R1xRFRofp7Dd -0m9mp4VlJpOiLeSPn/Ixg6PXMiEKrvHNntZD01MlZdZFvcmQ340wrzFX6pp2FSoB -67+FQibbLOnzE8Hy+AH9GhI/ofEcASNQakNmDW2+F781OJk+vg5PgvDFf0RTyGi4 -uTpxQl6MHCpWLrb6PEvkM9HWPIIQNZD2Ddckyg/t0ssdS6qt2DFFFXRfHqxyHbfz -4TwYmBeQhbyiI3YQwbYOIwMEc6Dai5vERS3VLMrCTH9oyhgPhtTUQfJahs+uL5u+ -5IPpKjLrDcna6pKrmYDCzy/oBdEUKokEDoU9Q1nelpIuKsSpCqiO1+6vRzt7B+RZ -a5T3n2FL0jOSq4aY4Y7s3XRl7qvjd7wcQnrlD0Yd/CPwUfN1z5M+m/tQxJddQg0x -OhtPG1YgTW0IcVhYt0DjKjTzwpN9lw4GSkRfWpC4/am/7ihfD4DutcXi6U7RoIcS -OyfgZZIKZoAHm0CMumQU4Z9+GeF69f7fLysWPQkXn1qrmkRXP7KrxHVwEhAovp/z -wovYyD8kG3ogjml3X6nVCME4h9PH9ZDKTP9mA7i5furidVaLNsLzPDqSX+eGbBHT -oZ7V88g+VRzc -=dQo1 ------END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/stringhandler.asc b/meta/gpg_keys/stringhandler.asc new file mode 100644 index 0000000000..5e6f1d65c3 --- /dev/null +++ b/meta/gpg_keys/stringhandler.asc @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGXDUhsBEACXgDQhpNt7RdWc4TbEOKJ7Ns80l/Q5GNGRHID5X7T+rLMME6Uo +wJor6K4WoQh6oiRNQBr2b8eJ4G5sDKk6UbbTpaIyv5mP/JWvij+Kb7DT+xtEegEm +kvor1ysKwSv4Fx64tiK/7RQPR1zjtKlTPBSr5VYFixCfN60WIvG9SKcZ4+KIc31d +Usy5wJAOsEhnj0yKSjo/Xsex+1YLQUT156Cy7rJjk3sLEv97zESNcT2+JpX4e367 +nahw3oGEvXiXI6ir+TaAeyyWArKBqJv564XpZ9Mef2i0IG/wgjDF5VJlHBEidmEW +2p8vMGIM4SNKD4giRSQRNc/jAffSNIrmi3Ri3UlUYTlblf5Hlcq3JB2nSGjrDXVB +i3x5lOjKIy6Bp/F+TF/JCsd8TC+2HhoKqttY5tBKFbGKYDr5TkOPzHjNRHuQ1EAB +HkYTn8P8pr3Y3x4cwXSIE5qA2CpA9hEvaetDVz0NAgieRyqgmCPJG38m3xsKhxWL +Ihw/BfbzVffTyfUX2u2/PvFaoRt/Dx8o+ZZAG6BUp+G4vGhtGeq/VvUjoyJ5bd2C +2ksMKE5U0jA5leeX16AoEli0gh+LeEjGBETBKz8LAWaPlJnituerLEXSfOrnhvMi +wFfPq6amIZ/XkphwyvkX0ne9cOoNjjSzHOFNM21x2lQsJYR85KR6AxEPtwARAQAB +tCxzdHJpbmdoYW5kbGVyIDxzdHJpbmdoYW5kbGVyQHByb3Rvbm1haWwuY29tPokC +VAQTAQgAPhYhBIGxvgyXTm0ZLw6H4Cm72HKl+aguBQJlw1IbAhsDBQkFo5qABQsJ +CAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJECm72HKl+agufnUQAJMPaOO5S7J8UNNN +D7AeCv2IJ5GBYV1QCQ5GqGW11WKcrperm4dVw66J686oAFb6xM+xOFrsbCW5XRpi +x+Icr3vmGn/iXTpDE15gTJfevywH0hTt6Dzx4cWmbi1whwpDPal0i7iqpU6DxREu +G5xFwHZXswfM6z6vpqcDlJG/+P7uqF/Mo+/A7F6yn9+tX96o+TFAA+h3KO88V2x6 +e4OFsdQFewqYIjTcGw9t60jSFoiw9h4z9mNaHUfme14B++Ng7/Mlq21R7NiQmXNH +CGsPqfowaAsATuSORakg5PdoicxXPo/ykbhr7ztDNAqq8ieduyBTrB7i9QhOISAj +jpjvCFK49TBv/XUV6+uFDQoiAzKSjxbbbYCleHJNazMGZfhbXmB+YVKFzV/DDevq +HV6Gs/5Ae13Pp+N7zxEFM3EgRlxgPbAS8wacH8ASqQ1Uwrk2EgeZKKC00BvQzRm3 +duM5Fp4iLEUAHA0g2MxveQkGY5SmfpF/0hE2gmW4pHFNItw19nsITiT0cFyfwzEr +JRCsXmk4SMEN+oKGjg59qBAc/qd3QNpSQ5BPgxh/zULbivcEI+5uPHIohafyg3cc +W05mNSBhdYp1U7cpLQCcAx+Bz3p0nh78eWoK65749bArN8qqle8y+hwdpaImh6RD +bjKaEItN5bLb8OxO8cPNAB+IXaXluQINBGXDUhsBEADDpiDP3qTe/oGruheMRCru +qoktaefBFvtmKq//uIAsW+7qjsHRoLs0gGXogknF8GSVpQJbCqJTQJfQjtDY2mVu +CpUkL+ytrL287wKvVMj2C5U8qi/FXfaTkYNGC6eYMIFd5urmLetwT2S4bC7qHnlo +MxEOKJI4ECja9i1xk8PSpcfN+Du2FOs5pHwhRMfUC7NBA8MQyx/hsQ1RgWY8pAzq 
+j71sIo4lc7xi6eT9HpAxnLUFfWYVIP1bN1FowAaiQEdfNtELc+OKMJfmv8VRW+hN +ojQ7m5sNQr3PG81Vrxn6nVyeDsMAt+/qZUCsnBHTBxaHBHJZlP1tSLZ9XVxY/3L7 +vu/KlqQv5fuMPBKYudwvTfgmtqNTtU9ZcDbrs3P0KOXr/yNXiWYlYRuJDdxhZqdT +ReCKiG79c9ADsttm6VK0di8RUjDmKdUDP+XnjLc337Slx1QhVLHHKuxAnC1LmaPQ +TalS3evcpi58NjB3RiFKGoHmecoKp53AnT4l/xSdXCDm3DjBaUEuejVVFnRfdWxL +m5DyGwtYiFotmmbg09ctv1eZapusbCU4riDsAhlKOj+y8TZmxNUJkRpvYhlWwpGD +m8RkVbot2D8HDArtaKDAvul0MqFrtLNE6ldXflXf6vgw1SI9LZf3uJb6m0Opo4Lt +eqcnUGoCLaXbY5V/PidpdwARAQABiQI8BBgBCAAmFiEEgbG+DJdObRkvDofgKbvY +cqX5qC4FAmXDUhsCGwwFCQWjmoAACgkQKbvYcqX5qC70lg//XTWyltA/Qa7orP2N +OqrywJQx17iWMc6t5EwI8bW5ST74hW79AomzXyjhWC15LNrK/KjoVUh0/1WLdVC4 +jv+L0CJkeUgulV63qdPiHwTsRLhnObGAes5g033N2I2yXruDwYiOPlI9nR+YSMyu +9nQwcNfdvgSTipqP1hdCjT1RvxLzBctkHPnclKUPfQvYDdDNg2c6NY6/Eq+R4Sc+ +dlGNENpyGbDnqYDU0iVHnSgkG1gSNr6VQeNhKrgtbihDQSP1+ppCeROC5AzeoGUP +pNvPuMVuqdIDt9u4T7unhr9JOb5LDqpplAH67j2+s4+D9ggrpteTBNV+7jGZglur +sPAlHDnyP+AlPHoHwrxCG98s2WOWGdHW3Zx+3LkP/ijB2tx69fO7J1W7600qiHXx +J1WNGJAvwQHlsuTG4klRzUr6dOe+SAapaCDiwduIiNy6aO3jAyi000WjM7kWncgJ +gXn/FKdbjTMUea+liDe6WYuntPp+ewz0jYZdDXY4tCwVoii0bJ7l5FxPiNCvPm8m +5t3P0JTpGwNpqT0YuEPiebFXwSmSyX3O1GZDXbWgYwG6DxC3xa1mebjKPQdTss5i +p5hGhKFTBlk0KFwCCEDtZjmWzrAtceqcDZ95jAeQc1PJMb3jWw4snbCjirTx4gug +V0f2fATBiu2NWfgql40ixuzveL0= +=uH/a +-----END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/swvheerden.asc b/meta/gpg_keys/swvheerden.asc index 47e2a7324f..42eef171bb 100644 --- a/meta/gpg_keys/swvheerden.asc +++ b/meta/gpg_keys/swvheerden.asc @@ -1,40 +1,53 @@ -----BEGIN PGP PUBLIC KEY BLOCK----- -mDMEXFmM+BYJKwYBBAHaRw8BAQdAKKpMVnKMXJEcs2ECTbKorqRTEkClTr9sl3rv -lTq0cwK0KXNjaGFsayB2YW4gaGVlcmRlbiA8c3d2aGVlcmRlbkBnbWFpbC5jb20+ -iJYEExYIAD4WIQRcSddibQnZLZanMfgE89io5lhCzAUCXFmM+AIbAwUJCWYBgAUL -CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAE89io5lhCzFOCAP4ujof1jX8sfVot -O6SoIi+rTc3mSD7CyhMtXsZIs1wO4AD+N7MqzQ5pmvfSBI64S3AKEHV2Ie+mk43v -5vnSU4KGrwOJAjMEEAEIAB0WIQSuIfNZYVfUNhbsN/rtBR4uQPs9nQUCXFqgiQAK -CRDtBR4uQPs9naPcD/4wKeAv8HwYGaNbrVgRKGALfWZdcrM6JxKxvXLvudlm4jYi -72mAGPgTW2FFWhhdzJBQYlTxueBBb4nStEGsD1MAMgPsUNGBiyIlF6Qo/1fvBmHW -Bsev+8MRY3FETlpqGKUF+iPVpfhWrpWnsatDcN7rnZecF1Fwj0AoEiPol41L1TzC -2rFtkkB3SpgCU7C5amI9PNAE5bhrk3D0TXgl9cu4rx81aTzJEreu2Qp2Ld1Av3eN -49ZcyFFYQp5luZu6hxuYz3llLT56fre/OdZVhfHCusbA/5TQTHaIsZY5BO1rQh6C -fdMWnl9KQe2V+Am4Ccwinj9d5+CWem05hF0xzY7ihUtHBQfr1a/6vVnUFPtjA7Wv -hp0aKVwniwR+BSOg1fbowZ1YU8o+rhKgJlIg61QOS6Mbm9nw0tncRseKRdFuT6iu -/kCY/ny9aHjRWvQ45Y/Ld5lpjQhoZlcuHDREdTPFpdBnYiFPTEAAhT+HZdnA2525 -lAQg7Bdwor4BJH4YW342bjyXWe40Ij5XzKweQ69paL6LcvLuQOzphvgqpDGLSaTp -ZqdEidNVGGYhu2aL8gC3CUhFrHSAtWdLwTnj4dTTXDgsObSGZIzxStsBRIrEpdDv -LK1TDV6r+w5BKZOH7ecMh2jqA1RqRxALnFmg3sgAtf/KPy5Lv6ZpW5GbZfiQgIh1 -BBAWCAAdFiEEiTBA/hklxU/poczD00Haf8YJhicFAlxapNEACgkQ00Haf8YJhif7 -igEA/18aC75pBp2TbnGk6iMSOHu54jC5nYVkCtoqPrWneM8BAInbp1jURAz6yqjC -cihhfSnel98M1PdcP32kJuEEijoHiQIzBBABCAAdFiEEOiCYC0os/2DIx9KmA1np -IchdiNoFAlxaqloACgkQA1npIchdiNo/OxAAoHREAxc9s9SwSJfTfDUlbZNw342K -Jg2tOFmd7OEHXFTcSBS4X2MftQsX/X5KedqHdyxIVhyP3XyhHUDpqBpHvdvnRtjZ -lQlEzXyvwicy92FE+Co6lPG4XH/WdtWTIaurhlqm9sKaRGP4qvn+qVHbUfIWENbk -nDh8qKy90ENQjqdDEf7S0JnkCHsQhFUyStLXbfsah27SHuIc4zD6ZyJmTkIDDeyr -MPt3aNZ2/LamkLto6XEnTWHilEbyifO5/JwY2nl46Bu77ht5ulkqns2CJ8OiuCWH -O8+/QkWfy7bB0DBj3rQ4QYQzdTUHZPUi5ydoUIpzmavtlwxRHxw5VZv55YnigA4q -AFqsmCfBvY2sza0wo7r7gDKExlgtLcBuG3jbqMWsZ082Ga3144JCe2+2HDPw+aHe -nZJ3O2vrNeBXdyjxPLaj+0IdZHJHaK2hTNk1g2pLsRiwuMPhXeEK/VbBbwmKFXiG -FenoIG8eIrGShnvrO2n1nLur8v13pBCMga9LL3i2DuqYZER7GKOcGZYPnFS1jjR0 -rJqAumfqxSWvh7C2vW07uD7gpG1iVFTJv9kLwVdfaPJALn5YbI/gqc7LHnor2wRC 
-rk7fUe23J56/DJqEn4jrBXfdMjwYxqE65kabeMqIecjX+wwBZQQ3QZHmDZ5YXy2M -A6YS80hZx6gDpDS4OARcWYz4EgorBgEEAZdVAQUBAQdAzMLBLnQ6fLP6yF4FFxVL -8SP8nmdjF8TuCmy/xWjFMhYDAQgHiH4EGBYIACYWIQRcSddibQnZLZanMfgE89io -5lhCzAUCXFmM+AIbDAUJCWYBgAAKCRAE89io5lhCzKp7AQDPS3kW2suf/kN/KQ8p -DA69jdJnTIZVSOLA4ivyJk8csQEArTOnWJuNyZboSin5/e8XtuMqt6JylT94/D1G -ZzY69Aw= -=Ob4p ------END PGP PUBLIC KEY BLOCK----- +mQSuBGXB9V8RDACNREHtA4e53celi1DFDMQwFVBsKg2eN6S1IqINuZhgDquRL92O +Jtcja18GTzaVdC80+TuvL19XWdi4B869XFClZpLlbM1p6HP11haOtG30CRNbRWAQ +FOgDTO+Z/HkktsZlY1Zv0LLOwr/U2EbZAAKp/uKndpwST7rsmBJZyepbPKMTv0yj +IEJSC1NJ2r7iStdEEfpCzzBgDlWesg0+3f710+Z4kQm6EvPq8Z3tFCpcOMKcfbbh +d4cDYJSikEPelrkh6a51gFkcCVAO4KUbmROu95ijHA2INpXtf4L5kiY6iS2OEaej +9J6Whh/spD8bV5wUbV/dpgl21esLrdFpS/0sXlPnez1gDH1d2nIonBA94LxrdL/K +rNfScyLSCI9HfWaXjs8UkrAEHlvbStDI1pvPla7Qjtx+8H9D/UF+1kqfCAgAvuzH +Mwn00HybRLnjIx3RO4fmv8HdmPJHlAt2apT5M+KlxzE2aKu59OqXDaN1cMcmenox +DaXEWx30MB6eFB8BAJAnyT0w9C6NMAml4ecnaI+AsGzwuTXXTFppXm7mhRZ5C/9o +O7YWlyVBGxOoihwlNQU7wu1xt82NRE0PhynNWc1wQrFzPt9Xj2sy7FoEqYjFLkAg +9V8qJ8Gv1neu0DcIqt7tPBJ7ZLcvKtngLbXV1jSGiZsMhHSNYThCZfZu2AitjZgP +BJ7zwsWuAG/x491ellGnE3qnYd7eR628TiHueT2J0CmneovEF65anSPswdCY93/u +YvKYVXHt4Zwtdekc7PmZeVUu2atVuG+ELxYWBBSI01/YeQXi2cJ16rxJf1Eq8nkd +9sli7pK1r+c5tbjgDB8JVp5CEiwkkakVogNBXxYdSmB0GxIIQ38qbWTJ2ZDmeJH4 +aWQEs3EDtplaTKdLUIIl84Lgx7r7EMYlQKQC8Ah3HNmneihasZa3xvhsarBa5uYw +dmz23u73AXn3aWnX+d8RdToQ//znnP06LL2KwQocAPyM5yuo5N1P7kAThqSC17em +XlMu/6Y7emVq7H0Xi4ElQRrJQnOuZOOxcsUOIfu4VTDE6PNSwc6OnROVE+bPyssL +/irHxwOkSQEwYkuj3jAL3R15GnRiMHKy206WLYpkDlLm1HKvFM60xbSdXn8C7Z2c +56RgVBs2JNzo9dsjBv58RT4q/cK+NuKlvRFlgmAc8gOmJ5woh5zw/pLow8i0ljwz +NW2GD9TlcNUrzFjU4sQ4siFw9RY3Oc8TTZbL3YP6Ci+NqtRXRIbrXYECNijPHfAz +d6rtFTI2uQl7ii6ZkQJ9fzAMa7riigPtjNhZW6WiKl43TAEaimZ/YFSPU1zTrwMG +AVBauTzxU+R2rPjoxoZddvok2548Mzt0eFVvF5Qfx11mt+bL2MfMT3ryUxgAEHOF +bvLE0aQd49u+hChAItfh9Bq5d2mtQ1YXFvu8n82EjGd0SXUuN6AVs1HLF51v1cxZ +vxGjE3oTKgfNZavrlvRD46kbWYtor3qYGuokGwzB5BLOlWtxOzYXMDMligj7WqXz +/V8oEgUmwnwt58V8Z5HR28jQH9+yyGFbE1L7bliwG2rYgT2w+7x6hl6rRaE1Th6/ +5LQpU2NoYWxrIHZhbiBIZWVyZGVuIDxzd3ZoZWVyZGVuQGdtYWlsLmNvbT6IlgQT +EQgAPhYhBGrrA9URsApsptQzcQGqGYFT/ofiBQJlwfVfAhsDBQkHhh8JBQsJCAcC +BhUKCQgLAgQWAgMBAh4BAheAAAoJEAGqGYFT/ofi/+gA/3vDPjDCMUwGcupnBnYD +W1Bi8yhu/pMXRnTdZvXerCvJAQCGVjvOssfx0Nb9S92nb6QyreCg1W5snNZSVRjl +8uywo7kDDQRlwfVfEAwAhpp8f6B2xNk0M/+YZNOskzwbj+XSzkgiSXp8xLSCchrM +poiM1cvRgkDw/TA3lo5kd1S9q4NHdP/t/4xFpZ6o+oPNbW2MqwyqH0CtHprN6HtX +x3G71s1JbAHq40EtXGFVLx29yTpQY3pBBQD6kdH/T5xi2IR+Xi1RaMfcsZL4ilzc +6eq818AsFFuOfMgicxmUJKXd3vDywjpmY6VbqZ68UULPngYQkNfYVafx4LOD1y5O +8qPfxR0MCcpqHDX/P9Zo2OXr0PJFf+lgV45/nrdA4/SxbuT4E++Xgm7ZTHwktpI2 +giyNjZ08Mqy/j8VlxxHd231AmL11tSGy501C5NyYIgxZLq/lRe/M8uNWy1eiAIYD +Qos9lnvZTRJxDxwkQp7jqqNfnWMW9wtqp/I2Y5S1LlRrq9T+PC/t5N0RzE9WaWpJ +8voThtdOps0nB+IA3o45dNz4b3yhVEt8AjQwp0LAncXnp3VyqwLDvSf5ua/YQPZx +u3DxgWyckPSOpHomZ1VnAAMFC/95PVeQHX6lGjIXL1eK5EfyxBgDuc6TVc5zfHor +aJTwDpGD5dzriaRRZUQzdFSyUjSRG796foT9T7CQk3uuhzfdJTRFGuszGmUgWGTQ +Rry3OJni3ZDRmOkbWzlcYlwVsekd5BpbSTqPCiq55OsycT4EwMY3rnCMtQ+8TcFJ +Sn27GM4trN/ForclTHoLDQTvY6qT559rWox8zrUmH23AA4CQofb2MPlIM/iYosMy +XTl9MQfNhESe6gS7EHbQalGbQjbXA6Q3o5wsktdRMLXAhYL4aOU9kS3YDaH5d8lk +68xVYe7mb6rj6tv0UKUp68NwnPhm3TGItnsBlUhCVcWzw2Fttt9xF2I99fDl8FJE +5MTMiiGET51w9sB51BA+K1Lj57T4YmIgAzgRqw6zHstGqXU5US13RvpGAX/RzgRP +icmzkQiBGXm66IqUzRu0SelomW2oTfXjo7VVs5U8zpc2KWHb8wqN7rDWDxfJZsj9 +RyEJgc6ekmgC2vlbFqqAQ/C44+uIfgQYEQgAJhYhBGrrA9URsApsptQzcQGqGYFT +/ofiBQJlwfVfAhsMBQkHhh8JAAoJEAGqGYFT/ofi768A/jBr3sFU97M5Hf0nGfYA +Bjw6yQmDOU02magWtM4aJmpBAP9YLTeYB02+GYo3wM7MQ8xyJaS8Ed+PzAeNtQMM 
+xDOnOQ== +=VupM +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 8cf6ea2b68..538e117b71 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "tari", - "version": "1.0.0-pre.5", + "version": "1.0.0-pre.11a", "lockfileVersion": 2, "requires": true, "packages": {} } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 96440e4837..4b396a926c 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -13,4 +13,4 @@ # - the CI files in .github folder # - the Makefile in base_layer/key_manager/Makefile [toolchain] -channel = "nightly-2023-06-04" +channel = "nightly-2024-02-04" diff --git a/rustfmt.toml b/rustfmt.toml index 3bc22cf400..13868eb0c1 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -10,7 +10,7 @@ imports_layout = "HorizontalVertical" imports_granularity = "Crate" match_block_trailing_comma = true max_width = 120 -newline_style = "Native" +newline_style = "Auto" normalize_comments = true overflow_delimited_expr = true reorder_imports = true diff --git a/scripts/install_ubuntu_dependencies-arm64.sh b/scripts/install_ubuntu_dependencies-arm64.sh deleted file mode 100755 index 0fd7d7054d..0000000000 --- a/scripts/install_ubuntu_dependencies-arm64.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env sh -# -# Install Ubuntu aarch64/arm64 deb dev/tool packages on x86_64 -# -apt-get -y install $* \ - pkg-config-aarch64-linux-gnu \ - gcc-aarch64-linux-gnu \ - g++-aarch64-linux-gnu diff --git a/scripts/install_ubuntu_dependencies-cross_compile.sh b/scripts/install_ubuntu_dependencies-cross_compile.sh new file mode 100755 index 0000000000..6e78ae7a21 --- /dev/null +++ b/scripts/install_ubuntu_dependencies-cross_compile.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env sh +# +# Install Ubuntu aarch64(arm64)/riscv64 deb dev/tool packages on x86_64 +# +USAGE="Usage: $0 ISA_ARCH other packages, e.g. aarch64" + +# POSIX sh string comparison uses a single '='; '==' is a bashism +if [ "$#" = "0" ]; then + echo "$USAGE" + exit 1 +fi + +isa_arch=${1} +shift + +apt-get --assume-yes install $* \ + pkg-config-${isa_arch}-linux-gnu \ + gcc-${isa_arch}-linux-gnu \ + g++-${isa_arch}-linux-gnu diff --git a/scripts/install_ubuntu_dependencies.sh b/scripts/install_ubuntu_dependencies.sh index 2bb35cf3b7..d3113b6930 100755 --- a/scripts/install_ubuntu_dependencies.sh +++ b/scripts/install_ubuntu_dependencies.sh @@ -20,4 +20,5 @@ apt-get install --no-install-recommends --assume-yes \ protobuf-compiler \ libncurses5-dev \ libncursesw5-dev \ + libudev-dev \ zip diff --git a/scripts/test_in_docker.sh b/scripts/test_in_docker.sh index e7bf2b1781..8717674b52 100755 --- a/scripts/test_in_docker.sh +++ b/scripts/test_in_docker.sh @@ -2,8 +2,8 @@ # Run the Tari test suite locally inside a suitable docker container -IMAGE=quay.io/tarilabs/rust_tari-build-with-deps:nightly-2023-06-04 -TOOLCHAIN_VERSION=nightly-2023-06-04 +IMAGE=quay.io/tarilabs/rust_tari-build-with-deps:nightly-2024-02-04 +TOOLCHAIN_VERSION=nightly-2024-02-04 CONTAINER=tari_test echo "Deleting old container"
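# NB: IMAGE and TOOLCHAIN_VERSION must track the channel pinned in rust-toolchain.toml (see the sync checklist in that file's header comment).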