diff --git a/.github/build-iroha1-fork.src.yml b/.github/build-iroha1-fork.src.yml
index b4547c2e056..d27aba0bd0c 100644
--- a/.github/build-iroha1-fork.src.yml
+++ b/.github/build-iroha1-fork.src.yml
@@ -27,7 +27,7 @@ jobs:
check_if_pull_request_comes_from_fork:
runs-on: ubuntu-20.04 #ubuntu-latest
permissions: read-all
- name: Pull requests from forks should use this workflow
+ name: Pull requests from forks should use this workflow
if: github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps:
- &step_show_context
@@ -44,8 +44,8 @@ jobs:
uses: actions/checkout@v2
with: &step_checkout_with_head
ref: ${{ github.event.pull_request.head.sha }}
- -
- name: Filter files
+
+ - name: Filter files
uses: dorny/paths-filter@v2
id: filter
with:
@@ -70,13 +70,13 @@ jobs:
run: |
echo "Pull requests from forks are not allowed to change Dockerfiles"
false
-
+
- name: verify build depedencies script is not changed
if: steps.filter.outputs.build_dependecies == 'true'
run: |
echo "Pull requests from forks are not allowed to change build dependencies script"
false
-
+
## This job is to generate build matrixes for build jobs
## The matrixes depend on what is requeted to be build
## At the moment there are several options:
@@ -118,12 +118,12 @@ jobs:
commit_was_merged_build_spec(){
git_is_merge_commit $1 &&
git log -n1 $1 --format=%s | grep -q '^Merge branch' &&
- echo "/build before-merge"
+ echo "/build ubuntu debug release normal gcc-10"
}
case ${{github.event_name}} in
pull_request_target) if commit_message_body_build_spec FETCH_HEAD >/tmp/comment_body ;then
if git_is_merge_commit FETCH_HEAD ;then
- echo ::warning::'/build directive in merge commit overrides default "/build before-merge"'
+ echo ::warning::'/build directive in merge commit overrides default "/build ubuntu debug release normal gcc-10"'
fi
elif commit_was_merged_build_spec FETCH_HEAD >/tmp/comment_body ;then
true
@@ -175,7 +175,7 @@ jobs:
matrix_dockerimage_release: ${{steps.matrixes.outputs.matrix_dockerimage_release}}
matrix_dockerimage_debug: ${{steps.matrixes.outputs.matrix_dockerimage_debug}}
- ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies
+ ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependencies
## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name,
## and conditional tags :edge for development branch, and :latest for git-tags.
Docker-iroha-builder:
@@ -329,7 +329,7 @@ jobs:
needs:
- Docker-iroha-builder
- generate_matrixes
- runs-on: [ self-hosted, Linux ]
+ runs-on: [ self-hosted, Linux, iroha ]
permissions: read-all
container: ## Container is taken from previous job
image: &container_image ${{needs.Docker-iroha-builder.outputs.container}}
@@ -413,7 +413,7 @@ jobs:
key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- &step_vcpkg_build
- name: Build iroha vcpkg dependancies
+ name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -751,7 +751,7 @@ jobs:
needs:
- build-UR
- generate_matrixes
- runs-on: [ self-hosted, Linux ] #ubuntu-latest
+ runs-on: [ self-hosted, Linux, iroha ] #ubuntu-latest
# strategy: *strategy_ubuntu_release
# if: *if_ubuntu_release
strategy:
diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml
index 7a239490ad6..6501a6dc97b 100644
--- a/.github/build-iroha1.src.yml
+++ b/.github/build-iroha1.src.yml
@@ -208,7 +208,7 @@ jobs:
true
else
#echo >/tmp/comment_body "/build debug; /build ubuntu release debug normal"
- echo >/tmp/comment_body "/build ubuntu debug release normal gcc-10"
+            echo >/tmp/comment_body "/build ubuntu debug release normal gcc-10"$'\n'"/build macos debug clang"
fi ;;
push) commit_message_body_build_spec >/tmp/comment_body || {
echo "/build ubuntu debug release normal gcc-10"
@@ -221,6 +221,7 @@ jobs:
id: matrixes
run: |
set -x
+ cat /tmp/comment_body
cat /tmp/comment_body | .github/chatops-gen-matrix.sh
echo "::set-output name=matrix_ubuntu::$(cat matrix_ubuntu)"
echo "::set-output name=matrix_ubuntu_release::$(cat matrix_ubuntu_release)"
@@ -352,13 +353,13 @@ jobs:
- &step_docker_buildx
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- - &step_docker_cache
- name: Cache Docker layers
- uses: actions/cache@v2
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-${{env.dockertag}}
- restore-keys: ${{ runner.os }}-buildx-
+ # - &step_docker_cache
+ # name: Cache Docker layers
+ # uses: actions/cache@v2
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-
- &step_docker_build_and_push
id: build_and_push
name: Build and push
@@ -379,14 +380,14 @@ jobs:
push: ${{ github.event_name != 'pull_request' || github.event.pull_request.head.repo == github.event.pull_request.base.repo }}
tags: ${{ steps.meta_ghcr.outputs.tags }}
labels: ${{ steps.meta_ghcr.outputs.labels }}
- - &step_docker_move_cache
- # Temp fix
- # https://github.com/docker/build-push-action/issues/252
- # https://github.com/moby/buildkit/issues/1896
- name: Move cache
- run: |
- rm -rf /tmp/.buildx-cache
- mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+ # - &step_docker_move_cache
+ # # Temp fix
+ # # https://github.com/docker/build-push-action/issues/252
+ # # https://github.com/moby/buildkit/issues/1896
+ # name: Move cache
+ # run: |
+ # rm -rf /tmp/.buildx-cache
+ # mv /tmp/.buildx-cache-new /tmp/.buildx-cache
-
name: Check if dockertaghash exists in remote registry
id: dockertag_already
@@ -409,7 +410,7 @@ jobs:
needs:
- Docker-iroha-builder
- generate_matrixes
- runs-on: [ self-hosted, Linux ]
+ runs-on: [ self-hosted, Linux, iroha ]
container: ## Container is taken from previous job
image: &container_image ${{needs.Docker-iroha-builder.outputs.container}}
options: --user root
@@ -473,24 +474,24 @@ jobs:
echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p')
echo >>$GITHUB_ENV NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}')
echo >>$GITHUB_ENV HOME=$HOME
- - &step_restore_ccache
- name: Restore cache CCache
- uses: actions/cache@v2
- with:
- path: ${{ env._CCACHE_DIR }}
- key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
- restore-keys: ${{runner.os}}-ccache-
- - &step_store_ccache_stats
- run: ccache --show-stats | tee /tmp/ccache-stats
- - &step_vcpkg_cache
- ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
- name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
- uses: actions/cache@v2
- with:
- path: |
- ${{env.HOME}}/.cache/vcpkg/archives
- key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
- restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
+ # - &step_restore_ccache
+ # name: Restore cache CCache
+ # uses: actions/cache@v2
+ # with:
+ # path: ${{ env._CCACHE_DIR }}
+ # key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
+ # restore-keys: ${{runner.os}}-ccache-
+ # - &step_store_ccache_stats
+ # run: ccache --show-stats | tee /tmp/ccache-stats
+ # - &step_vcpkg_cache
+ # ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
+ # name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
+ # uses: actions/cache@v2
+ # with:
+ # path: |
+ # ${{env.HOME}}/.cache/vcpkg/archives
+ # key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
+ # restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- &step_vcpkg_build
name: Build iroha vcpkg dependancies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
@@ -530,8 +531,8 @@ jobs:
- &step_cpack
name: CPack (linux only)
run: cd build; cpack; ## cmake --build build --target package
- - &step_compare_ccache_stats
- run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
+ # - &step_compare_ccache_stats
+ # run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
- &step_always_after_build
name: Show free space and disk usage
if: ${{ always() }}
@@ -735,13 +736,13 @@ jobs:
env:
<<: *step_export_cxx_env
CCACHE_PATH: /usr/local/opt/ccache/libexec
- - *step_restore_ccache
- - *step_store_ccache_stats
- - *step_vcpkg_cache
+ # - *step_restore_ccache
+ # - *step_store_ccache_stats
+ # - *step_vcpkg_cache
- *step_vcpkg_build
- *step_cmake_configure
- *step_cmake_build
- - *step_compare_ccache_stats
+ # - *step_compare_ccache_stats
- *step_always_after_build
- *step_artifact_suffix
- <<: *step_artifact_irohad
@@ -824,7 +825,7 @@ jobs:
needs:
- build-UR
- generate_matrixes
- runs-on: [ self-hosted, Linux ] #ubuntu-latest
+ runs-on: [ self-hosted, Linux, iroha ] #ubuntu-latest
# strategy: *strategy_ubuntu_release
# if: *if_ubuntu_release
strategy:
@@ -873,11 +874,11 @@ jobs:
- *step_docker_login_ghcr
- *step_warn_docker_no_push
- *step_docker_buildx
- - <<: *step_docker_cache
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-release-${{env.dockertag}}
- restore-keys: ${{ runner.os }}-buildx-release
+ # - <<: *step_docker_cache
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-release-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-release
- <<: *step_docker_build_and_push
with:
<<: *step_docker_build_and_push_with
@@ -887,7 +888,7 @@ jobs:
with:
<<: *step_docker_build_and_push_ghcr-with
context: docker/release/
- - *step_docker_move_cache
+ # - *step_docker_move_cache
docker-D:
<<: *job_docker_image_release
diff --git a/.github/chatops-gen-matrix.sh b/.github/chatops-gen-matrix.sh
index 39b306e452a..a16432d4ce2 100755
--- a/.github/chatops-gen-matrix.sh
+++ b/.github/chatops-gen-matrix.sh
@@ -19,7 +19,7 @@ echoerr(){
readonly ALL_oses="ubuntu macos windows" ALL_build_types="Debug Release" ALL_cmake_opts="normal burrow ursa" ALL_compilers="gcc-9 gcc-10 clang-10 clang llvm msvc"
readonly DEFAULT_oses="ubuntu macos windows" DEFAULT_build_types="Debug" DEFAULT_cmake_opts="normal burrow ursa"
readonly DEFAULT_ubuntu_compilers="gcc-9" AVAILABLE_ubuntu_compilers="gcc-9 gcc-10 clang-10"
-readonly DEFAULT_macos_compilers="clang" AVAILABLE_macos_compilers="clang" ## Also "llvm gcc-10" but they fail
+readonly DEFAULT_macos_compilers="clang" AVAILABLE_macos_compilers="clang gcc-10" ## Also "llvm" but it fails
readonly DEFAULT_windows_compilers="msvc" AVAILABLE_windows_compilers="msvc" ## Also "clang mingw cygwin" but they are redundant
--help-buildspec(){
diff --git a/.github/workflows/build-iroha1-fork.yml b/.github/workflows/build-iroha1-fork.yml
index 06240ad1aef..8a75d8807fa 100644
--- a/.github/workflows/build-iroha1-fork.yml
+++ b/.github/workflows/build-iroha1-fork.yml
@@ -115,12 +115,12 @@ jobs:
commit_was_merged_build_spec(){
git_is_merge_commit $1 &&
git log -n1 $1 --format=%s | grep -q '^Merge branch' &&
- echo "/build before-merge"
+ echo "/build ubuntu debug release normal gcc-10"
}
case ${{github.event_name}} in
pull_request_target) if commit_message_body_build_spec FETCH_HEAD >/tmp/comment_body ;then
if git_is_merge_commit FETCH_HEAD ;then
- echo ::warning::'/build directive in merge commit overrides default "/build before-merge"'
+ echo ::warning::'/build directive in merge commit overrides default "/build ubuntu debug release normal gcc-10"'
fi
elif commit_was_merged_build_spec FETCH_HEAD >/tmp/comment_body ;then
true
@@ -168,7 +168,7 @@ jobs:
matrix_windows: ${{steps.matrixes.outputs.matrix_windows}}
matrix_dockerimage_release: ${{steps.matrixes.outputs.matrix_dockerimage_release}}
matrix_dockerimage_debug: ${{steps.matrixes.outputs.matrix_dockerimage_debug}}
- ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies
+ ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependencies
## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name,
## and conditional tags :edge for development branch, and :latest for git-tags.
Docker-iroha-builder:
@@ -333,7 +333,7 @@ jobs:
needs:
- Docker-iroha-builder
- generate_matrixes
- runs-on: [self-hosted, Linux]
+ runs-on: [self-hosted, Linux, iroha]
permissions: read-all
container: ## Container is taken from previous job
image: ${{needs.Docker-iroha-builder.outputs.container}}
@@ -441,7 +441,7 @@ jobs:
${{env.HOME}}/.cache/vcpkg/archives
key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- - name: Build iroha vcpkg dependancies
+ - name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -616,7 +616,7 @@ jobs:
needs:
- Docker-iroha-builder
- generate_matrixes
- runs-on: [self-hosted, Linux]
+ runs-on: [self-hosted, Linux, iroha]
permissions: read-all
container: ## Container is taken from previous job
image: ${{needs.Docker-iroha-builder.outputs.container}}
@@ -720,7 +720,7 @@ jobs:
${{env.HOME}}/.cache/vcpkg/archives
key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- - name: Build iroha vcpkg dependancies
+ - name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -1023,7 +1023,7 @@ jobs:
${{env.HOME}}/.cache/vcpkg/archives
key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- - name: Build iroha vcpkg dependancies
+ - name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -1249,7 +1249,7 @@ jobs:
working-directory:
#- *step_restore_ccache
#- *step_vcpkg_cache
- - name: Build iroha vcpkg dependancies
+ - name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -1293,7 +1293,7 @@ jobs:
needs:
- build-UR
- generate_matrixes
- runs-on: [self-hosted, Linux] #ubuntu-latest
+ runs-on: [self-hosted, Linux, iroha] #ubuntu-latest
# strategy: *strategy_ubuntu_release
# if: *if_ubuntu_release
strategy:
@@ -1468,7 +1468,7 @@ jobs:
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
docker-D:
- runs-on: [self-hosted, Linux] #ubuntu-latest
+ runs-on: [self-hosted, Linux, iroha] #ubuntu-latest
env:
IMAGE_NAME: iroha
steps:
diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml
index 52bf2cc21c3..8c3c2028184 100644
--- a/.github/workflows/build-iroha1.yml
+++ b/.github/workflows/build-iroha1.yml
@@ -223,7 +223,7 @@ jobs:
true
else
#echo >/tmp/comment_body "/build debug; /build ubuntu release debug normal"
- echo >/tmp/comment_body "/build ubuntu debug release normal gcc-10"
+            echo >/tmp/comment_body "/build ubuntu debug release normal gcc-10"$'\n'"/build macos debug clang"
fi ;;
push) commit_message_body_build_spec >/tmp/comment_body || {
echo "/build ubuntu debug release normal gcc-10"
@@ -235,6 +235,7 @@ jobs:
id: matrixes
run: |
set -x
+ cat /tmp/comment_body
cat /tmp/comment_body | .github/chatops-gen-matrix.sh
echo "::set-output name=matrix_ubuntu::$(cat matrix_ubuntu)"
echo "::set-output name=matrix_ubuntu_release::$(cat matrix_ubuntu_release)"
@@ -389,13 +390,14 @@ jobs:
images: ghcr.io/${{ github.repository }}-builder
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- - name: Cache Docker layers
- uses: actions/cache@v2
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-${{env.dockertag}}
- restore-keys: ${{ runner.os }}-buildx-
- - id: build_and_push
+ - # - &step_docker_cache
+ # name: Cache Docker layers
+ # uses: actions/cache@v2
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-
+ id: build_and_push
name: Build and push
uses: docker/build-push-action@v2
with:
@@ -415,13 +417,14 @@ jobs:
push: ${{ github.event_name != 'pull_request' || github.event.pull_request.head.repo == github.event.pull_request.base.repo }}
tags: ${{ steps.meta_ghcr.outputs.tags }}
labels: ${{ steps.meta_ghcr.outputs.labels }}
- - # Temp fix
- # https://github.com/docker/build-push-action/issues/252
- # https://github.com/moby/buildkit/issues/1896
- name: Move cache
- run: |
- rm -rf /tmp/.buildx-cache
- mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+ # - &step_docker_move_cache
+ # # Temp fix
+ # # https://github.com/docker/build-push-action/issues/252
+ # # https://github.com/moby/buildkit/issues/1896
+ # name: Move cache
+ # run: |
+ # rm -rf /tmp/.buildx-cache
+ # mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Check if dockertaghash exists in remote registry
id: dockertag_already
run: |
@@ -442,7 +445,7 @@ jobs:
needs:
- Docker-iroha-builder
- generate_matrixes
- runs-on: [self-hosted, Linux]
+ runs-on: [self-hosted, Linux, iroha]
container: ## Container is taken from previous job
image: ${{needs.Docker-iroha-builder.outputs.container}}
options: --user root
@@ -538,22 +541,25 @@ jobs:
echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p')
echo >>$GITHUB_ENV NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}')
echo >>$GITHUB_ENV HOME=$HOME
- - name: Restore cache CCache
- uses: actions/cache@v2
- with:
- path: ${{ env._CCACHE_DIR }}
- key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
- restore-keys: ${{runner.os}}-ccache-
- - run: ccache --show-stats | tee /tmp/ccache-stats
- - ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
- name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
- uses: actions/cache@v2
- with:
- path: |
- ${{env.HOME}}/.cache/vcpkg/archives
- key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
- restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- - name: Build iroha vcpkg dependancies
+ - # - &step_restore_ccache
+ # name: Restore cache CCache
+ # uses: actions/cache@v2
+ # with:
+ # path: ${{ env._CCACHE_DIR }}
+ # key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
+ # restore-keys: ${{runner.os}}-ccache-
+ # - &step_store_ccache_stats
+ # run: ccache --show-stats | tee /tmp/ccache-stats
+ # - &step_vcpkg_cache
+ # ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
+ # name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
+ # uses: actions/cache@v2
+ # with:
+ # path: |
+ # ${{env.HOME}}/.cache/vcpkg/archives
+ # key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
+ # restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
+            name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -582,8 +588,9 @@ jobs:
## Release takes 2m58s on self-hosted AWS EC2 c5.x4large
- name: CPack (linux only)
run: cd build; cpack; ## cmake --build build --target package
- - run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
- - name: Show free space and disk usage
+ - # - &step_compare_ccache_stats
+ # run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
+ name: Show free space and disk usage
if: ${{ always() }}
run: |
df -h || true
@@ -728,7 +735,7 @@ jobs:
needs:
- Docker-iroha-builder
- generate_matrixes
- runs-on: [self-hosted, Linux]
+ runs-on: [self-hosted, Linux, iroha]
container: ## Container is taken from previous job
image: ${{needs.Docker-iroha-builder.outputs.container}}
options: --user root
@@ -820,22 +827,25 @@ jobs:
echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p')
echo >>$GITHUB_ENV NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}')
echo >>$GITHUB_ENV HOME=$HOME
- - name: Restore cache CCache
- uses: actions/cache@v2
- with:
- path: ${{ env._CCACHE_DIR }}
- key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
- restore-keys: ${{runner.os}}-ccache-
- - run: ccache --show-stats | tee /tmp/ccache-stats
- - ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
- name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
- uses: actions/cache@v2
- with:
- path: |
- ${{env.HOME}}/.cache/vcpkg/archives
- key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
- restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- - name: Build iroha vcpkg dependancies
+ - # - &step_restore_ccache
+ # name: Restore cache CCache
+ # uses: actions/cache@v2
+ # with:
+ # path: ${{ env._CCACHE_DIR }}
+ # key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
+ # restore-keys: ${{runner.os}}-ccache-
+ # - &step_store_ccache_stats
+ # run: ccache --show-stats | tee /tmp/ccache-stats
+ # - &step_vcpkg_cache
+ # ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
+ # name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
+ # uses: actions/cache@v2
+ # with:
+ # path: |
+ # ${{env.HOME}}/.cache/vcpkg/archives
+ # key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
+ # restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
+            name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -864,8 +874,9 @@ jobs:
## Release takes 2m58s on self-hosted AWS EC2 c5.x4large
- name: CPack (linux only)
run: cd build; cpack; ## cmake --build build --target package
- - run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
- - name: Show free space and disk usage
+ - # - &step_compare_ccache_stats
+ # run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
+ name: Show free space and disk usage
if: ${{ always() }}
run: |
df -h || true
@@ -1129,22 +1140,28 @@ jobs:
echo >>$GITHUB_ENV HOME=$HOME
env:
CCACHE_PATH: /usr/local/opt/ccache/libexec
- - name: Restore cache CCache
- uses: actions/cache@v2
- with:
- path: ${{ env._CCACHE_DIR }}
- key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
- restore-keys: ${{runner.os}}-ccache-
- - run: ccache --show-stats | tee /tmp/ccache-stats
- - ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
- name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
- uses: actions/cache@v2
- with:
- path: |
- ${{env.HOME}}/.cache/vcpkg/archives
- key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
- restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
- - name: Build iroha vcpkg dependancies
+ # - *step_restore_ccache
+ # - *step_store_ccache_stats
+ # - *step_vcpkg_cache
+ - # - &step_restore_ccache
+ # name: Restore cache CCache
+ # uses: actions/cache@v2
+ # with:
+ # path: ${{ env._CCACHE_DIR }}
+ # key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
+ # restore-keys: ${{runner.os}}-ccache-
+ # - &step_store_ccache_stats
+ # run: ccache --show-stats | tee /tmp/ccache-stats
+ # - &step_vcpkg_cache
+ # ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
+ # name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
+ # uses: actions/cache@v2
+ # with:
+ # path: |
+ # ${{env.HOME}}/.cache/vcpkg/archives
+ # key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
+ # restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
+            name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -1171,8 +1188,10 @@ jobs:
## Debug takes 18m44s on regular GitHub runner
## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large
## Release takes 2m58s on self-hosted AWS EC2 c5.x4large
- - run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
- - name: Show free space and disk usage
+ # - *step_compare_ccache_stats
+ - # - &step_compare_ccache_stats
+ # run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
+ name: Show free space and disk usage
if: ${{ always() }}
run: |
df -h || true
@@ -1373,7 +1392,25 @@ jobs:
working-directory:
#- *step_restore_ccache
#- *step_vcpkg_cache
- - name: Build iroha vcpkg dependancies
+ - # - &step_restore_ccache
+ # name: Restore cache CCache
+ # uses: actions/cache@v2
+ # with:
+ # path: ${{ env._CCACHE_DIR }}
+ # key: ${{runner.os}}-ccache-${{ github.event.pull_request.head.sha }}
+ # restore-keys: ${{runner.os}}-ccache-
+ # - &step_store_ccache_stats
+ # run: ccache --show-stats | tee /tmp/ccache-stats
+ # - &step_vcpkg_cache
+ # ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md
+ # name: Restore cache Vcpkg binarycache ## NOTE not useng NuGet because on ubuntu nuget needs mono of 433MB, unusable.
+ # uses: actions/cache@v2
+ # with:
+ # path: |
+ # ${{env.HOME}}/.cache/vcpkg/archives
+ # key: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.${{ hashFiles('vcpkg/**') }}
+ # restore-keys: ${{runner.os}}-vcpkg-${{env.CC_NAME}}.
+            name: Build iroha vcpkg dependencies
run: ./vcpkg/build_iroha_deps.sh $PWD/vcpkg-build; test -f $PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake
## Takes 48m16s on default GitHub runner with 2 cores
## Takes 13m41s on self-hosted AWS EC2 c5.x4large
@@ -1400,7 +1437,9 @@ jobs:
## Debug takes 18m44s on regular GitHub runner
## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large
## Release takes 2m58s on self-hosted AWS EC2 c5.x4large
- - name: Show free space and disk usage
+ - # - &step_compare_ccache_stats
+ # run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true
+ name: Show free space and disk usage
if: ${{ always() }}
run: |
df -h || true
@@ -1417,7 +1456,7 @@ jobs:
needs:
- build-UR
- generate_matrixes
- runs-on: [self-hosted, Linux] #ubuntu-latest
+ runs-on: [self-hosted, Linux, iroha] #ubuntu-latest
# strategy: *strategy_ubuntu_release
# if: *if_ubuntu_release
strategy:
@@ -1565,13 +1604,19 @@ jobs:
run: echo "::warning::DOCKERHUB_TOKEN and DOCKERHUB_USERNAME are empty. Will build but NOT push."
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- - name: Cache Docker layers
- uses: actions/cache@v2
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-release-${{env.dockertag}}
- restore-keys: ${{ runner.os }}-buildx-release
- - id: build_and_push
+ # - <<: *step_docker_cache
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-release-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-release
+ - # - &step_docker_cache
+ # name: Cache Docker layers
+ # uses: actions/cache@v2
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-
+ id: build_and_push
name: Build and push
uses: docker/build-push-action@v2
with:
@@ -1591,15 +1636,9 @@ jobs:
tags: ${{ steps.meta_ghcr.outputs.tags }}
labels: ${{ steps.meta_ghcr.outputs.labels }}
context: docker/release/
- - # Temp fix
- # https://github.com/docker/build-push-action/issues/252
- # https://github.com/moby/buildkit/issues/1896
- name: Move cache
- run: |
- rm -rf /tmp/.buildx-cache
- mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+ # - *step_docker_move_cache
docker-D:
- runs-on: [self-hosted, Linux] #ubuntu-latest
+ runs-on: [self-hosted, Linux, iroha] #ubuntu-latest
env:
DOCKERHUB_ORG: hyperledger ## Must be hyperledger, also can use iroha1, cannot use ${{ secrets.DOCKERHUB_ORG }}
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -1741,13 +1780,19 @@ jobs:
run: echo "::warning::DOCKERHUB_TOKEN and DOCKERHUB_USERNAME are empty. Will build but NOT push."
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- - name: Cache Docker layers
- uses: actions/cache@v2
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-release-${{env.dockertag}}
- restore-keys: ${{ runner.os }}-buildx-release
- - id: build_and_push
+ # - <<: *step_docker_cache
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-release-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-release
+ - # - &step_docker_cache
+ # name: Cache Docker layers
+ # uses: actions/cache@v2
+ # with:
+ # path: /tmp/.buildx-cache
+ # key: ${{ runner.os }}-buildx-${{env.dockertag}}
+ # restore-keys: ${{ runner.os }}-buildx-
+ id: build_and_push
name: Build and push
uses: docker/build-push-action@v2
with:
@@ -1767,13 +1812,8 @@ jobs:
tags: ${{ steps.meta_ghcr.outputs.tags }}
labels: ${{ steps.meta_ghcr.outputs.labels }}
context: docker/release/
- - # Temp fix
- # https://github.com/docker/build-push-action/issues/252
- # https://github.com/moby/buildkit/issues/1896
- name: Move cache
- run: |
- rm -rf /tmp/.buildx-cache
- mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+ # - *step_docker_move_cache
+
needs:
- build-UD
- generate_matrixes
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c7544d27562..3a730cc808f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -10,7 +10,7 @@ if(CCACHE_PROGRAM)
endif()
PROJECT(iroha
- VERSION 1.4.0
+ VERSION 1.6.0
LANGUAGES C CXX)
SET(CMAKE_CXX_STANDARD 17)
@@ -202,6 +202,7 @@ endif()
add_subdirectory(libs)
add_subdirectory(irohad)
add_subdirectory(iroha-cli)
+add_subdirectory(iroha-lib)
add_subdirectory(shared_model)
if(TESTING)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index c946f1b8cdb..0659a339def 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -52,7 +52,7 @@ Reporting Vulnerabilities
~~~~~~~~~~~~~~~~~~~~~~~~~
While we try to be proactive in preventing security problems, we do not
-assume they?ll never come up.
+assume they'll never come up.
It is standard practice to responsibly and privately disclose to the
vendor (Hyperledger organization) a security problem before publicizing,
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 1911049ba71..3bb1cc7b9ab 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -29,4 +29,8 @@ This is the list of maintainers, including their email address for direct commun
| Vasily Zyabkin | [@BAStos525](https://github.com/BAStos525) | zyabkin@soramitsu.co.jp | DevOps |
| Ekaterina Mekhnetsova | [@outoftardis](https://github.com/outoftardis) | mekhnetsova@soramitsu.co.jp | Documentation |
| William Richter | [@WRRicht3r](https://github.com/WRRicht3r) | ryan.rrr66@gmail.com | Documentation |
-| Victor Gridnevsky | [@6r1d](https://github.com/6r1d) | gridnevsky@soramitsu.co.jp | Community manager, documentation, development: JavaScript, TypeScript, Python |
\ No newline at end of file
+| Victor Gridnevsky | [@6r1d](https://github.com/6r1d) | gridnevsky@soramitsu.co.jp | Community manager, documentation, development: JavaScript, TypeScript, Python |
+| Alexander Strokov | [@astrokov7](https://github.com/astrokov7) | strokov@soramitsu.co.jp | QA, Python |
+| Michael Timofeev | [@timofeevmd](https://github.com/timofeevmd) | timofeev@soramitsu.co.jp | QA |
+| Nikita Strygin | [@DCNick3](https://github.com/DCNick3) | moslike6@gmail.com | Development: Rust |
+| Bogdan Yamkovoy | [@yamkovoy](https://github.com/yamkovoy) | yamkovoy@soramitsu.co.jp | Documentation |
diff --git a/docker/iroha-builder/Dockerfile b/docker/iroha-builder/Dockerfile
index 2c3744b21ca..4f3d0a11340 100644
--- a/docker/iroha-builder/Dockerfile
+++ b/docker/iroha-builder/Dockerfile
@@ -24,7 +24,7 @@ RUN apt-get update && \
# other
curl file ccache libssl-dev \
gcovr cppcheck doxygen rsync graphviz graphviz-dev vim zip unzip pkg-config \
- jq \
+ jq autoconf \
postgresql postgresql-contrib; \
if [ $(uname -m) = "x86_64" ] ;then \
apt-get -y --no-install-recommends install \
diff --git a/docs/source/build/index.rst b/docs/source/build/index.rst
index 9666db84bca..a0f6addfdd7 100644
--- a/docs/source/build/index.rst
+++ b/docs/source/build/index.rst
@@ -146,6 +146,13 @@ Use this code to install environment dependencies on Debian-based Linux distro.
libraries, please consider installing the
`latest release `_ of CMake.
+RaspberryPi 4
+""""""""""""""""""""""""""
+
+To build Iroha on Raspberry Pi 4 follow the same instructions as for building Linux.
+
+**ATTENTION**: Iroha requires 8GiB of RAM. If your build terminates with `SIGKILL` consider creating a swap file or swap partition on the host device, or cross-compiling.
+
Now you are ready to `install Iroha dependencies <#installing-dependencies-with-vcpkg-dependency-manager>`_.
.. _macos-pre:
diff --git a/docs/source/configure/index.rst b/docs/source/configure/index.rst
index 7b471648c4b..1d447868a3e 100644
--- a/docs/source/configure/index.rst
+++ b/docs/source/configure/index.rst
@@ -146,6 +146,8 @@ Environment-specific parameters
}
]
+- ``max_past_created_hours``: optional parameter specifying how many hours in the past since current time (measured on the peer) can the transaction's `created_time` be set. The default value is `"24"` hours. This value must be the same on all peers, otherwise it can silently cause the network to stop producing blocks.
+
Good Practice Example
---------------------
diff --git a/docs/source/develop/api/queries.rst b/docs/source/develop/api/queries.rst
index 1a2cba9db2d..ce07e4f35f4 100644
--- a/docs/source/develop/api/queries.rst
+++ b/docs/source/develop/api/queries.rst
@@ -10,7 +10,7 @@ Validation
The validation for all queries includes:
-- timestamp — shouldn't be from the past (24 hours prior to the peer time) or from the future (range of 5 minutes added to the peer time)
+- timestamp — shouldn't be from the past (configurable in `Iroha configuration <../../configure/index.html#environment-specific-parameters>`_) or from the future (range of 5 minutes added to the peer time)
- signature of query creator — used for checking the identity of query creator
- query counter — checked to be incremented with every subsequent query from query creator
- roles — depending on the query creator's role: the range of state available to query can relate to to the same account, account in the domain, to the whole chain, or not allowed at all
diff --git a/docs/source/getting_started/index.rst b/docs/source/getting_started/index.rst
index 00d1bde7b2c..aea2ef5c6dd 100644
--- a/docs/source/getting_started/index.rst
+++ b/docs/source/getting_started/index.rst
@@ -15,7 +15,7 @@ To keep things simple, we will use Docker.
Prerequisites
-------------
For this guide, you need a machine with ``Docker`` installed.
-You can read how to install it on a `Docker's website `_.
+You can read how to install it on a `Docker's website `_.
.. note:: Of course you can build Iroha from scratch, modify its code and launch a customized node!
If you are curious how to do that — you can check :ref:`build-guide` section.
diff --git a/docs/source/requirements.txt b/docs/source/requirements.txt
index 2094df8f102..69aa62e7e13 100644
--- a/docs/source/requirements.txt
+++ b/docs/source/requirements.txt
@@ -1,6 +1,7 @@
-Sphinx
-sphinx-rtd-theme
-sphinxext-remoteliteralinclude
+Sphinx==7.2.6
+sphinx-rtd-theme==1.3.0
+sphinxext-remoteliteralinclude==0.4.0
pyyaml==6.0
-pygments_lexer_solidity
-m2r2
\ No newline at end of file
+pygments_lexer_solidity==0.7.0
+m2r2==0.3.2
+
diff --git a/goSrc/src/vmCaller/iroha/commands.go b/goSrc/src/vmCaller/iroha/commands.go
index 1c3653cb961..39e41647be1 100644
--- a/goSrc/src/vmCaller/iroha/commands.go
+++ b/goSrc/src/vmCaller/iroha/commands.go
@@ -479,9 +479,7 @@ func GetAccountTransactions(accountID string, txPaginationMeta *iroha_model.TxPa
if err != nil {
return []*pb.Transaction{}, err
}
- if err != nil {
- return []*pb.Transaction{}, err
- }
+
metaPayload := MakeQueryPayloadMeta()
query := &pb.Query{Payload: &pb.Query_Payload{
Meta: &metaPayload,
diff --git a/iroha-lib/CMakeLists.txt b/iroha-lib/CMakeLists.txt
new file mode 100755
index 00000000000..c5b6ea8b3fa
--- /dev/null
+++ b/iroha-lib/CMakeLists.txt
@@ -0,0 +1,9 @@
+set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
+
+add_subdirectory(grpc_client)
+add_subdirectory(model)
+add_subdirectory(examples)
+
+target_include_directories(grpc_client PUBLIC
+ ${PROJECT_SOURCE_DIR}/iroha-lib
+)
diff --git a/iroha-lib/README.md b/iroha-lib/README.md
new file mode 100755
index 00000000000..2759d23e82b
--- /dev/null
+++ b/iroha-lib/README.md
@@ -0,0 +1,134 @@
+# Hyperledger Iroha C++ library
+
+
+**The current version of the library was tested and is compatible with Iroha 1.5.0.**
+
+The library was created to provide a convenient interface for C++ applications to communicate with [Iroha](https://github.com/hyperledger/iroha) blockchain. This includes sending transactions and queries, streaming transaction statuses and block commits.
+
+
+# iroha-lib
+
+Client library of [Iroha](https://github.com/hyperledger/iroha) written completely in modern C++.
+Currently, the latest HL Iroha 1.5 release (`hyperledger/iroha:1.5.0` Docker image) is supported.
+
+
+## Installation
+
+Follow these steps to run the project:
+
+1. Set up and run Iroha peer in a Docker container. [Follow the instructions from Iroha documentation](https://iroha.readthedocs.io/en/main/getting_started/).
+
+2. Clone this repository.
+
+3. Build the project:
+
+``` bash
+cmake --build ./build --target iroha_lib_model
+```
+
+4. Go to the `examples` directory:
+
+``` bash
+cd examples
+```
+
+5. Run the selected example:
+
+``` bash
+./tx_example
+```
+
+6. Check the logs to see if the scenario completed successfully.
+
+
+## Examples
+
+Examples describe how to establish connection with an Iroha peer which is running locally. The examples show how to create a new account and send assets to it.
+
+In the `examples` directory you can find the `TxExample.cpp`, `BatchExample.cpp` and `QueryExample.cpp` files. These files demonstrate the main features of iroha-lib. `TxExample.cpp` shows how to build a transaction with several commands, and `BatchExample.cpp` explains how to deal with batch transactions.
+Please explore [examples](https://github.com/hyperledger/iroha/tree/develop/iroha-lib/examples) directory for more usage examples.
+
+
+## GrpcClient class
+
+With GrpcClient you can create a connection with Iroha. Use one of the `send()` methods to do this.
+
+
+### Create transaction
+
+To create a transaction, you can call a command from a list of commands or create your own from scratch.
+
+``` c++
+iroha_lib::Tx(
+ account_name,
+ keypair)
+ .createDomain(
+ domain_id,
+ user_default_role)
+ .createAsset(
+ asset_name,
+ domain_id,
+ 0)
+.signAndAddSignature();
+```
+
+
+### Create batch
+
+You can send transactions in batches. To create a batch, you need a list of defined transactions. The batch will only work if all the transactions in it pass validation. If at least one transaction doesn't pass validation, the whole batch is rejected.
+
+Below is an example of creating a batch for two transactions:
+
+``` c++
+iroha_lib::TxBatch tx_batch;
+
+std::vector transactions({tx_a, tx_b});
+
+iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(
+ tx_batch
+ .batch(transactions));
+```
+
+
+## Commands
+
+- [x] [addAssetQuantity](https://iroha.readthedocs.io/en/main/develop/api/commands.html#add-asset-quantity)
+- [x] [addPeer](https://iroha.readthedocs.io/en/main/develop/api/commands.html#add-peer)
+- [x] [addSignatory](https://iroha.readthedocs.io/en/main/develop/api/commands.html#add-signatory)
+- [x] [appendRole](https://iroha.readthedocs.io/en/main/develop/api/commands.html#append-role)
+- [x] [createAccount](https://iroha.readthedocs.io/en/main/develop/api/commands.html#create-account)
+- [x] [createAsset](https://iroha.readthedocs.io/en/main/develop/api/commands.html#create-asset)
+- [x] [createDomain](https://iroha.readthedocs.io/en/main/develop/api/commands.html#create-domain)
+- [x] [createRole](https://iroha.readthedocs.io/en/main/develop/api/commands.html#create-role)
+- [x] [detachRole](https://iroha.readthedocs.io/en/main/develop/api/commands.html#detach-role)
+- [x] [grantPermission](https://iroha.readthedocs.io/en/main/develop/api/commands.html#grant-permission)
+- [x] [removeSignatory](https://iroha.readthedocs.io/en/main/develop/api/commands.html#remove-signatory)
+- [x] [revokePermission](https://iroha.readthedocs.io/en/main/develop/api/commands.html#revoke-permission)
+- [x] [setAccountDetail](https://iroha.readthedocs.io/en/main/develop/api/commands.html#set-account-detail)
+- [x] [setAccountQuorum](https://iroha.readthedocs.io/en/main/develop/api/commands.html#set-account-quorum)
+- [x] [subtractAssetQuantity](https://iroha.readthedocs.io/en/main/develop/api/commands.html#subtract-asset-quantity)
+- [x] [transferAsset](https://iroha.readthedocs.io/en/main/develop/api/commands.html#transfer-asset)
+- [x] [compareAndSetAccountDetail](https://iroha.readthedocs.io/en/main/develop/api/commands.html#compare-and-set-account-detail)
+- [x] [removePeer](https://iroha.readthedocs.io/en/main/develop/api/commands.html#remove-peer)
+
+
+## Queries
+
+- [x] [getAccount](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-account)
+- [x] [getAccountAssetTransactions](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-account-asset-transactions)
+- [x] [getAccountDetail](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-account-detail)
+- [x] [getAccountTransactions](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-account-transactions)
+- [x] [getTransactions](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-transactions)
+- [x] [getSignatories](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-signatories)
+- [x] [getAssetInfo](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-asset-info)
+- [x] [getRoles](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-roles)
+- [x] [getRolePermissions](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-role-permissions)
+- [x] [getPeers](https://iroha.readthedocs.io/en/main/develop/api/queries.html#get-peers)
+
+
+## Compatibility and release policy
+
+The `develop` branch is compatible with tagged releases of Iroha.
diff --git a/iroha-lib/examples/BatchExample.cpp b/iroha-lib/examples/BatchExample.cpp
new file mode 100755
index 00000000000..18b9898186e
--- /dev/null
+++ b/iroha-lib/examples/BatchExample.cpp
@@ -0,0 +1,96 @@
+#include
+
+#include "model/utils/Utils.h"
+#include "model/Tx.hpp"
+#include "model/TxBatch.hpp"
+
+
+iroha::protocol::Transaction generateTransactionWhichCreatesDomainAndAsset(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name)
+{
+ auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+
+ return iroha_lib::Tx(
+ account_name,
+ keypair)
+ .createDomain(
+ domain_id,
+ user_default_role)
+ .createAsset(
+ asset_name,
+ domain_id,
+ 0)
+ .signAndAddSignature();
+}
+
+
+void sendSampleBatchTransaction(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::string& user_default_role)
+{
+ const auto tx_a = generateTransactionWhichCreatesDomainAndAsset(
+ account_name,
+ key_path,
+ "domainsamplev2",
+ user_default_role,
+ "assetnamesamplev2");
+ const auto tx_b = generateTransactionWhichCreatesDomainAndAsset(
+ account_name,
+ key_path,
+ "domainsamplev3",
+ user_default_role,
+ "assetnamesamplev3");
+
+ iroha_lib::TxBatch tx_batch;
+ std::vector transactions({tx_a, tx_b});
+ iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(
+ tx_batch
+ .batch(transactions));
+ printTransactionStatuses(
+ peer_ip,
+ torii_port,
+ transactions);
+}
+
+
+void run(const std::string& key_path)
+{
+ auto account_name = "admin@test";
+ const auto peer_ip = "127.0.0.1";
+ uint16_t torii_port = 50051;
+ const auto user_default_role = "user";
+
+ sendSampleBatchTransaction(
+ account_name,
+ key_path,
+ peer_ip,
+ torii_port,
+ user_default_role);
+}
+
+
+int main(int argc, char** argv)
+{
+ if (argc > 1 && argc < 3) {
+ run(argv[1]);
+ } else {
+ std::cout << "Usage: " << argv[0] << " key_path\n";
+ }
+ return 0;
+}
diff --git a/iroha-lib/examples/CMakeLists.txt b/iroha-lib/examples/CMakeLists.txt
new file mode 100644
index 00000000000..76bbdf799f1
--- /dev/null
+++ b/iroha-lib/examples/CMakeLists.txt
@@ -0,0 +1,55 @@
+if (APPLE)
+ message("iroha-lib does not support MacOS currently!")
+else()
+ ###### irohalib_tx_example:
+ add_executable(irohalib_tx_example
+ TxExample.cpp
+ )
+
+ target_link_libraries(irohalib_tx_example
+ iroha_lib_model
+ logger_manager
+ )
+
+ add_install_step_for_bin(irohalib_tx_example)
+
+
+ ###### irohalib_batch_example:
+ add_executable(irohalib_batch_example
+ BatchExample.cpp
+ )
+
+ target_link_libraries(irohalib_batch_example
+ iroha_lib_model
+ logger_manager
+ )
+
+ add_install_step_for_bin(irohalib_batch_example)
+
+
+ ###### irohalib_query_example:
+ add_executable(irohalib_query_example
+ QueryExample.cpp
+ )
+
+ target_link_libraries(irohalib_query_example
+ iroha_lib_model
+ logger_manager
+ )
+
+ add_install_step_for_bin(irohalib_query_example)
+
+
+ ###### irohalib_domainassetcreation_example:
+ add_executable(irohalib_domainassetcreation_example
+ DomainAssetCreation.cpp
+ )
+
+ target_link_libraries(irohalib_domainassetcreation_example
+ iroha_lib_model
+ logger_manager
+ gflags
+ )
+
+ add_install_step_for_bin(irohalib_domainassetcreation_example)
+endif()
diff --git a/iroha-lib/examples/DomainAssetCreation.cpp b/iroha-lib/examples/DomainAssetCreation.cpp
new file mode 100755
index 00000000000..1b7b92d0b25
--- /dev/null
+++ b/iroha-lib/examples/DomainAssetCreation.cpp
@@ -0,0 +1,274 @@
+#include
+#include
+#include
+#include
+#include "model/Tx.hpp"
+#include "model/Query.hpp"
+#include "model/utils/Utils.h"
+
+
+/// Command line options:
+DEFINE_string(admin_account_name, "admin@test", "set the admin account name. The account will be used to create domain and asset");
+DEFINE_string(key_path, ".", "set the key path. Here should be private and public key pair for admin");
+DEFINE_string(peer_ip, "127.0.0.1", "set the peer IP address. It is address of Iroha node");
+DEFINE_uint32(torii_port, 50051u, "set the torii port. Port of iroha node to send commands and queries.");
+DEFINE_string(user_default_role, "user", "set the user default role for newly created domain");
+DEFINE_string(asset_full_name, "assetnamesamplev4#domainsamplev4", "set the asset full name (format asset_name#domain)");
+
+
+iroha_lib::Query generateQueryBase(const std::string& account_name, const std::string& key_path);
+iroha::protocol::Query generateGetAccountAssetsQuery(const std::string& account_name, const std::string& key_path);
+iroha::protocol::Query generateGetAccountTransactionsQuery(const std::string& account_name, const std::string& key_path);
+iroha::protocol::Query generateGetAccountQuery(const std::string& account_name, const std::string& key_path);
+
+template
+void sendTransaction(Tx& tx, const std::string& peer_ip, const uint16_t torii_port);
+
+iroha::protocol::Transaction generateTransactionWhichCreatesDomainAndAsset(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name);
+iroha::protocol::Transaction generateTransactionWhichAddsAssetQuantiti(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& assetIdWithDomain,
+ const std::string& assetAmount);
+
+void printAccountAssets(const std::string& account_name, const std::string& key_path, const std::string& peer_ip, const uint16_t torii_port);
+void printAccount(const std::string& account_name, const std::string& key_path, const std::string& peer_ip, const uint16_t torii_port);
+
+void printErrorResponse(const QueryResponse& response);
+
+void run(const std::string& adminAccountName, const std::string& key_path,
+ const std::string& peer_ip, uint16_t torii_port,
+ const std::string& user_default_role,
+ const std::string& assetFullName);
+
+
+int main(int argc, char* argv[]) try {
+ gflags::SetUsageMessage("Usage: " + std::string(argv[0]));
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ run(FLAGS_admin_account_name,
+ FLAGS_key_path,
+ FLAGS_peer_ip,
+ FLAGS_torii_port,
+ FLAGS_user_default_role,
+ FLAGS_asset_full_name);
+
+ gflags::ShutDownCommandLineFlags();
+} catch (const std::exception& e) {
+ std::cerr << fmt::format("Exception from {}: '{}'\n", __FUNCTION__, e.what());
+}
+
+void run(const std::string& adminAccountName, const std::string& key_path,
+ const std::string& peer_ip, uint16_t torii_port,
+ const std::string& user_default_role,
+ const std::string& assetFullName)
+{
+ const auto [assetName, assetDomain] = splitAssetFullName(assetFullName);
+
+ const auto tx2CreateDomainAndAsset = generateTransactionWhichCreatesDomainAndAsset(
+ adminAccountName,
+ key_path,
+ assetDomain,
+ user_default_role,
+ assetName);
+
+ const auto tx2AddAssetQuantity = generateTransactionWhichAddsAssetQuantiti(
+ adminAccountName,
+ key_path,
+ assetFullName,
+ "100");
+
+ sendTransaction(tx2CreateDomainAndAsset, peer_ip, torii_port);
+ sendTransaction(tx2AddAssetQuantity, peer_ip, torii_port);
+
+ /// querying:
+ printAccountAssets(adminAccountName, key_path, peer_ip, torii_port);
+ printAccount(adminAccountName, key_path, peer_ip, torii_port);
+}
+
+iroha::protocol::Transaction generateTransactionWhichCreatesDomainAndAsset(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name)
+{
+ static auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+
+ return iroha_lib::Tx(
+ account_name,
+ keypair)
+ .createDomain(
+ domain_id,
+ user_default_role)
+ .createAsset(
+ asset_name,
+ domain_id,
+ 0)
+ .signAndAddSignature();
+}
+
+iroha::protocol::Transaction generateTransactionWhichAddsAssetQuantiti(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& assetIdWithDomain,
+ const std::string& assetAmount)
+{
+ static auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+
+ return iroha_lib::Tx(
+ account_name,
+ keypair)
+ .addAssetQuantity(assetIdWithDomain, assetAmount)
+ .signAndAddSignature();
+}
+
+template
+void sendTransaction(
+ Tx& tx,
+ const std::string& peer_ip,
+ const uint16_t torii_port)
+{
+ iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(tx);
+
+ printTransactionStatus(
+ peer_ip,
+ torii_port,
+ getTransactionHash(tx));
+}
+
+void printAccountAssets(const std::string& account_name,
+ const std::string& key_path,
+ const std::string& peer_ip,
+ const uint16_t torii_port)
+{
+ fmt::print("----------->{}-----------\n", __FUNCTION__);
+
+ const auto query_proto = generateGetAccountAssetsQuery(
+ account_name,
+ key_path);
+ const QueryResponse response = iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(query_proto);
+ const auto payload = query_proto.payload();
+ assert(payload.get_account_assets().account_id() == account_name);
+ assert(payload.has_get_account_assets());
+
+ if (response.has_error_response())
+ {
+ printErrorResponse(response);
+ return;
+ }
+
+ assert(response.has_account_assets_response());
+ const auto accountAssetsResponce = response.account_assets_response();
+ for (const auto& r : accountAssetsResponce.account_assets())
+ {
+ fmt::print("\tasset: {} {}\n", r.asset_id(), r.balance());
+ }
+
+ fmt::print("-----------<{}-----------\n", __FUNCTION__);
+}
+
+iroha::protocol::Query generateGetAccountAssetsQuery(const std::string& account_name,
+ const std::string& key_path)
+{
+ return generateQueryBase(account_name, key_path)
+ .getAccountAssets(account_name)
+ .signAndAddSignature();
+}
+
+iroha_lib::Query generateQueryBase(
+ const std::string& account_name,
+ const std::string& key_path)
+{
+ static auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+ return iroha_lib::Query(keypair);
+}
+
+iroha::protocol::Query generateGetAccountTransactionsQuery(
+ const std::string& account_name,
+ const std::string& key_path)
+{
+ return generateQueryBase(account_name, key_path)
+ .getAccountTransactions(account_name)
+ .signAndAddSignature();
+}
+
+iroha::protocol::Query generateGetAccountQuery(
+ const std::string& account_name,
+ const std::string& key_path)
+{
+ return generateQueryBase(account_name, key_path)
+ .getAccount(account_name)
+ .signAndAddSignature();
+}
+
+void printErrorResponse(const QueryResponse& response)
+{
+ const auto errorResponse = response.error_response();
+ std::cerr << fmt::format("{}: {}", errorResponse.error_code(), errorResponse.message()) << std::endl;
+}
+
+void printAccount(const std::string& account_name,
+ const std::string& key_path,
+ const std::string& peer_ip,
+ const uint16_t torii_port)
+{
+ fmt::print("----------->{}-----------\n", __FUNCTION__);
+
+ const auto query_proto = generateGetAccountQuery(
+ account_name,
+ key_path);
+ const QueryResponse response = iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(query_proto);
+ const auto payload = query_proto.payload();
+ assert(payload.get_account().account_id() == account_name);
+ assert(payload.has_get_account());
+
+ if (response.has_error_response())
+ {
+ printErrorResponse(response);
+ return;
+ }
+
+ assert(response.has_account_response());
+ const auto account = response.account_response().account();
+ fmt::print("account_id={},\n"
+ "domain_id={}\n"
+ "quorum={}\n"
+ "json_data={}\n",
+ account.account_id(), account.domain_id(), account.quorum(), account.json_data());
+
+ fmt::print("-----------<{}-----------\n", __FUNCTION__);
+}
diff --git a/iroha-lib/examples/QueryExample.cpp b/iroha-lib/examples/QueryExample.cpp
new file mode 100755
index 00000000000..bd2c2587a25
--- /dev/null
+++ b/iroha-lib/examples/QueryExample.cpp
@@ -0,0 +1,180 @@
+#include
+#include
+
+#include "model/Query.hpp"
+#include "model/Tx.hpp"
+#include "model/utils/Utils.h"
+
+
+iroha_lib::Query generateSampleQuery(
+ const std::string& account_name,
+ const std::string& key_path,
+ uint64_t counter=0u)
+{
+ auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+ return iroha_lib::Query(
+ keypair,
+ counter);
+}
+
+
+iroha::protocol::Query generateGetAccountAssetsQuery(
+ const std::string& account_name,
+ const std::string& key_path,
+ uint64_t counter=0u)
+{
+ return generateSampleQuery(
+ account_name,
+ key_path,
+ counter)
+ .getAccountAssets(account_name)
+ .signAndAddSignature();
+}
+
+
+iroha::protocol::Query generateGetAccountTransactionsQuery(
+ const std::string& account_name,
+ const std::string& key_path,
+ uint64_t counter=0u)
+{
+ return generateSampleQuery(account_name, key_path, counter)
+ .getAccountTransactions(account_name)
+ .signAndAddSignature();
+}
+
+
+iroha::protocol::Transaction generateTransactionWhichCreatesDomainAndAsset(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name)
+{
+ auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+
+ return iroha_lib::Tx(
+ account_name,
+ keypair)
+ .createDomain(
+ domain_id,
+ user_default_role)
+ .createAsset(
+ asset_name,
+ domain_id,
+ 0)
+ .signAndAddSignature();
+}
+
+
+void sendSampleTransaction(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name)
+{
+ const auto tx_proto = generateTransactionWhichCreatesDomainAndAsset(
+ account_name,
+ key_path,
+ domain_id,
+ user_default_role,
+ asset_name);
+
+ iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(tx_proto);
+ printTransactionStatus(
+ peer_ip,
+ torii_port,
+ getTransactionHash(tx_proto));
+}
+
+
+void runQueryWithSingleTransactionGenerated(const std::string& key_path)
+{
+ auto account_name = "admin@test";
+ const auto peer_ip = "127.0.0.1";
+ uint16_t torii_port = 50051;
+ const auto user_default_role = "user";
+
+ sendSampleTransaction(
+ account_name,
+ key_path,
+ peer_ip,
+ torii_port,
+ "domainsamplev4",
+ user_default_role,
+ "assetnamesamplev4");
+
+ const auto query_proto = generateGetAccountAssetsQuery(
+ account_name,
+ key_path);
+ iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(query_proto);
+ assert(query_proto.payload().get_account_assets().account_id() == account_name);
+}
+
+
+void runQueryWithMultiplyTransactionsGenerated(const std::string& key_path)
+{
+ auto account_name = "admin@test";
+ const auto peer_ip = "127.0.0.1";
+ uint16_t torii_port = 50051;
+ const auto user_default_role = "user";
+
+ for(uint8_t txCounter = 4u; txCounter; --txCounter)
+ {
+ sendSampleTransaction(
+ account_name,
+ key_path,
+ peer_ip,
+ torii_port,
+ "domainsamplequeryv" + std::to_string(txCounter),
+ user_default_role,
+ "assetnamesamplequeryv" + std::to_string(txCounter));
+ }
+
+ const auto query_proto = generateGetAccountTransactionsQuery(
+ account_name,
+ key_path);
+ iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(query_proto);
+ assert(query_proto.payload().get_account_transactions().account_id() == account_name);
+}
+
+
+void run(const std::string& key_path)
+{
+ runQueryWithSingleTransactionGenerated(key_path);
+ runQueryWithMultiplyTransactionsGenerated(key_path);
+}
+
+
+int main(int argc, char** argv)
+{
+ if (argc > 1 && argc < 3) {
+ run(argv[1]);
+ } else {
+ std::cout << "Usage: " << argv[0] << " key_path\n";
+ }
+ return 0;
+}
diff --git a/iroha-lib/examples/TxExample.cpp b/iroha-lib/examples/TxExample.cpp
new file mode 100755
index 00000000000..0e13e4bf55d
--- /dev/null
+++ b/iroha-lib/examples/TxExample.cpp
@@ -0,0 +1,91 @@
+#include
+
+#include "model/Tx.hpp"
+#include "model/utils/Utils.h"
+
+
+iroha::protocol::Transaction generateTransactionWhichCreatesDomainAndAsset(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name)
+{
+ auto log_manager = std::make_shared(
+ logger::LoggerConfig{logger::LogLevel::kInfo,
+ logger::getDefaultLogPatterns()})->getChild("CLI");
+ const auto keypair = generateKeypair(
+ account_name,
+ key_path,
+ log_manager);
+
+ return iroha_lib::Tx(
+ account_name,
+ keypair)
+ .createDomain(
+ domain_id,
+ user_default_role)
+ .createAsset(
+ asset_name,
+ domain_id,
+ 0)
+ .signAndAddSignature();
+}
+
+
+void sendSampleTransaction(
+ const std::string& account_name,
+ const std::string& key_path,
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::string& domain_id,
+ const std::string& user_default_role,
+ const std::string& asset_name)
+{
+ const auto tx_proto = generateTransactionWhichCreatesDomainAndAsset(
+ account_name,
+ key_path,
+ domain_id,
+ user_default_role,
+ asset_name);
+
+ iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .send(tx_proto);
+
+ const auto tx_hash = getTransactionHash(tx_proto);
+ printTransactionStatus(
+ peer_ip,
+ torii_port,
+ tx_hash);
+}
+
+
+void run(const std::string& key_path)
+{
+ auto account_name = "admin@test";
+ const auto peer_ip = "127.0.0.1";
+ uint16_t torii_port = 50051;
+ const auto user_default_role = "user";
+
+ sendSampleTransaction(
+ account_name,
+ key_path,
+ peer_ip,
+ torii_port,
+ "domainsamplev1",
+ user_default_role,
+ "assetnamesamplev1");
+}
+
+
+int main(int argc, char** argv)
+{
+ if (argc > 1 && argc < 3) {
+ run(argv[1]);
+ } else {
+ std::cout << "Usage: " << argv[0] << " key_path\n";
+ }
+ return 0;
+}
diff --git a/iroha-lib/grpc_client/CMakeLists.txt b/iroha-lib/grpc_client/CMakeLists.txt
new file mode 100755
index 00000000000..2d3a7365263
--- /dev/null
+++ b/iroha-lib/grpc_client/CMakeLists.txt
@@ -0,0 +1,11 @@
+add_library(grpc_client
+ GrpcClient.hpp
+ GrpcClient.cpp
+)
+
+target_link_libraries(grpc_client
+ grpc_channel_factory
+ model_generators
+ command_client
+ query_client
+)
diff --git a/iroha-lib/grpc_client/GrpcClient.cpp b/iroha-lib/grpc_client/GrpcClient.cpp
new file mode 100755
index 00000000000..3cae5f36671
--- /dev/null
+++ b/iroha-lib/grpc_client/GrpcClient.cpp
@@ -0,0 +1,86 @@
+#include "GrpcClient.hpp"
+
+
+namespace iroha_lib {
+
+template
+std::shared_ptr createUnauthenticatedChanel(const std::string& address)
+{
+ return grpc::CreateChannel(
+ address,
+ grpc::InsecureChannelCredentials());
+}
+
+
+template
+std::unique_ptr createClient(const std::string& address)
+{
+ return Service::NewStub(createUnauthenticatedChanel(address));
+}
+
+
+template
+std::unique_ptr createClient(
+ const std::string& ip,
+ const size_t port)
+{
+ const auto peer_ip = ip + ":" + std::to_string(port);
+ return createClient(peer_ip);
+}
+
+
+GrpcClient::GrpcClient(
+ const std::string& target_ip,
+ const uint16_t port)
+ : command_stub_(createClient(
+ target_ip,
+ port)),
+ query_stub_(createClient(
+ target_ip,
+ port))
+{}
+
+grpc::Status GrpcClient::send(const Transaction& tx)
+{
+ google::protobuf::Empty empty;
+ grpc::ClientContext context;
+ return command_stub_->Torii(
+ &context,
+ tx,
+ &empty);
+}
+
+grpc::Status GrpcClient::send(const TxList& tx_list)
+{
+ google::protobuf::Empty empty;
+ grpc::ClientContext context;
+ return command_stub_->ListTorii(
+ &context,
+ tx_list,
+ &empty);
+}
+
+QueryResponse GrpcClient::send(const iroha::protocol::Query& query)
+{
+ QueryResponse queryResponse;
+ grpc::ClientContext context;
+ query_stub_->Find(
+ &context,
+ query,
+ &queryResponse);
+ return queryResponse;
+}
+
+ToriiResponse GrpcClient::getTxStatus(const std::string& tx_hash)
+{
+ TxStatusRequest statusRequest;
+ statusRequest.set_tx_hash(tx_hash);
+ ToriiResponse toriiResponse;
+ grpc::ClientContext context;
+ command_stub_->Status(
+ &context,
+ statusRequest,
+ &toriiResponse);
+ return toriiResponse;
+}
+} // namespace iroha_lib
diff --git a/iroha-lib/grpc_client/GrpcClient.hpp b/iroha-lib/grpc_client/GrpcClient.hpp
new file mode 100755
index 00000000000..a735a02eaf1
--- /dev/null
+++ b/iroha-lib/grpc_client/GrpcClient.hpp
@@ -0,0 +1,27 @@
+#pragma once
+
+#include
+#include
+
+
+namespace iroha_lib {
+
+using namespace iroha::protocol;
+
+class GrpcClient {
+
+public:
+ GrpcClient(
+ const std::string& target_ip,
+ const uint16_t port);
+ grpc::Status send(const Transaction& tx);
+ grpc::Status send(const TxList& tx_list);
+ QueryResponse send(const iroha::protocol::Query& query);
+ ToriiResponse getTxStatus(const std::string& tx_hash);
+
+private:
+ std::shared_ptr command_stub_;
+ std::shared_ptr query_stub_;
+};
+
+}
diff --git a/iroha-lib/model/CMakeLists.txt b/iroha-lib/model/CMakeLists.txt
new file mode 100644
index 00000000000..5590abf0e94
--- /dev/null
+++ b/iroha-lib/model/CMakeLists.txt
@@ -0,0 +1,21 @@
+add_library(iroha_lib_model
+ Query.hpp
+ Query.cpp
+ Tx.hpp
+ Tx.cpp
+ TxBatch.hpp
+ generators/CommandGenerator.hpp
+ generators/CommandGenerator.cpp
+ generators/QueryGenerator.hpp
+ generators/QueryGenerator.cpp
+ utils/Utils.h
+ utils/Utils.cpp
+)
+
+target_link_libraries(iroha_lib_model
+ grpc_client
+)
+
+target_include_directories(iroha_lib_model PUBLIC
+ ${PROJECT_SOURCE_DIR}/iroha-lib
+)
diff --git a/iroha-lib/model/Query.cpp b/iroha-lib/model/Query.cpp
new file mode 100644
index 00000000000..7e1847157a4
--- /dev/null
+++ b/iroha-lib/model/Query.cpp
@@ -0,0 +1,161 @@
+#include "cryptography/ed25519_sha3_impl/internal/ed25519_impl.hpp"
+#include "Query.hpp"
+#include "model/converters/pb_common.hpp"
+
+
+namespace iroha_lib {
+
+Query::Query(
+ const iroha::keypair_t& keypair,
+ uint64_t counter,
+ uint64_t created_time) noexcept
+ : counter_(counter),
+ created_time_(created_time),
+ keypair_(keypair)
+{}
+
+Query& Query::getAccount(const std::string& account_id)
+{
+ protobuf_query_ = *query_generator_.generateGetAccount(
+ account_id,
+ counter_,
+ created_time_);
+ return *this;
+}
+
+Query& Query::getAccountAssets(const std::string& account_id)
+{
+ protobuf_query_ = *query_generator_.generateGetAccountAssets(
+ account_id,
+ counter_,
+ created_time_);
+ return *this;
+}
+
+Query& Query::getAccountDetail(const std::string& account_id)
+{
+ protobuf_query_ = *query_generator_.generateGetAccountDetail(
+ account_id,
+ counter_,
+ created_time_);
+ return *this;
+}
+
+Query& Query::getAccountTransactions(const std::string& account_id,
+ const std::optional& first_tx_hash,
+ const std::optional& first_tx_time,
+ const std::optional& last_tx_time,
+ const std::optional first_tx_height,
+ const std::optional last_tx_height)
+{
+ protobuf_query_ = *query_generator_.generateGetAccountTransactions(
+ account_id,
+ counter_,
+ created_time_,
+ first_tx_hash,
+ first_tx_time,
+ last_tx_time,
+ first_tx_height,
+ last_tx_height);
+ return *this;
+}
+
+Query& Query::getAccountAssetTransactions(const std::string& account_id,
+ const std::string& asset_id,
+ const std::optional& first_tx_hash,
+ const std::optional& first_tx_time,
+ const std::optional& last_tx_time,
+ const std::optional first_tx_height,
+ const std::optional last_tx_height)
+{
+ protobuf_query_ = *query_generator_.generateGetAccountAssetTransactions(
+ account_id,
+ counter_,
+ created_time_,
+ asset_id,
+ first_tx_hash,
+ first_tx_time,
+ last_tx_time,
+ first_tx_height,
+ last_tx_height);
+ return *this;
+}
+
+Query& Query::getTransactions(
+ const std::string& account_id,
+ const std::vector& tx_hashes)
+{
+ protobuf_query_ = *query_generator_.generateGetTransactions(
+ account_id,
+ counter_,
+ created_time_,
+ tx_hashes);
+ return *this;
+}
+
+Query& Query::getSignatories(const std::string& account_id)
+{
+ protobuf_query_ = *query_generator_.generateGetSignatories(
+ account_id,
+ counter_,
+ created_time_);
+ return *this;
+}
+
+Query& Query::getAssetInfo(
+ const std::string& account_id,
+ const std::string& asset_id)
+{
+ protobuf_query_ = *query_generator_.generateGetAssetInfo(
+ account_id,
+ counter_,
+ created_time_,
+ asset_id);
+ return *this;
+}
+
+Query& Query::getRoles(const std::string& account_id)
+{
+ protobuf_query_ = *query_generator_.generateGetRoles(
+ account_id,
+ counter_,
+ created_time_);
+ return *this;
+}
+
+Query& Query::getRolePermissions(
+ const std::string& account_id,
+ const std::string& role_id)
+{
+ protobuf_query_ = *query_generator_.generateGetRolePermissions(
+ account_id,
+ counter_,
+ created_time_,
+ role_id);
+ return *this;
+}
+
+Query& Query::getPeers(const std::string& account_id)
+{
+ protobuf_query_ = *query_generator_.generateGetPeers(
+ account_id,
+ counter_,
+ created_time_);
+ return *this;
+}
+
+
+const iroha::protocol::Query Query::signAndAddSignature()
+{
+ auto signature = iroha::sign(
+ iroha::hash(protobuf_query_).to_string(),
+ keypair_.pubkey,
+ keypair_.privkey);
+
+ auto sig = protobuf_query_.mutable_signature();
+ sig->set_signature(signature.to_hexstring());
+ sig->set_public_key(keypair_.pubkey.to_hexstring());
+ return protobuf_query_;
+}
+
+}
diff --git a/iroha-lib/model/Query.hpp b/iroha-lib/model/Query.hpp
new file mode 100644
index 00000000000..1e59e0b51a6
--- /dev/null
+++ b/iroha-lib/model/Query.hpp
@@ -0,0 +1,56 @@
+#pragma once
+
+#include "crypto/keypair.hpp"
+#include "queries.pb.h"
+#include "generators/QueryGenerator.hpp"
+
+
+namespace iroha_lib {
+
+class Query {
+public:
+ Query(const iroha::keypair_t& keypair,
+ uint64_t counter = 1u,
+ uint64_t created_time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count()) noexcept;
+
+ Query& getAccount(const std::string& account_id);
+ Query& getAccountAssets(const std::string& account_id);
+ Query& getAccountDetail(const std::string& account_id);
+ Query& getAccountTransactions(const std::string& account_id,
+ const std::optional& first_tx_hash={},
+ const std::optional& first_tx_time={},
+ const std::optional& last_tx_time={},
+ const std::optional first_tx_height={},
+ const std::optional last_tx_height={});
+ Query& getAccountAssetTransactions(
+ const std::string& account_id,
+ const std::string& asset_id,
+ const std::optional& first_tx_hash={},
+ const std::optional& first_tx_time={},
+ const std::optional& last_tx_time={},
+ const std::optional first_tx_height={},
+ const std::optional last_tx_height={});
+ Query& getTransactions(
+ const std::string& account_id,
+ const std::vector& tx_hashes);
+ Query& getSignatories(const std::string& account_id);
+ Query& getAssetInfo(
+ const std::string& account_id,
+ const std::string& asset_id);
+ Query& getRoles(const std::string& account_id);
+ Query& getRolePermissions(
+ const std::string& account_id,
+ const std::string& role_id);
+ Query& getPeers(const std::string& account_id);
+
+ const iroha::protocol::Query signAndAddSignature();
+
+private:
+ uint64_t counter_;
+ uint64_t created_time_;
+ iroha::protocol::Query protobuf_query_;
+ iroha::keypair_t keypair_;
+ QueryGenerator query_generator_;
+};
+
+} // namespace iroha_lib
diff --git a/iroha-lib/model/Tx.cpp b/iroha-lib/model/Tx.cpp
new file mode 100644
index 00000000000..416c4c11747
--- /dev/null
+++ b/iroha-lib/model/Tx.cpp
@@ -0,0 +1,250 @@
+#include "Tx.hpp"
+
+#include
+#include
+
+#include "grpc_client/GrpcClient.hpp"
+#include "model/converters/pb_common.hpp"
+#include "cryptography/ed25519_sha3_impl/internal/ed25519_impl.hpp"
+
+
+namespace iroha_lib {
+
+void Tx::addCommand(const iroha::protocol::Command& command)
+{
+ auto payload = protobuf_transaction_.mutable_payload()->mutable_reduced_payload();
+ auto cmd = payload->add_commands();
+ new (cmd)
+ iroha::protocol::Command(command);
+}
+
+Tx& Tx::addAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount)
+{
+ auto cmd = cmd_generator_.generateAddAssetQuantity(
+ asset_id,
+ amount);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::addPeer(
+ const std::string& address,
+ const std::string& pubkey,
+ const std::optional& tls_certificate,
+ bool syncing_peer)
+{
+ auto cmd = cmd_generator_.generateAddPeer(
+ address,
+ pubkey,
+ tls_certificate,
+ syncing_peer);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::addSignatory(
+ const std::string& account_id,
+ const std::string& pubkey)
+{
+ auto cmd = cmd_generator_.generateAddSignatory(
+ account_id,
+ pubkey);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::appendRole(
+ const std::string& account_id,
+ const std::string& role_name)
+{
+ auto cmd = cmd_generator_.generateAppendRole(
+ account_id,
+ role_name);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::createAccount(
+ const std::string& account_name,
+ const std::string& domain_id,
+ const std::string& pubkey)
+{
+ auto cmd = cmd_generator_.generateCreateAccount(
+ account_name,
+ domain_id,
+ pubkey);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::createAsset(
+ const std::string& asset_name,
+ const std::string& domain_id,
+ uint32_t precision)
+{
+ auto cmd = cmd_generator_.generateCreateAsset(
+ asset_name,
+ domain_id,
+ precision);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::createDomain(
+ const std::string& domain_id,
+ const std::string& user_default_role)
+{
+ auto cmd = cmd_generator_.generateCreateDomain(
+ domain_id,
+ user_default_role);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::createRole(
+ const std::string& roleName,
+ const std::unordered_set& permissions)
+{
+ auto cmd = cmd_generator_.generateCreateRole(
+ roleName,
+ permissions);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::detachRole(
+ const std::string& account_id,
+ const std::string& role_name)
+{
+ auto cmd = cmd_generator_.generateDetachRole(
+ account_id,
+ role_name);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::grantPermission(
+ const std::string& account_id,
+ const iroha::protocol::GrantablePermission permission)
+{
+ auto cmd = cmd_generator_.generateGrantPermission(
+ account_id,
+ permission);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::removePeer(const std::string& pubkey)
+{
+ auto cmd = cmd_generator_.generateRemovePeer(pubkey);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::removeSignatory(
+ const std::string& account_id,
+ const std::string& pubkey)
+{
+ auto cmd = cmd_generator_.generateRemoveSignatory(
+ account_id,
+ pubkey);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::revokePermission(
+ const std::string& account_id,
+ const iroha::protocol::GrantablePermission permission)
+{
+ auto cmd = cmd_generator_.generateGrantPermission(
+ account_id,
+ permission);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::setAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value)
+{
+ auto cmd = cmd_generator_.generateSetAccountDetail(
+ account_id,
+ key,
+ value);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::setAccountQuorum(
+ const std::string& account_id,
+ uint32_t quorum)
+{
+ auto cmd = cmd_generator_.generateSetAccountQuorum(
+ account_id,
+ quorum);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::subtractAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount)
+{
+ auto cmd = cmd_generator_.generateSubtractAssetQuantity(
+ asset_id,
+ amount);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::transferAsset(
+ const std::string& account_id,
+ const std::string& dest_account_id,
+ const std::string& asset_id,
+ const std::string& description,
+ const std::string& amount)
+{
+ auto cmd = cmd_generator_.generateTransferAsset(
+ account_id,
+ dest_account_id,
+ asset_id,
+ description,
+ amount);
+ addCommand(*cmd);
+ return *this;
+}
+
+Tx& Tx::compareAndSetAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value,
+ const std::optional& old_value,
+ bool check_empty)
+{
+ auto cmd = cmd_generator_.generateCompareAndSetAccountDetail(
+ account_id,
+ key,
+ value,
+ old_value,
+ check_empty);
+ addCommand(*cmd);
+ return *this;
+}
+
+const iroha::protocol::Transaction Tx::signAndAddSignature()
+{
+ auto signature = iroha::sign(
+ iroha::hash(protobuf_transaction_).to_string(),
+ keypair_.pubkey,
+ keypair_.privkey);
+
+ auto proto_signature = protobuf_transaction_.add_signatures();
+ proto_signature->set_public_key(keypair_.pubkey.to_hexstring());
+ proto_signature->set_signature(signature.to_hexstring());
+ return protobuf_transaction_;
+}
+
+}
diff --git a/iroha-lib/model/Tx.hpp b/iroha-lib/model/Tx.hpp
new file mode 100644
index 00000000000..7e3b74cd5b3
--- /dev/null
+++ b/iroha-lib/model/Tx.hpp
@@ -0,0 +1,105 @@
+#ifndef TX_HPP
+#define TX_HPP
+
+#include "transaction.pb.h"
+#include
+#include "crypto/keypair.hpp"
+#include "generators/CommandGenerator.hpp"
+
+
+namespace iroha_lib {
+
+class Tx {
+
+private:
+ iroha::keypair_t keypair_;
+ iroha::protocol::Transaction protobuf_transaction_;
+ CommandGenerator cmd_generator_;
+
+public:
+ explicit Tx(
+ const std::string& account_id,
+ const iroha::keypair_t& keypair,
+ uint64_t created_time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(),
+ uint32_t quorum = 1)
+ : keypair_(keypair)
+ {
+ auto payload = protobuf_transaction_.mutable_payload()->mutable_reduced_payload();
+ payload->set_created_time(created_time);
+ payload->set_creator_account_id(account_id);
+ payload->set_quorum(quorum);
+ }
+
+ void addCommand(const iroha::protocol::Command& command);
+
+ Tx& addAssetQuantity(
+ const std::string& account_id,
+ const std::string& role_name);
+ Tx& addPeer(
+ const std::string& address,
+ const std::string& pubkey,
+ const std::optional& tls_certificate = {},
+ bool syncing_peer = false);
+ Tx& addSignatory(
+ const std::string& account_id,
+ const std::string& pubkey);
+ Tx& appendRole(
+ const std::string& account_id,
+ const std::string& role_name);
+ Tx& createAccount(
+ const std::string& account_name,
+ const std::string& domain_id,
+ const std::string& pubkey);
+ Tx& createAsset(
+ const std::string& asset_name,
+ const std::string& domain_id,
+ uint32_t precision);
+ Tx& createDomain(
+ const std::string& domain_id,
+ const std::string& user_default_role);
+ Tx& createRole(
+ const std::string& roleName,
+ const std::unordered_set& permissions);
+ Tx& detachRole(
+ const std::string& account_id,
+ const std::string& role_name);
+ Tx& grantPermission(
+ const std::string& account_id,
+ const iroha::protocol::GrantablePermission permission);
+ Tx& removePeer(
+ const std::string& pubkey);
+ Tx& removeSignatory(
+ const std::string& account_id,
+ const std::string& pubkey);
+ Tx& revokePermission(
+ const std::string& account_id,
+ const iroha::protocol::GrantablePermission permission);
+ Tx& setAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value);
+ Tx& setAccountQuorum(
+ const std::string& account_id,
+ uint32_t quorum);
+ Tx& subtractAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount);
+ Tx& transferAsset(
+ const std::string& account_id,
+ const std::string& dest_account_id,
+ const std::string& asset_id,
+ const std::string& description,
+ const std::string& amount);
+ Tx& compareAndSetAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value,
+ const std::optional& old_value,
+ bool check_empty);
+
+ const iroha::protocol::Transaction signAndAddSignature();
+};
+
+}
+
+#endif
diff --git a/iroha-lib/model/TxBatch.cpp b/iroha-lib/model/TxBatch.cpp
new file mode 100644
index 00000000000..6bcc94ae077
--- /dev/null
+++ b/iroha-lib/model/TxBatch.cpp
@@ -0,0 +1,39 @@
+#include "TxBatch.hpp"
+
+#include "model/converters/pb_common.hpp"
+#include "transaction.pb.h"
+#include "primitive.pb.h"
+
+
+namespace iroha_lib {
+
+using namespace iroha::protocol;
+using iroha::protocol::Transaction_Payload_BatchMeta_BatchType;
+
+Transaction_Payload_BatchMeta_BatchType TxBatch::getBatchType(bool atomic) const
+{
+ return atomic ? Transaction_Payload_BatchMeta_BatchType_ATOMIC
+ : Transaction_Payload_BatchMeta_BatchType_ORDERED;
+}
+
+TxList TxBatch::batch(std::vector& transactions, bool atomic)
+{
+ TxList tx_list;
+
+ if (atomic) {
+ Transaction::Payload::BatchMeta meta;
+ meta.set_type(getBatchType(atomic));
+
+ for (auto& tx: transactions) {
+ tx.payload().batch().New()->CopyFrom(meta);
+ *tx_list.add_transactions() = tx;
+ }
+ } else {
+ for (const auto& tx: transactions) {
+ *tx_list.add_transactions() = tx;
+ }
+ }
+ return tx_list;
+}
+
+}
diff --git a/iroha-lib/model/TxBatch.hpp b/iroha-lib/model/TxBatch.hpp
new file mode 100644
index 00000000000..120d45a81e7
--- /dev/null
+++ b/iroha-lib/model/TxBatch.hpp
@@ -0,0 +1,45 @@
+#ifndef TX_BATCH_HPP
+#define TX_BATCH_HPP
+
+#include "transaction.pb.h"
+#include
+
+
+namespace iroha_lib {
+
+using namespace iroha::protocol;
+using iroha::protocol::Transaction_Payload_BatchMeta_BatchType;
+
+class TxBatch {
+
+public:
+ Transaction_Payload_BatchMeta_BatchType getBatchType(bool atomic) const
+ {
+ return atomic ? Transaction_Payload_BatchMeta_BatchType_ATOMIC
+ : Transaction_Payload_BatchMeta_BatchType_ORDERED;
+ }
+
+ TxList batch(std::vector& transactions, bool atomic = true)
+ {
+ TxList tx_list;
+
+ if (atomic) {
+ Transaction::Payload::BatchMeta meta;
+ meta.set_type(getBatchType(atomic));
+
+ for (auto& tx: transactions) {
+ tx.payload().batch().New()->CopyFrom(meta);
+ *tx_list.add_transactions() = tx;
+ }
+ } else {
+ for (const auto& tx: transactions) {
+ *tx_list.add_transactions() = tx;
+ }
+ }
+ return tx_list;
+ }
+};
+
+}
+
+#endif
diff --git a/iroha-lib/model/generators/CommandGenerator.cpp b/iroha-lib/model/generators/CommandGenerator.cpp
new file mode 100644
index 00000000000..c1982b9bf07
--- /dev/null
+++ b/iroha-lib/model/generators/CommandGenerator.cpp
@@ -0,0 +1,281 @@
+#include "CommandGenerator.hpp"
+#include "model/converters/json_transaction_factory.hpp"
+
+
+namespace iroha_lib {
+
+std::shared_ptr CommandGenerator::generateAddAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount)
+{
+ AddAssetQuantity addAssetQuantity;
+ addAssetQuantity.set_asset_id(asset_id);
+ addAssetQuantity.set_amount(amount);
+
+ auto cmd = Command();
+ cmd.set_allocated_add_asset_quantity(new AddAssetQuantity(addAssetQuantity));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateAddPeer(
+ const std::string& address,
+ const std::string& pubkey,
+ const std::optional& tls_certificate,
+ bool syncing_peer)
+{
+ AddPeer pb_add_peer;
+ auto peer = pb_add_peer.mutable_peer();
+
+ Peer primitive_peer;
+ primitive_peer.set_address(address);
+ primitive_peer.set_peer_key(
+ iroha::hexstringToArray(pubkey)
+ .value()
+ .to_hexstring());
+
+ if (tls_certificate.has_value()) {
+ primitive_peer.set_tls_certificate(*std::move(tls_certificate));
+ }
+ primitive_peer.set_syncing_peer(syncing_peer);
+
+ peer->CopyFrom(primitive_peer);
+
+ auto cmd = Command();
+ cmd.set_allocated_add_peer(new AddPeer(pb_add_peer));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateAddSignatory(
+ const std::string& account_id,
+ const std::string& pubkey)
+{
+ AddSignatory pb_add_signatory;
+ pb_add_signatory.set_account_id(account_id);
+ pb_add_signatory.set_public_key(
+ iroha::hexstringToArray(pubkey)
+ .value()
+ .to_hexstring());
+
+ auto cmd = Command();
+ cmd.set_allocated_add_signatory(new AddSignatory(pb_add_signatory));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateAppendRole(
+ const std::string& account_id,
+ const std::string& role_name)
+{
+ AppendRole append_role;
+ append_role.set_account_id(account_id);
+ append_role.set_role_name(role_name);
+
+ auto cmd = Command();
+ cmd.set_allocated_append_role(new AppendRole(append_role));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateCreateAccount(
+ const std::string& account_name,
+ const std::string& domain_id,
+ const std::string& pubkey)
+{
+ CreateAccount pb_create_account;
+ pb_create_account.set_account_name(account_name);
+ pb_create_account.set_domain_id(domain_id);
+ pb_create_account.set_public_key(iroha::hexstringToArray(pubkey).value().to_hexstring());
+
+ auto cmd = Command();
+ cmd.set_allocated_create_account(new CreateAccount(pb_create_account));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateCreateAsset(
+ const std::string& asset_name,
+ const std::string& domain_id,
+ uint8_t precision)
+{
+
+ CreateAsset pb_create_asset;
+ pb_create_asset.set_asset_name(asset_name);
+ pb_create_asset.set_domain_id(domain_id);
+ pb_create_asset.set_precision(precision);
+
+ auto cmd = Command();
+ cmd.set_allocated_create_asset(new CreateAsset(pb_create_asset));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateCreateDomain(
+ const std::string& domain_id,
+ const std::string& default_role)
+{
+ CreateDomain pb_create_domain;
+ pb_create_domain.set_domain_id(domain_id);
+ pb_create_domain.set_default_role(default_role);
+
+ auto cmdCreateDomain = Command();
+ cmdCreateDomain.set_allocated_create_domain(new CreateDomain(pb_create_domain));
+ return generateCommand(cmdCreateDomain);
+}
+
+std::shared_ptr CommandGenerator::generateCreateRole(
+ const std::string& role_name,
+ const std::unordered_set& permissions)
+{
+ CreateRole createRole;
+ createRole.set_role_name(role_name);
+ std::for_each(permissions.begin(),
+ permissions.end(),
+ [&createRole](auto permission) {
+ createRole.add_permissions(permission);
+ });
+
+ auto cmd = Command();
+ cmd.set_allocated_create_role(new CreateRole(createRole));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateDetachRole(
+ const std::string& account_id,
+ const std::string& role_name)
+{
+ DetachRole detach_role;
+ detach_role.set_account_id(account_id);
+ detach_role.set_role_name(role_name);
+
+ auto cmd = Command();
+ cmd.set_allocated_detach_role(new DetachRole(detach_role));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateGrantPermission(
+ const std::string& account_id,
+ const GrantablePermission permission)
+{
+ GrantPermission grantPermission;
+ grantPermission.set_account_id(account_id);
+ grantPermission.set_permission(permission);
+
+ auto cmd = Command();
+ cmd.set_allocated_grant_permission(new GrantPermission(grantPermission));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateRemovePeer(const std::string& pubkey)
+{
+ RemovePeer removePeer;
+ removePeer.set_public_key(pubkey);
+
+ auto cmd = Command();
+ cmd.set_allocated_remove_peer(new RemovePeer(removePeer));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateRemoveSignatory(
+ const std::string& account_id,
+ const std::string& pubkey)
+{
+ RemoveSignatory removeSignatory;
+ removeSignatory.set_account_id(account_id);
+ removeSignatory.set_public_key(iroha::hexstringToArray(pubkey).value().to_hexstring());
+
+ auto cmd = Command();
+ cmd.set_allocated_remove_signatory(new RemoveSignatory(removeSignatory));
+ return generateCommand(cmd);
+
+}
+
+std::shared_ptr CommandGenerator::generateRevokePermission(
+ const std::string& account_id,
+ const GrantablePermission permission)
+{
+ RevokePermission revokdePermission;
+ revokdePermission.set_account_id(account_id);
+ revokdePermission.set_permission(permission);
+
+ auto cmd = Command();
+ cmd.set_allocated_revoke_permission(new RevokePermission(revokdePermission));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateSetAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value)
+{
+ SetAccountDetail accountDetails;
+ accountDetails.set_account_id(account_id);
+ accountDetails.set_key(key);
+ accountDetails.set_value(value);
+
+ auto cmd = Command();
+ cmd.set_allocated_set_account_detail(new SetAccountDetail(accountDetails));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateSetAccountQuorum(
+ const std::string& account_id, uint32_t quorum)
+{
+ SetAccountQuorum setAccountQuorum;
+ setAccountQuorum.set_account_id(account_id);
+ setAccountQuorum.set_quorum(quorum);
+
+ auto cmd = Command();
+ cmd.set_allocated_set_account_quorum(new SetAccountQuorum(setAccountQuorum));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateSubtractAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount)
+{
+ SubtractAssetQuantity subtractAssetQuantity;
+ subtractAssetQuantity.set_asset_id(asset_id);
+ subtractAssetQuantity.set_amount(amount);
+
+ auto cmd = Command();
+ cmd.set_allocated_subtract_asset_quantity(new SubtractAssetQuantity(subtractAssetQuantity));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateTransferAsset(
+ const std::string& account_id,
+ const std::string& dest_account_id,
+ const std::string& asset_id,
+ const std::string& description,
+ const std::string& amount)
+{
+ TransferAsset transferAsset;
+ transferAsset.set_src_account_id(account_id);
+ transferAsset.set_dest_account_id(dest_account_id);
+ transferAsset.set_asset_id(asset_id);
+ transferAsset.set_description(description);
+ transferAsset.set_amount(amount);
+
+ auto cmd = Command();
+ cmd.set_allocated_transfer_asset(new TransferAsset(transferAsset));
+ return generateCommand(cmd);
+}
+
+std::shared_ptr CommandGenerator::generateCompareAndSetAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value,
+ const std::optional& old_value,
+ bool check_empty)
+{
+ CompareAndSetAccountDetail compareAndSetAccountDetail;
+ compareAndSetAccountDetail.set_account_id(account_id);
+ compareAndSetAccountDetail.set_key(key);
+ compareAndSetAccountDetail.set_value(value);
+ if (old_value.has_value()) {
+ compareAndSetAccountDetail.set_old_value(*std::move(old_value));
+ }
+ compareAndSetAccountDetail.set_check_empty(check_empty);
+
+ auto cmd = Command();
+ cmd.set_allocated_compare_and_set_account_detail(new CompareAndSetAccountDetail(compareAndSetAccountDetail));
+ return generateCommand(cmd);
+}
+
+}
diff --git a/iroha-lib/model/generators/CommandGenerator.hpp b/iroha-lib/model/generators/CommandGenerator.hpp
new file mode 100644
index 00000000000..06948072839
--- /dev/null
+++ b/iroha-lib/model/generators/CommandGenerator.hpp
@@ -0,0 +1,87 @@
+#ifndef COMMAND_GENERATOR_HPP
+#define COMMAND_GENERATOR_HPP
+
+#include "commands.pb.h"
+#include
+
+
+namespace iroha_lib {
+
+using namespace iroha::protocol;
+
+class CommandGenerator {
+
+public:
+ template
+ std::shared_ptr generateCommand(ParamTypes... args)
+ {
+ return std::make_shared(args...);
+ }
+
+ std::shared_ptr generateAddAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount);
+ std::shared_ptr generateAddPeer(
+ const std::string& address,
+ const std::string& pubkey,
+ const std::optional& tls_certificate,
+ bool syncing_peer);
+ std::shared_ptr generateAddSignatory(
+ const std::string& account_id,
+ const std::string& pubkey);
+ std::shared_ptr generateAppendRole(
+ const std::string& account_id,
+ const std::string& role_name);
+ std::shared_ptr generateCreateAccount(
+ const std::string& account_name,
+ const std::string& domain_id,
+ const std::string& pubkey);
+ std::shared_ptr generateCreateAsset(
+ const std::string& asset_name,
+ const std::string& domain_name,
+ uint8_t precision);
+ std::shared_ptr generateCreateDomain(
+ const std::string& domain_id,
+ const std::string& default_role);
+ std::shared_ptr generateCreateRole(
+ const std::string& role_name,
+ const std::unordered_set& permissions);
+ std::shared_ptr generateDetachRole(
+ const std::string& account_id,
+ const std::string& role_name);
+ std::shared_ptr generateGrantPermission(
+ const std::string& account_id,
+ const GrantablePermission permission);
+ std::shared_ptr generateRemovePeer(const std::string& pubkey);
+ std::shared_ptr generateRemoveSignatory(
+ const std::string& account_id,
+ const std::string& pubkey);
+ std::shared_ptr generateRevokePermission(
+ const std::string& account_id,
+ const GrantablePermission permission);
+ std::shared_ptr generateSetAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value);
+ std::shared_ptr generateSetAccountQuorum(
+ const std::string& account_id, uint32_t quorum);
+ std::shared_ptr generateSubtractAssetQuantity(
+ const std::string& asset_id,
+ const std::string& amount);
+ std::shared_ptr generateTransferAsset(
+ const std::string& account_id,
+ const std::string& dest_account_id,
+ const std::string& asset_id,
+ const std::string& description,
+ const std::string& amount);
+ std::shared_ptr generateCompareAndSetAccountDetail(
+ const std::string& account_id,
+ const std::string& key,
+ const std::string& value,
+ const std::optional& old_value,
+ bool check_empty);
+};
+
+}
+
+#endif
diff --git a/iroha-lib/model/generators/QueryGenerator.cpp b/iroha-lib/model/generators/QueryGenerator.cpp
new file mode 100644
index 00000000000..45c08203ccf
--- /dev/null
+++ b/iroha-lib/model/generators/QueryGenerator.cpp
@@ -0,0 +1,226 @@
+#include "QueryGenerator.hpp"
+#include "model/utils/Utils.h"
+
+
+namespace iroha_lib {
+
+std::shared_ptr QueryGenerator::generateQuery(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = std::make_shared();
+ auto* payload = query->mutable_payload()->mutable_meta();
+ payload->set_creator_account_id(account_id);
+ payload->set_query_counter(counter);
+ payload->set_created_time(created_time);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetAccount(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload();
+ auto mutablePayload = queryPayload->mutable_get_account();
+ mutablePayload->set_account_id(account_id);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetAccountAssets(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_account_assets();
+ queryPayload->set_account_id(account_id);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetAccountDetail(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_account_detail();
+ queryPayload->set_account_id(account_id);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetAccountTransactions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::optional& first_tx_hash,
+ const std::optional& first_tx_time,
+ const std::optional& last_tx_time,
+ const std::optional first_tx_height,
+ const std::optional last_tx_height)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_account_transactions();
+ queryPayload->set_account_id(account_id);
+ if (first_tx_hash.has_value()) {
+ query->mutable_payload()->mutable_get_account_transactions()->mutable_pagination_meta()->set_allocated_first_tx_hash(first_tx_hash.value());
+ }
+ if (first_tx_time.has_value()) {
+ query->mutable_payload()->mutable_get_account_transactions()->mutable_pagination_meta()->set_allocated_first_tx_time(first_tx_time.value());
+ }
+ if (last_tx_time.has_value()) {
+ query->mutable_payload()->mutable_get_account_transactions()->mutable_pagination_meta()->set_allocated_last_tx_time(last_tx_time.value());
+ }
+ if (first_tx_height.has_value()) {
+ query->mutable_payload()->mutable_get_account_transactions()->mutable_pagination_meta()->set_first_tx_height(first_tx_height.value());
+ }
+ if (last_tx_height.has_value()) {
+ query->mutable_payload()->mutable_get_account_transactions()->mutable_pagination_meta()->set_last_tx_height(last_tx_height.value());
+ }
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetAccountAssetTransactions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::string& assetId,
+ const std::optional& first_tx_hash,
+ const std::optional& first_tx_time,
+ const std::optional& last_tx_time,
+ const std::optional first_tx_height,
+ const std::optional last_tx_height)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_account_asset_transactions();
+ queryPayload->set_account_id(account_id);
+ queryPayload->set_asset_id(assetId);
+
+ if (first_tx_hash.has_value()) {
+ query->mutable_payload()->mutable_get_account_asset_transactions()->mutable_pagination_meta()->set_allocated_first_tx_hash(first_tx_hash.value());
+ }
+ if (first_tx_time.has_value()) {
+ query->mutable_payload()->mutable_get_account_asset_transactions()->mutable_pagination_meta()->set_allocated_first_tx_time(first_tx_time.value());
+ }
+ if (last_tx_time.has_value()) {
+ query->mutable_payload()->mutable_get_account_asset_transactions()->mutable_pagination_meta()->set_allocated_last_tx_time(last_tx_time.value());
+ }
+ if (first_tx_height.has_value()) {
+ query->mutable_payload()->mutable_get_account_asset_transactions()->mutable_pagination_meta()->set_first_tx_height(first_tx_height.value());
+ }
+ if (last_tx_height.has_value()) {
+ query->mutable_payload()->mutable_get_account_asset_transactions()->mutable_pagination_meta()->set_last_tx_height(last_tx_height.value());
+ }
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetTransactions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::vector& transaction_hashes)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_transactions();
+
+ std::for_each(
+ transaction_hashes.begin(),
+ transaction_hashes.end(),
+ [&queryPayload](auto tx_hash) {
+ auto adder = queryPayload->add_tx_hashes();
+ *adder = string_to_hex(tx_hash);
+ });
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetSignatories(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_signatories();
+ queryPayload->set_account_id(account_id);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetAssetInfo(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::string& assetId)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_asset_info();
+ queryPayload->set_asset_id(assetId);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetRoles(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ query->mutable_payload()->mutable_get_roles();
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetRolePermissions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::string& role_id)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ auto queryPayload = query->mutable_payload()->mutable_get_role_permissions();
+ queryPayload->set_role_id(role_id);
+ return query;
+}
+
+std::shared_ptr QueryGenerator::generateGetPeers(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time)
+{
+ auto query = generateQuery(
+ account_id,
+ counter,
+ created_time);
+ query->mutable_payload()->mutable_get_peers();
+ return query;
+}
+
+}
diff --git a/iroha-lib/model/generators/QueryGenerator.hpp b/iroha-lib/model/generators/QueryGenerator.hpp
new file mode 100644
index 00000000000..b97f981b8a7
--- /dev/null
+++ b/iroha-lib/model/generators/QueryGenerator.hpp
@@ -0,0 +1,79 @@
+#pragma once
+
+#include "queries.pb.h"
+#include
+
+
+namespace iroha_lib {
+
+using namespace iroha::protocol;
+
+class QueryGenerator {
+public:
+ std::shared_ptr generateGetAccount(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+ std::shared_ptr generateGetAccountAssets(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+ std::shared_ptr generateGetAccountDetail(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+ std::shared_ptr generateGetAccountTransactions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::optional& first_tx_hash={},
+ const std::optional& first_tx_time={},
+ const std::optional& last_tx_time={},
+ const std::optional first_tx_height={},
+ const std::optional last_tx_height={});
+ std::shared_ptr generateGetAccountAssetTransactions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::string& assetId,
+ const std::optional& first_tx_hash={},
+ const std::optional& first_tx_time={},
+ const std::optional& last_tx_time={},
+ const std::optional first_tx_height={},
+ const std::optional last_tx_height={});
+ std::shared_ptr generateGetTransactions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::vector& transaction_hashes);
+ std::shared_ptr generateGetSignatories(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+ std::shared_ptr generateGetAssetInfo(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::string& assetId);
+ std::shared_ptr generateGetRoles(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+ std::shared_ptr generateGetRolePermissions(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time,
+ const std::string& role_id);
+ std::shared_ptr generateGetPeers(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+
+private:
+ std::shared_ptr generateQuery(
+ const std::string& account_id,
+ uint64_t counter,
+ const uint64_t created_time);
+};
+
+} // namespace iroha_lib
diff --git a/iroha-lib/model/utils/Utils.cpp b/iroha-lib/model/utils/Utils.cpp
new file mode 100755
index 00000000000..c9591c3f1ea
--- /dev/null
+++ b/iroha-lib/model/utils/Utils.cpp
@@ -0,0 +1,135 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "crypto/keys_manager_impl.hpp"
+#include "logger/logger_manager.hpp"
+#include "model/converters/pb_common.hpp"
+#include "grpc_client/GrpcClient.hpp"
+#include "Utils.h"
+
+
+namespace fs = boost::filesystem;
+
+
+void verifyPath(
+ const fs::path& path,
+ const logger::LoggerPtr& logger)
+{
+ if (not fs::exists(path)) {
+ const auto error_message = "Path " + path.string() + " not found.";
+ logger->error(error_message);
+ throw error_message;
+ }
+}
+
+
+void verifyKepair(
+ const iroha::expected::Result& keypair,
+ const logger::LoggerPtr& logger,
+ const fs::path& path,
+ const std::string& account_name)
+{
+ if (auto error = iroha::expected::resultToOptionalError(keypair)) {
+ const auto error_message = fmt::format(
+ "Keypair error= {}."
+ "\nKeypair path= {}, name= {}.\n",
+ error.value(),
+ path.string(),
+ account_name);
+ logger->error(error_message);
+ throw error_message;
+ }
+}
+
+
+iroha::keypair_t generateKeypair(
+ const std::string& account_name,
+ const std::string& key_path,
+ const logger::LoggerManagerTreePtr& log_manager)
+{
+ const auto logger = log_manager->getChild("Main")->getLogger();
+ const auto keys_manager_log = log_manager->getChild("KeysManager")->getLogger();
+ fs::path path(key_path);
+
+ verifyPath(
+ path,
+ logger);
+
+ iroha::KeysManagerImpl manager(
+ (path / account_name)
+ .string(),
+ keys_manager_log);
+ auto keypair = manager.loadKeys(boost::none);
+
+ verifyKepair(
+ keypair,
+ logger,
+ path,
+ account_name);
+ return iroha::keypair_t(
+ iroha::pubkey_t::from_hexstring(keypair.assumeValue().publicKey()).assumeValue(),
+ iroha::privkey_t::from_string(toBinaryString(keypair.assumeValue().privateKey())).assumeValue());
+}
+
+
+const std::string getTransactionHash(const Transaction& tx)
+{
+ return iroha::hash(tx).to_hexstring();
+}
+
+
+void printTransactionStatus(
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::string& tx_hash)
+{
+ const auto status = iroha_lib::GrpcClient(
+ peer_ip,
+ torii_port)
+ .getTxStatus(tx_hash);
+ std::cout << "Tx hash=" << tx_hash
+ << " Status name=" << TxStatus_Name(status.tx_status())
+ << " Status code=" << status.tx_status()
+ << " Error code=" << status.error_code()
+ << std::endl;
+}
+
+
+void printTransactionStatuses(
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::vector& transactions)
+{
+ for (const auto& tx: transactions) {
+ printTransactionStatus(
+ peer_ip,
+ torii_port,
+ getTransactionHash(tx));
+ }
+}
+
+
+std::string string_to_hex(const std::string& in)
+{
+ std::stringstream ss;
+
+ ss << std::hex << std::setfill('0');
+ for (size_t i = 0; i < in.length(); ++i) {
+ ss << std::setw(2) << static_cast(static_cast(in[i]));
+ }
+ return ss.str();
+}
+
+std::pair splitAssetFullName(const std::string& assetFullName)
+{
+ constexpr const char nameDomainSeparator = '#';
+ const auto separatorPosition = assetFullName.find(nameDomainSeparator);
+ const std::string assetName = assetFullName.substr(0, separatorPosition);
+ const std::string assetDomain = assetFullName.substr(separatorPosition+1);
+ return {assetName, assetDomain};
+}
diff --git a/iroha-lib/model/utils/Utils.h b/iroha-lib/model/utils/Utils.h
new file mode 100755
index 00000000000..1ab30a58235
--- /dev/null
+++ b/iroha-lib/model/utils/Utils.h
@@ -0,0 +1,43 @@
+#include
+#include
+
+#include "crypto/keys_manager_impl.hpp"
+#include "logger/logger_manager.hpp"
+#include "model/converters/pb_common.hpp"
+#include "grpc_client/GrpcClient.hpp"
+
+
+using namespace iroha::protocol;
+namespace fs = boost::filesystem;
+
+
+void verifyPath(
+ const fs::path& path,
+ const logger::LoggerPtr& logger);
+
+void verifyKepair(
+ const iroha::expected::Result& keypair,
+ const logger::LoggerPtr& logger,
+ const fs::path& path,
+ const std::string& account_name);
+
+iroha::keypair_t generateKeypair(
+ const std::string& account_name,
+ const std::string& key_path,
+ const logger::LoggerManagerTreePtr& log_manager);
+
+const std::string getTransactionHash(const Transaction& tx);
+
+void printTransactionStatus(
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::string& tx_hash);
+
+void printTransactionStatuses(
+ const std::string& peer_ip,
+ uint16_t torii_port,
+ const std::vector& transactions);
+
+std::string string_to_hex(const std::string& in);
+
+std::pair splitAssetFullName(const std::string& assetFullName);
diff --git a/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp b/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp
index 89b4c9896fc..c828be33bf6 100644
--- a/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp
+++ b/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp
@@ -13,39 +13,27 @@ ServiceImpl::ServiceImpl(logger::LoggerPtr log,
std::function)> callback)
: callback_(std::move(callback)), log_(std::move(log)) {}
-grpc::Status ServiceImpl::HandleState(
+grpc::Status ServiceImpl::SendState(
::grpc::ServerContext *context,
- ::iroha::consensus::yac::proto::State &request) {
+ const ::iroha::consensus::yac::proto::State *request,
+ ::google::protobuf::Empty *response) {
std::vector state;
- for (const auto &pb_vote : request.votes())
- if (auto vote = PbConverters::deserializeVote(pb_vote, log_))
+ for (const auto &pb_vote : request->votes()) {
+ if (auto vote = PbConverters::deserializeVote(pb_vote, log_)) {
state.push_back(*vote);
-
+ }
+ }
if (state.empty()) {
log_->info("Received an empty votes collection");
return grpc::Status::CANCELLED;
}
-
if (not sameKeys(state)) {
log_->info("Votes are statelessly invalid: proposal rounds are different");
return grpc::Status::CANCELLED;
}
log_->info("Received votes[size={}] from {}", state.size(), context->peer());
+
callback_(std::move(state));
return grpc::Status::OK;
}
-
-grpc::Status ServiceImpl::SendState(
- ::grpc::ServerContext *context,
- ::grpc::ServerReader< ::iroha::consensus::yac::proto::State> *reader,
- ::google::protobuf::Empty *response) {
- ::iroha::consensus::yac::proto::State request;
-
- grpc::Status status = grpc::Status::OK;
- while (status.ok() && reader->Read(&request)) {
- status = HandleState(context, request);
- }
-
- return status;
-}
diff --git a/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp b/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp
index ec07b5107d1..936e22a282a 100644
--- a/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp
+++ b/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp
@@ -30,19 +30,13 @@ namespace iroha::consensus::yac {
* Naming is confusing, because this is rpc call that
* perform on another machine;
*/
- grpc::Status SendState(
- ::grpc::ServerContext *context,
- ::grpc::ServerReader< ::iroha::consensus::yac::proto::State> *reader,
- ::google::protobuf::Empty *response) override;
-
- /**
- * Handles state;
- */
- grpc::Status HandleState(::grpc::ServerContext *context,
- ::iroha::consensus::yac::proto::State &request);
+ grpc::Status SendState(::grpc::ServerContext *context,
+ const ::iroha::consensus::yac::proto::State *request,
+ ::google::protobuf::Empty *response) override;
private:
std::function)> callback_;
+
logger::LoggerPtr log_;
};
} // namespace iroha::consensus::yac
diff --git a/irohad/consensus/yac/transport/impl/network_impl.cpp b/irohad/consensus/yac/transport/impl/network_impl.cpp
index 9831ee8cd9b..e7226b90129 100644
--- a/irohad/consensus/yac/transport/impl/network_impl.cpp
+++ b/irohad/consensus/yac/transport/impl/network_impl.cpp
@@ -44,51 +44,14 @@ void NetworkImpl::sendState(const shared_model::interface::Peer &to,
*pb_vote = PbConverters::serializeVote(vote);
}
- auto stream_writer = stubs_.exclusiveAccess(
- [&](auto &stubs) -> std::shared_ptr<::grpc::ClientWriterInterface<
- ::iroha::consensus::yac::proto::State>> {
- auto const it = stubs.find(to.pubkey());
- if (it == stubs.end() || std::get<0>(it->second) != to.address()) {
- if (it != stubs.end()) {
- // clear all
- std::get<3>(it->second)->WritesDone();
- stubs.erase(to.pubkey());
- }
-
- auto maybe_client = client_factory_->createClient(to);
- if (expected::hasError(maybe_client)) {
- log_->error("Could not send state to {}: {}",
- to,
- maybe_client.assumeError());
- return nullptr;
- }
-
- std::unique_ptr client =
- std::move(maybe_client).assumeValue();
-
- auto context = std::make_unique();
- context->set_wait_for_ready(true);
- context->set_deadline(std::chrono::system_clock::now()
- + std::chrono::seconds(5));
-
- auto response = std::make_unique<::google::protobuf::Empty>();
- std::shared_ptr<::grpc::ClientWriterInterface<
- ::iroha::consensus::yac::proto::State>>
- writer = client->SendState(context.get(), response.get());
-
- stubs[to.pubkey()] = std::make_tuple(std::string{to.address()},
- std::move(client),
- std::move(context),
- writer,
- std::move(response));
- return writer;
- }
-
- return std::get<3>(it->second);
- });
-
- if (!stream_writer)
+ auto maybe_client = client_factory_->createClient(to);
+ if (expected::hasError(maybe_client)) {
+ log_->error(
+ "Could not send state to {}: {}", to, maybe_client.assumeError());
return;
+ }
+ std::shared_ptr client =
+ std::move(maybe_client).assumeValue();
log_->debug("Propagating votes for {}, size={} to {}",
state.front().hash.vote_round,
@@ -96,26 +59,30 @@ void NetworkImpl::sendState(const shared_model::interface::Peer &to,
to);
getSubscription()->dispatcher()->add(
getSubscription()->dispatcher()->kExecuteInPool,
- [peer{to.pubkey()},
- request(std::move(request)),
- wstream_writer(utils::make_weak(stream_writer)),
+ [request(std::move(request)),
+ client(std::move(client)),
log(utils::make_weak(log_)),
log_sending_msg(fmt::format("Send votes bundle[size={}] for {} to {}",
state.size(),
state.front().hash.vote_round,
to))] {
auto maybe_log = log.lock();
- auto stream_writer = wstream_writer.lock();
-
- if (!maybe_log || !stream_writer) {
+ if (not maybe_log) {
return;
}
-
+ grpc::ClientContext context;
+ context.set_wait_for_ready(true);
+ context.set_deadline(std::chrono::system_clock::now()
+ + std::chrono::seconds(5));
+ google::protobuf::Empty response;
maybe_log->info(log_sending_msg);
- if (!stream_writer->Write(request)) {
- maybe_log->warn("RPC failed: {}", peer);
+ auto status = client->SendState(&context, request, &response);
+ if (not status.ok()) {
+ maybe_log->warn(
+ "RPC failed: {} {}", context.peer(), status.error_message());
return;
+ } else {
+ maybe_log->info("RPC succeeded: {}", context.peer());
}
- maybe_log->info("RPC succeeded: {}", peer);
});
}
diff --git a/irohad/consensus/yac/transport/impl/network_impl.hpp b/irohad/consensus/yac/transport/impl/network_impl.hpp
index 397c24c36b2..84596302134 100644
--- a/irohad/consensus/yac/transport/impl/network_impl.hpp
+++ b/irohad/consensus/yac/transport/impl/network_impl.hpp
@@ -23,7 +23,8 @@ namespace iroha::consensus::yac {
* Class which provides implementation of client-side transport for
* consensus based on grpc
*/
- class NetworkImpl : public YacNetwork {
+ class NetworkImpl : public YacNetwork,
+ public std::enable_shared_from_this {
public:
using Service = proto::Yac;
using ClientFactory = iroha::network::ClientFactory;
diff --git a/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp b/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp
index fdfef728d10..d57bc1d70cc 100644
--- a/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp
+++ b/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp
@@ -101,6 +101,7 @@ DEFINE_validator(pg_opt, &ValidateNonEmpty);
// NOLINTNEXTLINE
DEFINE_string(rocksdb_path, "", "Specify path to RocksDB");
DEFINE_validator(rocksdb_path, &ValidateNonEmpty);
+DEFINE_bool(ignore_checking_with_schema_version, false, "Should schema version be checked");
logger::LoggerManagerTreePtr getDefaultLogManager() {
return std::make_shared(logger::LoggerConfig{
@@ -651,8 +652,11 @@ bool Wsv::from_rocksdb(RocksDbCommon &rdbc) {
if (key_starts_with_and_drop(RDB_F_VERSION)) {
assert(key.empty());
schema_version = std::string{val};
- assert(schema_version == "1#4#0" &&
- "This version of iroha_wsv_diff can check WSV in RocksDB of version 1.2.0 only");
+ if (! FLAGS_ignore_checking_with_schema_version)
+ {
+ assert(schema_version == "1#4#0" &&
+ "This version of iroha_wsv_diff can check WSV in RocksDB of version 1.4.0 only");
+ }
} else if (key_starts_with_and_drop(RDB_NETWORK)) {
if (key_starts_with_and_drop(RDB_PEERS)) {
if (key_starts_with_and_drop(RDB_ADDRESS)) {
diff --git a/irohad/main/application.cpp b/irohad/main/application.cpp
index 710bae30e8f..e76893932ad 100644
--- a/irohad/main/application.cpp
+++ b/irohad/main/application.cpp
@@ -279,13 +279,13 @@ Irohad::RunResult Irohad::initSettings() {
Irohad::RunResult Irohad::initValidatorsConfigs() {
validators_config_ =
std::make_shared(
- config_.max_proposal_size);
+ config_.max_proposal_size, false, false, config_.max_past_created_hours);
block_validators_config_ =
std::make_shared(
- config_.max_proposal_size, true);
+ config_.max_proposal_size, true, false, config_.max_past_created_hours);
proposal_validators_config_ =
std::make_shared(
- config_.max_proposal_size, false, true);
+ config_.max_proposal_size, false, true, config_.max_past_created_hours);
log_->info("[Init] => validators configs");
return {};
}
diff --git a/irohad/main/iroha_conf_literals.cpp b/irohad/main/iroha_conf_literals.cpp
index c2c1f35036f..66119648c8d 100644
--- a/irohad/main/iroha_conf_literals.cpp
+++ b/irohad/main/iroha_conf_literals.cpp
@@ -48,6 +48,7 @@ namespace config_members {
{"warning", logger::LogLevel::kWarn},
{"error", logger::LogLevel::kError},
{"critical", logger::LogLevel::kCritical}};
+ const char *MaxPastCreatedHours = "max_past_created_hours";
const char *Address = "address";
const char *PublicKey = "public_key";
const char *InitialPeers = "initial_peers";
diff --git a/irohad/main/iroha_conf_literals.hpp b/irohad/main/iroha_conf_literals.hpp
index 354967f02c6..ee9b3b03975 100644
--- a/irohad/main/iroha_conf_literals.hpp
+++ b/irohad/main/iroha_conf_literals.hpp
@@ -47,6 +47,7 @@ namespace config_members {
extern const char *LogLevel;
extern const char *LogPatternsSection;
extern const char *LogChildrenSection;
+ extern const char *MaxPastCreatedHours;
extern const std::unordered_map LogLevels;
extern const char *InitialPeers;
extern const char *Address;
diff --git a/irohad/main/iroha_conf_loader.cpp b/irohad/main/iroha_conf_loader.cpp
index 6700c8edb3e..f9272b9737f 100644
--- a/irohad/main/iroha_conf_loader.cpp
+++ b/irohad/main/iroha_conf_loader.cpp
@@ -697,6 +697,7 @@ inline bool JsonDeserializerImpl::loadInto(IrohadConfig &dest) {
.loadInto(dest.proposal_creation_timeout)
and getDictChild(MaxProposalPack).loadInto(dest.max_proposal_pack)
and getDictChild(HealthcheckPort).loadInto(dest.healthcheck_port)
+ and getDictChild(MaxPastCreatedHours).loadInto(dest.max_past_created_hours)
and getDictChild(VoteDelay).loadInto(dest.vote_delay)
and getDictChild(MstSupport).loadInto(dest.mst_support)
and getDictChild(MstExpirationTime).loadInto(dest.mst_expiration_time)
diff --git a/irohad/main/iroha_conf_loader.hpp b/irohad/main/iroha_conf_loader.hpp
index f65ddc51725..e4e87687fc7 100644
--- a/irohad/main/iroha_conf_loader.hpp
+++ b/irohad/main/iroha_conf_loader.hpp
@@ -73,7 +73,7 @@ struct IrohadConfig {
boost::optional logger_manager;
std::optional initial_peers;
boost::optional utility_service;
-
+ std::optional max_past_created_hours;
// getters
uint32_t getMaxpProposalPack() const;
uint32_t getProposalDelay() const;
diff --git a/irohad/ordering/impl/on_demand_ordering_gate.cpp b/irohad/ordering/impl/on_demand_ordering_gate.cpp
index 59033e8605f..22991840b54 100644
--- a/irohad/ordering/impl/on_demand_ordering_gate.cpp
+++ b/irohad/ordering/impl/on_demand_ordering_gate.cpp
@@ -196,7 +196,7 @@ void OnDemandOrderingGate::sendCachedTransactions() {
end_iterator->get()->transactions().end(),
[&](const auto &tx) {
return (uint64_t)now
- > shared_model::validation::FieldValidator::kMaxDelay
+ > shared_model::validation::FieldValidator::kDefaultMaxDelay
+ tx->createdTime();
})) {
end_iterator = batches.erase(end_iterator);
diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp
index bc03541a68a..6c68e62b050 100644
--- a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp
+++ b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp
@@ -295,16 +295,17 @@ iroha::ordering::PackedProposalData
OnDemandOrderingServiceImpl::packNextProposals(const consensus::Round &round) {
auto const available_txs_count = availableTxsCountBatchesCache();
auto const full_proposals_count = available_txs_count / transaction_limit_;
- auto const number_of_packs =
- (available_txs_count
- + (full_proposals_count > 0 ? 0 : transaction_limit_ - 1))
- / transaction_limit_;
+ auto const number_of_proposals = std::min(
+ (uint32_t)((available_txs_count
+ + (full_proposals_count > 0 ? 0 : transaction_limit_ - 1))
+ / transaction_limit_),
+ max_proposal_pack_);
PackedProposalContainer outcome;
std::vector> txs;
BloomFilter256 bf;
- for (uint32_t ix = 0; ix < number_of_packs; ++ix) {
+ for (uint32_t ix = 0; ix < number_of_proposals; ++ix) {
assert(!isEmptyBatchesCache());
batches_cache_.getTransactions(
transaction_limit_, txs, bf, [&](auto const &batch) {
diff --git a/irohad/ordering/impl/on_demand_os_server_grpc.cpp b/irohad/ordering/impl/on_demand_os_server_grpc.cpp
index cc17b44f991..1733d500c0d 100644
--- a/irohad/ordering/impl/on_demand_os_server_grpc.cpp
+++ b/irohad/ordering/impl/on_demand_os_server_grpc.cpp
@@ -94,8 +94,8 @@ grpc::Status OnDemandOsServerGrpc::RequestProposal(
if (!request->has_bloom_filter()
|| request->bloom_filter().size() != BloomFilter256::kBytesCount) {
#endif // USE_BLOOM_FILTER
- log_->info("Response with full {} txs proposal.",
- sptr_proposal->transactions().size());
+ log_->debug("Response with full {} txs proposal.",
+ sptr_proposal->transactions().size());
*proposal = proto_proposal;
#if USE_BLOOM_FILTER
} else {
diff --git a/schema/yac.proto b/schema/yac.proto
index 4b2cc5fed4e..325e98440fb 100644
--- a/schema/yac.proto
+++ b/schema/yac.proto
@@ -39,5 +39,5 @@ message State {
}
service Yac {
- rpc SendState (stream State) returns (google.protobuf.Empty);
+ rpc SendState (State) returns (google.protobuf.Empty);
}
diff --git a/shared_model/backend/protobuf/impl/proto_block_json_converter.cpp b/shared_model/backend/protobuf/impl/proto_block_json_converter.cpp
index e428e10fb81..ca7e9ce6fb9 100644
--- a/shared_model/backend/protobuf/impl/proto_block_json_converter.cpp
+++ b/shared_model/backend/protobuf/impl/proto_block_json_converter.cpp
@@ -24,7 +24,7 @@ ProtoBlockJsonConverter::serialize(const interface::Block &block) const
google::protobuf::util::MessageToJsonString(proto_block, &result);
if (not status.ok()) {
- return iroha::expected::makeError(status.error_message());
+ return iroha::expected::makeError(status.message());
}
return iroha::expected::makeValue(result);
}
@@ -35,7 +35,7 @@ ProtoBlockJsonConverter::deserialize(
iroha::protocol::Block block;
auto status = google::protobuf::util::JsonStringToMessage(json, &block);
if (not status.ok()) {
- return iroha::expected::makeError(status.error_message());
+ return iroha::expected::makeError(status.message());
}
std::unique_ptr result =
std::make_unique(std::move(block.block_v1()));
diff --git a/shared_model/converters/protobuf/json_proto_converter.hpp b/shared_model/converters/protobuf/json_proto_converter.hpp
index 1effd86e8c9..1998fa020d5 100644
--- a/shared_model/converters/protobuf/json_proto_converter.hpp
+++ b/shared_model/converters/protobuf/json_proto_converter.hpp
@@ -45,7 +45,7 @@ namespace shared_model {
if (status.ok()) {
return result;
}
- return status.error_message();
+ return status.message();
}
} // namespace protobuf
} // namespace converters
diff --git a/shared_model/validators/field_validator.cpp b/shared_model/validators/field_validator.cpp
index 25d263d3b2a..9d9cfdf714b 100644
--- a/shared_model/validators/field_validator.cpp
+++ b/shared_model/validators/field_validator.cpp
@@ -25,7 +25,6 @@
#include "interfaces/queries/asset_pagination_meta.hpp"
#include "interfaces/queries/query_payload_meta.hpp"
#include "interfaces/queries/tx_pagination_meta.hpp"
-#include "multihash/multihash.hpp"
#include "validators/field_validator.hpp"
#include "validators/validation_error_helpers.hpp"
@@ -124,7 +123,11 @@ namespace shared_model {
FieldValidator::FieldValidator(std::shared_ptr config,
time_t future_gap,
TimeFunction time_provider)
- : future_gap_(future_gap), time_provider_(time_provider) {}
+ : future_gap_(future_gap), time_provider_(time_provider),
+ max_delay_(config->max_past_created_hours ?
+ std::chrono::hours(config->max_past_created_hours.value()) / std::chrono::milliseconds(1)
+ : kDefaultMaxDelay)
+ {}
std::optional FieldValidator::validateAccountId(
const interface::types::AccountIdType &account_id) const {
@@ -284,7 +287,7 @@ namespace shared_model {
"CreatedTime",
{fmt::format(
"sent from future, timestamp: {}, now: {}", timestamp, now)});
- } else if (now > kMaxDelay + timestamp) {
+ } else if (now > max_delay_.count() + timestamp) {
return ValidationError(
"CreatedTime",
{fmt::format("too old, timestamp: {}, now: {}", timestamp, now)});
diff --git a/shared_model/validators/field_validator.hpp b/shared_model/validators/field_validator.hpp
index 2fc7c0eaa0a..72dbdd21a67 100644
--- a/shared_model/validators/field_validator.hpp
+++ b/shared_model/validators/field_validator.hpp
@@ -6,6 +6,7 @@
#ifndef IROHA_SHARED_MODEL_FIELD_VALIDATOR_HPP
#define IROHA_SHARED_MODEL_FIELD_VALIDATOR_HPP
+#include
#include
#include "cryptography/default_hash_provider.hpp"
@@ -202,14 +203,18 @@ namespace shared_model {
// time provider callback
TimeFunction time_provider_;
- public:
// max-delay between tx creation and validation
- static constexpr auto kMaxDelay =
- std::chrono::hours(24) / std::chrono::milliseconds(1);
+ std::chrono::milliseconds max_delay_;
+
+ public:
// default value for future_gap field of FieldValidator
static constexpr auto kDefaultFutureGap =
std::chrono::minutes(5) / std::chrono::milliseconds(1);
+ // default maximum allowed delay between tx creation and validation
+ static constexpr auto kDefaultMaxDelay =
+ std::chrono::hours(24) / std::chrono::milliseconds(1);
+
static constexpr size_t hash_size =
crypto::DefaultHashProvider::kHashLength;
/// limit for the set account detail size in bytes
diff --git a/shared_model/validators/validators_common.cpp b/shared_model/validators/validators_common.cpp
index 10eb580d530..6c1828b8174 100644
--- a/shared_model/validators/validators_common.cpp
+++ b/shared_model/validators/validators_common.cpp
@@ -13,10 +13,13 @@ namespace shared_model {
ValidatorsConfig::ValidatorsConfig(uint64_t max_batch_size,
bool partial_ordered_batches_are_valid,
- bool txs_duplicates_allowed)
+ bool txs_duplicates_allowed,
+ std::optional max_past_created_hours)
: max_batch_size(max_batch_size),
partial_ordered_batches_are_valid(partial_ordered_batches_are_valid),
- txs_duplicates_allowed(txs_duplicates_allowed) {}
+ txs_duplicates_allowed(txs_duplicates_allowed),
+ max_past_created_hours(max_past_created_hours)
+ {}
bool validateHexString(const std::string &str) {
static const std::regex hex_regex{R"([0-9a-fA-F]*)"};
diff --git a/shared_model/validators/validators_common.hpp b/shared_model/validators/validators_common.hpp
index 6a80a8f2bd9..72af05781b7 100644
--- a/shared_model/validators/validators_common.hpp
+++ b/shared_model/validators/validators_common.hpp
@@ -7,7 +7,7 @@
#define IROHA_VALIDATORS_COMMON_HPP
#include
-
+#include
#include
namespace shared_model {
@@ -20,7 +20,8 @@ namespace shared_model {
struct ValidatorsConfig {
ValidatorsConfig(uint64_t max_batch_size,
bool partial_ordered_batches_are_valid = false,
- bool txs_duplicates_allowed = false);
+ bool txs_duplicates_allowed = false,
+ std::optional max_past_created_hours = {});
/// Maximum allowed amount of transactions within a batch
const uint64_t max_batch_size;
@@ -36,6 +37,14 @@ namespace shared_model {
* - BlockLoader
*/
const bool txs_duplicates_allowed;
+
+ /**
+ * A parameter that specifies how many hours in the past a transaction's
+ * `created_time` may be, relative to the current peer's time.
+ * Default is `FieldValidator::kDefaultMaxDelay` (24 hours, in milliseconds).
+ * The value must be synchronised across all peers.
+ */
+ std::optional max_past_created_hours;
};
/**
diff --git a/test/framework/integration_framework/integration_test_framework.cpp b/test/framework/integration_framework/integration_test_framework.cpp
index 4b098ca4058..19bd55ae90d 100644
--- a/test/framework/integration_framework/integration_test_framework.cpp
+++ b/test/framework/integration_framework/integration_test_framework.cpp
@@ -313,6 +313,7 @@ IntegrationTestFramework::IntegrationTestFramework(
config_.max_proposal_size = 10;
config_.mst_support = mst_support;
config_.syncing_mode = false;
+ config_.max_past_created_hours = 24;
switch (db_type) {
case iroha::StorageType::kPostgres: {
@@ -652,8 +653,8 @@ IntegrationTestFramework &IntegrationTestFramework::sendTx(
const shared_model::proto::Transaction &tx) {
sendTx(tx, [this](const auto &status) {
if (!status.statelessErrorOrCommandName().empty()) {
- log_->debug("Got error while sending transaction: "
- + status.statelessErrorOrCommandName());
+ log_->debug("Got error while sending transaction: {}",
+ status.statelessErrorOrCommandName());
}
});
return *this;
diff --git a/test/module/irohad/consensus/yac/network_test.cpp b/test/module/irohad/consensus/yac/network_test.cpp
index 63e80706bef..98a91376e1c 100644
--- a/test/module/irohad/consensus/yac/network_test.cpp
+++ b/test/module/irohad/consensus/yac/network_test.cpp
@@ -69,6 +69,23 @@ namespace iroha::consensus::yac {
VoteMessage message;
};
+ /**
+ * @given initialized network
+ * @when send vote to itself
+ * @then vote handled
+ */
+ TEST_F(YacNetworkTest, MessageHandledWhenMessageSent) {
+ proto::State request;
+ expectConnection(*peer, [&request](auto &stub) {
+ EXPECT_CALL(stub, SendState(_, _, _))
+ .WillOnce(DoAll(SaveArg<1>(&request), Return(grpc::Status::OK)));
+ });
+
+ network->sendState(*peer, {message});
+
+ ASSERT_EQ(request.votes_size(), 1);
+ }
+
/**
* @given initialized network
* @when send request with one vote
@@ -81,7 +98,7 @@ namespace iroha::consensus::yac {
auto pb_vote = request.add_votes();
*pb_vote = PbConverters::serializeVote(message);
- auto response = service->HandleState(&context, request);
+ auto response = service->SendState(&context, &request, nullptr);
ASSERT_EQ(response.error_code(), grpc::StatusCode::OK);
}
@@ -93,7 +110,7 @@ namespace iroha::consensus::yac {
TEST_F(YacNetworkTest, SendMessageEmptyKeys) {
proto::State request;
grpc::ServerContext context;
- auto response = service->HandleState(&context, request);
+ auto response = service->SendState(&context, &request, nullptr);
ASSERT_EQ(response.error_code(), grpc::StatusCode::CANCELLED);
}
} // namespace iroha::consensus::yac
diff --git a/test/module/irohad/ordering/on_demand_os_test.cpp b/test/module/irohad/ordering/on_demand_os_test.cpp
index 8b08495f0e1..17126e7d5b6 100644
--- a/test/module/irohad/ordering/on_demand_os_test.cpp
+++ b/test/module/irohad/ordering/on_demand_os_test.cpp
@@ -171,6 +171,37 @@ TEST_F(OnDemandOsTest, OverflowRound) {
.size());
}
+TEST_F(OnDemandOsTest, OverflowRound4) {
+ generateTransactionsAndInsert({1, transaction_limit * 5});
+
+ os->onCollaborationOutcome(commit_round);
+
+ ASSERT_TRUE(os->onRequestProposal(target_round));
+ ASSERT_TRUE(os->onRequestProposal(target_round)->size() == 4);
+ for (size_t ix = 0; ix < 4; ++ix) {
+ ASSERT_EQ(transaction_limit,
+ os->onRequestProposal(target_round)
+ ->
+ operator[](ix)
+ .first->transactions()
+ .size());
+ }
+}
+
+TEST_F(OnDemandOsTest, OverflowRound5) {
+ generateTransactionsAndInsert({1, transaction_limit * 15});
+
+ os->onCollaborationOutcome(commit_round);
+
+ auto pack = os->onRequestProposal(target_round);
+ ASSERT_TRUE(pack);
+ ASSERT_TRUE(pack->size() == max_proposal_pack);
+ for (size_t ix = 0; ix < max_proposal_pack; ++ix) {
+ ASSERT_EQ(transaction_limit,
+ pack->operator[](ix).first->transactions().size());
+ }
+}
+
/**
* @given initialized on-demand OS
* @when insert commit round and then proposal_limit + 2 reject rounds
diff --git a/test/tool/test_tool_iroha_wsv_diff.sh b/test/tool/test_tool_iroha_wsv_diff.sh
index c46ad024df7..edb2198f778 100755
--- a/test/tool/test_tool_iroha_wsv_diff.sh
+++ b/test/tool/test_tool_iroha_wsv_diff.sh
@@ -61,7 +61,7 @@ test_equal_wsv()(
## Make WSV in rocks database from block store
time $iroha_migrate -block_store_path="$BLOCK_STORE_PATH" -rocksdb_path "$ROCKSDB_PATH" $DROP_STATE
- $iroha_wsv_diff -pg_opt "$PG_OPT" -rocksdb_path "$ROCKSDB_PATH"
+ $iroha_wsv_diff -pg_opt "$PG_OPT" -rocksdb_path "$ROCKSDB_PATH" -ignore_checking_with_schema_version
# ## No difference in dumps expected
diff <(tail -n+2 rockdb.wsv) <(tail -n+2 postgres.wsv)
@@ -83,7 +83,7 @@ update account_has_asset set amount = 0.0 where account_id = 'superuser@bootstra
END
trap 'echo clean-up; psql_with_params /dev/null' EXIT
- if ! $iroha_wsv_diff -pg_opt "$PG_OPT" -rocksdb_path "$ROCKSDB_PATH" | tee log ;then
+ if ! $iroha_wsv_diff -pg_opt "$PG_OPT" -rocksdb_path "$ROCKSDB_PATH" -ignore_checking_with_schema_version | tee log ;then
grep -Fq /dev/null' EXIT
- if ! $iroha_wsv_diff -pg_opt "$PG_OPT" -rocksdb_path "$ROCKSDB_PATH" | tee log ;then
+ if ! $iroha_wsv_diff -pg_opt "$PG_OPT" -rocksdb_path "$ROCKSDB_PATH" -ignore_checking_with_schema_version | tee log ;then
grep -Fq
-Date: Wed, 26 May 2021 18:02:45 +0300
-Subject: [PATCH] rxcpp from master 2020-08-22
-
----
- ports/rxcpp/portfile.cmake | 5 ++--
- ports/rxcpp/support_find_package.patch | 32 --------------------------
- 2 files changed, 2 insertions(+), 35 deletions(-)
- delete mode 100644 ports/rxcpp/support_find_package.patch
-
-diff --git a/ports/rxcpp/portfile.cmake b/ports/rxcpp/portfile.cmake
-index b98701132..b11458d3c 100644
---- a/ports/rxcpp/portfile.cmake
-+++ b/ports/rxcpp/portfile.cmake
-@@ -1,10 +1,9 @@
- vcpkg_from_github(
- OUT_SOURCE_PATH SOURCE_PATH
- REPO ReactiveX/RxCpp
-- REF v4.1.0
-- SHA512 a92e817ecbdf6f235cae724ada2615af9fa0c243249625d0f2c2f09ff5dd7f53fdabd03a0278fe2995fe27528c5511d71f87b7a6b3d54f73b49b65aef56e32fd
-+ REF 9002d9bea0e6b90624672e90a409b56de5286fc6
-+ SHA512 5f4540df6bcb9a980026481a75719201cff0c2e3e957a51dd22d63399138133f13c3a7b5b507124acc635c633d16583768619d62d725a01c40dc31a2b2ece422
- HEAD_REF master
-- PATCHES support_find_package.patch
- )
-
- vcpkg_configure_cmake(
-diff --git a/ports/rxcpp/support_find_package.patch b/ports/rxcpp/support_find_package.patch
-deleted file mode 100644
-index bb1da2d2d..000000000
---- a/ports/rxcpp/support_find_package.patch
-+++ /dev/null
-@@ -1,32 +0,0 @@
--diff --git a/projects/CMake/CMakeLists.txt b/projects/CMake/CMakeLists.txt
--index 3d0744740..293f187c5 100644
----- a/projects/CMake/CMakeLists.txt
--+++ b/projects/CMake/CMakeLists.txt
--@@ -146,3 +146,27 @@ set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE CACHE BOOL "Don't require all project
--
-- install(DIRECTORY ${RXCPP_DIR}/Rx/v2/src/rxcpp/ DESTINATION include/rxcpp
-- FILES_MATCHING PATTERN "*.hpp")
--+
--+# Here we are exporting TARGETS so that other projects can import rxcpp
--+# just with find_package(rxcpp CONFIG) after rxcpp is installed into system by "make install".
--+add_library(rxcpp INTERFACE)
--+
--+target_include_directories(rxcpp INTERFACE
--+ $
--+ $
--+)
--+
--+install(TARGETS rxcpp EXPORT rxcppConfig)
--+install(EXPORT rxcppConfig DESTINATION share/rxcpp/cmake)
--+
--+# When find_package(rxcpp SOME_VERSION REQUIRED) will be used in third party project
--+# where SOME_VERSION is any version incompatible with ${PROJECT_VERSION} then cmake will generate the error.
--+# It means you don't need track versions manually.
--+include(CMakePackageConfigHelpers)
--+write_basic_package_version_file("${PROJECT_BINARY_DIR}/rxcppConfigVersion.cmake"
--+ VERSION
--+ ${PROJECT_VERSION}
--+ COMPATIBILITY
--+ AnyNewerVersion
--+)
--+install(FILES "${PROJECT_BINARY_DIR}/rxcppConfigVersion.cmake" DESTINATION share/rxcpp/cmake)
---
-2.31.1
-
diff --git a/vcpkg/patches/0002-upgrade_rxcpp_to_4.1.1.patch b/vcpkg/patches/0002-upgrade_rxcpp_to_4.1.1.patch
new file mode 100644
index 00000000000..e3bfeb9c485
--- /dev/null
+++ b/vcpkg/patches/0002-upgrade_rxcpp_to_4.1.1.patch
@@ -0,0 +1,146 @@
+diff --git a/ports/rxcpp/CONTROL b/ports/rxcpp/CONTROL
+deleted file mode 100644
+index b34395f12..000000000
+--- a/ports/rxcpp/CONTROL
++++ /dev/null
+@@ -1,4 +0,0 @@
+-Source: rxcpp
+-Version: 4.1.0-1
+-Homepage: https://github.com/Reactive-Extensions/RxCpp
+-Description: Reactive Extensions for C++
+diff --git a/ports/rxcpp/disable-tests.patch b/ports/rxcpp/disable-tests.patch
+new file mode 100644
+index 000000000..8378c7f0c
+--- /dev/null
++++ b/ports/rxcpp/disable-tests.patch
+@@ -0,0 +1,34 @@
++diff --git a/projects/CMake/CMakeLists.txt b/projects/CMake/CMakeLists.txt
++index 8856aa42e..2b3d57e97 100755
++--- a/projects/CMake/CMakeLists.txt
+++++ b/projects/CMake/CMakeLists.txt
++@@ -11,18 +11,20 @@ get_filename_component(RXCPP_DIR "${RXCPP_DIR}" PATH)
++
++ MESSAGE( STATUS "RXCPP_DIR: " ${RXCPP_DIR} )
++
++-add_subdirectory(${RXCPP_DIR}/Rx/v2/test ${CMAKE_CURRENT_BINARY_DIR}/test)
+++if (NOT ${RXCPP_DISABLE_TESTS_AND_EXAMPLES})
+++ add_subdirectory(${RXCPP_DIR}/Rx/v2/test ${CMAKE_CURRENT_BINARY_DIR}/test)
++
++-add_subdirectory(${RXCPP_DIR}/projects/doxygen ${CMAKE_CURRENT_BINARY_DIR}/projects/doxygen)
+++ add_subdirectory(${RXCPP_DIR}/projects/doxygen ${CMAKE_CURRENT_BINARY_DIR}/projects/doxygen)
++
++-set(EXAMPLES_DIR ${RXCPP_DIR}/Rx/v2/examples)
+++ set(EXAMPLES_DIR ${RXCPP_DIR}/Rx/v2/examples)
++
++-add_subdirectory(${EXAMPLES_DIR}/cep ${CMAKE_CURRENT_BINARY_DIR}/examples/cep)
++-add_subdirectory(${EXAMPLES_DIR}/stop ${CMAKE_CURRENT_BINARY_DIR}/examples/stop)
++-add_subdirectory(${EXAMPLES_DIR}/linesfrombytes ${CMAKE_CURRENT_BINARY_DIR}/examples/linesfrombytes)
++-add_subdirectory(${EXAMPLES_DIR}/println ${CMAKE_CURRENT_BINARY_DIR}/examples/println)
++-add_subdirectory(${EXAMPLES_DIR}/pythagorian ${CMAKE_CURRENT_BINARY_DIR}/examples/pythagorian)
++-add_subdirectory(${EXAMPLES_DIR}/tests ${CMAKE_CURRENT_BINARY_DIR}/examples/tests)
+++ add_subdirectory(${EXAMPLES_DIR}/cep ${CMAKE_CURRENT_BINARY_DIR}/examples/cep)
+++ add_subdirectory(${EXAMPLES_DIR}/stop ${CMAKE_CURRENT_BINARY_DIR}/examples/stop)
+++ add_subdirectory(${EXAMPLES_DIR}/linesfrombytes ${CMAKE_CURRENT_BINARY_DIR}/examples/linesfrombytes)
+++ add_subdirectory(${EXAMPLES_DIR}/println ${CMAKE_CURRENT_BINARY_DIR}/examples/println)
+++ add_subdirectory(${EXAMPLES_DIR}/pythagorian ${CMAKE_CURRENT_BINARY_DIR}/examples/pythagorian)
+++ add_subdirectory(${EXAMPLES_DIR}/tests ${CMAKE_CURRENT_BINARY_DIR}/examples/tests)
+++endif ()
++
++ # The list of RxCpp source files. Please add every new file to this list
++ set(RX_SOURCES
+diff --git a/ports/rxcpp/portfile.cmake b/ports/rxcpp/portfile.cmake
+index b98701132..56c14927b 100644
+--- a/ports/rxcpp/portfile.cmake
++++ b/ports/rxcpp/portfile.cmake
+@@ -1,21 +1,19 @@
+ vcpkg_from_github(
+ OUT_SOURCE_PATH SOURCE_PATH
+ REPO ReactiveX/RxCpp
+- REF v4.1.0
+- SHA512 a92e817ecbdf6f235cae724ada2615af9fa0c243249625d0f2c2f09ff5dd7f53fdabd03a0278fe2995fe27528c5511d71f87b7a6b3d54f73b49b65aef56e32fd
++ REF v4.1.1
++ SHA512 387e1276151a19b62fd1d36b486ff5f3ed28f0f48ae8b00902bf13464d20603f492ecd63ab4444d04293fc3d92a8f7ce3e67a4c68836415c4655331fb6b54edb
+ HEAD_REF master
+- PATCHES support_find_package.patch
++ PATCHES
++ disable-tests.patch # from https://github.com/ReactiveX/RxCpp/pull/574
+ )
+
+-vcpkg_configure_cmake(
+- SOURCE_PATH ${SOURCE_PATH}
+- PREFER_NINJA
+- OPTIONS
+- -DBUILD_TESTS=OFF
++vcpkg_cmake_configure(
++ SOURCE_PATH "${SOURCE_PATH}"
+ )
+
+-vcpkg_install_cmake()
+-vcpkg_fixup_cmake_targets(CONFIG_PATH share/${PORT}/cmake/)
++vcpkg_cmake_install()
++vcpkg_cmake_config_fixup(CONFIG_PATH share/${PORT}/cmake/)
+
+ file(REMOVE_RECURSE ${CURRENT_PACKAGES_DIR}/debug)
+ file(COPY ${SOURCE_PATH}/license.md DESTINATION ${CURRENT_PACKAGES_DIR}/share/${PORT})
+diff --git a/ports/rxcpp/support_find_package.patch b/ports/rxcpp/support_find_package.patch
+deleted file mode 100644
+index bb1da2d2d..000000000
+--- a/ports/rxcpp/support_find_package.patch
++++ /dev/null
+@@ -1,32 +0,0 @@
+-diff --git a/projects/CMake/CMakeLists.txt b/projects/CMake/CMakeLists.txt
+-index 3d0744740..293f187c5 100644
+---- a/projects/CMake/CMakeLists.txt
+-+++ b/projects/CMake/CMakeLists.txt
+-@@ -146,3 +146,27 @@ set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE CACHE BOOL "Don't require all project
+-
+- install(DIRECTORY ${RXCPP_DIR}/Rx/v2/src/rxcpp/ DESTINATION include/rxcpp
+- FILES_MATCHING PATTERN "*.hpp")
+-+
+-+# Here we are exporting TARGETS so that other projects can import rxcpp
+-+# just with find_package(rxcpp CONFIG) after rxcpp is installed into system by "make install".
+-+add_library(rxcpp INTERFACE)
+-+
+-+target_include_directories(rxcpp INTERFACE
+-+ $
+-+ $
+-+)
+-+
+-+install(TARGETS rxcpp EXPORT rxcppConfig)
+-+install(EXPORT rxcppConfig DESTINATION share/rxcpp/cmake)
+-+
+-+# When find_package(rxcpp SOME_VERSION REQUIRED) will be used in third party project
+-+# where SOME_VERSION is any version incompatible with ${PROJECT_VERSION} then cmake will generate the error.
+-+# It means you don't need track versions manually.
+-+include(CMakePackageConfigHelpers)
+-+write_basic_package_version_file("${PROJECT_BINARY_DIR}/rxcppConfigVersion.cmake"
+-+ VERSION
+-+ ${PROJECT_VERSION}
+-+ COMPATIBILITY
+-+ AnyNewerVersion
+-+)
+-+install(FILES "${PROJECT_BINARY_DIR}/rxcppConfigVersion.cmake" DESTINATION share/rxcpp/cmake)
+diff --git a/ports/rxcpp/vcpkg.json b/ports/rxcpp/vcpkg.json
+new file mode 100644
+index 000000000..27a475cd0
+--- /dev/null
++++ b/ports/rxcpp/vcpkg.json
+@@ -0,0 +1,18 @@
++{
++ "name": "rxcpp",
++ "version": "4.1.1",
++ "port-version": 1,
++ "description": "Reactive Extensions for C++",
++ "homepage": "https://github.com/Reactive-Extensions/RxCpp",
++ "license": "Apache-2.0",
++ "dependencies": [
++ {
++ "name": "vcpkg-cmake",
++ "host": true
++ },
++ {
++ "name": "vcpkg-cmake-config",
++ "host": true
++ }
++ ]
++}
diff --git a/vcpkg/patches/0007-upgrade_abseil_to_20230125.patch b/vcpkg/patches/0007-upgrade_abseil_to_20230125.patch
new file mode 100644
index 00000000000..c5ea8742274
--- /dev/null
+++ b/vcpkg/patches/0007-upgrade_abseil_to_20230125.patch
@@ -0,0 +1,171 @@
+diff --git a/ports/abseil/fix-32-bit-arm.patch b/ports/abseil/fix-32-bit-arm.patch
+new file mode 100644
+index 000000000..bc9ba4f14
+--- /dev/null
++++ b/ports/abseil/fix-32-bit-arm.patch
+@@ -0,0 +1,23 @@
++diff --git a/absl/time/internal/cctz/src/zone_info_source.cc b/absl/time/internal/cctz/src/zone_info_source.cc
++index 7209533..5ab5a59 100644
++--- a/absl/time/internal/cctz/src/zone_info_source.cc
+++++ b/absl/time/internal/cctz/src/zone_info_source.cc
++@@ -65,7 +65,7 @@ ZoneInfoSourceFactory zone_info_source_factory __attribute__((weak)) =
++ extern ZoneInfoSourceFactory zone_info_source_factory;
++ extern ZoneInfoSourceFactory default_factory;
++ ZoneInfoSourceFactory default_factory = DefaultFactory;
++-#if defined(_M_IX86)
+++#if defined(_M_IX86) || defined(_M_ARM)
++ #pragma comment( \
++ linker, \
++ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
++@@ -83,8 +83,7 @@ ZoneInfoSourceFactory default_factory = DefaultFactory;
++ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
++ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
++ "@@ZA")
++-#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM) || \
++- defined(_M_ARM64)
+++#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM64)
++ #pragma comment( \
++ linker, \
++ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+diff --git a/ports/abseil/fix-dll-support.patch b/ports/abseil/fix-dll-support.patch
+new file mode 100644
+index 000000000..c06b39588
+--- /dev/null
++++ b/ports/abseil/fix-dll-support.patch
+@@ -0,0 +1,24 @@
++diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
++index c4a41e6..da46613 100644
++--- a/CMake/AbseilDll.cmake
+++++ b/CMake/AbseilDll.cmake
++@@ -787,7 +787,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
++ # Abseil libraries require C++14 as the current minimum standard. When
++ # compiled with C++17 (either because it is the compiler's default or
++ # explicitly requested), then Abseil requires C++17.
++- _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+++ _absl_target_compile_features_if_available(${_dll} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
++ else()
++ # Note: This is legacy (before CMake 3.8) behavior. Setting the
++ # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is
++@@ -797,8 +797,8 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
++ # CXX_STANDARD_REQUIRED does guard against the top-level CMake project
++ # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents
++ # "decaying" to an older standard if the requested one isn't available).
++- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
++- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+++ set_property(TARGET ${_dll} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
+++ set_property(TARGET ${_dll} PROPERTY CXX_STANDARD_REQUIRED ON)
++ endif()
++
++ install(TARGETS ${_dll} EXPORT ${PROJECT_NAME}Targets
+diff --git a/ports/abseil/portfile.cmake b/ports/abseil/portfile.cmake
+index 94a7673c0..366d6ae27 100644
+--- a/ports/abseil/portfile.cmake
++++ b/ports/abseil/portfile.cmake
+@@ -5,51 +5,49 @@ endif()
+ vcpkg_from_github(
+ OUT_SOURCE_PATH SOURCE_PATH
+ REPO abseil/abseil-cpp
+- REF 997aaf3a28308eba1b9156aa35ab7bca9688e9f6 #LTS 20210324
+- SHA512 bdd80a2278eef121e8837791fdebca06e87bfff4adc438c123e0ce11efc42a8bd461edcbbe18c0eee05be2cd6100f9acf8eab3db58ac73322b5852e6ffe7c85b
++ REF 20230125.0
++ SHA512 b3d334215c78b31a2eb10bd9d4a978cd48367866d6daa2065c6c727282bafe19ef7ff5bd7cd4ed5c319d3b04e0711222e08ddbe7621ef1e079fed93a7307112f
+ HEAD_REF master
+ PATCHES
+- # in C++17 mode, use std::any, std::optional, std::string_view, std::variant
+- # instead of the library replacement types
+- # in C++11 mode, force use of library replacement types, otherwise the automatic
+- # detection can cause ABI issues depending on which compiler options
+- # are enabled for consuming user code
+- fix-cxx-standard.patch
++ fix-dll-support.patch
+ )
+
+-vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
+- FEATURES
+- cxx17 ABSL_USE_CXX17
+-)
++# With ABSL_PROPAGATE_CXX_STD=ON abseil automatically detect if it is being
++# compiled with C++14 or C++17, and modifies the installed `absl/base/options.h`
++# header accordingly. This works even if CMAKE_CXX_STANDARD is not set. Abseil
++# uses the compiler default behavior to update `absl/base/options.h` as needed.
++if (ABSL_USE_CXX17 IN_LIST FEATURES)
++ set(ABSL_USE_CXX17_OPTION "-DCMAKE_CXX_STANDARD=17")
++endif ()
+
+-vcpkg_configure_cmake(
+- SOURCE_PATH ${SOURCE_PATH}
+- PREFER_NINJA
++vcpkg_cmake_configure(
++ SOURCE_PATH "${SOURCE_PATH}"
+ DISABLE_PARALLEL_CONFIGURE
+- OPTIONS ${FEATURE_OPTIONS}
++ OPTIONS -DABSL_PROPAGATE_CXX_STD=ON ${ABSL_USE_CXX17_OPTION}
+ )
+
+-vcpkg_install_cmake()
+-vcpkg_fixup_cmake_targets(CONFIG_PATH lib/cmake/absl TARGET_PATH share/absl)
++vcpkg_cmake_install()
++vcpkg_cmake_config_fixup(PACKAGE_NAME absl CONFIG_PATH lib/cmake/absl)
++vcpkg_fixup_pkgconfig()
+
+ vcpkg_copy_pdbs()
+-file(REMOVE_RECURSE ${CURRENT_PACKAGES_DIR}/debug/share
+- ${CURRENT_PACKAGES_DIR}/debug/include
+- ${CURRENT_PACKAGES_DIR}/include/absl/copts
+- ${CURRENT_PACKAGES_DIR}/include/absl/strings/testdata
+- ${CURRENT_PACKAGES_DIR}/include/absl/time/internal/cctz/testdata
++file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/share"
++ "${CURRENT_PACKAGES_DIR}/debug/include"
++ "${CURRENT_PACKAGES_DIR}/include/absl/copts"
++ "${CURRENT_PACKAGES_DIR}/include/absl/strings/testdata"
++ "${CURRENT_PACKAGES_DIR}/include/absl/time/internal/cctz/testdata"
+ )
+
+ if (VCPKG_LIBRARY_LINKAGE STREQUAL dynamic)
+- vcpkg_replace_string(${CURRENT_PACKAGES_DIR}/include/absl/base/config.h
++ vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/include/absl/base/config.h"
+ "#elif defined(ABSL_CONSUME_DLL)" "#elif 1"
+ )
+- vcpkg_replace_string(${CURRENT_PACKAGES_DIR}/include/absl/base/internal/thread_identity.h
++ vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/include/absl/base/internal/thread_identity.h"
+ "&& !defined(ABSL_CONSUME_DLL)" "&& 0"
+ )
+- vcpkg_replace_string(${CURRENT_PACKAGES_DIR}/include/absl/container/internal/hashtablez_sampler.h
++ vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/include/absl/container/internal/hashtablez_sampler.h"
+ "!defined(ABSL_CONSUME_DLL)" "0"
+ )
+ endif()
+
+-file(INSTALL ${SOURCE_PATH}/LICENSE DESTINATION ${CURRENT_PACKAGES_DIR}/share/${PORT} RENAME copyright)
++file(INSTALL "${SOURCE_PATH}/LICENSE" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}" RENAME copyright)
+diff --git a/ports/abseil/vcpkg.json b/ports/abseil/vcpkg.json
+index aaf8773c4..b805a4d5d 100644
+--- a/ports/abseil/vcpkg.json
++++ b/ports/abseil/vcpkg.json
+@@ -1,6 +1,6 @@
+ {
+ "name": "abseil",
+- "version-date": "2021-03-24",
++ "version": "20230125.0",
+ "description": [
+ "an open-source collection designed to augment the C++ standard library.",
+ "Abseil is an open-source collection of C++ library code designed to augment the C++ standard library. The Abseil library code is collected from Google's own C++ code base, has been extensively tested and used in production, and is the same code we depend on in our daily coding lives.",
+@@ -8,7 +8,17 @@
+ "Abseil is not meant to be a competitor to the standard library; we've just found that many of these utilities serve a purpose within our code base, and we now want to provide those resources to the C++ community as a whole."
+ ],
+ "homepage": "https://github.com/abseil/abseil-cpp",
+- "supports": "(x64 | arm64) & (linux | osx | windows)",
++ "license": "Apache-2.0",
++ "dependencies": [
++ {
++ "name": "vcpkg-cmake",
++ "host": true
++ },
++ {
++ "name": "vcpkg-cmake-config",
++ "host": true
++ }
++ ],
+ "features": {
+ "cxx17": {
+ "description": "Enable compiler C++17."
diff --git a/vcpkg/patches/0008-upgrade_benchmark_to_1.7.1.patch b/vcpkg/patches/0008-upgrade_benchmark_to_1.7.1.patch
new file mode 100644
index 00000000000..8bc1ca4acdc
--- /dev/null
+++ b/vcpkg/patches/0008-upgrade_benchmark_to_1.7.1.patch
@@ -0,0 +1,117 @@
+diff --git a/ports/benchmark/CONTROL b/ports/benchmark/CONTROL
+deleted file mode 100644
+index 9a7618385..000000000
+--- a/ports/benchmark/CONTROL
++++ /dev/null
+@@ -1,5 +0,0 @@
+-Source: benchmark
+-Version: 1.5.2
+-Homepage: https://github.com/google/benchmark
+-Description: A library to support the benchmarking of functions, similar to unit-tests.
+-Supports: !(arm|uwp)
+diff --git a/ports/benchmark/fixedBuildingForMac.patch b/ports/benchmark/fixedBuildingForMac.patch
+new file mode 100644
+index 000000000..74d3fd220
+--- /dev/null
++++ b/ports/benchmark/fixedBuildingForMac.patch
+@@ -0,0 +1,26 @@
++From b976cab799c7fb20a5ceadd368431125ac0d99c4 Mon Sep 17 00:00:00 2001
++From: Grzegorz Bazior
++Date: Fri, 31 Mar 2023 16:20:55 +0200
++Subject: [PATCH] Fixed building for MacOS - there were unused variable
++
++Signed-off-by: Your Name
++---
++ CMakeLists.txt | 2 +-
++ 1 file changed, 1 insertion(+), 1 deletion(-)
++
++diff --git a/CMakeLists.txt b/CMakeLists.txt
++index 9ab265e..0cc0e4a 100644
++--- a/CMakeLists.txt
+++++ b/CMakeLists.txt
++@@ -20,7 +20,7 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
++ option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
++ option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
++ option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
++-option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." ON)
+++option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." OFF)
++ option(BENCHMARK_FORCE_WERROR "Build Release candidates with -Werror regardless of compiler issues." OFF)
++
++ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI")
++--
++2.34.1
++
+diff --git a/ports/benchmark/portfile.cmake b/ports/benchmark/portfile.cmake
+index 7f4add94c..36676e59c 100644
+--- a/ports/benchmark/portfile.cmake
++++ b/ports/benchmark/portfile.cmake
+@@ -1,33 +1,29 @@
+-#https://github.com/google/benchmark/issues/661
+-vcpkg_fail_port_install(ON_TARGET "uwp")
+-
+ vcpkg_check_linkage(ONLY_STATIC_LIBRARY)
+
+ vcpkg_from_github(
+ OUT_SOURCE_PATH SOURCE_PATH
+ REPO google/benchmark
+- REF 73d4d5e8d6d449fc8663765a42aa8aeeee844489 # v1.5.2
+- SHA512 b87a7c207eb85187165df8ff99ab1bbf5d38fc2a6d839e267a71987951c94e33b55fd7fbee6f2b59202b0379a7e9705b73b193edaea0b9c742eddf3fcbe5f48e
++ REF v1.7.1
++ SHA512 396af1c1d3eaa2b78c6d23b1472f6088db85a294056ae1c2366dc5c0becdc8f141ba8fc3a235033324ab0a41c2298f5d242ef09b9b6f69d9877de6bcb2062efd
+ HEAD_REF master
++ PATCHES fixedBuildingForMac.patch
+ )
+
+-vcpkg_configure_cmake(
++vcpkg_cmake_configure(
+ SOURCE_PATH ${SOURCE_PATH}
+- PREFER_NINJA
+ OPTIONS
+ -DBENCHMARK_ENABLE_TESTING=OFF
+ )
+
+-vcpkg_install_cmake()
+-
++vcpkg_cmake_install()
+ vcpkg_copy_pdbs()
+
+-vcpkg_fixup_cmake_targets(CONFIG_PATH lib/cmake/benchmark)
++vcpkg_cmake_config_fixup(CONFIG_PATH lib/cmake/benchmark)
+
+-vcpkg_fixup_pkgconfig(SYSTEM_LIBRARIES pthread)
++vcpkg_fixup_pkgconfig()
+
+-file(REMOVE_RECURSE ${CURRENT_PACKAGES_DIR}/debug/include)
+-file(REMOVE_RECURSE ${CURRENT_PACKAGES_DIR}/debug/share)
++file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/include")
++file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/share")
+
+ # Handle copyright
+-file(INSTALL ${SOURCE_PATH}/LICENSE DESTINATION ${CURRENT_PACKAGES_DIR}/share/${PORT} RENAME copyright)
+\ No newline at end of file
++file(INSTALL "${SOURCE_PATH}/LICENSE" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}" RENAME copyright)
+diff --git a/ports/benchmark/vcpkg.json b/ports/benchmark/vcpkg.json
+new file mode 100644
+index 000000000..279c2663d
+--- /dev/null
++++ b/ports/benchmark/vcpkg.json
+@@ -0,0 +1,19 @@
++{
++ "$comment": "https://github.com/google/benchmark/issues/661 describes the missing UWP support upstream",
++ "name": "benchmark",
++ "version-semver": "1.7.1",
++ "description": "A library to support the benchmarking of functions, similar to unit-tests.",
++ "homepage": "https://github.com/google/benchmark",
++ "license": "Apache-2.0",
++ "supports": "!uwp",
++ "dependencies": [
++ {
++ "name": "vcpkg-cmake",
++ "host": true
++ },
++ {
++ "name": "vcpkg-cmake-config",
++ "host": true
++ }
++ ]
++}
diff --git a/vcpkg/patches/0009-upgrade_protobuf_to_3.21.patch b/vcpkg/patches/0009-upgrade_protobuf_to_3.21.patch
new file mode 100644
index 00000000000..dfeafb9287d
--- /dev/null
+++ b/vcpkg/patches/0009-upgrade_protobuf_to_3.21.patch
@@ -0,0 +1,255 @@
+diff --git a/ports/protobuf/compile_options.patch b/ports/protobuf/compile_options.patch
+new file mode 100644
+index 000000000..8cf541774
+--- /dev/null
++++ b/ports/protobuf/compile_options.patch
+@@ -0,0 +1,48 @@
++diff --git a/CMakeLists.txt b/CMakeLists.txt
++index 04cb3303a..608c580be 100644
++--- a/CMakeLists.txt
+++++ b/CMakeLists.txt
++@@ -242,12 +242,12 @@ endif (protobuf_BUILD_SHARED_LIBS)
++ if (MSVC)
++ if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
++ # Build with multiple processes
++- add_definitions(/MP)
+++ add_compile_options(/MP)
++ endif()
++ # Set source file and execution character sets to UTF-8
++- add_definitions(/utf-8)
+++ add_compile_options(/utf-8)
++ # MSVC warning suppressions
++- add_definitions(
+++ add_compile_options(
++ /wd4065 # switch statement contains 'default' but no 'case' labels
++ /wd4244 # 'conversion' conversion from 'type1' to 'type2', possible loss of data
++ /wd4251 # 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
++@@ -262,23 +262,17 @@ if (MSVC)
++ /wd4996 # The compiler encountered a deprecated declaration.
++ )
++ # Allow big object
++- add_definitions(/bigobj)
+++ add_compile_options(/bigobj)
++ string(REPLACE "/" "\\" PROTOBUF_SOURCE_WIN32_PATH ${protobuf_SOURCE_DIR})
++ string(REPLACE "/" "\\" PROTOBUF_BINARY_WIN32_PATH ${protobuf_BINARY_DIR})
++ string(REPLACE "." "," protobuf_RC_FILEVERSION "${protobuf_VERSION}")
++ configure_file(${protobuf_SOURCE_DIR}/cmake/extract_includes.bat.in extract_includes.bat)
++
++ # Suppress linker warnings about files with no symbols defined.
++- set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /ignore:4221")
+++ string(APPEND CMAKE_STATIC_LINKER_FLAGS " /ignore:4221")
++
++- if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
++- # Configure Resource Compiler
++- enable_language(RC)
++- # use English language (0x409) in resource compiler
++- set(rc_flags "/l0x409")
++- # fix rc.exe invocations because of usage of add_definitions()
++- set(CMAKE_RC_COMPILE_OBJECT " ${rc_flags} /fo