diff --git a/.gitignore b/.gitignore index e7a67332996..46a19161c91 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ *.dot *.abi.hpp *.cmake +!CMakeModules/*.cmake *.ninja \#* \.#* @@ -72,6 +73,8 @@ witness_node_data_dir *.pyc *.pyo +Testing/* +build.tar.gz build/* build-debug/* diff --git a/CMakeLists.txt b/CMakeLists.txt index 84277dced15..b6179154e55 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,14 +1,11 @@ -cmake_minimum_required( VERSION 3.5 ) +cmake_minimum_required( VERSION 3.8 ) project( EOSIO ) - +include(CTest) # suppresses DartConfiguration.tcl error enable_testing() -if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) - message(WARNING "CMAKE_INSTALL_PREFIX is set to default path of ${CMAKE_INSTALL_PREFIX}, resetting to ${CMAKE_INSTALL_PREFIX}/eosio") - set(CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}/eosio") -elseif ("${CMAKE_INSTALL_PREFIX}" STREQUAL "/usr/local") - message(WARNING "CMAKE_INSTALL_PREFIX is explicitly set to /usr/local. This is not recommended.") +if ("${CMAKE_INSTALL_PREFIX}" STREQUAL "/usr/local") + message(WARNING "CMAKE_INSTALL_PREFIX is set to /usr/local. This is not recommended.") endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules") @@ -25,13 +22,14 @@ include( InstallDirectoryPermissions ) include( MASSigning ) set( BLOCKCHAIN_NAME "EOSIO" ) -set( CMAKE_CXX_STANDARD 14 ) +set( CMAKE_CXX_STANDARD 17 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 7) -set(VERSION_PATCH 4) +set(VERSION_MINOR 8) +set(VERSION_PATCH 0) +#set(VERSION_SUFFIX develop) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") @@ -48,15 +46,15 @@ set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0) - message(FATAL_ERROR "GCC version must be at least 6.0!") + if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) + message(FATAL_ERROR "GCC version must be at least 7.0!") endif() if ("${CMAKE_GENERATOR}" STREQUAL "Ninja") add_compile_options(-fdiagnostics-color=always) endif() elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.0) - message(FATAL_ERROR "Clang version must be at least 4.0!") + if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) + message(FATAL_ERROR "Clang version must be at least 5.0!") endif() if ("${CMAKE_GENERATOR}" STREQUAL "Ninja") add_compile_options(-fcolor-diagnostics) @@ -102,7 +100,9 @@ IF( WIN32 ) set(Boost_USE_MULTITHREADED ON) set(BOOST_ALL_DYN_LINK OFF) # force dynamic linking for all libraries ENDIF(WIN32) -FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS +### Remove after Boost 1.70 CMake fixes are in place +set( Boost_NO_BOOST_CMAKE ON CACHE STRING "ON or OFF" ) +find_package(Boost 1.67 REQUIRED COMPONENTS date_time filesystem system @@ -111,12 +111,10 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS unit_test_framework iostreams) -# Some new stdlibc++s will #error on <experimental/string_view>; a problem for boost pre-1.69 -if( APPLE AND UNIX ) - add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) -endif() +add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) -set(THREADS_PREFER_PTHREAD_FLAG 1) +set(CMAKE_THREAD_PREFER_PTHREAD TRUE) +set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads) link_libraries(Threads::Threads) @@ -158,11 +156,11 @@ else( WIN32 )
# Apple AND Linux if( APPLE ) # Apple Specific Options Here message( STATUS "Configuring EOSIO on OS X" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-deprecated-declarations" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated-declarations" ) else( APPLE ) # Linux Specific Options Here message( STATUS "Configuring EOSIO on Linux" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall" ) if ( FULL_STATIC_BUILD ) set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") endif ( FULL_STATIC_BUILD ) @@ -190,7 +188,7 @@ set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build EOSIO for code coverage anal if(ENABLE_COVERAGE_TESTING) SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}") - find_program( LCOV_PATH lcov ) + find_program( LCOV_PATH lcov ) find_program( LLVMCOV_PATH llvm-cov ) find_program( GENHTML_PATH NAMES genhtml) endif() diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index ff4a1eca48c..a3cc2342bf9 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -24,17 +24,18 @@ find_package(LLVM 4.0 REQUIRED CONFIG) link_directories(${LLVM_LIBRARY_DIR}) -set( CMAKE_CXX_STANDARD 14 ) +set( CMAKE_CXX_STANDARD 17 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON ) if ( APPLE ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-deprecated-declarations" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated-declarations" ) else ( APPLE ) - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") - set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +### Remove after Boost 1.70 CMake fixes are in place +set( Boost_NO_BOOST_CMAKE ON CACHE STRING "ON or OFF" ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) find_package(Boost 1.67 REQUIRED COMPONENTS date_time @@ -73,7 +74,7 @@ find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir DOC "Path to the GMP library" ) -macro(add_eosio_test test_name) +macro(add_eosio_test_executable test_name) add_executable( ${test_name} ${ARGN} ) target_link_libraries( ${test_name} ${LLVM} @@ -128,7 +129,6 @@ macro(add_eosio_test test_name) ${PLATFORM_SPECIFIC_LIBS} ) - #### TODO /usr/local/include is a hack for fc and some other includes target_include_directories( ${test_name} PUBLIC ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @@ -137,10 +137,13 @@ macro(add_eosio_test test_name) @CMAKE_INSTALL_FULL_INCLUDEDIR@/wasm-jit @CMAKE_INSTALL_FULL_INCLUDEDIR@/softfloat ) +endmacro() + +macro(add_eosio_test test_name) + add_eosio_test_executable( ${test_name} ${ARGN} ) + #This will generate a test with the default runtime + add_test(NAME ${test_name} COMMAND ${test_name} --report_level=detailed --color_output) + #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. 
unit_test -- --verbose - add_test(NAME ${test_name}_binaryen COMMAND ${test_name} - --report_level=detailed --color_output -- --binaryen) - add_test(NAME ${test_name}_wavm COMMAND ${test_name} - --report_level=detailed --color_output --catch_system_errors=no -- --wavm) endmacro() diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 053ad6fa4f4..a1ea88182a9 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -22,17 +22,18 @@ find_package(LLVM 4.0 REQUIRED CONFIG) link_directories(${LLVM_LIBRARY_DIR}) -set( CMAKE_CXX_STANDARD 14 ) +set( CMAKE_CXX_STANDARD 17 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON ) if ( APPLE ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-deprecated-declarations" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated-declarations" ) else ( APPLE ) - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") - set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +### Remove after Boost 1.70 CMake fixes are in place +set( Boost_NO_BOOST_CMAKE ON CACHE STRING "ON or OFF" ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) find_package(Boost 1.67 REQUIRED COMPONENTS date_time @@ -72,7 +73,7 @@ find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir DOC "Path to the GMP library" ) -macro(add_eosio_test test_name) +macro(add_eosio_test_executable test_name) add_executable( ${test_name} ${ARGN} ) target_link_libraries( ${test_name} ${LLVM} @@ -138,13 +139,14 @@ macro(add_eosio_test test_name) @CMAKE_SOURCE_DIR@/libraries/chainbase/include @CMAKE_SOURCE_DIR@/libraries/testing/include @CMAKE_SOURCE_DIR@/libraries/wasm-jit/Include ) - # +endmacro() + +macro(add_eosio_test test_name) + add_eosio_test_executable( ${test_name} ${ARGN} ) + #This will generate a test with the default runtime + add_test(NAME ${test_name} COMMAND ${test_name} --report_level=detailed --color_output) #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose - add_test(NAME ${test_name}_binaryen COMMAND ${test_name} - --report_level=detailed --color_output -- --binaryen) - add_test(NAME ${test_name}_wavm COMMAND ${test_name} - --report_level=detailed --color_output --catch_system_errors=no -- --wavm) endmacro() if(ENABLE_COVERAGE_TESTING) diff --git a/Docker/Dockerfile b/Docker/Dockerfile deleted file mode 100644 index a763f59c398..00000000000 --- a/Docker/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM eosio/builder as builder -ARG branch=master -ARG symbol=SYS - -RUN apt-get update -y && apt-get install -y libcurl4-openssl-dev libusb-1.0-0-dev -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ - && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ - && cmake -H. 
-B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ - && cmake --build /tmp/build --target install - - -FROM ubuntu:18.04 - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl ca-certificates && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/lib/* /usr/local/lib/ -COPY --from=builder /tmp/build/bin /opt/eosio/bin -COPY --from=builder /eos/Docker/config.ini / -COPY --from=builder /etc/eosio-version /etc -COPY --from=builder /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh -ENV EOSIO_ROOT=/opt/eosio -RUN chmod +x /opt/eosio/bin/nodeosd.sh -ENV LD_LIBRARY_PATH /usr/local/lib -ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/Docker/README.md b/Docker/README.md deleted file mode 100644 index a08ec40b683..00000000000 --- a/Docker/README.md +++ /dev/null @@ -1,162 +0,0 @@ -# Run in docker - -Simple and fast setup of EOS.IO on Docker is also available. - -## Install Dependencies - -- [Docker](https://docs.docker.com) Docker 17.05 or higher is required -- [docker-compose](https://docs.docker.com/compose/) version >= 1.10.0 - -## Docker Requirement - -- At least 7GB RAM (Docker -> Preferences -> Advanced -> Memory -> 7GB or above) -- If the build below fails, make sure you've adjusted Docker Memory settings and try again. - -## Build eos image - -```bash -git clone https://github.com/EOSIO/eos.git --recursive --depth 1 -cd eos/Docker -docker build . -t eosio/eos -``` - -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.4 tag, you could do the following: - -```bash -docker build -t eosio/eos:v1.7.4 --build-arg branch=v1.7.4 . -``` - -By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. - -```bash -docker build -t eosio/eos --build-arg symbol= . -``` - -## Start nodeos docker container only - -```bash -docker run --name nodeos -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 -``` - -By default, all data is persisted in a docker volume. It can be deleted if the data is outdated or corrupted: - -```bash -$ docker inspect --format '{{ range .Mounts }}{{ .Name }} {{ end }}' nodeos -fdc265730a4f697346fa8b078c176e315b959e79365fc9cbd11f090ea0cb5cbc -$ docker volume rm fdc265730a4f697346fa8b078c176e315b959e79365fc9cbd11f090ea0cb5cbc -``` - -Alternately, you can directly mount host directory into the container - -```bash -docker run --name nodeos -v /path-to-data-dir:/opt/eosio/bin/data-dir -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 -``` - -## Get chain info - -```bash -curl http://127.0.0.1:8888/v1/chain/get_info -``` - -## Start both nodeos and keosd containers - -```bash -docker volume create --name=nodeos-data-volume -docker volume create --name=keosd-data-volume -docker-compose up -d -``` - -After `docker-compose up -d`, two services named `nodeosd` and `keosd` will be started. nodeos service would expose ports 8888 and 9876 to the host. 
keosd service does not expose any port to the host; it is only accessible to cleos when cleos is running inside the keosd container, as described in the "Execute cleos commands" section. - -### Execute cleos commands - -You can run the `cleos` commands via a bash alias. - -```bash -alias cleos='docker-compose exec keosd /opt/eosio/bin/cleos -u http://nodeosd:8888 --wallet-url http://localhost:8900' -cleos get info -cleos get account inita -``` - -Upload the sample exchange contract: - -```bash -cleos set contract exchange contracts/exchange/ -``` - -If you don't need keosd afterwards, you can stop the keosd service with: - -```bash -docker-compose stop keosd -``` - -### Develop/Build custom contracts - -Because the eosio/eos image does not contain the required dependencies for contract development (by design, to keep the image size small), you will need to use the eosio/eos-dev image. This image contains both the required binaries and dependencies to build contracts using eosiocpp. - -You can either use the image available on [Docker Hub](https://hub.docker.com/r/eosio/eos-dev/) or navigate into the dev folder and build the image manually. - -```bash -cd dev -docker build -t eosio/eos-dev . -``` - -### Change default configuration - -You can use a docker-compose override file to change the default configuration. For example, create an alternate config file `config2.ini` and a `docker-compose.override.yml` with the following content. - -```yaml -version: "2" - -services: - nodeos: - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - - ./config2.ini:/opt/eosio/bin/data-dir/config.ini -``` - -Then restart your docker containers as follows: - -```bash -docker-compose down -docker-compose up -``` - -### Clear data-dir - -The data volume created by docker-compose can be deleted as follows: - -```bash -docker volume rm nodeos-data-volume -docker volume rm keosd-data-volume -``` - -### Docker Hub - -Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019. - -### EOSIO Testnet - -We can easily set up an EOSIO local testnet using docker images. Just run the following commands: - -Note: if you want to use the mongo db plugin, you have to enable it in your `data-dir/config.ini` first. - -``` -# create volume -docker volume create --name=nodeos-data-volume -docker volume create --name=keosd-data-volume -# pull images and start containers -docker-compose -f docker-compose-eosio-latest.yaml up -d -# get chain info -curl http://127.0.0.1:8888/v1/chain/get_info -# get logs -docker-compose logs -f nodeosd -# stop containers -docker-compose -f docker-compose-eosio-latest.yaml down -``` - -The `blocks` data is stored under `--data-dir` by default, and the wallet files under `--wallet-dir`; you can change both as needed. - -### About MongoDB Plugin - -The mongodb plugin is currently disabled in `config.ini` by default; enable it manually in `config.ini`, or mount a `config.ini` file to `/opt/eosio/bin/data-dir/config.ini` in the docker-compose file.
diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile deleted file mode 100644 index cac09937cd0..00000000000 --- a/Docker/builder/Dockerfile +++ /dev/null @@ -1,58 +0,0 @@ -FROM ubuntu:18.04 - -LABEL author="xiaobo " maintainer="Xiaobo Huang-Ming Huang " version="0.1.1" \ - description="This is a base image for building eosio/eos" - -RUN echo 'APT::Install-Recommends 0;' >> /etc/apt/apt.conf.d/01norecommends \ - && echo 'APT::Install-Suggests 0;' >> /etc/apt/apt.conf.d/01norecommends \ - && apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo wget curl net-tools ca-certificates unzip gnupg - -RUN echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic main" >> /etc/apt/sources.list.d/llvm.list \ - && wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - \ - && apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y git-core automake autoconf libtool build-essential pkg-config libtool \ - mpi-default-dev libicu-dev python-dev python3-dev libbz2-dev zlib1g-dev libssl-dev libgmp-dev \ - clang-4.0 lldb-4.0 lld-4.0 llvm-4.0-dev libclang-4.0-dev ninja-build \ - && rm -rf /var/lib/apt/lists/* - -RUN update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-4.0/bin/clang 400 \ - && update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-4.0/bin/clang++ 400 - -RUN wget https://cmake.org/files/v3.9/cmake-3.9.6-Linux-x86_64.sh \ - && bash cmake-3.9.6-Linux-x86_64.sh --prefix=/usr/local --exclude-subdir --skip-license \ - && rm cmake-3.9.6-Linux-x86_64.sh - -ENV CC clang -ENV CXX clang++ - -RUN wget https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -O - | tar -xj \ - && cd boost_1_67_0 \ - && ./bootstrap.sh --prefix=/usr/local \ - && echo 'using clang : 4.0 : clang++-4.0 ;' >> project-config.jam \ - && ./b2 -d0 -j$(nproc) --with-thread --with-date_time --with-system --with-filesystem --with-program_options \ - --with-signals --with-serialization --with-chrono --with-test --with-context --with-locale --with-coroutine --with-iostreams toolset=clang link=static install \ - && cd .. && rm -rf boost_1_67_0 - -RUN wget https://github.com/mongodb/mongo-c-driver/releases/download/1.10.2/mongo-c-driver-1.10.2.tar.gz -O - | tar -xz \ - && cd mongo-c-driver-1.10.2 \ - && mkdir cmake-build && cd cmake-build \ - && cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DENABLE_BSON=ON \ - -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j$(nproc) \ - && make install \ - && cd ../../ && rm -rf mongo-c-driver-1.10.2 - -RUN git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/llvm.git \ - && git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/clang.git llvm/tools/clang \ - && cd llvm \ - && cmake -H. -Bbuild -GNinja -DCMAKE_INSTALL_PREFIX=/opt/wasm -DLLVM_TARGETS_TO_BUILD= -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly -DCMAKE_BUILD_TYPE=Release \ - && cmake --build build --target install \ - && cd .. && rm -rf llvm - -RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-driver \ - && cd mongo-cxx-driver/build \ - && cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. 
\ - && make -j$(nproc) \ - && make install \ - && cd ../../ && rm -rf mongo-cxx-driver diff --git a/Docker/cleos.sh b/Docker/cleos.sh deleted file mode 100755 index 36246d27545..00000000000 --- a/Docker/cleos.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# Usage: -# Go into cmd loop: sudo ./cleos.sh -# Run single cmd: sudo ./cleos.sh - -PREFIX="docker-compose exec nodeosd cleos" -if [ -z $1 ] ; then - while : - do - read -e -p "cleos " cmd - history -s "$cmd" - $PREFIX $cmd - done -else - $PREFIX "$@" -fi diff --git a/Docker/config.ini b/Docker/config.ini deleted file mode 100644 index 3dd9181f359..00000000000 --- a/Docker/config.ini +++ /dev/null @@ -1,157 +0,0 @@ -# the endpoint upon which to listen for incoming connections (eosio::bnet_plugin) -bnet-endpoint = 0.0.0.0:4321 - -# the number of threads to use to process network messages (eosio::bnet_plugin) -# bnet-threads = - -# remote endpoint of other node to connect to; Use multiple bnet-connect options as needed to compose a network (eosio::bnet_plugin) -# bnet-connect = - -# this peer will request no pending transactions from other nodes (eosio::bnet_plugin) -bnet-no-trx = false - -# the location of the blocks directory (absolute path or relative to application data dir) (eosio::chain_plugin) -blocks-dir = "blocks" - -# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints. (eosio::chain_plugin) -# checkpoint = - -# Override default WASM runtime (eosio::chain_plugin) -# wasm-runtime = - -# Maximum size (in MB) of the chain state database (eosio::chain_plugin) -chain-state-db-size-mb = 8192 - -# Maximum size (in MB) of the reversible blocks database (eosio::chain_plugin) -reversible-blocks-db-size-mb = 340 - -# print contract's output to console (eosio::chain_plugin) -contracts-console = false - -# Account added to actor whitelist (may specify multiple times) (eosio::chain_plugin) -# actor-whitelist = - -# Account added to actor blacklist (may specify multiple times) (eosio::chain_plugin) -# actor-blacklist = - -# Contract account added to contract whitelist (may specify multiple times) (eosio::chain_plugin) -# contract-whitelist = - -# Contract account added to contract blacklist (may specify multiple times) (eosio::chain_plugin) -# contract-blacklist = - -# Track actions which match receiver:action:actor. Actor may be blank to include all. Receiver and Action may not be blank. (eosio::history_plugin) -# filter-on = - -# PEM encoded trusted root certificate (or path to file containing one) used to validate any TLS connections made. (may specify multiple times) -# (eosio::http_client_plugin) -# https-client-root-cert = - -# true: validate that the peer certificates are valid and trusted, false: ignore cert errors (eosio::http_client_plugin) -https-client-validate-peers = 1 - -# The local IP and port to listen for incoming http connections; set blank to disable. (eosio::http_plugin) -http-server-address = 0.0.0.0:8888 - -# The local IP and port to listen for incoming https connections; leave blank to disable. (eosio::http_plugin) -# https-server-address = - -# Filename with the certificate chain to present on https connections. PEM format. Required for https. (eosio::http_plugin) -# https-certificate-chain-file = - -# Filename with https private key in PEM format. Required for https (eosio::http_plugin) -# https-private-key-file = - -# Specify the Access-Control-Allow-Origin to be returned on each request. 
(eosio::http_plugin) -# access-control-allow-origin = - -# Specify the Access-Control-Allow-Headers to be returned on each request. (eosio::http_plugin) -# access-control-allow-headers = - -# Specify the Access-Control-Max-Age to be returned on each request. (eosio::http_plugin) -# access-control-max-age = - -# Specify if Access-Control-Allow-Credentials: true should be returned on each request. (eosio::http_plugin) -access-control-allow-credentials = false - -# The actual host:port used to listen for incoming p2p connections. (eosio::net_plugin) -p2p-listen-endpoint = 0.0.0.0:9876 - -# An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. (eosio::net_plugin) -# p2p-server-address = - -# The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network. (eosio::net_plugin) -# p2p-peer-address = - -# Maximum number of client nodes from any single IP address (eosio::net_plugin) -p2p-max-nodes-per-host = 1 - -# The name supplied to identify this node amongst the peers. (eosio::net_plugin) -agent-name = "EOS Test Agent" - -# Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined. (eosio::net_plugin) -allowed-connection = any - -# Optional public key of peer allowed to connect. May be used multiple times. (eosio::net_plugin) -# peer-key = - -# Tuple of [PublicKey, WIF private key] (may specify multiple times) (eosio::net_plugin) -# peer-private-key = - -# Maximum number of clients from which connections are accepted, use 0 for no limit (eosio::net_plugin) -max-clients = 25 - -# number of seconds to wait before cleaning up dead connections (eosio::net_plugin) -connection-cleanup-period = 30 - -# True to require exact match of peer network version. (eosio::net_plugin) -network-version-match = 0 - -# number of blocks to retrieve in a chunk from any individual peer during synchronization (eosio::net_plugin) -sync-fetch-span = 100 - -# Enable block production, even if the chain is stale. (eosio::producer_plugin) -enable-stale-production = false - -# Start this node in a state where production is paused (eosio::producer_plugin) -pause-on-startup = false - -# Limits the maximum time (in milliseconds) that a pushed transaction's code is allowed to execute before being considered invalid (eosio::producer_plugin) -max-transaction-time = 30 - -# Limits the maximum age (in seconds) of the DPOS Irreversible Block for a chain this node will produce blocks on (use negative value to indicate unlimited) (eosio::producer_plugin) -max-irreversible-block-age = -1 - -# ID of producer controlled by this node (e.g.
inita; may specify multiple times) (eosio::producer_plugin) -# producer-name = -producer-name = eosio - -# (DEPRECATED - Use signature-provider instead) Tuple of [public key, WIF private key] (may specify multiple times) (eosio::producer_plugin) -# private-key = - -# Key=Value pairs in the form <public-key>=<provider-spec> -# Where: -# <public-key> is a string form of a valid EOSIO public key -# -# <provider-spec> is a string in the form <provider-type>:<data> -# -# <provider-type> is KEY, or KEOSD -# -# KEY: <data> is a string form of a valid EOSIO private key which maps to the provided public key -# -# KEOSD: <data> is the URL where keosd is available and the appropriate wallet(s) are unlocked (eosio::producer_plugin) -signature-provider = EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3 - -# Limits the maximum time (in milliseconds) that is allowed for sending blocks to a keosd provider for signing (eosio::producer_plugin) -keosd-provider-timeout = 5 - -# Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block) (eosio::txn_test_gen_plugin) -txn-reference-block-lag = 0 - -# eosio key that will be imported automatically when a wallet is created. (eosio::wallet_plugin) -# eosio-key = - -# Plugin(s) to enable, may be specified multiple times -# plugin = -plugin = eosio::chain_api_plugin -plugin = eosio::history_api_plugin diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile deleted file mode 100644 index f2dea74ac6c..00000000000 --- a/Docker/dev/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM eosio/builder -ARG branch=master -ARG symbol=SYS - -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ - && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ - && cmake -H. -B"/opt/eosio" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ - && cmake --build /opt/eosio --target install \ - && cp /eos/Docker/config.ini / && ln -s /opt/eosio/contracts /contracts && cp /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh && ln -s /eos/tutorials /tutorials - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl ca-certificates vim psmisc python3-pip && rm -rf /var/lib/apt/lists/* -RUN pip3 install numpy -ENV EOSIO_ROOT=/opt/eosio -RUN chmod +x /opt/eosio/bin/nodeosd.sh -ENV LD_LIBRARY_PATH /usr/local/lib -ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/Docker/docker-compose-latest.yml b/Docker/docker-compose-latest.yml deleted file mode 100644 index 7384e230cb8..00000000000 --- a/Docker/docker-compose-latest.yml +++ /dev/null @@ -1,33 +0,0 @@ -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - cap_add: - - IPC_LOCK - stop_grace_period: 10m - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=keosd:8900 --http-alias=localhost:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - stop_grace_period: 10m - -volumes: - nodeos-data-volume: - external: true -
keosd-data-volume: - external: true diff --git a/Docker/docker-compose.yml b/Docker/docker-compose.yml deleted file mode 100755 index a00ffffa4e4..00000000000 --- a/Docker/docker-compose.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: "3" - -services: - builder: - build: - context: builder - image: eosio/builder - - nodeosd: - build: - context: . - image: eosio/eos - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - cap_add: - - IPC_LOCK - stop_grace_period: 10m - - keosd: - image: eosio/eos - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=keosd:8900 --http-alias=localhost:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - stop_grace_period: 10m - -volumes: - nodeos-data-volume: - external: true - keosd-data-volume: - external: true diff --git a/Docker/nodeosd.sh b/Docker/nodeosd.sh deleted file mode 100755 index 870548d6b6b..00000000000 --- a/Docker/nodeosd.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -cd /opt/eosio/bin - -if [ ! -d "/opt/eosio/bin/data-dir" ]; then - mkdir /opt/eosio/bin/data-dir -fi - -if [ -f '/opt/eosio/bin/data-dir/config.ini' ]; then - echo - else - cp /config.ini /opt/eosio/bin/data-dir -fi - -if [ -d '/opt/eosio/bin/data-dir/contracts' ]; then - echo - else - cp -r /contracts /opt/eosio/bin/data-dir -fi - -while :; do - case $1 in - --config-dir=?*) - CONFIG_DIR=${1#*=} - ;; - *) - break - esac - shift -done - -if [ ! "$CONFIG_DIR" ]; then - CONFIG_DIR="--config-dir=/opt/eosio/bin/data-dir" -else - CONFIG_DIR="" -fi - -exec /opt/eosio/bin/nodeos $CONFIG_DIR "$@" diff --git a/HEADER b/HEADER deleted file mode 100644 index ff55f97b69f..00000000000 --- a/HEADER +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ diff --git a/README.md b/README.md index 974e8ef63a7..6dca7ff2d2f 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,11 @@ Block.one is neither launching nor operating any initial public blockchains base There is no public testnet running currently. 
-**If you have previously installed EOSIO, please run the `eosio_uninstall` script (it is in the directory where you cloned EOSIO) before downloading and using the binary releases.** +--- + +**If you used our build scripts to install eosio, [please be sure to uninstall](#build-script-uninstall) before using our packages.** + +--- #### Mac OS X Brew Install ```sh @@ -37,48 +41,44 @@ $ brew install eosio ```sh $ brew remove eosio ``` -#### Ubuntu 18.04 Debian Package Install + +#### Ubuntu 18.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.4/eosio_1.7.4-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.4-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.8.0-1-ubuntu-18.04_amd64.deb ``` -#### Ubuntu 16.04 Debian Package Install +#### Ubuntu 16.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.4/eosio_1.7.4-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.4-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.8.0-1-ubuntu-16.04_amd64.deb ``` -#### Debian Package Uninstall +#### Ubuntu Package Uninstall ```sh $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.4/eosio-1.7.4-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.4-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio-1.8.0-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.8.0-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh $ sudo yum remove eosio ``` -#### Fedora RPM Package Install -```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.4/eosio-1.7.4-1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.4-1.fc27.x86_64.rpm -``` -#### Fedora RPM Package Uninstall -```sh -$ sudo yum remove eosio -``` + +#### Build Script Uninstall + +If you have previously installed EOSIO using build scripts, you may execute `./scripts/eosio_uninstall.sh` to uninstall. +- Passing `--force` will answer yes to all prompts +- Passing `--full` will remove data directories (be very careful with this) ## Supported Operating Systems EOSIO currently supports the following operating systems: -1. Amazon 2017.09 and higher -2. Centos 7 -3. Fedora 25 and higher (Fedora 27 recommended) -4. Mint 18 -5. Ubuntu 16.04 -6. Ubuntu 18.04 -7. MacOS Darwin 10.12 and higher (MacOS 10.14.x recommended) +1. Amazon Linux 2 +2. CentOS 7 +3. Ubuntu 16.04 +4. Ubuntu 18.04 +5. MacOS 10.14 (Mojave) ## Resources 1. 
[Website](https://eos.io) diff --git a/libraries/appbase b/libraries/appbase index 737df2c70b0..d864831e7b1 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 737df2c70b0b5467ce928d97457985c852f7850e +Subproject commit d864831e7b12639e9f63fe721b9d4ae225e03fb0 diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 2c430fecea0..254d462c5ed 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -41,12 +41,17 @@ add_library( eosio_chain webassembly/wabt.cpp # get_config.cpp -# global_property_object.cpp # # contracts/chain_initializer.cpp - + trace.cpp transaction_metadata.cpp + protocol_state_object.cpp + protocol_feature_activation.cpp + protocol_feature_manager.cpp + genesis_intrinsics.cpp + whitelisted_intrinsics.cpp + thread_utils.cpp ${HEADERS} ) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 1beb647eed6..dc7687cf05c 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -29,68 +30,121 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { } } -void apply_context::exec_one( action_trace& trace ) +apply_context::apply_context(controller& con, transaction_context& trx_ctx, uint32_t action_ordinal, uint32_t depth) +:control(con) +,db(con.mutable_db()) +,trx_context(trx_ctx) +,recurse_depth(depth) +,first_receiver_action_ordinal(action_ordinal) +,action_ordinal(action_ordinal) +,idx64(*this) +,idx128(*this) +,idx256(*this) +,idx_double(*this) +,idx_long_double(*this) +{ + action_trace& trace = trx_ctx.get_action_trace(action_ordinal); + act = &trace.act; + receiver = trace.receiver; + context_free = trace.context_free; +} + +void apply_context::exec_one() { auto start = fc::time_point::now(); action_receipt r; r.receiver = receiver; - r.act_digest = digest_type::hash(act); - - trace.trx_id = trx_context.id; - trace.block_num = control.pending_block_state()->block_num; - trace.block_time = control.pending_block_time(); - trace.producer_block_id = control.pending_producer_block_id(); - trace.act = act; - trace.context_free = context_free; + r.act_digest = digest_type::hash(*act); const auto& cfg = control.get_global_properties().configuration; + const account_metadata_object* receiver_account = nullptr; try { try { - const auto& a = control.get_account( receiver ); - privileged = a.privileged; - auto native = control.find_apply_handler( receiver, act.account, act.name ); + receiver_account = &db.get( receiver ); + privileged = receiver_account->is_privileged(); + auto native = control.find_apply_handler( receiver, act->account, act->name ); if( native ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); + control.check_action_list( act->account, act->name ); } (*native)( *this ); } - if( a.code.size() > 0 - && !(act.account == config::system_account_name && act.name == N( setcode ) && - receiver == config::system_account_name) ) { + if( ( receiver_account->code_hash != digest_type() ) && + ( !( act->account == config::system_account_name + && act->name == N( setcode ) + && receiver == config::system_account_name ) + || control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) + ) + ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { control.check_contract_list( 
receiver ); - control.check_action_list( act.account, act.name ); + control.check_action_list( act->account, act->name ); } try { - control.get_wasm_interface().apply( a.code_version, a.code, *this ); + control.get_wasm_interface().apply( receiver_account->code_hash, receiver_account->vm_type, receiver_account->vm_version, *this ); } catch( const wasm_exit& ) {} } - } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) - } catch( fc::exception& e ) { - trace.receipt = r; // fill with known data + + if( !privileged && control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) { + const size_t checktime_interval = 10; + size_t counter = 0; + bool not_in_notify_context = (receiver == act->account); + const auto end = _account_ram_deltas.end(); + for( auto itr = _account_ram_deltas.begin(); itr != end; ++itr, ++counter ) { + if( counter == checktime_interval ) { + trx_context.checktime(); + counter = 0; + } + if( itr->delta > 0 && itr->account != receiver ) { + EOS_ASSERT( not_in_notify_context, unauthorized_ram_usage_increase, + "unprivileged contract cannot increase RAM usage of another account within a notify context: ${account}", + ("account", itr->account) + ); + EOS_ASSERT( has_authorization( itr->account ), unauthorized_ram_usage_increase, + "unprivileged contract cannot increase RAM usage of another account that has not authorized the action: ${account}", + ("account", itr->account) + ); + } + } + } + } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) + } catch( const fc::exception& e ) { + action_trace& trace = trx_context.get_action_trace( action_ordinal ); + trace.error_code = controller::convert_exception_to_error_code( e ); trace.except = e; finalize_trace( trace, start ); throw; } + // Note: It should not be possible for receiver_account to be invalidated because: + // * a pointer to an object in a chainbase index is not invalidated if other objects in that index are modified, removed, or added; + // * a pointer to an object in a chainbase index is not invalidated if the fields of that object are modified; + // * and, the *receiver_account object itself cannot be removed because accounts cannot be deleted in EOSIO. 
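// [Editor's sketch, not part of the diff] The new RAM-restrictions loop above
// amortizes deadline checks: trx_context.checktime() is invoked once every
// `checktime_interval` iterations rather than on every RAM delta. A minimal
// standalone illustration of that pattern, assuming hypothetical names
// (ram_delta, deadline_check) that do not appear in this diff:
#include <cstddef>
#include <vector>
struct ram_delta { long long delta; };
// deadline_check stands in for trx_context.checktime(); assume it throws once
// the transaction deadline has passed.
void validate_deltas( const std::vector<ram_delta>& deltas, void (*deadline_check)() ) {
   const std::size_t checktime_interval = 10; // same constant as in the diff
   std::size_t counter = 0;
   for( auto itr = deltas.begin(); itr != deltas.end(); ++itr, ++counter ) {
      if( counter == checktime_interval ) { deadline_check(); counter = 0; }
      // the per-delta EOS_ASSERT checks would run here
   }
}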
+ r.global_sequence = next_global_sequence(); - r.recv_sequence = next_recv_sequence( receiver ); + r.recv_sequence = next_recv_sequence( *receiver_account ); - const auto& account_sequence = db.get(act.account); - r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above - r.abi_sequence = account_sequence.abi_sequence; // could be modified by action execution above + const account_metadata_object* first_receiver_account = nullptr; + if( act->account == receiver ) { + first_receiver_account = receiver_account; + } else { + first_receiver_account = &db.get(act->account); + } + + r.code_sequence = first_receiver_account->code_sequence; // could be modified by action execution above + r.abi_sequence = first_receiver_account->abi_sequence; // could be modified by action execution above - for( const auto& auth : act.authorization ) { + for( const auto& auth : act->authorization ) { r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); } + action_trace& trace = trx_context.get_action_trace( action_ordinal ); trace.receipt = r; - trx_context.executed.emplace_back( move(r) ); + trx_context.executed.emplace_back( std::move(r) ); finalize_trace( trace, start ); @@ -104,20 +158,19 @@ void apply_context::finalize_trace( action_trace& trace, const fc::time_point& s trace.account_ram_deltas = std::move( _account_ram_deltas ); _account_ram_deltas.clear(); - trace.console = _pending_console_output.str(); - reset_console(); + trace.console = std::move( _pending_console_output ); + _pending_console_output.clear(); trace.elapsed = fc::time_point::now() - start; } -void apply_context::exec( action_trace& trace ) +void apply_context::exec() { - _notified.push_back(receiver); - exec_one( trace ); + _notified.emplace_back( receiver, action_ordinal ); + exec_one(); for( uint32_t i = 1; i < _notified.size(); ++i ) { - receiver = _notified[i]; - trace.inline_traces.emplace_back( ); - exec_one( trace.inline_traces.back() ); + std::tie( receiver, action_ordinal ) = _notified[i]; + exec_one(); } if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { @@ -125,14 +178,12 @@ void apply_context::exec( action_trace& trace ) transaction_exception, "max inline action depth per transaction reached" ); } - for( const auto& inline_action : _cfa_inline_actions ) { - trace.inline_traces.emplace_back(); - trx_context.dispatch_action( trace.inline_traces.back(), inline_action, inline_action.account, true, recurse_depth + 1 ); + for( uint32_t ordinal : _cfa_inline_actions ) { + trx_context.execute_action( ordinal, recurse_depth + 1 ); } - for( const auto& inline_action : _inline_actions ) { - trace.inline_traces.emplace_back(); - trx_context.dispatch_action( trace.inline_traces.back(), inline_action, inline_action.account, false, recurse_depth + 1 ); + for( uint32_t ordinal : _inline_actions ) { + trx_context.execute_action( ordinal, recurse_depth + 1 ); } } /// exec() @@ -142,9 +193,8 @@ bool apply_context::is_account( const account_name& account )const { } void apply_context::require_authorization( const account_name& account ) { - for( uint32_t i=0; i < act.authorization.size(); i++ ) { - if( act.authorization[i].actor == account ) { - used_authorizations[i] = true; + for( uint32_t i=0; i < act->authorization.size(); i++ ) { + if( act->authorization[i].actor == account ) { return; } } @@ -152,7 +202,7 @@ void apply_context::require_authorization( const account_name& account ) { } bool apply_context::has_authorization( const account_name& account )const { - for( const 
auto& auth : act.authorization ) + for( const auto& auth : act->authorization ) if( auth.actor == account ) return true; return false; @@ -160,10 +210,9 @@ bool apply_context::has_authorization( const account_name& account )const { void apply_context::require_authorization(const account_name& account, const permission_name& permission) { - for( uint32_t i=0; i < act.authorization.size(); i++ ) - if( act.authorization[i].actor == account ) { - if( act.authorization[i].permission == permission ) { - used_authorizations[i] = true; + for( uint32_t i=0; i < act->authorization.size(); i++ ) + if( act->authorization[i].actor == account ) { + if( act->authorization[i].permission == permission ) { return; } } @@ -172,15 +221,18 @@ void apply_context::require_authorization(const account_name& account, } bool apply_context::has_recipient( account_name code )const { - for( auto a : _notified ) - if( a == code ) + for( const auto& p : _notified ) + if( p.first == code ) return true; return false; } void apply_context::require_recipient( account_name recipient ) { if( !has_recipient(recipient) ) { - _notified.push_back(recipient); + _notified.emplace_back( + recipient, + schedule_action( action_ordinal, recipient, false ) + ); } } @@ -208,9 +260,9 @@ void apply_context::execute_inline( action&& a ) { bool enforce_actor_whitelist_blacklist = trx_context.enforce_whiteblacklist && control.is_producing_block(); flat_set actors; - bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool disallow_send_to_self_bypass = control.is_builtin_activated( builtin_protocol_feature_t::restrict_action_to_self ); bool send_to_self = (a.account == receiver); - bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act.account) && control.is_producing_block()); + bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act->account) && control.is_producing_block()); flat_set inherited_authorizations; if( inherit_parent_authorizations ) { @@ -227,7 +279,7 @@ void apply_context::execute_inline( action&& a ) { if( enforce_actor_whitelist_blacklist ) actors.insert( auth.actor ); - if( inherit_parent_authorizations && std::find(act.authorization.begin(), act.authorization.end(), auth) != act.authorization.end() ) { + if( inherit_parent_authorizations && std::find(act->authorization.begin(), act->authorization.end(), auth) != act->authorization.end() ) { inherited_authorizations.insert( auth ); } } @@ -271,7 +323,10 @@ void apply_context::execute_inline( action&& a ) { } } - _inline_actions.emplace_back( move(a) ); + auto inline_receiver = a.account; + _inline_actions.emplace_back( + schedule_action( std::move(a), inline_receiver, false ) + ); } void apply_context::execute_context_free_inline( action&& a ) { @@ -282,19 +337,55 @@ void apply_context::execute_context_free_inline( action&& a ) { EOS_ASSERT( a.authorization.size() == 0, action_validate_exception, "context-free actions cannot have authorizations" ); - _cfa_inline_actions.emplace_back( move(a) ); + + auto inline_receiver = a.account; + _cfa_inline_actions.emplace_back( + schedule_action( std::move(a), inline_receiver, true ) + ); } void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, account_name payer, transaction&& trx, bool replace_existing ) { EOS_ASSERT( trx.context_free_actions.size() == 0, cfa_inside_generated_tx, "context free actions are not currently allowed in generated 
transactions" ); - trx.expiration = control.pending_block_time() + fc::microseconds(999'999); // Rounds up to nearest second (makes expiration check unnecessary) - trx.set_reference_block(control.head_block_id()); // No TaPoS check necessary bool enforce_actor_whitelist_blacklist = trx_context.enforce_whiteblacklist && control.is_producing_block() && !control.sender_avoids_whitelist_blacklist_enforcement( receiver ); trx_context.validate_referenced_accounts( trx, enforce_actor_whitelist_blacklist ); + if( control.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) { + auto exts = trx.validate_and_extract_extensions(); + if( exts.size() > 0 ) { + EOS_ASSERT( exts.size() == 1, invalid_transaction_extension, + "only one extension is currently supported for deferred transactions" + ); + const auto& context = exts.front().get(); + EOS_ASSERT( context.sender == receiver, ill_formed_deferred_transaction_generation_context, + "deferred transaction generaction context contains mismatching sender", + ("expected", receiver)("actual", context.sender) + ); + EOS_ASSERT( context.sender_id == sender_id, ill_formed_deferred_transaction_generation_context, + "deferred transaction generaction context contains mismatching sender_id", + ("expected", sender_id)("actual", context.sender_id) + ); + EOS_ASSERT( context.sender_trx_id == trx_context.id, ill_formed_deferred_transaction_generation_context, + "deferred transaction generaction context contains mismatching sender_trx_id", + ("expected", trx_context.id)("actual", context.sender_trx_id) + ); + } else { + FC_ASSERT( trx.transaction_extensions.size() == 0, "invariant failure" ); + trx.transaction_extensions.emplace_back( + deferred_transaction_generation_context::extension_id(), + fc::raw::pack( deferred_transaction_generation_context( trx_context.id, sender_id, receiver ) ) + ); + } + trx.expiration = time_point_sec(); + trx.ref_block_num = 0; + trx.ref_block_prefix = 0; + } else { + trx.expiration = control.pending_block_time() + fc::microseconds(999'999); // Rounds up to nearest second (makes expiration check unnecessary) + trx.set_reference_block(control.head_block_id()); // No TaPoS check necessary + } + // Charge ahead of time for the additional net usage needed to retire the deferred transaction // whether that be by successfully executing, soft failure, hard failure, or expiration. const auto& cfg = control.get_global_properties().configuration; @@ -303,9 +394,21 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a auto delay = fc::seconds(trx.delay_sec); + bool ram_restrictions_activated = control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ); + if( !control.skip_auth_check() && !privileged ) { // Do not need to check authorization if replayng irreversible block or if contract is privileged if( payer != receiver ) { - require_authorization(payer); /// uses payer's storage + if( ram_restrictions_activated ) { + EOS_ASSERT( receiver == act->account, action_validate_exception, + "cannot bill RAM usage of deferred transactions to another account within notify context" + ); + EOS_ASSERT( has_authorization( payer ), action_validate_exception, + "cannot bill RAM usage of deferred transaction to another account that has not authorized the action: ${payer}", + ("payer", payer) + ); + } else { + require_authorization(payer); /// uses payer's storage + } } // Originally this code bypassed authorization checks if a contract was deferring only actions to itself. 
@@ -318,7 +421,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a // So, the deferred transaction must always go through the authorization checking if it is not sent by a privileged contract. // However, the old logic must still be considered because it cannot objectively change until a consensus protocol upgrade. - bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool disallow_send_to_self_bypass = control.is_builtin_activated( builtin_protocol_feature_t::restrict_action_to_self ); auto is_sending_only_to_self = [&trx]( const account_name& self ) { bool send_to_self = true; @@ -363,39 +466,60 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if ( auto ptr = db.find(boost::make_tuple(receiver, sender_id)) ) { EOS_ASSERT( replace_existing, deferred_tx_duplicate, "deferred transaction with the same sender_id and payer already exists" ); - // TODO: Remove the following subjective check when the deferred trx replacement RAM bug has been fixed with a hard fork. - EOS_ASSERT( !control.is_producing_block(), subjective_block_production_exception, + bool replace_deferred_activated = control.is_builtin_activated(builtin_protocol_feature_t::replace_deferred); + + EOS_ASSERT( replace_deferred_activated || !control.is_producing_block() + || control.all_subjective_mitigations_disabled(), + subjective_block_production_exception, "Replacing a deferred transaction is temporarily disabled." ); - // TODO: The logic of the next line needs to be incorporated into the next hard fork. - // add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); + uint64_t orig_trx_ram_bytes = config::billable_size_v + ptr->packed_trx.size(); + if( replace_deferred_activated ) { + add_ram_usage( ptr->payer, -static_cast( orig_trx_ram_bytes ) ); + } else { + control.add_to_ram_correction( ptr->payer, orig_trx_ram_bytes ); + } - db.modify( *ptr, [&]( auto& gtx ) { - gtx.sender = receiver; - gtx.sender_id = sender_id; - gtx.payer = payer; - gtx.published = control.pending_block_time(); - gtx.delay_until = gtx.published + delay; - gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); + transaction_id_type trx_id_for_new_obj; + if( replace_deferred_activated ) { + trx_id_for_new_obj = trx.id(); + } else { + trx_id_for_new_obj = ptr->trx_id; + } - trx_size = gtx.set( trx ); - }); + // Use remove and create rather than modify because mutating the trx_id field in a modifier is unsafe. 
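// [Editor's sketch, not part of the diff] The "remove and create" comment above
// is the same rule std::map enforces syntactically: a field that an index is
// keyed on (here, trx_id) must never be mutated while the element sits in the
// container. The safe pattern is to take the element out, change the key, and
// reinsert, which is what the db.remove()/db.create() pair accomplishes for
// the chainbase index:
#include <cassert>
#include <map>
#include <string>
void rekey( std::map<std::string, int>& index, const std::string& old_key, const std::string& new_key ) {
   auto node = index.extract( old_key ); // element leaves the index (cf. db.remove)
   assert( !node.empty() );
   node.key() = new_key;                 // safe to mutate: not indexed right now
   index.insert( std::move(node) );      // re-indexed under the new key (cf. db.create)
}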
+ db.remove( *ptr ); + db.create( [&]( auto& gtx ) { + gtx.trx_id = trx_id_for_new_obj; + gtx.sender = receiver; + gtx.sender_id = sender_id; + gtx.payer = payer; + gtx.published = control.pending_block_time(); + gtx.delay_until = gtx.published + delay; + gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); + + trx_size = gtx.set( trx ); + } ); } else { db.create( [&]( auto& gtx ) { - gtx.trx_id = trx.id(); - gtx.sender = receiver; - gtx.sender_id = sender_id; - gtx.payer = payer; - gtx.published = control.pending_block_time(); - gtx.delay_until = gtx.published + delay; - gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); - - trx_size = gtx.set( trx ); - }); + gtx.trx_id = trx.id(); + gtx.sender = receiver; + gtx.sender_id = sender_id; + gtx.payer = payer; + gtx.published = control.pending_block_time(); + gtx.delay_until = gtx.published + delay; + gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); + + trx_size = gtx.set( trx ); + } ); } - EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account) || (receiver == payer) || privileged, - subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." ); + EOS_ASSERT( ram_restrictions_activated + || control.is_ram_billing_in_notify_allowed() + || (receiver == act->account) || (receiver == payer) || privileged, + subjective_block_production_exception, + "Cannot charge RAM to other accounts during notify." + ); add_ram_usage( payer, (config::billable_size_v + trx_size) ); } @@ -409,6 +533,26 @@ bool apply_context::cancel_deferred_transaction( const uint128_t& sender_id, acc return gto; } +uint32_t apply_context::schedule_action( uint32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free ) +{ + uint32_t scheduled_action_ordinal = trx_context.schedule_action( ordinal_of_action_to_schedule, + receiver, context_free, + action_ordinal, first_receiver_action_ordinal ); + + act = &trx_context.get_action_trace( action_ordinal ).act; + return scheduled_action_ordinal; +} + +uint32_t apply_context::schedule_action( action&& act_to_schedule, account_name receiver, bool context_free ) +{ + uint32_t scheduled_action_ordinal = trx_context.schedule_action( std::move(act_to_schedule), + receiver, context_free, + action_ordinal, first_receiver_action_ordinal ); + + act = &trx_context.get_action_trace( action_ordinal ).act; + return scheduled_action_ordinal; +} + const table_id_object* apply_context::find_table( name code, name scope, name table ) { return db.find(boost::make_tuple(code, scope, table)); } @@ -444,11 +588,6 @@ vector apply_context::get_active_producers() const { return accounts; } -void apply_context::reset_console() { - _pending_console_output = std::ostringstream(); - _pending_console_output.setf( std::ios::scientific, std::ios::floatfield ); -} - bytes apply_context::get_packed_transaction() { auto r = fc::raw::pack( static_cast(trx_context.trx) ); return r; @@ -456,8 +595,10 @@ bytes apply_context::get_packed_transaction() { void apply_context::update_db_usage( const account_name& payer, int64_t delta ) { if( delta > 0 ) { - if( !(privileged || payer == account_name(receiver)) ) { - EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account), + if( !(privileged || payer == account_name(receiver) + || 
+      {
+         EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act->account),
                      subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." );
          require_authorization( payer );
       }
@@ -710,19 +851,18 @@ uint64_t apply_context::next_global_sequence() {
    return p.global_action_sequence;
 }
 
-uint64_t apply_context::next_recv_sequence( account_name receiver ) {
-   const auto& rs = db.get<account_sequence_object,by_name>( receiver );
-   db.modify( rs, [&]( auto& mrs ) {
-      ++mrs.recv_sequence;
+uint64_t apply_context::next_recv_sequence( const account_metadata_object& receiver_account ) {
+   db.modify( receiver_account, [&]( auto& ra ) {
+      ++ra.recv_sequence;
    });
-   return rs.recv_sequence;
+   return receiver_account.recv_sequence;
 }
 uint64_t apply_context::next_auth_sequence( account_name actor ) {
-   const auto& rs = db.get<account_sequence_object,by_name>( actor );
-   db.modify( rs, [&](auto& mrs ){
-      ++mrs.auth_sequence;
+   const auto& amo = db.get<account_metadata_object,by_name>( actor );
+   db.modify( amo, [&](auto& am ){
+      ++am.auth_sequence;
    });
-   return rs.auth_sequence;
+   return amo.auth_sequence;
 }
 
 void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) {
@@ -734,5 +874,13 @@ void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) {
    }
 }
 
+action_name apply_context::get_sender() const {
+   const action_trace& trace = trx_context.get_action_trace( action_ordinal );
+   if (trace.creator_action_ordinal > 0) {
+      const action_trace& creator_trace = trx_context.get_action_trace( trace.creator_action_ordinal );
+      return creator_trace.receiver;
+   }
+   return 0;
+}
 
 } } /// eosio::chain
diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp
index e69f7129121..83988c16657 100644
--- a/libraries/chain/authorization_manager.cpp
+++ b/libraries/chain/authorization_manager.cpp
@@ -331,16 +331,20 @@ namespace eosio { namespace chain {
       EOS_ASSERT( auth.actor == link.account, irrelevant_auth_exception,
                   "the owner of the linked permission needs to be the actor of the declared authorization" );
 
-      EOS_ASSERT( link.type != updateauth::get_name(), action_validate_exception,
-                  "Cannot link eosio::updateauth to a minimum permission" );
-      EOS_ASSERT( link.type != deleteauth::get_name(), action_validate_exception,
-                  "Cannot link eosio::deleteauth to a minimum permission" );
-      EOS_ASSERT( link.type != linkauth::get_name(), action_validate_exception,
-                  "Cannot link eosio::linkauth to a minimum permission" );
-      EOS_ASSERT( link.type != unlinkauth::get_name(), action_validate_exception,
-                  "Cannot link eosio::unlinkauth to a minimum permission" );
-      EOS_ASSERT( link.type != canceldelay::get_name(), action_validate_exception,
-                  "Cannot link eosio::canceldelay to a minimum permission" );
+      if( link.code == config::system_account_name
+            || !_control.is_builtin_activated( builtin_protocol_feature_t::fix_linkauth_restriction ) )
+      {
+         EOS_ASSERT( link.type != updateauth::get_name(), action_validate_exception,
+                     "Cannot link eosio::updateauth to a minimum permission" );
+         EOS_ASSERT( link.type != deleteauth::get_name(), action_validate_exception,
+                     "Cannot link eosio::deleteauth to a minimum permission" );
+         EOS_ASSERT( link.type != linkauth::get_name(), action_validate_exception,
+                     "Cannot link eosio::linkauth to a minimum permission" );
+         EOS_ASSERT( link.type != unlinkauth::get_name(), action_validate_exception,
+                     "Cannot link eosio::unlinkauth to a minimum permission" );
+         EOS_ASSERT( link.type != canceldelay::get_name(), action_validate_exception,
+                     "Cannot link eosio::canceldelay to a minimum permission" );
+      }
 
       const auto linked_permission_name = lookup_minimum_permission(link.account, link.code, link.type);
diff --git a/libraries/chain/block_header.cpp b/libraries/chain/block_header.cpp
index d623406fd25..692089dc9e6 100644
--- a/libraries/chain/block_header.cpp
+++ b/libraries/chain/block_header.cpp
@@ -28,5 +28,44 @@ namespace eosio { namespace chain {
       return result;
    }
 
+   vector<block_header_extensions> block_header::validate_and_extract_header_extensions()const {
+      using block_header_extensions_t = block_header_extension_types::block_header_extensions_t;
+      using decompose_t = block_header_extension_types::decompose_t;
+
+      static_assert( std::is_same<block_header_extensions_t, block_header_extensions>::value,
+                     "block_header_extensions is not setup as expected" );
+
+      vector<block_header_extensions> results;
+
+      uint16_t id_type_lower_bound = 0;
+
+      for( size_t i = 0; i < header_extensions.size(); ++i ) {
+         const auto& e = header_extensions[i];
+         auto id = e.first;
+
+         EOS_ASSERT( id >= id_type_lower_bound, invalid_block_header_extension,
+                     "Block header extensions are not in the correct order (ascending id types required)"
+         );
+
+         results.emplace_back();
+
+         auto match = decompose_t::extract( id, e.second, results.back() );
+         EOS_ASSERT( match, invalid_block_header_extension,
+                     "Block header extension with id type ${id} is not supported",
+                     ("id", id)
+         );
+
+         if( match->enforce_unique ) {
+            EOS_ASSERT( i == 0 || id > id_type_lower_bound, invalid_block_header_extension,
+                        "Block header extension with id type ${id} is not allowed to repeat",
+                        ("id", id)
+            );
+         }
+
+         id_type_lower_bound = id;
+      }
+
+      return results;
+   }
 } }
diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp
index c63aa804333..f7dd7aba656 100644
--- a/libraries/chain/block_header_state.cpp
+++ b/libraries/chain/block_header_state.cpp
@@ -15,240 +15,330 @@ namespace eosio { namespace chain {
       return active_schedule.producers[index];
    }
 
-   uint32_t block_header_state::calc_dpos_last_irreversible()const {
+   uint32_t block_header_state::calc_dpos_last_irreversible( account_name producer_of_next_block )const {
       vector<uint32_t> blocknums;
       blocknums.reserve( producer_to_last_implied_irb.size() );
       for( auto& i : producer_to_last_implied_irb ) {
-         blocknums.push_back(i.second);
+         blocknums.push_back( (i.first == producer_of_next_block) ? dpos_proposed_irreversible_blocknum : i.second);
       }
       /// 2/3 must be greater, so if I go 1/3 into the list sorted from low to high, then 2/3 are greater
       if( blocknums.size() == 0 ) return 0;
-      /// TODO: update to nth_element
-      std::sort( blocknums.begin(), blocknums.end() );
-      return blocknums[ (blocknums.size()-1) / 3 ];
+
+      std::size_t index = (blocknums.size()-1) / 3;
+      std::nth_element( blocknums.begin(), blocknums.begin() + index, blocknums.end() );
+      return blocknums[ index ];
    }
 
-   /**
-    * Generate a template block header state for a given block time, it will not
-    * contain a transaction mroot, action mroot, or new_producers as those components
-    * are derived from chain state.
- */ - block_header_state block_header_state::generate_next( block_timestamp_type when )const { - block_header_state result; - - if( when != block_timestamp_type() ) { - EOS_ASSERT( when > header.timestamp, block_validate_exception, "next block must be in the future" ); - } else { - (when = header.timestamp).slot++; - } - result.header.timestamp = when; - result.header.previous = id; - result.header.schedule_version = active_schedule.version; - - auto prokey = get_scheduled_producer(when); - result.block_signing_key = prokey.block_signing_key; - result.header.producer = prokey.producer_name; - - result.pending_schedule_lib_num = pending_schedule_lib_num; - result.pending_schedule_hash = pending_schedule_hash; - result.block_num = block_num + 1; - result.producer_to_last_produced = producer_to_last_produced; - result.producer_to_last_implied_irb = producer_to_last_implied_irb; - result.producer_to_last_produced[prokey.producer_name] = result.block_num; - result.blockroot_merkle = blockroot_merkle; - result.blockroot_merkle.append( id ); - - result.active_schedule = active_schedule; - result.pending_schedule = pending_schedule; - result.dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; - result.bft_irreversible_blocknum = bft_irreversible_blocknum; - - result.producer_to_last_implied_irb[prokey.producer_name] = result.dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = result.calc_dpos_last_irreversible(); - - /// grow the confirmed count - static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); - - // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms _this_ block - auto num_active_producers = active_schedule.producers.size(); - uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; - - if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { - result.confirm_count.reserve( confirm_count.size() + 1 ); - result.confirm_count = confirm_count; - result.confirm_count.resize( confirm_count.size() + 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } else { - result.confirm_count.resize( confirm_count.size() ); - memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } - - return result; - } /// generate_next - - bool block_header_state::maybe_promote_pending() { - if( pending_schedule.producers.size() && - dpos_irreversible_blocknum >= pending_schedule_lib_num ) + pending_block_header_state block_header_state::next( block_timestamp_type when, + uint16_t num_prev_blocks_to_confirm )const + { + pending_block_header_state result; + + if( when != block_timestamp_type() ) { + EOS_ASSERT( when > header.timestamp, block_validate_exception, "next block must be in the future" ); + } else { + (when = header.timestamp).slot++; + } + + auto prokey = get_scheduled_producer(when); + + auto itr = producer_to_last_produced.find( prokey.producer_name ); + if( itr != producer_to_last_produced.end() ) { + EOS_ASSERT( itr->second < (block_num+1) - num_prev_blocks_to_confirm, producer_double_confirm, + "producer ${prod} double-confirming known range", + ("prod", prokey.producer_name)("num", block_num+1) + ("confirmed", num_prev_blocks_to_confirm)("last_produced", itr->second) ); + } + + result.block_num = block_num + 1; + result.previous = id; + result.timestamp = when; + 
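// Worked example for the producer_double_confirm guard above (numbers assumed
// for illustration): if producer P last produced block 100 and is now producing
// block 101, then num_prev_blocks_to_confirm == 0 passes the check
// (100 < 101 - 0), while num_prev_blocks_to_confirm == 1 fails it
// (100 < 101 - 1 is false) and throws, since P would be confirming block 100,
// a block it produced itself.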
result.confirmed = num_prev_blocks_to_confirm; + result.active_schedule_version = active_schedule.version; + result.prev_activated_protocol_features = activated_protocol_features; + + result.block_signing_key = prokey.block_signing_key; + result.producer = prokey.producer_name; + + result.blockroot_merkle = blockroot_merkle; + result.blockroot_merkle.append( id ); + + /// grow the confirmed count + static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); + + // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms _this_ block + auto num_active_producers = active_schedule.producers.size(); + uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; + + if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { + result.confirm_count.reserve( confirm_count.size() + 1 ); + result.confirm_count = confirm_count; + result.confirm_count.resize( confirm_count.size() + 1 ); + result.confirm_count.back() = (uint8_t)required_confs; + } else { + result.confirm_count.resize( confirm_count.size() ); + memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); + result.confirm_count.back() = (uint8_t)required_confs; + } + + auto new_dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; + + int32_t i = (int32_t)(result.confirm_count.size() - 1); + uint32_t blocks_to_confirm = num_prev_blocks_to_confirm + 1; /// confirm the head block too + while( i >= 0 && blocks_to_confirm ) { + --result.confirm_count[i]; + //idump((confirm_count[i])); + if( result.confirm_count[i] == 0 ) + { + uint32_t block_num_for_i = result.block_num - (uint32_t)(result.confirm_count.size() - 1 - i); + new_dpos_proposed_irreversible_blocknum = block_num_for_i; + //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); + + if (i == static_cast(result.confirm_count.size() - 1)) { + result.confirm_count.resize(0); + } else { + memmove( &result.confirm_count[0], &result.confirm_count[i + 1], result.confirm_count.size() - i - 1); + result.confirm_count.resize( result.confirm_count.size() - i - 1 ); + } + + break; + } + --i; + --blocks_to_confirm; + } + + result.dpos_proposed_irreversible_blocknum = new_dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( prokey.producer_name ); + + result.prev_pending_schedule = pending_schedule; + + if( pending_schedule.schedule.producers.size() && + result.dpos_irreversible_blocknum >= pending_schedule.schedule_lib_num ) { - active_schedule = move( pending_schedule ); + result.active_schedule = pending_schedule.schedule; flat_map new_producer_to_last_produced; - for( const auto& pro : active_schedule.producers ) { - auto existing = producer_to_last_produced.find( pro.producer_name ); - if( existing != producer_to_last_produced.end() ) { - new_producer_to_last_produced[pro.producer_name] = existing->second; + + for( const auto& pro : result.active_schedule.producers ) { + if( pro.producer_name == prokey.producer_name ) { + new_producer_to_last_produced[pro.producer_name] = result.block_num; } else { - new_producer_to_last_produced[pro.producer_name] = dpos_irreversible_blocknum; + auto existing = producer_to_last_produced.find( pro.producer_name ); + if( existing != producer_to_last_produced.end() ) { + new_producer_to_last_produced[pro.producer_name] = existing->second; + } else { + 
new_producer_to_last_produced[pro.producer_name] = result.dpos_irreversible_blocknum; + } } } + new_producer_to_last_produced[prokey.producer_name] = result.block_num; + + result.producer_to_last_produced = std::move( new_producer_to_last_produced ); flat_map new_producer_to_last_implied_irb; - for( const auto& pro : active_schedule.producers ) { - auto existing = producer_to_last_implied_irb.find( pro.producer_name ); - if( existing != producer_to_last_implied_irb.end() ) { - new_producer_to_last_implied_irb[pro.producer_name] = existing->second; + + for( const auto& pro : result.active_schedule.producers ) { + if( pro.producer_name == prokey.producer_name ) { + new_producer_to_last_implied_irb[pro.producer_name] = dpos_proposed_irreversible_blocknum; } else { - new_producer_to_last_implied_irb[pro.producer_name] = dpos_irreversible_blocknum; + auto existing = producer_to_last_implied_irb.find( pro.producer_name ); + if( existing != producer_to_last_implied_irb.end() ) { + new_producer_to_last_implied_irb[pro.producer_name] = existing->second; + } else { + new_producer_to_last_implied_irb[pro.producer_name] = result.dpos_irreversible_blocknum; + } } } - producer_to_last_produced = move( new_producer_to_last_produced ); - producer_to_last_implied_irb = move( new_producer_to_last_implied_irb); - producer_to_last_produced[header.producer] = block_num; + result.producer_to_last_implied_irb = std::move( new_producer_to_last_implied_irb ); - return true; + result.was_pending_promoted = true; + } else { + result.active_schedule = active_schedule; + result.producer_to_last_produced = producer_to_last_produced; + result.producer_to_last_produced[prokey.producer_name] = block_num; + result.producer_to_last_implied_irb = producer_to_last_implied_irb; + result.producer_to_last_implied_irb[prokey.producer_name] = dpos_proposed_irreversible_blocknum; } - return false; + + return result; } - void block_header_state::set_new_producers( producer_schedule_type pending ) { - EOS_ASSERT( pending.version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); - EOS_ASSERT( pending_schedule.producers.size() == 0, producer_schedule_exception, - "cannot set new pending producers until last pending is confirmed" ); - header.new_producers = move(pending); - pending_schedule_hash = digest_type::hash( *header.new_producers ); - pending_schedule = *header.new_producers; - pending_schedule_lib_num = block_num; - } - - - /** - * Transitions the current header state into the next header state given the supplied signed block header. - * - * Given a signed block header, generate the expected template based upon the header time, - * then validate that the provided header matches the template. - * - * If the header specifies new_producers then apply them accordingly. 
- */ - block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee )const { - EOS_ASSERT( h.timestamp != block_timestamp_type(), block_validate_exception, "", ("h",h) ); - EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" ); - - EOS_ASSERT( h.timestamp > header.timestamp, block_validate_exception, "block must be later in time" ); - EOS_ASSERT( h.previous == id, unlinkable_block_exception, "block must link to current state" ); - auto result = generate_next( h.timestamp ); - EOS_ASSERT( result.header.producer == h.producer, wrong_producer, "wrong producer specified" ); - EOS_ASSERT( result.header.schedule_version == h.schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); - - auto itr = producer_to_last_produced.find(h.producer); - if( itr != producer_to_last_produced.end() ) { - EOS_ASSERT( itr->second < result.block_num - h.confirmed, producer_double_confirm, "producer ${prod} double-confirming known range", ("prod", h.producer) ); - } - - // FC_ASSERT( result.header.block_mroot == h.block_mroot, "mismatch block merkle root" ); - - /// below this point is state changes that cannot be validated with headers alone, but never-the-less, - /// must result in header state changes - - result.set_confirmed( h.confirmed ); - - auto was_pending_promoted = result.maybe_promote_pending(); - - if( h.new_producers ) { - EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); - result.set_new_producers( *h.new_producers ); - } - - result.header.action_mroot = h.action_mroot; - result.header.transaction_mroot = h.transaction_mroot; - result.header.producer_signature = h.producer_signature; - result.id = result.header.id(); - - // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here - if( !skip_validate_signee ) { - result.verify_signee( result.signee() ); - } - - return result; - } /// next - - void block_header_state::set_confirmed( uint16_t num_prev_blocks ) { - /* - idump((num_prev_blocks)(confirm_count.size())); - - for( uint32_t i = 0; i < confirm_count.size(); ++i ) { - std::cerr << "confirm_count["<= 0 && blocks_to_confirm ) { - --confirm_count[i]; - //idump((confirm_count[i])); - if( confirm_count[i] == 0 ) - { - uint32_t block_num_for_i = block_num - (uint32_t)(confirm_count.size() - 1 - i); - dpos_proposed_irreversible_blocknum = block_num_for_i; - //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); - - if (i == static_cast(confirm_count.size() - 1)) { - confirm_count.resize(0); - } else { - memmove( &confirm_count[0], &confirm_count[i + 1], confirm_count.size() - i - 1); - confirm_count.resize( confirm_count.size() - i - 1 ); - } - - return; - } - --i; - --blocks_to_confirm; - } - } + signed_block_header pending_block_header_state::make_block_header( + const checksum256_type& transaction_mroot, + const checksum256_type& action_mroot, + optional&& new_producers, + vector&& new_protocol_feature_activations + )const + { + signed_block_header h; + + h.timestamp = timestamp; + h.producer = producer; + h.confirmed = confirmed; + h.previous = previous; + h.transaction_mroot = transaction_mroot; + h.action_mroot = action_mroot; + h.schedule_version = active_schedule_version; + h.new_producers = std::move(new_producers); + + if( new_protocol_feature_activations.size() > 0 ) { + 
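// A header extension is a (uint16_t id, vector<char> payload) pair; the
// activation list is wrapped in a protocol_feature_activation struct and
// serialized with fc::raw::pack before being attached to the header. A sketch
// of the matching decode step (using fc's generic unpack; illustrative only):
//
//    auto pfa = fc::raw::unpack<protocol_feature_activation>( ext.second );
//    // pfa.protocol_features holds the digests of the newly activated features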
h.header_extensions.emplace_back( + protocol_feature_activation::extension_id(), + fc::raw::pack( protocol_feature_activation{ std::move(new_protocol_feature_activations) } ) + ); + } + + return h; + } + + block_header_state pending_block_header_state::_finish_next( + const signed_block_header& h, + const std::function&, + const vector& )>& validator + )&& + { + EOS_ASSERT( h.timestamp == timestamp, block_validate_exception, "timestamp mismatch" ); + EOS_ASSERT( h.previous == previous, unlinkable_block_exception, "previous mismatch" ); + EOS_ASSERT( h.confirmed == confirmed, block_validate_exception, "confirmed mismatch" ); + EOS_ASSERT( h.producer == producer, wrong_producer, "wrong producer specified" ); + EOS_ASSERT( h.schedule_version == active_schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); + + if( h.new_producers ) { + EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); + EOS_ASSERT( h.new_producers->version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); + EOS_ASSERT( prev_pending_schedule.schedule.producers.size() == 0, producer_schedule_exception, + "cannot set new pending producers until last pending is confirmed" ); + } + + protocol_feature_activation_set_ptr new_activated_protocol_features; + + auto exts = h.validate_and_extract_header_extensions(); + { + if( exts.size() > 0 ) { + const auto& new_protocol_features = exts.front().get().protocol_features; + validator( timestamp, prev_activated_protocol_features->protocol_features, new_protocol_features ); + + new_activated_protocol_features = std::make_shared( + *prev_activated_protocol_features, + new_protocol_features + ); + } else { + new_activated_protocol_features = std::move( prev_activated_protocol_features ); + } + } + + auto block_number = block_num; + + block_header_state result( std::move( *static_cast(this) ) ); + + result.id = h.id(); + result.header = h; + + result.header_exts = std::move(exts); + + if( h.new_producers ) { + result.pending_schedule.schedule = *h.new_producers; + result.pending_schedule.schedule_hash = digest_type::hash( *h.new_producers ); + result.pending_schedule.schedule_lib_num = block_number; + } else { + if( was_pending_promoted ) { + result.pending_schedule.schedule.version = prev_pending_schedule.schedule.version; + } else { + result.pending_schedule.schedule = std::move( prev_pending_schedule.schedule ); + } + result.pending_schedule.schedule_hash = std::move( prev_pending_schedule.schedule_hash ); + result.pending_schedule.schedule_lib_num = prev_pending_schedule.schedule_lib_num; + } + + result.activated_protocol_features = std::move( new_activated_protocol_features ); + + return result; + } + + block_header_state pending_block_header_state::finish_next( + const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee + )&& + { + auto result = std::move(*this)._finish_next( h, validator ); + + // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here + if( !skip_validate_signee ) { + result.verify_signee( result.signee() ); + } + + return result; + } - digest_type block_header_state::sig_digest()const { - auto header_bmroot = digest_type::hash( std::make_pair( header.digest(), blockroot_merkle.get_root() ) ); - return digest_type::hash( 
std::make_pair(header_bmroot, pending_schedule_hash) ); - } + block_header_state pending_block_header_state::finish_next( + signed_block_header& h, + const std::function&, + const vector& )>& validator, + const std::function& signer + )&& + { + auto result = std::move(*this)._finish_next( h, validator ); + result.sign( signer ); + h.producer_signature = result.header.producer_signature; + return result; + } + + /** + * Transitions the current header state into the next header state given the supplied signed block header. + * + * Given a signed block header, generate the expected template based upon the header time, + * then validate that the provided header matches the template. + * + * If the header specifies new_producers then apply them accordingly. + */ + block_header_state block_header_state::next( + const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee )const + { + return next( h.timestamp, h.confirmed ).finish_next( h, validator, skip_validate_signee ); + } - void block_header_state::sign( const std::function& signer ) { - auto d = sig_digest(); - header.producer_signature = signer( d ); - EOS_ASSERT( block_signing_key == fc::crypto::public_key( header.producer_signature, d ), wrong_signing_key, "block is signed with unexpected key" ); - } + digest_type block_header_state::sig_digest()const { + auto header_bmroot = digest_type::hash( std::make_pair( header.digest(), blockroot_merkle.get_root() ) ); + return digest_type::hash( std::make_pair(header_bmroot, pending_schedule.schedule_hash) ); + } - public_key_type block_header_state::signee()const { - return fc::crypto::public_key( header.producer_signature, sig_digest(), true ); - } + void block_header_state::sign( const std::function& signer ) { + auto d = sig_digest(); + header.producer_signature = signer( d ); + EOS_ASSERT( block_signing_key == fc::crypto::public_key( header.producer_signature, d ), + wrong_signing_key, "block is signed with unexpected key" ); + } - void block_header_state::verify_signee( const public_key_type& signee )const { - EOS_ASSERT( block_signing_key == signee, wrong_signing_key, "block not signed by expected key", - ("block_signing_key", block_signing_key)( "signee", signee ) ); - } + public_key_type block_header_state::signee()const { + return fc::crypto::public_key( header.producer_signature, sig_digest(), true ); + } - void block_header_state::add_confirmation( const header_confirmation& conf ) { - for( const auto& c : confirmations ) - EOS_ASSERT( c.producer != conf.producer, producer_double_confirm, "block already confirmed by this producer" ); + void block_header_state::verify_signee( const public_key_type& signee )const { + EOS_ASSERT( block_signing_key == signee, wrong_signing_key, + "block not signed by expected key", + ("block_signing_key", block_signing_key)( "signee", signee ) ); + } - auto key = active_schedule.get_producer_key( conf.producer ); - EOS_ASSERT( key != public_key_type(), producer_not_in_schedule, "producer not in current schedule" ); - auto signer = fc::crypto::public_key( conf.producer_signature, sig_digest(), true ); - EOS_ASSERT( signer == key, wrong_signing_key, "confirmation not signed by expected key" ); + /** + * Reference cannot outlive *this. Assumes header_exts is not mutated after instatiation. 
+ */ + const vector& block_header_state::get_new_protocol_feature_activations()const { + static const vector no_activations{}; - confirmations.emplace_back( conf ); - } + if( header_exts.size() == 0 || !header_exts.front().contains() ) + return no_activations; + return header_exts.front().get().protocol_features; + } } } /// namespace eosio::chain diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 0f7169b32b8..fef0dd3f637 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -233,6 +233,9 @@ namespace eosio { namespace chain { if (first_block) { append(first_block); + } else { + my->head.reset(); + my->head_id = {}; } auto pos = my->block_stream.tellp(); diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp index b4834775951..3a246038149 100644 --- a/libraries/chain/block_state.cpp +++ b/libraries/chain/block_state.cpp @@ -3,17 +3,42 @@ namespace eosio { namespace chain { - block_state::block_state( const block_header_state& prev, block_timestamp_type when ) - :block_header_state( prev.generate_next( when ) ), - block( std::make_shared() ) - { - static_cast(*block) = header; - } + block_state::block_state( const block_header_state& prev, + signed_block_ptr b, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee + ) + :block_header_state( prev.next( *b, validator, skip_validate_signee ) ) + ,block( std::move(b) ) + {} - block_state::block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ) - :block_header_state( prev.next( *b, skip_validate_signee )), block( move(b) ) - { } + block_state::block_state( pending_block_header_state&& cur, + signed_block_ptr&& b, + vector&& trx_metas, + const std::function&, + const vector& )>& validator, + const std::function& signer + ) + :block_header_state( std::move(cur).finish_next( *b, validator, signer ) ) + ,block( std::move(b) ) + ,trxs( std::move(trx_metas) ) + {} + block_state::block_state( pending_block_header_state&& cur, + const signed_block_ptr& b, + vector&& trx_metas, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee + ) + :block_header_state( std::move(cur).finish_next( *b, validator, skip_validate_signee ) ) + ,block( b ) + ,trxs( std::move(trx_metas) ) + {} } } /// eosio::chain diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index a8f714bb1e5..2a438333c7f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -6,14 +6,19 @@ #include #include +#include #include #include #include +#include #include #include #include #include +#include +#include +#include #include #include #include @@ -21,6 +26,7 @@ #include #include +#include #include #include @@ -30,13 +36,16 @@ using resource_limits::resource_limits_manager; using controller_index_set = index_set< account_index, - account_sequence_index, + account_metadata_index, + account_ram_correction_index, global_property_multi_index, + protocol_state_multi_index, dynamic_global_property_multi_index, block_summary_multi_index, transaction_multi_index, generated_transaction_multi_index, - table_id_multi_index + table_id_multi_index, + code_index >; using contract_database_index_set = index_set< @@ -93,19 +102,104 @@ class maybe_session { optional _session; }; +struct building_block { + building_block( const block_header_state& prev, + block_timestamp_type when, + uint16_t num_prev_blocks_to_confirm, + const vector& new_protocol_feature_activations ) + 
:_pending_block_header_state( prev.next( when, num_prev_blocks_to_confirm ) ) + ,_new_protocol_feature_activations( new_protocol_feature_activations ) + {} + + pending_block_header_state _pending_block_header_state; + optional _new_pending_producer_schedule; + vector _new_protocol_feature_activations; + size_t _num_new_protocol_features_that_have_activated = 0; + vector _pending_trx_metas; + vector _pending_trx_receipts; + vector _actions; +}; + +struct assembled_block { + block_id_type _id; + pending_block_header_state _pending_block_header_state; + vector _trx_metas; + signed_block_ptr _unsigned_block; +}; + +struct completed_block { + block_state_ptr _block_state; +}; + +using block_stage_type = fc::static_variant; + struct pending_state { - pending_state( maybe_session&& s ) - :_db_session( move(s) ){} + pending_state( maybe_session&& s, const block_header_state& prev, + block_timestamp_type when, + uint16_t num_prev_blocks_to_confirm, + const vector& new_protocol_feature_activations ) + :_db_session( move(s) ) + ,_block_stage( building_block( prev, when, num_prev_blocks_to_confirm, new_protocol_feature_activations ) ) + {} maybe_session _db_session; + block_stage_type _block_stage; + controller::block_status _block_status = controller::block_status::incomplete; + optional _producer_block_id; - block_state_ptr _pending_block_state; + /** @pre _block_stage cannot hold completed_block alternative */ + const pending_block_header_state& get_pending_block_header_state()const { + if( _block_stage.contains() ) + return _block_stage.get()._pending_block_header_state; - vector _actions; + return _block_stage.get()._pending_block_header_state; + } - controller::block_status _block_status = controller::block_status::incomplete; + const vector& get_trx_receipts()const { + if( _block_stage.contains() ) + return _block_stage.get()._pending_trx_receipts; - optional _producer_block_id; + if( _block_stage.contains() ) + return _block_stage.get()._unsigned_block->transactions; + + return _block_stage.get()._block_state->block->transactions; + } + + const vector& get_trx_metas()const { + if( _block_stage.contains() ) + return _block_stage.get()._pending_trx_metas; + + if( _block_stage.contains() ) + return _block_stage.get()._trx_metas; + + return _block_stage.get()._block_state->trxs; + } + + bool is_protocol_feature_activated( const digest_type& feature_digest )const { + if( _block_stage.contains() ) { + auto& bb = _block_stage.get(); + const auto& activated_features = bb._pending_block_header_state.prev_activated_protocol_features->protocol_features; + + if( activated_features.find( feature_digest ) != activated_features.end() ) return true; + + if( bb._num_new_protocol_features_that_have_activated == 0 ) return false; + + auto end = bb._new_protocol_feature_activations.begin() + bb._num_new_protocol_features_that_have_activated; + return (std::find( bb._new_protocol_feature_activations.begin(), end, feature_digest ) != end); + } + + if( _block_stage.contains() ) { + // Calling is_protocol_feature_activated during the assembled_block stage is not efficient. + // We should avoid doing it. + // In fact for now it isn't even implemented. 
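// Sketch of the prefix rule implemented in the building_block branch above
// (names from this patch, counts assumed for illustration): with
// _new_protocol_feature_activations = {A, B, C} and
// _num_new_protocol_features_that_have_activated == 2, the search window is
// [begin, begin + 2), so A and B already count as activated while C does not
// until its activation handler has run:
//
//    auto end = bb._new_protocol_feature_activations.begin()
//                  + bb._num_new_protocol_features_that_have_activated;
//    bool active = std::find( bb._new_protocol_feature_activations.begin(), end,
//                             feature_digest ) != end;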
+ EOS_THROW( misc_exception, + "checking if protocol feature is activated in the assembled_block stage is not yet supported" ); + // TODO: implement this + } + + const auto& activated_features = _block_stage.get()._block_state->activated_protocol_features->protocol_features; + return (activated_features.find( feature_digest ) != activated_features.end()); + } void push() { _db_session.push(); @@ -123,19 +217,20 @@ struct controller_impl { wasm_interface wasmif; resource_limits_manager resource_limits; authorization_manager authorization; + protocol_feature_manager protocol_features; controller::config conf; chain_id_type chain_id; - bool replaying= false; optional replay_head_time; db_read_mode read_mode = db_read_mode::SPECULATIVE; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. auth checks) cannot be skipped optional subjective_cpu_leeway; bool trusted_producer_light_validation = false; uint32_t snapshot_head_block = 0; - boost::asio::thread_pool thread_pool; + named_thread_pool thread_pool; typedef pair handler_key; map< account_name, map > apply_handlers; + unordered_map< builtin_protocol_feature_t, std::function, enum_hash > protocol_feature_activation_handlers; /** * Transactions that were undone by pop_block or abort_block, transactions @@ -146,7 +241,11 @@ struct controller_impl { void pop_block() { auto prev = fork_db.get_block( head->header.previous ); - EOS_ASSERT( prev, block_validate_exception, "attempt to pop beyond last irreversible block" ); + + if( !prev ) { + EOS_ASSERT( fork_db.root()->id == head->header.previous, block_validate_exception, "attempt to pop beyond last irreversible block" ); + prev = fork_db.root(); + } if( const auto* b = reversible_blocks.find(head->block_num) ) { @@ -158,35 +257,67 @@ struct controller_impl { for( const auto& t : head->trxs ) unapplied_transactions[t->signed_id] = t; } + head = prev; db.undo(); + protocol_features.popped_blocks_to( prev->block_num ); + } + + template + void on_activation(); + + template + inline void set_activation_handler() { + auto res = protocol_feature_activation_handlers.emplace( F, &controller_impl::on_activation ); + EOS_ASSERT( res.second, misc_exception, "attempting to set activation handler twice" ); } + inline void trigger_activation_handler( builtin_protocol_feature_t f ) { + auto itr = protocol_feature_activation_handlers.find( f ); + if( itr == protocol_feature_activation_handlers.end() ) return; + (itr->second)( *this ); + } void set_apply_handler( account_name receiver, account_name contract, action_name action, apply_handler v ) { apply_handlers[receiver][make_pair(contract,action)] = v; } - controller_impl( const controller::config& cfg, controller& s ) + controller_impl( const controller::config& cfg, controller& s, protocol_feature_set&& pfs ) :self(s), db( cfg.state_dir, cfg.read_only ? database::read_only : database::read_write, - cfg.state_size ), + cfg.state_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ), reversible_blocks( cfg.blocks_dir/config::reversible_blocks_dir_name, cfg.read_only ? 
database::read_only : database::read_write, - cfg.reversible_cache_size ), + cfg.reversible_cache_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ), blog( cfg.blocks_dir ), fork_db( cfg.state_dir ), - wasmif( cfg.wasm_runtime ), + wasmif( cfg.wasm_runtime, db ), resource_limits( db ), authorization( s, db ), + protocol_features( std::move(pfs) ), conf( cfg ), chain_id( cfg.genesis.compute_chain_id() ), read_mode( cfg.read_mode ), - thread_pool( cfg.thread_pool_size ) + thread_pool( "chain", cfg.thread_pool_size ) { + fork_db.open( [this]( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + { check_protocol_features( timestamp, cur_features, new_features ); } + ); + + set_activation_handler(); + set_activation_handler(); + set_activation_handler(); + + self.irreversible_block.connect([this](const block_state_ptr& bsp) { + wasmif.current_lib(bsp->block_num); + }); + + #define SET_APP_HANDLER( receiver, contract, action) \ set_apply_handler( #receiver, #contract, #action, &BOOST_PP_CAT(apply_, BOOST_PP_CAT(contract, BOOST_PP_CAT(_,action) ) ) ) @@ -204,11 +335,6 @@ struct controller_impl { */ SET_APP_HANDLER( eosio, eosio, canceldelay ); - - fork_db.irreversible.connect( [&]( auto b ) { - on_irreversible(b); - }); - } /** @@ -237,157 +363,230 @@ struct controller_impl { } } - void on_irreversible( const block_state_ptr& s ) { - if( !blog.head() ) - blog.read_head(); + void log_irreversible() { + EOS_ASSERT( fork_db.root(), fork_database_exception, "fork database not properly initialized" ); const auto& log_head = blog.head(); - bool append_to_blog = false; - if (!log_head) { - if (s->block) { - EOS_ASSERT(s->block_num == blog.first_block_num(), block_log_exception, "block log has no blocks and is appending the wrong first block. Expected ${expected}, but received: ${actual}", - ("expected", blog.first_block_num())("actual", s->block_num)); - append_to_blog = true; - } else { - EOS_ASSERT(s->block_num == blog.first_block_num() - 1, block_log_exception, "block log has no blocks and is not properly set up to start after the snapshot"); - } - } else { - auto lh_block_num = log_head->block_num(); - if (s->block_num > lh_block_num) { - EOS_ASSERT(s->block_num - 1 == lh_block_num, unlinkable_block_exception, "unlinkable block", ("s->block_num", s->block_num)("lh_block_num", lh_block_num)); - EOS_ASSERT(s->block->previous == log_head->id(), unlinkable_block_exception, "irreversible doesn't link to block log head"); - append_to_blog = true; - } - } + auto lib_num = log_head ? log_head->block_num() : (blog.first_block_num() - 1); - db.commit( s->block_num ); + auto root_id = fork_db.root()->id; - if( append_to_blog ) { - blog.append(s->block); + if( log_head ) { + EOS_ASSERT( root_id == log_head->id(), fork_database_exception, "fork database root does not match block log head" ); + } else { + EOS_ASSERT( fork_db.root()->block_num == lib_num, fork_database_exception, + "empty block log expects the first appended block to build off a block that is not the fork database root" ); } - const auto& ubi = reversible_blocks.get_index(); - auto objitr = ubi.begin(); - while( objitr != ubi.end() && objitr->blocknum <= s->block_num ) { - reversible_blocks.remove( *objitr ); - objitr = ubi.begin(); - } + auto fork_head = (read_mode == db_read_mode::IRREVERSIBLE) ? 
fork_db.pending_head() : fork_db.head(); - // the "head" block when a snapshot is loaded is virtual and has no block data, all of its effects - // should already have been loaded from the snapshot so, it cannot be applied - if (s->block) { - if (read_mode == db_read_mode::IRREVERSIBLE) { - // when applying a snapshot, head may not be present - // when not applying a snapshot, make sure this is the next block - if (!head || s->block_num == head->block_num + 1) { - apply_block(s->block, controller::block_status::complete); - head = s; - } else { - // otherwise, assert the one odd case where initializing a chain - // from genesis creates and applies the first block automatically. - // when syncing from another chain, this is pushed in again - EOS_ASSERT(!head || head->block_num == 1, block_validate_exception, "Attempting to re-apply an irreversible block that was not the implied genesis block"); + if( fork_head->dpos_irreversible_blocknum <= lib_num ) + return; + + const auto branch = fork_db.fetch_branch( fork_head->id, fork_head->dpos_irreversible_blocknum ); + try { + const auto& rbi = reversible_blocks.get_index(); + + for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { + if( read_mode == db_read_mode::IRREVERSIBLE ) { + apply_block( *bitr, controller::block_status::complete ); + head = (*bitr); + fork_db.mark_valid( head ); } - fork_db.mark_in_current_chain(head, true); - fork_db.set_validity(head, true); + emit( self.irreversible_block, *bitr ); + + db.commit( (*bitr)->block_num ); + root_id = (*bitr)->id; + + blog.append( (*bitr)->block ); + + auto rbitr = rbi.begin(); + while( rbitr != rbi.end() && rbitr->blocknum <= (*bitr)->block_num ) { + reversible_blocks.remove( *rbitr ); + rbitr = rbi.begin(); + } } - emit(self.irreversible_block, s); + } catch( fc::exception& ) { + if( root_id != fork_db.root()->id ) { + fork_db.advance_root( root_id ); + } + throw; + } + + //db.commit( fork_head->dpos_irreversible_blocknum ); // redundant + + if( root_id != fork_db.root()->id ) { + fork_db.advance_root( root_id ); } } + /** + * Sets fork database head to the genesis state. 
+ */ + void initialize_blockchain_state() { + wlog( "Initializing new blockchain with genesis state" ); + producer_schedule_type initial_schedule{ 0, {{config::system_account_name, conf.genesis.initial_key}} }; + + block_header_state genheader; + genheader.active_schedule = initial_schedule; + genheader.pending_schedule.schedule = initial_schedule; + genheader.pending_schedule.schedule_hash = fc::sha256::hash(initial_schedule); + genheader.header.timestamp = conf.genesis.initial_timestamp; + genheader.header.action_mroot = conf.genesis.compute_chain_id(); + genheader.id = genheader.header.id(); + genheader.block_num = genheader.header.block_num(); + + head = std::make_shared(); + static_cast(*head) = genheader; + head->activated_protocol_features = std::make_shared(); + head->block = std::make_shared(genheader.header); + db.set_revision( head->block_num ); + initialize_database(); + } + void replay(std::function shutdown) { - auto blog_head = blog.read_head(); + auto blog_head = blog.head(); auto blog_head_time = blog_head->timestamp.to_time_point(); - replaying = true; replay_head_time = blog_head_time; auto start_block_num = head->block_num + 1; - ilog( "existing block log, attempting to replay from ${s} to ${n} blocks", - ("s", start_block_num)("n", blog_head->block_num()) ); - auto start = fc::time_point::now(); - while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { - replay_push_block( next, controller::block_status::irreversible ); - if( next->block_num() % 500 == 0 ) { - ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) ); - if( shutdown() ) break; + + std::exception_ptr except_ptr; + + if( start_block_num <= blog_head->block_num() ) { + ilog( "existing block log, attempting to replay from ${s} to ${n} blocks", + ("s", start_block_num)("n", blog_head->block_num()) ); + try { + while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { + replay_push_block( next, controller::block_status::irreversible ); + if( next->block_num() % 500 == 0 ) { + ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) ); + if( shutdown() ) break; + } + } + } catch( const database_guard_exception& e ) { + except_ptr = std::current_exception(); + } + ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) ); + + auto pending_head = fork_db.pending_head(); + if( pending_head->block_num < head->block_num || head->block_num < fork_db.root()->block_num ) { + ilog( "resetting fork database with new last irreversible block as the new root: ${id}", + ("id", head->id) ); + fork_db.reset( *head ); + } else if( head->block_num != fork_db.root()->block_num ) { + auto new_root = fork_db.search_on_branch( pending_head->id, head->block_num ); + EOS_ASSERT( new_root, fork_database_exception, "unexpected error: could not find new LIB in fork database" ); + ilog( "advancing fork database root to new last irreversible block within existing fork database: ${id}", + ("id", new_root->id) ); + fork_db.mark_valid( new_root ); + fork_db.advance_root( new_root->id ); } - } - ilog( "${n} blocks replayed", ("n", head->block_num - start_block_num) ); - // if the irreversible log is played without undo sessions enabled, we need to sync the - // revision ordinal to the appropriate expected value here. 
- if( self.skip_db_sessions( controller::block_status::irreversible ) ) - db.set_revision(head->block_num); + // if the irreverible log is played without undo sessions enabled, we need to sync the + // revision ordinal to the appropriate expected value here. + if( self.skip_db_sessions( controller::block_status::irreversible ) ) + db.set_revision( head->block_num ); + } else { + ilog( "no irreversible blocks need to be replayed" ); + } - int rev = 0; - while( auto obj = reversible_blocks.find(head->block_num+1) ) { - ++rev; - replay_push_block( obj->get_block(), controller::block_status::validated ); + if( !except_ptr && !shutdown() ) { + int rev = 0; + while( auto obj = reversible_blocks.find(head->block_num+1) ) { + ++rev; + replay_push_block( obj->get_block(), controller::block_status::validated ); + } + ilog( "${n} reversible blocks replayed", ("n",rev) ); } - ilog( "${n} reversible blocks replayed", ("n",rev) ); auto end = fc::time_point::now(); ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", - ("n", head->block_num - start_block_num)("duration", (end-start).count()/1000000) + ("n", head->block_num + 1 - start_block_num)("duration", (end-start).count()/1000000) ("mspb", ((end-start).count()/1000.0)/(head->block_num-start_block_num)) ); - replaying = false; replay_head_time.reset(); + + if( except_ptr ) { + std::rethrow_exception( except_ptr ); + } } void init(std::function shutdown, const snapshot_reader_ptr& snapshot) { - - bool report_integrity_hash = !!snapshot; - if (snapshot) { - EOS_ASSERT( !head, fork_database_exception, "" ); + // Setup state if necessary (or in the default case stay with already loaded state): + uint32_t lib_num = 1u; + if( snapshot ) { snapshot->validate(); - - read_from_snapshot( snapshot ); - - auto end = blog.read_head(); - if( !end ) { - blog.reset( conf.genesis, signed_block_ptr(), head->block_num + 1 ); - } else if( end->block_num() > head->block_num ) { - replay( shutdown ); + if( blog.head() ) { + lib_num = blog.head()->block_num(); + read_from_snapshot( snapshot, blog.first_block_num(), lib_num ); } else { - EOS_ASSERT( end->block_num() == head->block_num, fork_database_exception, - "Block log is provided with snapshot but does not contain the head block from the snapshot" ); + read_from_snapshot( snapshot, 0, std::numeric_limits::max() ); + lib_num = head->block_num; + blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); } } else { - if( !head ) { - initialize_fork_db(); // set head to genesis state - } + if( db.revision() < 1 || !fork_db.head() ) { + if( fork_db.head() ) { + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { + fork_db.rollback_head_to_root(); + } + wlog( "No existing chain state. Initializing fresh blockchain state." ); + } else { + EOS_ASSERT( db.revision() < 1, database_exception, + "No existing fork database despite existing chain state. Replay required." ); + wlog( "No existing chain state or fork database. 
Initializing fresh blockchain state and resetting fork database."); + } + initialize_blockchain_state(); // sets head to genesis state - auto end = blog.read_head(); - if( !end ) { - blog.reset( conf.genesis, head->block ); - } else if( end->block_num() > head->block_num ) { - replay( shutdown ); - report_integrity_hash = true; - } - } + if( !fork_db.head() ) { + fork_db.reset( *head ); + } - if( shutdown() ) return; + if( blog.head() ) { + EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, + "block log does not start with genesis block" + ); + lib_num = blog.head()->block_num(); + } else { + blog.reset( conf.genesis, head->block ); + } + } else { + lib_num = fork_db.root()->block_num; + auto first_block_num = blog.first_block_num(); + if( blog.head() ) { + EOS_ASSERT( first_block_num <= lib_num && lib_num <= blog.head()->block_num(), + block_log_exception, + "block log does not contain last irreversible block", + ("block_log_first_num", first_block_num) + ("block_log_last_num", blog.head()->block_num()) + ("fork_db_lib", lib_num) + ); + lib_num = blog.head()->block_num(); + } else { + lib_num = fork_db.root()->block_num; + if( first_block_num != (lib_num + 1) ) { + blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); + } + } - const auto& ubi = reversible_blocks.get_index(); - auto objitr = ubi.rbegin(); - if( objitr != ubi.rend() ) { - EOS_ASSERT( objitr->blocknum == head->block_num, fork_database_exception, - "reversible block database is inconsistent with fork database, replay blockchain", - ("head",head->block_num)("unconfimed", objitr->blocknum) ); - } else { - auto end = blog.read_head(); - EOS_ASSERT( !end || end->block_num() == head->block_num, fork_database_exception, - "fork database exists but reversible block database does not, replay blockchain", - ("blog_head",end->block_num())("head",head->block_num) ); + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { + fork_db.rollback_head_to_root(); + } + head = fork_db.head(); + } } + // At this point head != nullptr && fork_db.head() != nullptr && fork_db.root() != nullptr. + // Furthermore, fork_db.root()->block_num <= lib_num. + // Also, even though blog.head() may still be nullptr, blog.first_block_num() is guaranteed to be lib_num + 1. 
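// The invariants listed above can be read as a post-condition of the setup
// branches; expressed as asserts (a sketch, not present in the patch itself):
//
//    assert( head && fork_db.head() && fork_db.root() );
//    assert( fork_db.root()->block_num <= lib_num );
//    assert( blog.head() || blog.first_block_num() == lib_num + 1 );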
- EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, "fork database is inconsistent with shared memory", - ("db",db.revision())("head",head->block_num) ); + EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, + "fork database head is inconsistent with state", + ("db",db.revision())("head",head->block_num) ); if( db.revision() > head->block_num ) { - wlog( "warning: database revision (${db}) is greater than head block number (${head}), " + wlog( "database revision (${db}) is greater than head block number (${head}), " "attempting to undo pending changes", ("db",db.revision())("head",head->block_num) ); } @@ -395,18 +594,101 @@ struct controller_impl { db.undo(); } + protocol_features.init( db ); + + const auto& rbi = reversible_blocks.get_index(); + auto last_block_num = lib_num; + + if( read_mode == db_read_mode::IRREVERSIBLE ) { + // ensure there are no reversible blocks + auto itr = rbi.begin(); + if( itr != rbi.end() ) { + wlog( "read_mode has changed to irreversible: erasing reversible blocks" ); + } + for( ; itr != rbi.end(); itr = rbi.begin() ) + reversible_blocks.remove( *itr ); + } else { + auto itr = rbi.begin(); + for( ; itr != rbi.end() && itr->blocknum <= lib_num; itr = rbi.begin() ) + reversible_blocks.remove( *itr ); + + EOS_ASSERT( itr == rbi.end() || itr->blocknum == lib_num + 1, reversible_blocks_exception, + "gap exists between last irreversible block and first reversible block", + ("lib", lib_num)("first_reversible_block_num", itr->blocknum) + ); + + auto ritr = rbi.rbegin(); + + if( ritr != rbi.rend() ) { + last_block_num = ritr->blocknum; + } + + EOS_ASSERT( head->block_num <= last_block_num, reversible_blocks_exception, + "head block (${head_num}) is greater than the last locally stored block (${last_block_num})", + ("head_num", head->block_num)("last_block_num", last_block_num) + ); + + auto pending_head = fork_db.pending_head(); + + if( ritr != rbi.rend() + && lib_num < pending_head->block_num + && pending_head->block_num <= last_block_num + ) { + auto rbitr = rbi.find( pending_head->block_num ); + EOS_ASSERT( rbitr != rbi.end(), reversible_blocks_exception, "pending head block not found in reversible blocks"); + auto rev_id = rbitr->get_block_id(); + EOS_ASSERT( rev_id == pending_head->id, + reversible_blocks_exception, + "mismatch in block id of pending head block ${num} in reversible blocks database: " + "expected: ${expected}, actual: ${actual}", + ("num", pending_head->block_num)("expected", pending_head->id)("actual", rev_id) + ); + } else if( ritr != rbi.rend() && last_block_num < pending_head->block_num ) { + const auto b = fork_db.search_on_branch( pending_head->id, last_block_num ); + FC_ASSERT( b, "unexpected violation of invariants" ); + auto rev_id = ritr->get_block_id(); + EOS_ASSERT( rev_id == b->id, + reversible_blocks_exception, + "mismatch in block id of last block (${num}) in reversible blocks database: " + "expected: ${expected}, actual: ${actual}", + ("num", last_block_num)("expected", b->id)("actual", rev_id) + ); + } + // else no checks needed since fork_db will be completely reset on replay anyway + } + + bool report_integrity_hash = !!snapshot || (lib_num > head->block_num); + + if( last_block_num > head->block_num ) { + replay( shutdown ); // replay any irreversible and reversible blocks ahead of current head + } + + if( shutdown() ) return; + + if( read_mode != db_read_mode::IRREVERSIBLE + && fork_db.pending_head()->id != fork_db.head()->id + && fork_db.head()->id == fork_db.root()->id + ) { + wlog( 
"read_mode has changed from irreversible: applying best branch from fork database" ); + + for( auto pending_head = fork_db.pending_head(); + pending_head->id != fork_db.head()->id; + pending_head = fork_db.pending_head() + ) { + wlog( "applying branch from fork database ending with block: ${id}", ("id", pending_head->id) ); + maybe_switch_forks( pending_head, controller::block_status::complete ); + } + } + if( report_integrity_hash ) { const auto hash = calculate_integrity_hash(); ilog( "database initialized with hash: ${hash}", ("hash", hash) ); } - } ~controller_impl() { + thread_pool.stop(); pending.reset(); - - db.flush(); - reversible_blocks.flush(); } void add_indices() { @@ -422,6 +704,11 @@ struct controller_impl { void clear_all_undo() { // Rewind the database to the last irreversible block db.undo_all(); + /* + FC_ASSERT(db.revision() == self.head_block_num(), + "Chainbase revision does not match head block num", + ("rev", db.revision())("head_block", self.head_block_num())); + */ } void add_contract_tables_to_snapshot( const snapshot_writer_ptr& snapshot ) const { @@ -513,7 +800,7 @@ struct controller_impl { resource_limits.add_to_snapshot(snapshot); } - void read_from_snapshot( const snapshot_reader_ptr& snapshot ) { + void read_from_snapshot( const snapshot_reader_ptr& snapshot, uint32_t blog_start, uint32_t blog_end ) { snapshot->read_section([this]( auto §ion ){ chain_snapshot_header header; section.read_row(header, db); @@ -521,15 +808,21 @@ struct controller_impl { }); - snapshot->read_section([this]( auto §ion ){ + snapshot->read_section([this, blog_start, blog_end]( auto §ion ){ block_header_state head_header_state; section.read_row(head_header_state, db); - auto head_state = std::make_shared(head_header_state); - fork_db.set(head_state); - fork_db.set_validity(head_state, true); - fork_db.mark_in_current_chain(head_state, true); - head = head_state; + snapshot_head_block = head_header_state.block_num; + EOS_ASSERT( blog_start <= (snapshot_head_block + 1) && snapshot_head_block <= blog_end, + block_log_exception, + "Block log is provided with snapshot but does not contain the head block from the snapshot nor a block right after it", + ("snapshot_head_block", snapshot_head_block) + ("block_log_first_num", blog_start) + ("block_log_last_num", blog_end) + ); + + fork_db.reset( head_header_state ); + head = fork_db.head(); snapshot_head_block = head->block_num; }); @@ -568,43 +861,18 @@ struct controller_impl { return enc.result(); } - - /** - * Sets fork database head to the genesis state. 
- */ - void initialize_fork_db() { - wlog( " Initializing new blockchain with genesis state " ); - producer_schedule_type initial_schedule{ 0, {{config::system_account_name, conf.genesis.initial_key}} }; - - block_header_state genheader; - genheader.active_schedule = initial_schedule; - genheader.pending_schedule = initial_schedule; - genheader.pending_schedule_hash = fc::sha256::hash(initial_schedule); - genheader.header.timestamp = conf.genesis.initial_timestamp; - genheader.header.action_mroot = conf.genesis.compute_chain_id(); - genheader.id = genheader.header.id(); - genheader.block_num = genheader.header.block_num(); - - head = std::make_shared( genheader ); - head->block = std::make_shared(genheader.header); - fork_db.set( head ); - db.set_revision( head->block_num ); - - initialize_database(); - } - void create_native_account( account_name name, const authority& owner, const authority& active, bool is_privileged = false ) { db.create([&](auto& a) { a.name = name; a.creation_date = conf.genesis.initial_timestamp; - a.privileged = is_privileged; if( name == config::system_account_name ) { a.set_abi(eosio_contract_abi(abi_def())); } }); - db.create([&](auto & a) { - a.name = name; + db.create([&](auto & a) { + a.name = name; + a.set_privileged( is_privileged ); }); const auto& owner_permission = authorization.create_permission(name, config::owner_name, 0, @@ -630,13 +898,21 @@ struct controller_impl { const auto& tapos_block_summary = db.get(1); db.modify( tapos_block_summary, [&]( auto& bs ) { - bs.block_id = head->id; + bs.block_id = head->id; }); conf.genesis.initial_configuration.validate(); db.create([&](auto& gpo ){ - gpo.configuration = conf.genesis.initial_configuration; + gpo.configuration = conf.genesis.initial_configuration; + }); + + db.create([&](auto& pso ){ + pso.num_supported_key_types = 2; + for( const auto& i : genesis_intrinsics ) { + add_intrinsic_to_whitelist( pso.whitelisted_intrinsics, i ); + } }); + db.create([](auto&){}); authorization.initialize_database(); @@ -664,58 +940,22 @@ struct controller_impl { conf.genesis.initial_timestamp ); } - - - /** - * @post regardless of the success of commit block there is no active pending block - */ - void commit_block( bool add_to_fork_db ) { - auto reset_pending_on_exit = fc::make_scoped_exit([this]{ - pending.reset(); - }); - - try { - if (add_to_fork_db) { - pending->_pending_block_state->validated = true; - auto new_bsp = fork_db.add(pending->_pending_block_state, true); - emit(self.accepted_block_header, pending->_pending_block_state); - head = fork_db.head(); - EOS_ASSERT(new_bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); - } - - if( !replaying ) { - reversible_blocks.create( [&]( auto& ubo ) { - ubo.blocknum = pending->_pending_block_state->block_num; - ubo.set_block( pending->_pending_block_state->block ); - }); - } - - emit( self.accepted_block, pending->_pending_block_state ); - } catch (...) { - // dont bother resetting pending, instead abort the block - reset_pending_on_exit.cancel(); - abort_block(); - throw; - } - - // push the state for pending. - pending->push(); - } - // The returned scoped_exit should not exceed the lifetime of the pending which existed when make_block_restore_point was called. 
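// (For intuition: the restore point below is RAII over container sizes. A minimal sketch of
// the same idiom, assuming only the fc::make_scoped_exit helper already used throughout this
// file; the `receipts` vector and values are illustrative, not part of this patch:
//
//    std::vector<int> receipts;
//    auto restore = fc::make_scoped_exit( [&receipts, n = receipts.size()]() {
//       receipts.resize( n );     // unwind: drop anything appended after the restore point
//    } );
//    receipts.push_back( 42 );    // speculative work that may still fail
//    restore.cancel();            // success: keep the appended entry
// )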
fc::scoped_exit<std::function<void()>> make_block_restore_point() { - auto orig_block_transactions_size = pending->_pending_block_state->block->transactions.size(); - auto orig_state_transactions_size = pending->_pending_block_state->trxs.size(); - auto orig_state_actions_size = pending->_actions.size(); + auto& bb = pending->_block_stage.get<building_block>(); + auto orig_block_transactions_size = bb._pending_trx_receipts.size(); + auto orig_state_transactions_size = bb._pending_trx_metas.size(); + auto orig_state_actions_size = bb._actions.size(); std::function<void()> callback = [this, orig_block_transactions_size, orig_state_transactions_size, orig_state_actions_size]() { - pending->_pending_block_state->block->transactions.resize(orig_block_transactions_size); - pending->_pending_block_state->trxs.resize(orig_state_transactions_size); - pending->_actions.resize(orig_state_actions_size); + auto& bb = pending->_block_stage.get<building_block>(); + bb._pending_trx_receipts.resize(orig_block_transactions_size); + bb._pending_trx_metas.resize(orig_state_transactions_size); + bb._actions.resize(orig_state_actions_size); }; return fc::make_scoped_exit( std::move(callback) ); @@ -734,8 +974,14 @@ struct controller_impl { // Deliver onerror action containing the failed deferred transaction directly back to the sender. etrx.actions.emplace_back( vector<permission_level>{{gtrx.sender, config::active_name}}, onerror( gtrx.sender_id, gtrx.packed_trx.data(), gtrx.packed_trx.size() ) ); - etrx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to avoid appearing expired - etrx.set_reference_block( self.head_block_id() ); + if( self.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) { + etrx.expiration = time_point_sec(); + etrx.ref_block_num = 0; + etrx.ref_block_prefix = 0; + } else { + etrx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to nearest second to avoid appearing expired + etrx.set_reference_block( self.head_block_id() ); + } transaction_context trx_context( self, etrx, etrx.id(), start ); trx_context.deadline = deadline; @@ -746,34 +992,37 @@ struct controller_impl { try { trx_context.init_for_implicit_trx(); trx_context.published = gtrx.published; - trx_context.trace->action_traces.emplace_back(); - trx_context.dispatch_action( trx_context.trace->action_traces.back(), etrx.actions.back(), gtrx.sender ); + trx_context.execute_action( trx_context.schedule_action( etrx.actions.back(), gtrx.sender, false, 0, 0 ), 0 ); trx_context.finalize(); // Automatically rounds up network and CPU usage in trace and bills payers if successful auto restore = make_block_restore_point(); trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::soft_fail, trx_context.billed_cpu_time_us, trace->net_usage ); - fc::move_append( pending->_actions, move(trx_context.executed) ); + fc::move_append( pending->_block_stage.get<building_block>()._actions, move(trx_context.executed) ); trx_context.squash(); restore.cancel(); return trace; + } catch( const disallowed_transaction_extensions_bad_block_exception& ) { + throw; + } catch( const protocol_feature_bad_block_exception& ) { + throw; } catch( const fc::exception& e ) { cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); + trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); } return trace; } - void remove_scheduled_transaction( const generated_transaction_object& gto ) { - resource_limits.add_pending_ram_usage( - gto.payer, -(config::billable_size_v<generated_transaction_object>
+ gto.packed_trx.size()) - ); + int64_t remove_scheduled_transaction( const generated_transaction_object& gto ) { + int64_t ram_delta = -(config::billable_size_v + gto.packed_trx.size()); + resource_limits.add_pending_ram_usage( gto.payer, ram_delta ); // No need to verify_account_ram_usage since we are only reducing memory db.remove( gto ); + return ram_delta; } bool failure_is_subjective( const fc::exception& e ) const { @@ -820,7 +1069,7 @@ struct controller_impl { // // IF the transaction FAILs in a subjective way, `undo_session` should expire without being squashed // resulting in the GTO being restored and available for a future block to retire. - remove_scheduled_transaction(gto); + int64_t trx_removal_ram_delta = remove_scheduled_transaction(gto); fc::datastream ds( gtrx.packed_trx.data(), gtrx.packed_trx.size() ); @@ -837,13 +1086,14 @@ struct controller_impl { if( gtrx.expiration < self.pending_block_time() ) { trace = std::make_shared(); trace->id = gtrx.trx_id; - trace->block_num = self.pending_block_state()->block_num; + trace->block_num = self.head_block_num() + 1; trace->block_time = self.pending_block_time(); trace->producer_block_id = self.pending_producer_block_id(); trace->scheduled = true; trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::expired, billed_cpu_time_us, 0 ); // expire the transaction + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( self.applied_transaction, std::tie(trace, dtrx) ); undo_session.squash(); return trace; } @@ -866,7 +1116,13 @@ struct controller_impl { trx_context.init_for_deferred_trx( gtrx.published ); if( trx_context.enforce_whiteblacklist && pending->_block_status == controller::block_status::incomplete ) { - check_actor_list( trx_context.bill_to_accounts ); // Assumes bill_to_accounts is the set of actors authorizing the transaction + flat_set actors; + for( const auto& act : trx_context.trx.actions ) { + for( const auto& auth : act.authorization ) { + actors.insert( auth.actor ); + } + } + check_actor_list( actors ); } trx_context.exec(); @@ -879,10 +1135,12 @@ struct controller_impl { trx_context.billed_cpu_time_us, trace->net_usage ); - fc::move_append( pending->_actions, move(trx_context.executed) ); + fc::move_append( pending->_block_stage.get()._actions, move(trx_context.executed) ); + + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( self.applied_transaction, std::tie(trace, dtrx) ); trx_context.squash(); undo_session.squash(); @@ -890,8 +1148,13 @@ struct controller_impl { restore.cancel(); return trace; + } catch( const disallowed_transaction_extensions_bad_block_exception& ) { + throw; + } catch( const protocol_feature_bad_block_exception& ) { + throw; } catch( const fc::exception& e ) { cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); + trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); trace->elapsed = fc::time_point::now() - trx_context.start; @@ -909,8 +1172,9 @@ struct controller_impl { error_trace->failed_dtrx_trace = trace; trace = error_trace; if( !trace->except_ptr ) { + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( 
self.applied_transaction, std::tie(trace, dtrx) ); undo_session.squash(); return trace; } @@ -945,14 +1209,15 @@ struct controller_impl { block_timestamp_type(self.pending_block_time()).slot ); // Should never fail trace->receipt = push_receipt(gtrx.trx_id, transaction_receipt::hard_fail, cpu_time_to_bill_us, 0); + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( self.applied_transaction, std::tie(trace, dtrx) ); undo_session.squash(); } else { emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( self.applied_transaction, std::tie(trace, dtrx) ); } return trace; @@ -967,8 +1232,9 @@ struct controller_impl { uint64_t cpu_usage_us, uint64_t net_usage ) { uint64_t net_usage_words = net_usage / 8; EOS_ASSERT( net_usage_words*8 == net_usage, transaction_exception, "net_usage is not divisible by 8" ); - pending->_pending_block_state->block->transactions.emplace_back( trx ); - transaction_receipt& r = pending->_pending_block_state->block->transactions.back(); + auto& receipts = pending->_block_stage.get()._pending_trx_receipts; + receipts.emplace_back( trx ); + transaction_receipt& r = receipts.back(); r.cpu_usage_us = cpu_usage_us; r.net_usage_words = net_usage_words; r.status = status; @@ -1046,7 +1312,7 @@ struct controller_impl { ? transaction_receipt::executed : transaction_receipt::delayed; trace->receipt = push_receipt(*trx->packed_trx, s, trx_context.billed_cpu_time_us, trace->net_usage); - pending->_pending_block_state->trxs.emplace_back(trx); + pending->_block_stage.get()._pending_trx_metas.emplace_back(trx); } else { transaction_receipt_header r; r.status = transaction_receipt::executed; @@ -1055,7 +1321,7 @@ struct controller_impl { trace->receipt = r; } - fc::move_append(pending->_actions, move(trx_context.executed)); + fc::move_append(pending->_block_stage.get()._actions, move(trx_context.executed)); // call the accept signal but only once for this transaction if (!trx->accepted) { @@ -1063,7 +1329,7 @@ struct controller_impl { emit( self.accepted_transaction, trx); } - emit(self.applied_transaction, trace); + emit(self.applied_transaction, std::tie(trace, trn)); if ( read_mode != db_read_mode::SPECULATIVE && pending->_block_status == controller::block_status::incomplete ) { @@ -1078,7 +1344,12 @@ struct controller_impl { unapplied_transactions.erase( trx->signed_id ); } return trace; + } catch( const disallowed_transaction_extensions_bad_block_exception& ) { + throw; + } catch( const protocol_feature_bad_block_exception& ) { + throw; } catch (const fc::exception& e) { + trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); } @@ -1088,19 +1359,22 @@ struct controller_impl { } emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( self.applied_transaction, std::tie(trace, trn) ); return trace; } FC_CAPTURE_AND_RETHROW((trace)) } /// push_transaction - - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, + void start_block( block_timestamp_type when, + uint16_t confirm_block_count, + const vector& new_protocol_feature_activations, + controller::block_status s, const optional& producer_block_id ) { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); - auto guard_pending = fc::make_scoped_exit([this](){ + auto guard_pending = 
fc::make_scoped_exit([this, head_block_num=head->block_num](){ + protocol_features.popped_blocks_to( head_block_num ); pending.reset(); }); @@ -1108,44 +1382,111 @@ struct controller_impl { EOS_ASSERT( db.revision() == head->block_num, database_exception, "db revision is not on par with head block", ("db.revision()", db.revision())("controller_head_block", head->block_num)("fork_db_head_block", fork_db.head()->block_num) ); - pending.emplace(maybe_session(db)); + pending.emplace( maybe_session(db), *head, when, confirm_block_count, new_protocol_feature_activations ); } else { - pending.emplace(maybe_session()); + pending.emplace( maybe_session(), *head, when, confirm_block_count, new_protocol_feature_activations ); } pending->_block_status = s; pending->_producer_block_id = producer_block_id; - pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active - pending->_pending_block_state->in_current_chain = true; - pending->_pending_block_state->set_confirmed(confirm_block_count); + auto& bb = pending->_block_stage.get(); + const auto& pbhs = bb._pending_block_header_state; + + // modify state of speculative block only if we are in speculative read mode (otherwise we need clean state for head or read-only modes) + if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) + { + const auto& pso = db.get(); + + auto num_preactivated_protocol_features = pso.preactivated_protocol_features.size(); + bool handled_all_preactivated_features = (num_preactivated_protocol_features == 0); + + if( new_protocol_feature_activations.size() > 0 ) { + flat_map activated_protocol_features; + activated_protocol_features.reserve( std::max( num_preactivated_protocol_features, + new_protocol_feature_activations.size() ) ); + for( const auto& feature_digest : pso.preactivated_protocol_features ) { + activated_protocol_features.emplace( feature_digest, false ); + } + + size_t num_preactivated_features_that_have_activated = 0; + + const auto& pfs = protocol_features.get_protocol_feature_set(); + for( const auto& feature_digest : new_protocol_feature_activations ) { + const auto& f = pfs.get_protocol_feature( feature_digest ); + + auto res = activated_protocol_features.emplace( feature_digest, true ); + if( res.second ) { + // feature_digest was not preactivated + EOS_ASSERT( !f.preactivation_required, protocol_feature_exception, + "attempted to activate protocol feature without prior required preactivation: ${digest}", + ("digest", feature_digest) + ); + } else { + EOS_ASSERT( !res.first->second, block_validate_exception, + "attempted duplicate activation within a single block: ${digest}", + ("digest", feature_digest) + ); + // feature_digest was preactivated + res.first->second = true; + ++num_preactivated_features_that_have_activated; + } + + if( f.builtin_feature ) { + trigger_activation_handler( *f.builtin_feature ); + } + + protocol_features.activate_feature( feature_digest, pbhs.block_num ); + + ++bb._num_new_protocol_features_that_have_activated; + } + + if( num_preactivated_features_that_have_activated == num_preactivated_protocol_features ) { + handled_all_preactivated_features = true; + } + } - auto was_pending_promoted = pending->_pending_block_state->maybe_promote_pending(); + EOS_ASSERT( handled_all_preactivated_features, block_validate_exception, + "There are pre-activated protocol features that were not activated at the start of this block" + ); - //modify state in speculative block only if we are 
speculative reads mode (other wise we need clean state for head or irreversible reads) - if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) { + if( new_protocol_feature_activations.size() > 0 ) { + db.modify( pso, [&]( auto& ps ) { + ps.preactivated_protocol_features.clear(); + + ps.activated_protocol_features.reserve( ps.activated_protocol_features.size() + + new_protocol_feature_activations.size() ); + for( const auto& feature_digest : new_protocol_feature_activations ) { + ps.activated_protocol_features.emplace_back( feature_digest, pbhs.block_num ); + } + }); + } const auto& gpo = db.get(); + if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... - ( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_irreversible_blocknum ) && // ... that has now become irreversible ... - pending->_pending_block_state->pending_schedule.producers.size() == 0 && // ... and there is room for a new pending schedule ... - !was_pending_promoted // ... and not just because it was promoted to active at the start of this block, then: + ( *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible ... + pbhs.prev_pending_schedule.schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion ) - { - // Promote proposed schedule to pending schedule. - if( !replaying ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", - ("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num) - ("lib", pending->_pending_block_state->dpos_irreversible_blocknum) - ("schedule", static_cast(gpo.proposed_schedule) ) ); - } - pending->_pending_block_state->set_new_producers( gpo.proposed_schedule ); - db.modify( gpo, [&]( auto& gp ) { - gp.proposed_schedule_block_num = optional(); - gp.proposed_schedule.clear(); - }); + { + // Promote proposed schedule to pending schedule. 
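// (For orientation, an illustrative lifecycle of a producer schedule change as it flows
// through this promotion logic; `chain` and `prods` are hypothetical stand-ins, and
// set_proposed_producers is the controller API defined later in this same file:
//
//    auto version = chain.set_proposed_producers( prods ); // stored in gpo as proposed at some block N
//    // ... production continues until block N becomes irreversible (LIB >= N) ...
//    // start_block() then promotes the proposal to the *pending* schedule (code below),
//    // and it becomes *active* once the block that promoted it is itself irreversible.
// )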
+ if( !replay_head_time ) { + ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) + ("lib", pbhs.dpos_irreversible_blocknum) + ("schedule", static_cast(gpo.proposed_schedule) ) ); } + EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, + producer_schedule_exception, "wrong producer schedule version specified" ); + + pending->_block_stage.get()._new_pending_producer_schedule = gpo.proposed_schedule; + db.modify( gpo, [&]( auto& gp ) { + gp.proposed_schedule_block_num = optional(); + gp.proposed_schedule.clear(); + }); + } + try { auto onbtrx = std::make_shared( get_on_block_transaction() ); onbtrx->implicit = true; @@ -1168,23 +1509,182 @@ struct controller_impl { } guard_pending.cancel(); - } // start_block + } /// start_block + + void finalize_block() + { + EOS_ASSERT( pending, block_validate_exception, "it is not valid to finalize when there is no pending block"); + EOS_ASSERT( pending->_block_stage.contains(), block_validate_exception, "already called finalize_block"); + try { + auto& pbhs = pending->get_pending_block_header_state(); - void sign_block( const std::function& signer_callback ) { - auto p = pending->_pending_block_state; + // Update resource limits: + resource_limits.process_account_limit_updates(); + const auto& chain_config = self.get_global_properties().configuration; + uint32_t max_virtual_mult = 1000; + uint64_t CPU_TARGET = EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct); + resource_limits.set_block_parameters( + { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}}, + {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}} + ); + resource_limits.process_block_usage(pbhs.block_num); + + auto& bb = pending->_block_stage.get(); - p->sign( signer_callback ); + // Create (unsigned) block: + auto block_ptr = std::make_shared( pbhs.make_block_header( + calculate_trx_merkle(), + calculate_action_merkle(), + std::move( bb._new_pending_producer_schedule ), + std::move( bb._new_protocol_feature_activations ) + ) ); - static_cast(*p->block) = p->header; - } /// sign_block + block_ptr->transactions = std::move( bb._pending_trx_receipts ); - void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { + auto id = block_ptr->id(); + + // Update TaPoS table: + create_block_summary( id ); + + /* + ilog( "finalized block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", + ("n",pbhs.block_num) + ("id",id) + ("t",pbhs.timestamp) + ("p",pbhs.producer) + ("signing_key", pbhs.block_signing_key) + ("v",pbhs.active_schedule_version) + ("lib",pbhs.dpos_irreversible_blocknum) + ("ndtrxs",db.get_index().size()) + ("np",block_ptr->new_producers) + ); + */ + + pending->_block_stage = assembled_block{ + id, + std::move( bb._pending_block_header_state ), + std::move( bb._pending_trx_metas ), + std::move( block_ptr ) + }; + } FC_CAPTURE_AND_RETHROW() } /// finalize_block + + /** + * @post regardless of the success of commit block there is no active pending block + */ + void commit_block( bool add_to_fork_db ) { + auto 
reset_pending_on_exit = fc::make_scoped_exit([this]{ + pending.reset(); + }); + + try { + EOS_ASSERT( pending->_block_stage.contains<completed_block>(), block_validate_exception, + "cannot call commit_block until pending block is completed" ); + + auto bsp = pending->_block_stage.get<completed_block>()._block_state; + + if( add_to_fork_db ) { + fork_db.add( bsp ); + fork_db.mark_valid( bsp ); + emit( self.accepted_block_header, bsp ); + head = fork_db.head(); + EOS_ASSERT( bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); + } + + if( !replay_head_time && read_mode != db_read_mode::IRREVERSIBLE ) { + reversible_blocks.create<reversible_block_object>( [&]( auto& ubo ) { + ubo.blocknum = bsp->block_num; + ubo.set_block( bsp->block ); + }); + } + + if( add_to_fork_db ) { + log_irreversible(); + } + + emit( self.accepted_block, bsp ); + } catch (...) { + // don't bother resetting pending, instead abort the block + reset_pending_on_exit.cancel(); + abort_block(); + throw; + } + + // push the state for pending. + pending->push(); + } + + /** + * This method is called from other threads. The controller_impl should outlive those threads. + * However, to avoid race conditions, it means that the behavior of this function should not change + * after controller_impl construction. + + * This should not be an issue since the purpose of this function is to ensure all of the protocol features + * in the supplied vector are recognized by the software, and the set of recognized protocol features is + * determined at startup and cannot be changed without a restart. + */ + void check_protocol_features( block_timestamp_type timestamp, + const flat_set<digest_type>& currently_activated_protocol_features, + const vector<digest_type>& new_protocol_features ) + { + const auto& pfs = protocol_features.get_protocol_feature_set(); + + for( auto itr = new_protocol_features.begin(); itr != new_protocol_features.end(); ++itr ) { + const auto& f = *itr; + + auto status = pfs.is_recognized( f, timestamp ); + switch( status ) { + case protocol_feature_set::recognized_t::unrecognized: + EOS_THROW( protocol_feature_exception, + "protocol feature with digest '${digest}' is unrecognized", ("digest", f) ); + break; + case protocol_feature_set::recognized_t::disabled: + EOS_THROW( protocol_feature_exception, + "protocol feature with digest '${digest}' is disabled", ("digest", f) ); + break; + case protocol_feature_set::recognized_t::too_early: + EOS_THROW( protocol_feature_exception, + "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", f)("timestamp", timestamp) ); + break; + case protocol_feature_set::recognized_t::ready: + break; + default: + EOS_THROW( protocol_feature_exception, "unexpected recognized_t status" ); + break; + } + + EOS_ASSERT( currently_activated_protocol_features.find( f ) == currently_activated_protocol_features.end(), + protocol_feature_exception, + "protocol feature with digest '${digest}' has already been activated", + ("digest", f) + ); + + auto dependency_checker = [&currently_activated_protocol_features, &new_protocol_features, &itr] + ( const digest_type& f ) -> bool + { + if( currently_activated_protocol_features.find( f ) != currently_activated_protocol_features.end() ) + return true; + + return (std::find( new_protocol_features.begin(), itr, f ) != itr); + }; + + EOS_ASSERT( pfs.validate_dependencies( f, dependency_checker ), protocol_feature_exception, + "not all dependencies of protocol feature with digest '${digest}' have been activated", + ("digest", f) +
); + } + } + + void apply_block( const block_state_ptr& bsp, controller::block_status s ) + { try { try { - EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); + const signed_block_ptr& b = bsp->block; + const auto& new_protocol_feature_activations = bsp->get_new_protocol_feature_activations(); + + EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported block extensions" ); auto producer_block_id = b->id(); - start_block( b->timestamp, b->confirmed, s , producer_block_id); + start_block( b->timestamp, b->confirmed, new_protocol_feature_activations, s, producer_block_id); std::vector packed_transactions; packed_transactions.reserve( b->transactions.size() ); @@ -1193,7 +1693,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::start_recover_keys( mtrx, thread_pool, chain_id, microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } @@ -1203,7 +1703,8 @@ struct controller_impl { size_t packed_idx = 0; for( const auto& receipt : b->transactions ) { - auto num_pending_receipts = pending->_pending_block_state->block->transactions.size(); + const auto& trx_receipts = pending->_block_stage.get()._pending_trx_receipts; + auto num_pending_receipts = trx_receipts.size(); if( receipt.trx.contains() ) { trace = push_transaction( packed_transactions.at(packed_idx++), fc::time_point::maximum(), receipt.cpu_usage_us, true ); } else if( receipt.trx.contains() ) { @@ -1219,36 +1720,40 @@ struct controller_impl { throw *trace->except; } - EOS_ASSERT( pending->_pending_block_state->block->transactions.size() > 0, + EOS_ASSERT( trx_receipts.size() > 0, block_validate_exception, "expected a receipt", ("block", *b)("expected_receipt", receipt) ); - EOS_ASSERT( pending->_pending_block_state->block->transactions.size() == num_pending_receipts + 1, + EOS_ASSERT( trx_receipts.size() == num_pending_receipts + 1, block_validate_exception, "expected receipt was not added", ("block", *b)("expected_receipt", receipt) ); - const transaction_receipt_header& r = pending->_pending_block_state->block->transactions.back(); + const transaction_receipt_header& r = trx_receipts.back(); EOS_ASSERT( r == static_cast(receipt), block_validate_exception, "receipt does not match", - ("producer_receipt", receipt)("validator_receipt", pending->_pending_block_state->block->transactions.back()) ); + ("producer_receipt", receipt)("validator_receipt", trx_receipts.back()) ); } finalize_block(); - // this implicitly asserts that all header fields (less the signature) are identical - EOS_ASSERT(producer_block_id == pending->_pending_block_state->header.id(), - block_validate_exception, "Block ID does not match", - ("producer_block_id",producer_block_id)("validator_block_id",pending->_pending_block_state->header.id())); - - // We need to fill out the pending block state's block because that gets serialized in the reversible block log - // in the future we can optimize this by serializing the original and not the copy + auto& ab = pending->_block_stage.get(); - // we can always trust this signature because, - // - prior to apply_block, we call fork_db.add which does a signature check IFF the block is untrusted - // - OTHERWISE the block is trusted and therefore we trust that the signature is valid - // Also, as 
::sign_block does not lazily calculate the digest of the block, we can just short-circuit to save cycles - pending->_pending_block_state->header.producer_signature = b->producer_signature; - static_cast(*pending->_pending_block_state->block) = pending->_pending_block_state->header; + // this implicitly asserts that all header fields (less the signature) are identical + EOS_ASSERT( producer_block_id == ab._id, block_validate_exception, "Block ID does not match", + ("producer_block_id",producer_block_id)("validator_block_id",ab._id) ); + + auto bsp = std::make_shared( + std::move( ab._pending_block_header_state ), + b, + std::move( ab._trx_metas ), + []( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + {}, // validation of any new protocol features should have already occurred prior to apply_block + true // signature should have already been verified (assuming untrusted) prior to apply_block + ); + + pending->_block_stage = completed_block{ bsp }; commit_block(false); return; @@ -1268,12 +1773,21 @@ struct controller_impl { auto existing = fork_db.get_block( id ); EOS_ASSERT( !existing, fork_database_exception, "we already know about this block: ${id}", ("id", id) ); - auto prev = fork_db.get_block( b->previous ); - EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); + auto prev = fork_db.get_block_header( b->previous ); + EOS_ASSERT( prev, unlinkable_block_exception, + "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( thread_pool, [b, prev]() { + return async_thread_pool( thread_pool.get_executor(), [b, prev, control=this]() { const bool skip_validate_signee = false; - return std::make_shared( *prev, move( b ), skip_validate_signee ); + return std::make_shared( + *prev, + move( b ), + [control]( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + { control->check_protocol_features( timestamp, cur_features, new_features ); }, + skip_validate_signee + ); } ); } @@ -1285,19 +1799,23 @@ struct controller_impl { trusted_producer_light_validation = old_value; }); try { - block_state_ptr new_header_state = block_state_future.get(); - auto& b = new_header_state->block; + block_state_ptr bsp = block_state_future.get(); + const auto& b = bsp->block; + emit( self.pre_accepted_block, b ); - fork_db.add( new_header_state, false ); + fork_db.add( bsp ); if (conf.trusted_producers.count(b->producer)) { trusted_producer_light_validation = true; }; - emit( self.accepted_block_header, new_header_state ); - if ( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( s ); + emit( self.accepted_block_header, bsp ); + + if( read_mode != db_read_mode::IRREVERSIBLE ) { + maybe_switch_forks( fork_db.pending_head(), s ); + } else { + log_irreversible(); } } FC_LOG_AND_RETHROW( ) @@ -1315,67 +1833,90 @@ struct controller_impl { block_validate_exception, "invalid block status for replay" ); emit( self.pre_accepted_block, b ); const bool skip_validate_signee = !conf.force_all_checks; - auto new_header_state = fork_db.add( b, skip_validate_signee ); - emit( self.accepted_block_header, new_header_state ); - - if ( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( s ); + auto bsp = std::make_shared( + *head, + b, + [this]( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + { check_protocol_features( timestamp, cur_features, new_features ); }, + 
skip_validate_signee + ); + + if( s != controller::block_status::irreversible ) { + fork_db.add( bsp, true ); + } - // on replay irreversible is not emitted by fork database, so emit it explicitly here - if( s == controller::block_status::irreversible ) - emit( self.irreversible_block, new_header_state ); + emit( self.accepted_block_header, bsp ); + + if( s == controller::block_status::irreversible ) { + apply_block( bsp, s ); + head = bsp; + + // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. + // So emit it explicitly here. + emit( self.irreversible_block, bsp ); + + if (!self.skip_db_sessions(s)) { + db.commit(bsp->block_num); + } + + } else { + EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception, + "invariant failure: cannot replay reversible blocks while in irreversible mode" ); + maybe_switch_forks( bsp, s ); + } } FC_LOG_AND_RETHROW( ) } - void maybe_switch_forks( controller::block_status s ) { - auto new_head = fork_db.head(); - + void maybe_switch_forks( const block_state_ptr& new_head, controller::block_status s ) { + bool head_changed = true; if( new_head->header.previous == head->id ) { try { - apply_block( new_head->block, s ); - fork_db.mark_in_current_chain( new_head, true ); - fork_db.set_validity( new_head, true ); + apply_block( new_head, s ); + fork_db.mark_valid( new_head ); head = new_head; } catch ( const fc::exception& e ) { - fork_db.set_validity( new_head, false ); // Removes new_head from fork_db index, so no need to mark it as not in the current chain. + fork_db.remove( new_head->id ); throw; } } else if( new_head->id != head->id ) { + auto old_head = head; ilog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})", ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) ); auto branches = fork_db.fetch_branch_from( new_head->id, head->id ); - for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { - fork_db.mark_in_current_chain( *itr, false ); - pop_block(); - } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + if( branches.second.size() > 0 ) { + for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { + pop_block(); + } + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + } for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { optional<fc::exception> except; try { - apply_block( (*ritr)->block, (*ritr)->validated ? controller::block_status::validated : controller::block_status::complete ); + apply_block( *ritr, (*ritr)->is_valid() ? controller::block_status::validated + : controller::block_status::complete ); + fork_db.mark_valid( *ritr ); head = *ritr; - fork_db.mark_in_current_chain( *ritr, true ); - (*ritr)->validated = true; + } catch (const fc::exception& e) { + except = e; } - catch (const fc::exception& e) { except = e; } - if (except) { + if( except ) { elog("exception thrown while switching forks ${e}", ("e", except->to_detail_string())); // ritr currently points to the block that threw - // if we mark it invalid it will automatically remove all forks built off it. - fork_db.set_validity( *ritr, false ); + // Remove the block that threw and all forks built off it.
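// (For reference, the condensed shape of the fork switch performed in this function,
// assuming fetch_branch_from( a, b ) returns the two branches back to the common ancestor,
// first ending at `a` (the new head) and second ending at `b` (the current head); an
// illustrative recap, not additional code in this patch:
//
//    auto branches = fork_db.fetch_branch_from( new_head->id, head->id );
//    for( size_t i = 0; i < branches.second.size(); ++i )
//       pop_block();                                    // rewind state off the current branch
//    for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
//       apply_block( *ritr, controller::block_status::complete ); // apply new branch oldest-first
// )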
+ fork_db.remove( (*ritr)->id ); // pop all blocks from the bad fork // ritr base is a forward itr to the last block successfully applied auto applied_itr = ritr.base(); for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { - fork_db.mark_in_current_chain( *itr, false ); pop_block(); } EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, @@ -1383,24 +1924,30 @@ struct controller_impl { // re-apply good blocks for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { - apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); + apply_block( *ritr, controller::block_status::validated /* we previously validated these blocks*/ ); head = *ritr; - fork_db.mark_in_current_chain( *ritr, true ); } throw *except; } // end if exception } /// end for each block in branch - ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id) ); + + ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id)); + } else { + head_changed = false; } + + if( head_changed ) + log_irreversible(); } /// push_block void abort_block() { if( pending ) { if ( read_mode == db_read_mode::SPECULATIVE ) { - for( const auto& t : pending->_pending_block_state->trxs ) + for( const auto& t : pending->get_trx_metas() ) unapplied_transactions[t->signed_id] = t; } pending.reset(); + protocol_features.popped_blocks_to( head->block_num ); } } @@ -1409,69 +1956,28 @@ struct controller_impl { return false; } - void set_action_merkle() { + checksum256_type calculate_action_merkle() { vector action_digests; - action_digests.reserve( pending->_actions.size() ); - for( const auto& a : pending->_actions ) + const auto& actions = pending->_block_stage.get()._actions; + action_digests.reserve( actions.size() ); + for( const auto& a : actions ) action_digests.emplace_back( a.digest() ); - pending->_pending_block_state->header.action_mroot = merkle( move(action_digests) ); + return merkle( move(action_digests) ); } - void set_trx_merkle() { + checksum256_type calculate_trx_merkle() { vector trx_digests; - const auto& trxs = pending->_pending_block_state->block->transactions; + const auto& trxs = pending->_block_stage.get()._pending_trx_receipts; trx_digests.reserve( trxs.size() ); for( const auto& a : trxs ) trx_digests.emplace_back( a.digest() ); - pending->_pending_block_state->header.transaction_mroot = merkle( move(trx_digests) ); + return merkle( move(trx_digests) ); } - - void finalize_block() - { - EOS_ASSERT(pending, block_validate_exception, "it is not valid to finalize when there is no pending block"); - try { - - - /* - ilog( "finalize block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", - ("n",pending->_pending_block_state->block_num) - ("id",pending->_pending_block_state->header.id()) - ("t",pending->_pending_block_state->header.timestamp) - ("p",pending->_pending_block_state->header.producer) - ("signing_key", pending->_pending_block_state->block_signing_key) - ("v",pending->_pending_block_state->header.schedule_version) - ("lib",pending->_pending_block_state->dpos_irreversible_blocknum) - ("ndtrxs",db.get_index().size()) - ("np",pending->_pending_block_state->header.new_producers) - ); - */ - - // Update resource limits: - resource_limits.process_account_limit_updates(); - const auto& chain_config = self.get_global_properties().configuration; - uint32_t 
max_virtual_mult = 1000; - uint64_t CPU_TARGET = EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct); - resource_limits.set_block_parameters( - { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}}, - {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}} - ); - resource_limits.process_block_usage(pending->_pending_block_state->block_num); - - set_action_merkle(); - set_trx_merkle(); - - auto p = pending->_pending_block_state; - p->id = p->header.id(); - - create_block_summary(p->id); - - } FC_CAPTURE_AND_RETHROW() } - void update_producers_authority() { - const auto& producers = pending->_pending_block_state->active_schedule.producers; + const auto& producers = pending->get_pending_block_header_state().active_schedule.producers; auto update_permission = [&]( auto& permission, auto threshold ) { auto auth = authority( threshold, {}, {}); @@ -1684,8 +2190,14 @@ struct controller_impl { signed_transaction trx; trx.actions.emplace_back(std::move(on_block_act)); - trx.set_reference_block(self.head_block_id()); - trx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to nearest second to avoid appearing expired + if( self.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) { + trx.expiration = time_point_sec(); + trx.ref_block_num = 0; + trx.ref_block_prefix = 0; + } else { + trx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to nearest second to avoid appearing expired + trx.set_reference_block( self.head_block_id() ); + } return trx; } @@ -1709,17 +2221,29 @@ authorization_manager& controller::get_mutable_authorization_manager() return my->authorization; } +const protocol_feature_manager& controller::get_protocol_feature_manager()const +{ + return my->protocol_features; +} + controller::controller( const controller::config& cfg ) -:my( new controller_impl( cfg, *this ) ) +:my( new controller_impl( cfg, *this, protocol_feature_set{} ) ) +{ +} + +controller::controller( const config& cfg, protocol_feature_set&& pfs ) +:my( new controller_impl( cfg, *this, std::move(pfs) ) ) { } controller::~controller() { my->abort_block(); + /* Shouldn't be needed anymore. //close fork_db here, because it can generate "irreversible" signal to this controller, //in case if read-mode == IRREVERSIBLE, we will apply latest irreversible block //for that we need 'my' to be valid pointer pointing to valid controller_impl. 
my->fork_db.close(); + */ } void controller::add_indices() { @@ -1727,14 +2251,9 @@ void controller::add_indices() { } void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { - my->head = my->fork_db.head(); if( snapshot ) { ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); } - else if( !my->head ) { - elog( "No head block in fork db, perhaps we need to replay" ); - } - try { my->init(shutdown, snapshot); } catch (boost::interprocess::bad_alloc& e) { @@ -1753,19 +2272,186 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } +void controller::preactivate_feature( const digest_type& feature_digest ) { + const auto& pfs = my->protocol_features.get_protocol_feature_set(); + auto cur_time = pending_block_time(); + + auto status = pfs.is_recognized( feature_digest, cur_time ); + switch( status ) { + case protocol_feature_set::recognized_t::unrecognized: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, + "protocol feature with digest '${digest}' is unrecognized", ("digest", feature_digest) ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, + "protocol feature with digest '${digest}' is unrecognized", ("digest", feature_digest) ); + } + break; + case protocol_feature_set::recognized_t::disabled: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, + "protocol feature with digest '${digest}' is disabled", ("digest", feature_digest) ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, + "protocol feature with digest '${digest}' is disabled", ("digest", feature_digest) ); + } + break; + case protocol_feature_set::recognized_t::too_early: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, + "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", feature_digest)("timestamp", cur_time) ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, + "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", feature_digest)("timestamp", cur_time) ); + } + break; + case protocol_feature_set::recognized_t::ready: + break; + default: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, "unexpected recognized_t status" ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, "unexpected recognized_t status" ); + } + break; + } + + // The above failures depend on subjective information. + // Because of deferred transactions, this complicates things considerably. + + // If producing a block, we throw a subjective failure if the feature is not properly recognized in order + // to try to avoid retiring into a block a deferred transacton driven by subjective information. + + // But it is still possible for a producer to retire a deferred transaction that deals with this subjective + // information. If they recognized the feature, they would retire it successfully, but a validator that + // does not recognize the feature should reject the entire block (not just fail the deferred transaction). + // Even if they don't recognize the feature, the producer could change their nodeos code to treat it like an + // objective failure thus leading the deferred transaction to retire with soft_fail or hard_fail. 
+ // In this case, validators that don't recognize the feature would reject the whole block immediately, and + // validators that do recognize the feature would likely lead to a different retire status which would + // ultimately cause a validation failure and thus rejection of the block. + // In either case, it results in rejection of the block which is the desired behavior in this scenario. + + // If the feature is properly recognized by producer and validator, we have dealt with the subjectivity and + // now only consider the remaining failure modes which are deterministic and objective. + // Thus the exceptions that can be thrown below can be regular objective exceptions + // that do not cause immediate rejection of the block. + + EOS_ASSERT( !is_protocol_feature_activated( feature_digest ), + protocol_feature_exception, + "protocol feature with digest '${digest}' is already activated", + ("digest", feature_digest) + ); + + const auto& pso = my->db.get(); + + EOS_ASSERT( std::find( pso.preactivated_protocol_features.begin(), + pso.preactivated_protocol_features.end(), + feature_digest + ) == pso.preactivated_protocol_features.end(), + protocol_feature_exception, + "protocol feature with digest '${digest}' is already pre-activated", + ("digest", feature_digest) + ); + + auto dependency_checker = [&]( const digest_type& d ) -> bool + { + if( is_protocol_feature_activated( d ) ) return true; -void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { + return ( std::find( pso.preactivated_protocol_features.begin(), + pso.preactivated_protocol_features.end(), + d ) != pso.preactivated_protocol_features.end() ); + }; + + EOS_ASSERT( pfs.validate_dependencies( feature_digest, dependency_checker ), + protocol_feature_exception, + "not all dependencies of protocol feature with digest '${digest}' have been activated or pre-activated", + ("digest", feature_digest) + ); + + my->db.modify( pso, [&]( auto& ps ) { + ps.preactivated_protocol_features.push_back( feature_digest ); + } ); +} + +vector controller::get_preactivated_protocol_features()const { + const auto& pso = my->db.get(); + + if( pso.preactivated_protocol_features.size() == 0 ) return {}; + + vector preactivated_protocol_features; + + for( const auto& f : pso.preactivated_protocol_features ) { + preactivated_protocol_features.emplace_back( f ); + } + + return preactivated_protocol_features; +} + +void controller::validate_protocol_features( const vector& features_to_activate )const { + my->check_protocol_features( my->head->header.timestamp, + my->head->activated_protocol_features->protocol_features, + features_to_activate ); +} + +void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count ) +{ validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); + + EOS_ASSERT( !my->pending, block_validate_exception, "pending block already exists" ); + + vector new_protocol_feature_activations; + + const auto& pso = my->db.get(); + if( pso.preactivated_protocol_features.size() > 0 ) { + for( const auto& f : pso.preactivated_protocol_features ) { + new_protocol_feature_activations.emplace_back( f ); + } + } + + if( new_protocol_feature_activations.size() > 0 ) { + validate_protocol_features( new_protocol_feature_activations ); + } + + my->start_block( when, confirm_block_count, new_protocol_feature_activations, + block_status::incomplete, optional() ); } -void controller::finalize_block() { +void controller::start_block( 
block_timestamp_type when, + uint16_t confirm_block_count, + const vector& new_protocol_feature_activations ) +{ validate_db_available_size(); - my->finalize_block(); + + if( new_protocol_feature_activations.size() > 0 ) { + validate_protocol_features( new_protocol_feature_activations ); + } + + my->start_block( when, confirm_block_count, new_protocol_feature_activations, + block_status::incomplete, optional() ); } -void controller::sign_block( const std::function& signer_callback ) { - my->sign_block( signer_callback ); +block_state_ptr controller::finalize_block( const std::function& signer_callback ) { + validate_db_available_size(); + + my->finalize_block(); + + auto& ab = my->pending->_block_stage.get(); + + auto bsp = std::make_shared( + std::move( ab._pending_block_header_state ), + std::move( ab._unsigned_block ), + std::move( ab._trx_metas ), + []( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + {}, + signer_callback + ); + + my->pending->_block_stage = completed_block{ bsp }; + + return bsp; } void controller::commit_block() { @@ -1778,8 +2464,8 @@ void controller::abort_block() { my->abort_block(); } -boost::asio::thread_pool& controller::get_thread_pool() { - return my->thread_pool; +boost::asio::io_context& controller::get_thread_pool() { + return my->thread_pool.get_executor(); } std::future controller::create_block_state_future( const signed_block_ptr& b ) { @@ -1882,13 +2568,47 @@ account_name controller::fork_db_head_block_producer()const { return my->fork_db.head()->header.producer; } -block_state_ptr controller::pending_block_state()const { - if( my->pending ) return my->pending->_pending_block_state; - return block_state_ptr(); +uint32_t controller::fork_db_pending_head_block_num()const { + return my->fork_db.pending_head()->block_num; +} + +block_id_type controller::fork_db_pending_head_block_id()const { + return my->fork_db.pending_head()->id; +} + +time_point controller::fork_db_pending_head_block_time()const { + return my->fork_db.pending_head()->header.timestamp; } + +account_name controller::fork_db_pending_head_block_producer()const { + return my->fork_db.pending_head()->header.producer; +} + time_point controller::pending_block_time()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - return my->pending->_pending_block_state->header.timestamp; + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->header.timestamp; + + return my->pending->get_pending_block_header_state().timestamp; +} + +account_name controller::pending_block_producer()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->header.producer; + + return my->pending->get_pending_block_header_state().producer; +} + +public_key_type controller::pending_block_signing_key()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->block_signing_key; + + return my->pending->get_pending_block_header_state().block_signing_key; } optional controller::pending_producer_block_id()const { @@ -1896,8 +2616,13 @@ optional controller::pending_producer_block_id()const { return my->pending->_producer_block_id; } +const vector& controller::get_pending_trx_receipts()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending 
block" ); + return my->pending->get_trx_receipts(); +} + uint32_t controller::last_irreversible_block_num() const { - return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); + return my->fork_db.root()->block_num; } block_id_type controller::last_irreversible_block_id() const { @@ -1907,8 +2632,12 @@ block_id_type controller::last_irreversible_block_id() const { if( block_header::num_from_id(tapos_block_summary.block_id) == lib_num ) return tapos_block_summary.block_id; - return fetch_block_by_number(lib_num)->id(); + auto signed_blk = my->blog.read_block_by_num( lib_num ); + EOS_ASSERT( BOOST_LIKELY( signed_blk != nullptr ), unknown_block_exception, + "Could not find block: ${block}", ("block", lib_num) ); + + return signed_blk->id(); } const dynamic_global_property_object& controller::get_dynamic_global_properties()const { @@ -1927,8 +2656,8 @@ signed_block_ptr controller::fetch_block_by_id( block_id_type id )const { } signed_block_ptr controller::fetch_block_by_number( uint32_t block_num )const { try { - auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - if( blk_state && blk_state->block ) { + auto blk_state = fetch_block_state_by_number( block_num ); + if( blk_state ) { return blk_state->block; } @@ -1941,14 +2670,37 @@ block_state_ptr controller::fetch_block_state_by_id( block_id_type id )const { } block_state_ptr controller::fetch_block_state_by_number( uint32_t block_num )const { try { - auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - return blk_state; + const auto& rev_blocks = my->reversible_blocks.get_index(); + auto objitr = rev_blocks.find(block_num); + + if( objitr == rev_blocks.end() ) { + if( my->read_mode == db_read_mode::IRREVERSIBLE ) { + return my->fork_db.search_on_branch( my->fork_db.pending_head()->id, block_num ); + } else { + return block_state_ptr(); + } + } + + return my->fork_db.get_block( objitr->get_block_id() ); } FC_CAPTURE_AND_RETHROW( (block_num) ) } block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try { - auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - if( blk_state ) { - return blk_state->id; + const auto& blog_head = my->blog.head(); + + bool find_in_blog = (blog_head && block_num <= blog_head->block_num()); + + if( !find_in_blog ) { + if( my->read_mode != db_read_mode::IRREVERSIBLE ) { + const auto& rev_blocks = my->reversible_blocks.get_index(); + auto objitr = rev_blocks.find(block_num); + if( objitr != rev_blocks.end() ) { + return objitr->get_block_id(); + } + } else { + auto bsp = my->fork_db.search_on_branch( my->fork_db.pending_head()->id, block_num ); + + if( bsp ) return bsp->id; + } } auto signed_blk = my->blog.read_block_by_num(block_num); @@ -1976,6 +2728,10 @@ int64_t controller::set_proposed_producers( vector producers ) { const auto& gpo = get_global_properties(); auto cur_block_num = head_block_num() + 1; + if( producers.size() == 0 && is_builtin_activated( builtin_protocol_feature_t::disallow_empty_producer_schedule ) ) { + return -1; + } + if( gpo.proposed_schedule_block_num.valid() ) { if( *gpo.proposed_schedule_block_num != cur_block_num ) return -1; // there is already a proposed schedule set in a previous block, wait for it to become pending @@ -1990,13 +2746,14 @@ int64_t controller::set_proposed_producers( vector producers ) { decltype(sch.producers.cend()) end; decltype(end) begin; - if( 
my->pending->_pending_block_state->pending_schedule.producers.size() == 0 ) { - const auto& active_sch = my->pending->_pending_block_state->active_schedule; + const auto& pending_sch = pending_producers(); + + if( pending_sch.producers.size() == 0 ) { + const auto& active_sch = active_producers(); begin = active_sch.producers.begin(); end = active_sch.producers.end(); sch.version = active_sch.version + 1; } else { - const auto& pending_sch = my->pending->_pending_block_state->pending_schedule; begin = pending_sch.producers.begin(); end = pending_sch.producers.end(); sch.version = pending_sch.version + 1; @@ -2009,6 +2766,8 @@ int64_t controller::set_proposed_producers( vector producers ) { int64_t version = sch.version; + ilog( "proposed producer schedule with version ${v}", ("v", version) ); + my->db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = cur_block_num; gp.proposed_schedule = std::move(sch); @@ -2017,15 +2776,34 @@ int64_t controller::set_proposed_producers( vector producers ) { } const producer_schedule_type& controller::active_producers()const { - if ( !(my->pending) ) + if( !(my->pending) ) return my->head->active_schedule; - return my->pending->_pending_block_state->active_schedule; + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->active_schedule; + + return my->pending->get_pending_block_header_state().active_schedule; } const producer_schedule_type& controller::pending_producers()const { - if ( !(my->pending) ) - return my->head->pending_schedule; - return my->pending->_pending_block_state->pending_schedule; + if( !(my->pending) ) + return my->head->pending_schedule.schedule; + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->pending_schedule.schedule; + + if( my->pending->_block_stage.contains() ) { + const auto& np = my->pending->_block_stage.get()._unsigned_block->new_producers; + if( np ) + return *np; + } + + const auto& bb = my->pending->_block_stage.get(); + + if( bb._new_pending_producer_schedule ) + return *bb._new_pending_producer_schedule; + + return bb._pending_block_header_state.prev_pending_schedule.schedule; } optional controller::proposed_producers()const { @@ -2140,6 +2918,10 @@ void controller::check_key_list( const public_key_type& key )const { my->check_key_list( key ); } +bool controller::is_building_block()const { + return my->pending.valid(); +} + bool controller::is_producing_block()const { if( !my->pending ) return false; @@ -2147,7 +2929,7 @@ bool controller::is_producing_block()const { } bool controller::is_ram_billing_in_notify_allowed()const { - return !is_producing_block() || my->conf.allow_ram_billing_in_notify; + return my->conf.disable_all_subjective_mitigations || !is_producing_block() || my->conf.allow_ram_billing_in_notify; } void controller::validate_expiration( const transaction& trx )const { try { @@ -2187,6 +2969,24 @@ void controller::validate_reversible_available_size() const { EOS_ASSERT(free >= guard, reversible_guard_exception, "reversible free: ${f}, guard size: ${g}", ("f", free)("g",guard)); } +bool controller::is_protocol_feature_activated( const digest_type& feature_digest )const { + if( my->pending ) + return my->pending->is_protocol_feature_activated( feature_digest ); + + const auto& activated_features = my->head->activated_protocol_features->protocol_features; + return (activated_features.find( feature_digest ) != activated_features.end()); +} + +bool controller::is_builtin_activated( 
builtin_protocol_feature_t f )const { + uint32_t current_block_num = head_block_num(); + + if( my->pending ) { + ++current_block_num; + } + + return my->protocol_features.is_builtin_activated( f, current_block_num ); +} + bool controller::is_known_unexpired_transaction( const transaction_id_type& id) const { return db().find<transaction_object, by_trx_id>(id); } @@ -2211,4 +3011,68 @@ const flat_set<account_name> &controller::get_resource_greylist() const { return my->conf.resource_greylist; } + +void controller::add_to_ram_correction( account_name account, uint64_t ram_bytes ) { + if( auto ptr = my->db.find<account_ram_correction_object, by_name>( account ) ) { + my->db.modify( *ptr, [&]( auto& rco ) { + rco.ram_correction += ram_bytes; + } ); + } else { + my->db.create<account_ram_correction_object>( [&]( auto& rco ) { + rco.name = account; + rco.ram_correction = ram_bytes; + } ); + } +} + +bool controller::all_subjective_mitigations_disabled()const { + return my->conf.disable_all_subjective_mitigations; +} + +fc::optional<uint64_t> controller::convert_exception_to_error_code( const fc::exception& e ) { + const chain_exception* e_ptr = dynamic_cast<const chain_exception*>( &e ); + + if( e_ptr == nullptr ) return {}; + + if( !e_ptr->error_code ) return static_cast<uint64_t>(system_error_code::generic_system_error); + + return e_ptr->error_code; +} + +/// Protocol feature activation handlers: + +template<> +void controller_impl::on_activation<builtin_protocol_feature_t::preactivate_feature>() { + db.modify( db.get<protocol_state_object>(), [&]( auto& ps ) { + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "preactivate_feature" ); + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "is_feature_activated" ); + } ); +} + +template<> +void controller_impl::on_activation<builtin_protocol_feature_t::get_sender>() { + db.modify( db.get<protocol_state_object>(), [&]( auto& ps ) { + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "get_sender" ); + } ); +} + +template<> +void controller_impl::on_activation<builtin_protocol_feature_t::replace_deferred>() { + const auto& indx = db.get_index<account_ram_correction_index, by_id>(); + for( auto itr = indx.begin(); itr != indx.end(); itr = indx.begin() ) { + int64_t current_ram_usage = resource_limits.get_account_ram_usage( itr->name ); + int64_t ram_delta = -static_cast<int64_t>(itr->ram_correction); + if( itr->ram_correction > static_cast<uint64_t>(current_ram_usage) ) { + ram_delta = -current_ram_usage; + elog( "account ${name} was to be reduced by ${adjust} bytes of RAM despite only using ${current} bytes of RAM", + ("name", itr->name)("adjust", itr->ram_correction)("current", current_ram_usage) ); + } + + resource_limits.add_pending_ram_usage( itr->name, ram_delta ); + db.remove( *itr ); + } +} + +/// End of protocol feature activation handlers + } } /// eosio::chain diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index 03e0fed7f7f..9bd9a022f22 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -12,11 +12,11 @@ #include #include +#include #include #include #include #include -#include #include #include @@ -68,7 +68,7 @@ void validate_authority_precondition( const apply_context& context, const author * This method is called assuming precondition_system_newaccount succeeds a */ void apply_eosio_newaccount(apply_context& context) { - auto create = context.act.data_as<newaccount>(); + auto create = context.get_action().data_as<newaccount>(); try { context.require_authorization(create.creator); // context.require_write_lock( config::eosio_auth_scope ); @@ -85,8 +85,8 @@ void apply_eosio_newaccount(apply_context& context) { EOS_ASSERT( name_str.size() <= 12, action_validate_exception, "account names can only be 12 chars long" ); // Check if the creator is privileged - const auto &creator = db.get<account_object, by_name>(create.creator); - if( !creator.privileged ) { + const auto &creator = 
db.get(create.creator); + if( !creator.is_privileged() ) { EOS_ASSERT( name_str.find( "eosio." ) != 0, action_validate_exception, "only privileged accounts can have names that start with 'eosio.'" ); } @@ -101,7 +101,7 @@ void apply_eosio_newaccount(apply_context& context) { a.creation_date = context.control.pending_block_time(); }); - db.create([&](auto& a) { + db.create([&](auto& a) { a.name = create.name; }); @@ -129,42 +129,69 @@ void apply_eosio_setcode(apply_context& context) { const auto& cfg = context.control.get_global_properties().configuration; auto& db = context.db; - auto act = context.act.data_as(); + auto act = context.get_action().data_as(); context.require_authorization(act.account); EOS_ASSERT( act.vmtype == 0, invalid_contract_vm_type, "code should be 0" ); EOS_ASSERT( act.vmversion == 0, invalid_contract_vm_version, "version should be 0" ); - fc::sha256 code_id; /// default ID == 0 + fc::sha256 code_hash; /// default is the all zeros hash - if( act.code.size() > 0 ) { - code_id = fc::sha256::hash( act.code.data(), (uint32_t)act.code.size() ); + int64_t code_size = (int64_t)act.code.size(); + + if( code_size > 0 ) { + code_hash = fc::sha256::hash( act.code.data(), (uint32_t)act.code.size() ); wasm_interface::validate(context.control, act.code); } - const auto& account = db.get(act.account); + const auto& account = db.get(act.account); + bool existing_code = (account.code_hash != digest_type()); - int64_t code_size = (int64_t)act.code.size(); - int64_t old_size = (int64_t)account.code.size() * config::setcode_ram_bytes_multiplier; + EOS_ASSERT( code_size > 0 || existing_code, set_exact_code, "contract is already cleared" ); + + int64_t old_size = 0; int64_t new_size = code_size * config::setcode_ram_bytes_multiplier; - EOS_ASSERT( account.code_version != code_id, set_exact_code, "contract is already running this version of code" ); + if( existing_code ) { + const code_object& old_code_entry = db.get(boost::make_tuple(account.code_hash, account.vm_type, account.vm_version)); + EOS_ASSERT( old_code_entry.code_hash != code_hash, set_exact_code, + "contract is already running this version of code" ); + old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier; + if( old_code_entry.code_ref_count == 1 ) { + db.remove(old_code_entry); + context.control.get_wasm_interface().code_block_num_last_used(account.code_hash, account.vm_type, account.vm_version, context.control.head_block_num() + 1); + } else { + db.modify(old_code_entry, [](code_object& o) { + --o.code_ref_count; + }); + } + } - db.modify( account, [&]( auto& a ) { - /** TODO: consider whether a microsecond level local timestamp is sufficient to detect code version changes*/ - // TODO: update setcode message to include the hash, then validate it in validate - a.last_code_update = context.control.pending_block_time(); - a.code_version = code_id; - if ( code_size > 0 ) { - a.code.assign(act.code.data(), code_size); + if( code_size > 0 ) { + const code_object* new_code_entry = db.find( + boost::make_tuple(code_hash, act.vmtype, act.vmversion) ); + if( new_code_entry ) { + db.modify(*new_code_entry, [&](code_object& o) { + ++o.code_ref_count; + }); } else { - a.code.resize(0); + db.create([&](code_object& o) { + o.code_hash = code_hash; + o.code.assign(act.code.data(), code_size); + o.code_ref_count = 1; + o.first_block_used = context.control.head_block_num() + 1; + o.vm_type = act.vmtype; + o.vm_version = act.vmversion; + }); } - }); + } - const auto& account_sequence = db.get(act.account); - 
db.modify( account_sequence, [&]( auto& aso ) { - aso.code_sequence += 1; + db.modify( account, [&]( auto& a ) { + a.code_sequence += 1; + a.code_hash = code_hash; + a.vm_type = act.vmtype; + a.vm_version = act.vmversion; + a.last_code_update = context.control.pending_block_time(); }); if (new_size != old_size) { @@ -174,7 +201,7 @@ void apply_eosio_setcode(apply_context& context) { void apply_eosio_setabi(apply_context& context) { auto& db = context.db; - auto act = context.act.data_as(); + auto act = context.get_action().data_as(); context.require_authorization(act.account); @@ -193,9 +220,9 @@ void apply_eosio_setabi(apply_context& context) { } }); - const auto& account_sequence = db.get(act.account); - db.modify( account_sequence, [&]( auto& aso ) { - aso.abi_sequence += 1; + const auto& account_metadata = db.get(act.account); + db.modify( account_metadata, [&]( auto& a ) { + a.abi_sequence += 1; }); if (new_size != old_size) { @@ -205,7 +232,7 @@ void apply_eosio_setabi(apply_context& context) { void apply_eosio_updateauth(apply_context& context) { - auto update = context.act.data_as(); + auto update = context.get_action().data_as(); context.require_authorization(update.account); // only here to mark the single authority on this action as used auto& authorization = context.control.get_mutable_authorization_manager(); @@ -270,7 +297,7 @@ void apply_eosio_updateauth(apply_context& context) { void apply_eosio_deleteauth(apply_context& context) { // context.require_write_lock( config::eosio_auth_scope ); - auto remove = context.act.data_as(); + auto remove = context.get_action().data_as(); context.require_authorization(remove.account); // only here to mark the single authority on this action as used EOS_ASSERT(remove.permission != config::active_name, action_validate_exception, "Cannot delete active authority"); @@ -301,7 +328,7 @@ void apply_eosio_deleteauth(apply_context& context) { void apply_eosio_linkauth(apply_context& context) { // context.require_write_lock( config::eosio_auth_scope ); - auto requirement = context.act.data_as(); + auto requirement = context.get_action().data_as(); try { EOS_ASSERT(!requirement.requirement.empty(), action_validate_exception, "Required permission cannot be empty"); @@ -315,7 +342,15 @@ void apply_eosio_linkauth(apply_context& context) { EOS_ASSERT(code != nullptr, account_query_exception, "Failed to retrieve code for account: ${account}", ("account", requirement.code)); if( requirement.requirement != config::eosio_any_name ) { - const auto *permission = db.find(requirement.requirement); + const permission_object* permission = nullptr; + if( context.control.is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ) { + permission = db.find( + boost::make_tuple( requirement.account, requirement.requirement ) + ); + } else { + permission = db.find(requirement.requirement); + } + EOS_ASSERT(permission != nullptr, permission_query_exception, "Failed to retrieve permission: ${permission}", ("permission", requirement.requirement)); } @@ -350,7 +385,7 @@ void apply_eosio_unlinkauth(apply_context& context) { // context.require_write_lock( config::eosio_auth_scope ); auto& db = context.db; - auto unlink = context.act.data_as(); + auto unlink = context.get_action().data_as(); context.require_authorization(unlink.account); // only here to mark the single authority on this action as used @@ -366,7 +401,7 @@ void apply_eosio_unlinkauth(apply_context& context) { } void apply_eosio_canceldelay(apply_context& context) { - auto cancel = 
context.act.data_as(); + auto cancel = context.get_action().data_as(); context.require_authorization(cancel.canceling_auth.actor); // only here to mark the single authority on this action as used const auto& trx_id = cancel.trx_id; diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 52b49fff449..749ec6404d5 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -13,6 +13,15 @@ namespace eosio { namespace chain { using boost::multi_index_container; using namespace boost::multi_index; + const uint32_t fork_database::magic_number = 0x30510FDB; + + const uint32_t fork_database::min_supported_version = 1; + const uint32_t fork_database::max_supported_version = 1; + + /** + * History: + * Version 1: initial version of the new refactored fork database portable format + */ struct by_block_id; struct by_block_num; @@ -23,82 +32,194 @@ namespace eosio { namespace chain { indexed_by< hashed_unique< tag, member, std::hash>, ordered_non_unique< tag, const_mem_fun >, - ordered_non_unique< tag, + ordered_unique< tag, composite_key< block_state, - member, - member + member, + member, + member, + member >, - composite_key_compare< std::less, std::greater > - >, - ordered_non_unique< tag, - composite_key< block_header_state, - member, - member, - member - >, - composite_key_compare< std::greater, std::greater, std::greater > + composite_key_compare< + std::greater, + std::greater, + std::greater, + sha256_less + > > > > fork_multi_index_type; + bool first_preferred( const block_header_state& lhs, const block_header_state& rhs ) { + return std::tie( lhs.dpos_irreversible_blocknum, lhs.block_num ) + > std::tie( rhs.dpos_irreversible_blocknum, rhs.block_num ); + } struct fork_database_impl { + fork_database_impl( fork_database& self, const fc::path& data_dir ) + :self(self) + ,datadir(data_dir) + {} + + fork_database& self; fork_multi_index_type index; + block_state_ptr root; // Only uses the block_header_state portion block_state_ptr head; fc::path datadir; + + void add( const block_state_ptr& n, + bool ignore_duplicate, bool validate, + const std::function&, + const vector& )>& validator ); }; - fork_database::fork_database( const fc::path& data_dir ):my( new fork_database_impl() ) { - my->datadir = data_dir; + fork_database::fork_database( const fc::path& data_dir ) + :my( new fork_database_impl( *this, data_dir ) ) + {} + + void fork_database::open( const std::function&, + const vector& )>& validator ) + { if (!fc::is_directory(my->datadir)) fc::create_directories(my->datadir); auto fork_db_dat = my->datadir / config::forkdb_filename; if( fc::exists( fork_db_dat ) ) { - string content; - fc::read_file_contents( fork_db_dat, content ); - - fc::datastream ds( content.data(), content.size() ); - unsigned_int size; fc::raw::unpack( ds, size ); - for( uint32_t i = 0, n = size.value; i < n; ++i ) { - block_state s; - fc::raw::unpack( ds, s ); - set( std::make_shared( move( s ) ) ); - } - block_id_type head_id; - fc::raw::unpack( ds, head_id ); + try { + string content; + fc::read_file_contents( fork_db_dat, content ); + + fc::datastream ds( content.data(), content.size() ); + + // validate totem + uint32_t totem = 0; + fc::raw::unpack( ds, totem ); + EOS_ASSERT( totem == magic_number, fork_database_exception, + "Fork database file '${filename}' has unexpected magic number: ${actual_totem}. 
Expected ${expected_totem}", + ("filename", fork_db_dat.generic_string()) + ("actual_totem", totem) + ("expected_totem", magic_number) + ); + + // validate version + uint32_t version = 0; + fc::raw::unpack( ds, version ); + EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, + fork_database_exception, + "Unsupported version of fork database file '${filename}'. " + "Fork database version is ${version} while code supports version(s) [${min},${max}]", + ("filename", fork_db_dat.generic_string()) + ("version", version) + ("min", min_supported_version) + ("max", max_supported_version) + ); + + block_header_state bhs; + fc::raw::unpack( ds, bhs ); + reset( bhs ); + + unsigned_int size; fc::raw::unpack( ds, size ); + for( uint32_t i = 0, n = size.value; i < n; ++i ) { + block_state s; + fc::raw::unpack( ds, s ); + for( const auto& receipt : s.block->transactions ) { + if( receipt.trx.contains<packed_transaction>() ) { + const auto& pt = receipt.trx.get<packed_transaction>(); + s.trxs.push_back( std::make_shared<transaction_metadata>( std::make_shared<packed_transaction>(pt) ) ); + } + } + s.header_exts = s.block->validate_and_extract_header_extensions(); + my->add( std::make_shared<block_state>( move( s ) ), false, true, validator ); + } + block_id_type head_id; + fc::raw::unpack( ds, head_id ); + + if( my->root->id == head_id ) { + my->head = my->root; + } else { + my->head = get_block( head_id ); + EOS_ASSERT( my->head, fork_database_exception, + "could not find head while reconstructing fork database from file; '${filename}' is likely corrupted", + ("filename", fork_db_dat.generic_string()) ); + } - my->head = get_block( head_id ); + auto candidate = my->index.get<by_lib_block_num>().begin(); + if( candidate == my->index.get<by_lib_block_num>().end() || !(*candidate)->is_valid() ) { + EOS_ASSERT( my->head->id == my->root->id, fork_database_exception, + "head not set to root despite no better option available; '${filename}' is likely corrupted", + ("filename", fork_db_dat.generic_string()) ); + } else { + EOS_ASSERT( !first_preferred( **candidate, *my->head ), fork_database_exception, + "head not set to the best available option; '${filename}' is likely corrupted", + ("filename", fork_db_dat.generic_string()) ); + } + } FC_CAPTURE_AND_RETHROW( (fork_db_dat) ) fc::remove( fork_db_dat ); } } void fork_database::close() { - if( my->index.size() == 0 ) return; - auto fork_db_dat = my->datadir / config::forkdb_filename; + + if( !my->root ) { + if( my->index.size() > 0 ) { + elog( "fork_database is in a bad state when closing; not writing out '${filename}'", + ("filename", fork_db_dat.generic_string()) ); + } + return; + } + std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | std::ofstream::trunc ); + fc::raw::pack( out, magic_number ); + fc::raw::pack( out, max_supported_version ); // write out current version which is always max_supported_version + fc::raw::pack( out, *static_cast<block_header_state*>(&*my->root) ); uint32_t num_blocks_in_fork_db = my->index.size(); fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} ); - for( const auto& s : my->index ) { - fc::raw::pack( out, *s ); + + const auto& indx = my->index.get<by_lib_block_num>(); + + auto unvalidated_itr = indx.rbegin(); + auto unvalidated_end = boost::make_reverse_iterator( indx.lower_bound( false ) ); + + auto validated_itr = unvalidated_end; + auto validated_end = indx.rend(); + + for( bool unvalidated_remaining = (unvalidated_itr != unvalidated_end), + validated_remaining = (validated_itr != validated_end); + + unvalidated_remaining || validated_remaining; + + unvalidated_remaining = (unvalidated_itr != unvalidated_end), 
+ validated_remaining = (validated_itr != validated_end) + ) + { + auto itr = (validated_remaining ? validated_itr : unvalidated_itr); + + if( unvalidated_remaining && validated_remaining ) { + if( first_preferred( **validated_itr, **unvalidated_itr ) ) { + itr = unvalidated_itr; + ++unvalidated_itr; + } else { + ++validated_itr; + } + } else if( unvalidated_remaining ) { + ++unvalidated_itr; + } else { + ++validated_itr; + } + + fc::raw::pack( out, *(*itr) ); } - if( my->head ) + + if( my->head ) { fc::raw::pack( out, my->head->id ); - else - fc::raw::pack( out, block_id_type() ); - - /// we don't normally indicate the head block as irreversible /// we cannot normally prune the lib if it is the head block because /// the next block needs to build off of the head block. We are exiting /// now so we can prune this block as irreversible before exiting. - auto lib = my->head->dpos_irreversible_blocknum; - auto oldest = *my->index.get<by_block_num>().begin(); - if( oldest->block_num <= lib ) { - prune( oldest ); + } else { + elog( "head not set in fork database; '${filename}' will be corrupted", + ("filename", fork_db_dat.generic_string()) ); } my->index.clear(); @@ -108,64 +229,154 @@ namespace eosio { namespace chain { close(); } - void fork_database::set( block_state_ptr s ) { - auto result = my->index.insert( s ); - EOS_ASSERT( s->id == s->header.id(), fork_database_exception, - "block state id (${id}) is different from block state header id (${hid})", ("id", string(s->id))("hid", string(s->header.id())) ); - - //FC_ASSERT( s->block_num == s->header.block_num() ); + void fork_database::reset( const block_header_state& root_bhs ) { + my->index.clear(); + my->root = std::make_shared<block_state>(); + static_cast<block_header_state&>(*my->root) = root_bhs; + my->root->validated = true; + my->head = my->root; + } - EOS_ASSERT( result.second, fork_database_exception, "unable to insert block state, duplicate state detected" ); - if( !my->head ) { - my->head = s; - } else if( my->head->block_num < s->block_num ) { - my->head = s; + void fork_database::rollback_head_to_root() { + auto& by_id_idx = my->index.get<by_id>(); + auto itr = by_id_idx.begin(); + while (itr != by_id_idx.end()) { + by_id_idx.modify( itr, [&]( block_state_ptr& bsp ) { + bsp->validated = false; + } ); + ++itr; } + my->head = my->root; } - block_state_ptr fork_database::add( const block_state_ptr& n, bool skip_validate_previous ) { - EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); - EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); + void fork_database::advance_root( const block_id_type& id ) { + EOS_ASSERT( my->root, fork_database_exception, "root not yet set" ); + + auto new_root = get_block( id ); + EOS_ASSERT( new_root, fork_database_exception, + "cannot advance root to a block that does not exist in the fork database" ); + EOS_ASSERT( new_root->is_valid(), fork_database_exception, + "cannot advance root to a block that has not yet been validated" ); + + + vector<block_id_type> blocks_to_remove; + for( auto b = new_root; b; ) { + blocks_to_remove.push_back( b->header.previous ); + b = get_block( blocks_to_remove.back() ); + EOS_ASSERT( b || blocks_to_remove.back() == my->root->id, fork_database_exception, "invariant violation: orphaned branch was present in fork database" ); + } + + // The new root block should be erased from the fork database index individually rather than with the remove method, + // because we do not want the blocks branching off of it to be removed from the fork database. 
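+ // (Illustration, not part of the original patch: with root R and chain R <- A <- B, advancing the root to B collects blocks_to_remove = {A, R}; erasing B's own index entry first lets the remove( A ) call below cascade through A's stale descendants without touching the blocks built on top of B.)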
+ my->index.erase( my->index.find( id ) ); - if( !skip_validate_previous ) { - auto prior = my->index.find( n->block->previous ); - EOS_ASSERT( prior != my->index.end(), unlinkable_block_exception, - "unlinkable block", ("id", n->block->id())("previous", n->block->previous) ); + // The other blocks to be removed are removed using the remove method so that orphaned branches do not remain in the fork database. + for( const auto& block_id : blocks_to_remove ) { + remove( block_id ); } - auto inserted = my->index.insert(n); - EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added?" ); + // Even though fork database no longer needs block or trxs when a block state becomes a root of the tree, + // avoid mutating the block state at all, for example clearing the block shared pointer, because other + // parts of the code which run asynchronously (e.g. mongo_db_plugin) may later expect it to remain unmodified. - my->head = *my->index.get<by_lib_block_num>().begin(); + my->root = new_root; + } - auto lib = my->head->dpos_irreversible_blocknum; - auto oldest = *my->index.get<by_block_num>().begin(); + block_header_state_ptr fork_database::get_block_header( const block_id_type& id )const { + const auto& by_id_idx = my->index.get<by_id>(); - if( oldest->block_num < lib ) { - prune( oldest ); + if( my->root->id == id ) { + return my->root; } - return n; + auto itr = my->index.find( id ); + if( itr != my->index.end() ) + return *itr; + + return block_header_state_ptr(); } - block_state_ptr fork_database::add( signed_block_ptr b, bool skip_validate_signee ) { - EOS_ASSERT( b, fork_database_exception, "attempt to add null block" ); - EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); + void fork_database_impl::add( const block_state_ptr& n, + bool ignore_duplicate, bool validate, + const std::function<void( block_timestamp_type, + const flat_set<digest_type>&, + const vector<digest_type>& )>& validator ) + { + EOS_ASSERT( root, fork_database_exception, "root not yet set" ); + EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); - const auto& by_id_idx = my->index.get<by_id>(); - auto existing = by_id_idx.find( b->id() ); - EOS_ASSERT( existing == by_id_idx.end(), fork_database_exception, "we already know about this block" ); + auto prev_bh = self.get_block_header( n->header.previous ); - auto prior = by_id_idx.find( b->previous ); - EOS_ASSERT( prior != by_id_idx.end(), unlinkable_block_exception, "unlinkable block", ("id", string(b->id()))("previous", string(b->previous)) ); + EOS_ASSERT( prev_bh, unlinkable_block_exception, + "unlinkable block", ("id", n->id)("previous", n->header.previous) ); - auto result = std::make_shared<block_state>( **prior, move(b), skip_validate_signee ); - EOS_ASSERT( result, fork_database_exception , "fail to add new block state" ); - return add(result, true); + if( validate ) { + try { + const auto& exts = n->header_exts; + + if( exts.size() > 0 ) { + const auto& new_protocol_features = exts.front().get<protocol_feature_activation>().protocol_features; + validator( n->header.timestamp, prev_bh->activated_protocol_features->protocol_features, new_protocol_features ); + } + } EOS_RETHROW_EXCEPTIONS( fork_database_exception, "serialized fork database is incompatible with configured protocol features" ) + } + + auto inserted = index.insert(n); + if( !inserted.second ) { + if( ignore_duplicate ) return; + EOS_THROW( fork_database_exception, "duplicate block added", ("id", n->id) ); + } + + auto candidate = index.get<by_lib_block_num>().begin(); + if( (*candidate)->is_valid() ) { + head = *candidate; + } } + void fork_database::add( const block_state_ptr& n, bool ignore_duplicate ) { + my->add( 
n, ignore_duplicate, false, + []( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + {} + ); + } + + const block_state_ptr& fork_database::root()const { return my->root; } + const block_state_ptr& fork_database::head()const { return my->head; } + block_state_ptr fork_database::pending_head()const { + const auto& indx = my->index.get(); + + auto itr = indx.lower_bound( false ); + if( itr != indx.end() && !(*itr)->is_valid() ) { + if( first_preferred( **itr, *my->head ) ) + return *itr; + } + + return my->head; + } + + branch_type fork_database::fetch_branch( const block_id_type& h, uint32_t trim_after_block_num )const { + branch_type result; + for( auto s = get_block(h); s; s = get_block( s->header.previous ) ) { + if( s->block_num <= trim_after_block_num ) + result.push_back( s ); + } + + return result; + } + + block_state_ptr fork_database::search_on_branch( const block_id_type& h, uint32_t block_num )const { + for( auto s = get_block(h); s; s = get_block( s->header.previous ) ) { + if( s->block_num == block_num ) + return s; + } + + return {}; + } + /** * Given two head blocks, return two branches of the fork graph that * end with a common ancestor (same prior block) @@ -173,32 +384,52 @@ namespace eosio { namespace chain { pair< branch_type, branch_type > fork_database::fetch_branch_from( const block_id_type& first, const block_id_type& second )const { pair result; - auto first_branch = get_block(first); - auto second_branch = get_block(second); + auto first_branch = (first == my->root->id) ? my->root : get_block(first); + auto second_branch = (second == my->root->id) ? my->root : get_block(second); + + EOS_ASSERT(first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", first)); + EOS_ASSERT(second_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", second)); while( first_branch->block_num > second_branch->block_num ) { result.first.push_back(first_branch); - first_branch = get_block( first_branch->header.previous ); - EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", string(first_branch->header.previous)) ); + const auto& prev = first_branch->header.previous; + first_branch = (prev == my->root->id) ? my->root : get_block( prev ); + EOS_ASSERT( first_branch, fork_db_block_not_found, + "block ${id} does not exist", + ("id", prev) + ); } while( second_branch->block_num > first_branch->block_num ) { result.second.push_back( second_branch ); - second_branch = get_block( second_branch->header.previous ); - EOS_ASSERT( second_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", string(second_branch->header.previous)) ); + const auto& prev = second_branch->header.previous; + second_branch = (prev == my->root->id) ? 
my->root : get_block( prev ); + EOS_ASSERT( second_branch, fork_db_block_not_found, + "block ${id} does not exist", + ("id", prev) + ); } + if (first_branch->id == second_branch->id) return result; + while( first_branch->header.previous != second_branch->header.previous ) { result.first.push_back(first_branch); result.second.push_back(second_branch); - first_branch = get_block( first_branch->header.previous ); - second_branch = get_block( second_branch->header.previous ); - EOS_ASSERT( first_branch && second_branch, fork_db_block_not_found, - "either block ${fid} or ${sid} does not exist", - ("fid", string(first_branch->header.previous))("sid", string(second_branch->header.previous)) ); + const auto &first_prev = first_branch->header.previous; + first_branch = get_block( first_prev ); + const auto &second_prev = second_branch->header.previous; + second_branch = get_block( second_prev ); + EOS_ASSERT( first_branch, fork_db_block_not_found, + "block ${id} does not exist", + ("id", first_prev) + ); + EOS_ASSERT( second_branch, fork_db_block_not_found, + "block ${id} does not exist", + ("id", second_prev) + ); } if( first_branch && second_branch ) @@ -209,71 +440,47 @@ namespace eosio { namespace chain { return result; } /// fetch_branch_from - /// remove all of the invalid forks built of this id including this id + /// remove all of the invalid forks built off of this id including this id void fork_database::remove( const block_id_type& id ) { vector remove_queue{id}; + const auto& previdx = my->index.get(); + const auto head_id = my->head->id; for( uint32_t i = 0; i < remove_queue.size(); ++i ) { - auto itr = my->index.find( remove_queue[i] ); - if( itr != my->index.end() ) - my->index.erase(itr); + EOS_ASSERT( remove_queue[i] != head_id, fork_database_exception, + "removing the block and its descendants would remove the current head block" ); - auto& previdx = my->index.get(); - auto previtr = previdx.lower_bound(remove_queue[i]); + auto previtr = previdx.lower_bound( remove_queue[i] ); while( previtr != previdx.end() && (*previtr)->header.previous == remove_queue[i] ) { remove_queue.push_back( (*previtr)->id ); ++previtr; } } - //wdump((my->index.size())); - my->head = *my->index.get().begin(); - } - void fork_database::set_validity( const block_state_ptr& h, bool valid ) { - if( !valid ) { - remove( h->id ); - } else { - /// remove older than irreversible and mark block as valid - h->validated = true; + for( const auto& block_id : remove_queue ) { + auto itr = my->index.find( block_id ); + if( itr != my->index.end() ) + my->index.erase(itr); } } - void fork_database::mark_in_current_chain( const block_state_ptr& h, bool in_current_chain ) { - if( h->in_current_chain == in_current_chain ) - return; + void fork_database::mark_valid( const block_state_ptr& h ) { + if( h->validated ) return; auto& by_id_idx = my->index.get(); - auto itr = by_id_idx.find( h->id ); - EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); - - by_id_idx.modify( itr, [&]( auto& bsp ) { // Need to modify this way rather than directly so that Boost MultiIndex can re-sort - bsp->in_current_chain = in_current_chain; - }); - } - void fork_database::prune( const block_state_ptr& h ) { - auto num = h->block_num; - - auto& by_bn = my->index.get(); - auto bni = by_bn.begin(); - while( bni != by_bn.end() && (*bni)->block_num < num ) { - prune( *bni ); - bni = by_bn.begin(); - } + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), 
fork_database_exception, + "block state not in fork database; cannot mark as valid", + ("id", h->id) ); - auto itr = my->index.find( h->id ); - if( itr != my->index.end() ) { - irreversible(*itr); - my->index.erase(itr); - } + by_id_idx.modify( itr, []( block_state_ptr& bsp ) { + bsp->validated = true; + } ); - auto& numidx = my->index.get(); - auto nitr = numidx.lower_bound( num ); - while( nitr != numidx.end() && (*nitr)->block_num == num ) { - auto itr_to_remove = nitr; - ++nitr; - auto id = (*itr_to_remove)->id; - remove( id ); + auto candidate = my->index.get().begin(); + if( first_preferred( **candidate, *my->head ) ) { + my->head = *candidate; } } @@ -284,75 +491,4 @@ namespace eosio { namespace chain { return block_state_ptr(); } - block_state_ptr fork_database::get_block_in_current_chain_by_num( uint32_t n )const { - const auto& numidx = my->index.get(); - auto nitr = numidx.lower_bound( n ); - // following asserts removed so null can be returned - //FC_ASSERT( nitr != numidx.end() && (*nitr)->block_num == n, - // "could not find block in fork database with block number ${block_num}", ("block_num", n) ); - //FC_ASSERT( (*nitr)->in_current_chain == true, - // "block (with block number ${block_num}) found in fork database is not in the current chain", ("block_num", n) ); - if( nitr == numidx.end() || (*nitr)->block_num != n || (*nitr)->in_current_chain != true ) - return block_state_ptr(); - return *nitr; - } - - void fork_database::add( const header_confirmation& c ) { - auto b = get_block( c.block_id ); - EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",c.block_id)); - b->add_confirmation( c ); - - if( b->bft_irreversible_blocknum < b->block_num && - b->confirmations.size() >= ((b->active_schedule.producers.size() * 2) / 3 + 1) ) { - set_bft_irreversible( c.block_id ); - } - } - - /** - * This method will set this block as being BFT irreversible and will update - * all blocks which build off of it to have the same bft_irb if their existing - * bft irb is less than this block num. - * - * This will require a search over all forks - */ - void fork_database::set_bft_irreversible( block_id_type id ) { - auto& idx = my->index.get(); - auto itr = idx.find(id); - uint32_t block_num = (*itr)->block_num; - idx.modify( itr, [&]( auto& bsp ) { - bsp->bft_irreversible_blocknum = bsp->block_num; - }); - - /** to prevent stack-overflow, we perform a bredth-first traversal of the - * fork database. At each stage we iterate over the leafs from the prior stage - * and find all nodes that link their previous. If we update the bft lib then we - * add it to a queue for the next layer. This lambda takes one layer and returns - * all block ids that need to be iterated over for next layer. 
- */ - auto update = [&]( const vector& in ) { - vector updated; - - for( const auto& i : in ) { - auto& pidx = my->index.get(); - auto pitr = pidx.lower_bound( i ); - auto epitr = pidx.upper_bound( i ); - while( pitr != epitr ) { - pidx.modify( pitr, [&]( auto& bsp ) { - if( bsp->bft_irreversible_blocknum < block_num ) { - bsp->bft_irreversible_blocknum = block_num; - updated.push_back( bsp->id ); - } - }); - ++pitr; - } - } - return updated; - }; - - vector queue{id}; - while( queue.size() ) { - queue = update( queue ); - } - } - } } /// eosio::chain diff --git a/libraries/chain/genesis_intrinsics.cpp b/libraries/chain/genesis_intrinsics.cpp new file mode 100644 index 00000000000..be6077acbb1 --- /dev/null +++ b/libraries/chain/genesis_intrinsics.cpp @@ -0,0 +1,181 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ + +#include + +namespace eosio { namespace chain { + +const std::vector genesis_intrinsics = { + "__ashrti3", + "__lshlti3", + "__lshrti3", + "__ashlti3", + "__divti3", + "__udivti3", + "__modti3", + "__umodti3", + "__multi3", + "__addtf3", + "__subtf3", + "__multf3", + "__divtf3", + "__eqtf2", + "__netf2", + "__getf2", + "__gttf2", + "__lttf2", + "__letf2", + "__cmptf2", + "__unordtf2", + "__negtf2", + "__floatsitf", + "__floatunsitf", + "__floatditf", + "__floatunditf", + "__floattidf", + "__floatuntidf", + "__floatsidf", + "__extendsftf2", + "__extenddftf2", + "__fixtfti", + "__fixtfdi", + "__fixtfsi", + "__fixunstfti", + "__fixunstfdi", + "__fixunstfsi", + "__fixsfti", + "__fixdfti", + "__fixunssfti", + "__fixunsdfti", + "__trunctfdf2", + "__trunctfsf2", + "is_feature_active", + "activate_feature", + "get_resource_limits", + "set_resource_limits", + "set_proposed_producers", + "get_blockchain_parameters_packed", + "set_blockchain_parameters_packed", + "is_privileged", + "set_privileged", + "get_active_producers", + "db_idx64_store", + "db_idx64_remove", + "db_idx64_update", + "db_idx64_find_primary", + "db_idx64_find_secondary", + "db_idx64_lowerbound", + "db_idx64_upperbound", + "db_idx64_end", + "db_idx64_next", + "db_idx64_previous", + "db_idx128_store", + "db_idx128_remove", + "db_idx128_update", + "db_idx128_find_primary", + "db_idx128_find_secondary", + "db_idx128_lowerbound", + "db_idx128_upperbound", + "db_idx128_end", + "db_idx128_next", + "db_idx128_previous", + "db_idx256_store", + "db_idx256_remove", + "db_idx256_update", + "db_idx256_find_primary", + "db_idx256_find_secondary", + "db_idx256_lowerbound", + "db_idx256_upperbound", + "db_idx256_end", + "db_idx256_next", + "db_idx256_previous", + "db_idx_double_store", + "db_idx_double_remove", + "db_idx_double_update", + "db_idx_double_find_primary", + "db_idx_double_find_secondary", + "db_idx_double_lowerbound", + "db_idx_double_upperbound", + "db_idx_double_end", + "db_idx_double_next", + "db_idx_double_previous", + "db_idx_long_double_store", + "db_idx_long_double_remove", + "db_idx_long_double_update", + "db_idx_long_double_find_primary", + "db_idx_long_double_find_secondary", + "db_idx_long_double_lowerbound", + "db_idx_long_double_upperbound", + "db_idx_long_double_end", + "db_idx_long_double_next", + "db_idx_long_double_previous", + "db_store_i64", + "db_update_i64", + "db_remove_i64", + "db_get_i64", + "db_next_i64", + "db_previous_i64", + "db_find_i64", + "db_lowerbound_i64", + "db_upperbound_i64", + "db_end_i64", + "assert_recover_key", + "recover_key", + "assert_sha256", + "assert_sha1", + "assert_sha512", + "assert_ripemd160", + "sha1", + "sha256", + "sha512", + "ripemd160", + 
"check_transaction_authorization", + "check_permission_authorization", + "get_permission_last_used", + "get_account_creation_time", + "current_time", + "publication_time", + "abort", + "eosio_assert", + "eosio_assert_message", + "eosio_assert_code", + "eosio_exit", + "read_action_data", + "action_data_size", + "current_receiver", + "require_recipient", + "require_auth", + "require_auth2", + "has_auth", + "is_account", + "prints", + "prints_l", + "printi", + "printui", + "printi128", + "printui128", + "printsf", + "printdf", + "printqf", + "printn", + "printhex", + "read_transaction", + "transaction_size", + "expiration", + "tapos_block_prefix", + "tapos_block_num", + "get_action", + "send_inline", + "send_context_free_inline", + "send_deferred", + "cancel_deferred", + "get_context_free_data", + "memcpy", + "memmove", + "memcmp", + "memset" +}; + +} } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 3fd6aef137d..79c6d472942 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -248,7 +248,6 @@ namespace impl { std::is_same::value || std::is_same::value || std::is_same::value || - std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index b995a8508a1..235c3b91284 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -5,6 +5,7 @@ #pragma once #include #include +#include #include #include @@ -13,20 +14,12 @@ namespace eosio { namespace chain { class account_object : public chainbase::object { - OBJECT_CTOR(account_object,(code)(abi)) + OBJECT_CTOR(account_object,(abi)) id_type id; - account_name name; - uint8_t vm_type = 0; - uint8_t vm_version = 0; - bool privileged = false; - - time_point last_code_update; - digest_type code_version; + account_name name; //< name should not be changed within a chainbase modifier lambda block_timestamp_type creation_date; - - shared_blob code; - shared_blob abi; + shared_blob abi; void set_abi( const eosio::chain::abi_def& a ) { abi.resize( fc::raw::pack_size( a ) ); @@ -54,32 +47,68 @@ namespace eosio { namespace chain { > >; - class account_sequence_object : public chainbase::object + class account_metadata_object : public chainbase::object + { + OBJECT_CTOR(account_metadata_object); + + enum class flags_fields : uint32_t { + privileged = 1 + }; + + id_type id; + account_name name; //< name should not be changed within a chainbase modifier lambda + uint64_t recv_sequence = 0; + uint64_t auth_sequence = 0; + uint64_t code_sequence = 0; + uint64_t abi_sequence = 0; + digest_type code_hash; + time_point last_code_update; + uint32_t flags = 0; + uint8_t vm_type = 0; + uint8_t vm_version = 0; + + bool is_privileged()const { return has_field( flags, flags_fields::privileged ); } + + void set_privileged( bool privileged ) { + flags = set_field( flags, flags_fields::privileged, privileged ); + } + }; + + struct by_name; + using account_metadata_index = chainbase::shared_multi_index_container< + account_metadata_object, + indexed_by< + ordered_unique, member>, + ordered_unique, member> + > + >; + + class account_ram_correction_object : public chainbase::object { - OBJECT_CTOR(account_sequence_object); + OBJECT_CTOR(account_ram_correction_object); 
id_type id; - account_name name; - uint64_t recv_sequence = 0; - uint64_t auth_sequence = 0; - uint64_t code_sequence = 0; - uint64_t abi_sequence = 0; + account_name name; //< name should not be changed within a chainbase modifier lambda + uint64_t ram_correction = 0; }; struct by_name; - using account_sequence_index = chainbase::shared_multi_index_container< - account_sequence_object, + using account_ram_correction_index = chainbase::shared_multi_index_container< + account_ram_correction_object, indexed_by< - ordered_unique, member>, - ordered_unique, member> + ordered_unique, member>, + ordered_unique, member> > >; } } // eosio::chain CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_object, eosio::chain::account_index) -CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::account_sequence_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_metadata_object, eosio::chain::account_metadata_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_ram_correction_object, eosio::chain::account_ram_correction_index) -FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(code)(abi)) -FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) \ No newline at end of file +FC_REFLECT(eosio::chain::account_object, (name)(creation_date)(abi)) +FC_REFLECT(eosio::chain::account_metadata_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence) + (code_hash)(last_code_update)(flags)(vm_type)(vm_version)) +FC_REFLECT(eosio::chain::account_ram_correction_object, (name)(ram_correction)) diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 03bfd63881e..ce7ae2bdae7 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -452,35 +452,23 @@ class apply_context { /// Constructor public: - apply_context(controller& con, transaction_context& trx_ctx, const action& a, uint32_t depth=0) - :control(con) - ,db(con.mutable_db()) - ,trx_context(trx_ctx) - ,act(a) - ,receiver(act.account) - ,used_authorizations(act.authorization.size(), false) - ,recurse_depth(depth) - ,idx64(*this) - ,idx128(*this) - ,idx256(*this) - ,idx_double(*this) - ,idx_long_double(*this) - { - reset_console(); - } - + apply_context(controller& con, transaction_context& trx_ctx, uint32_t action_ordinal, uint32_t depth=0); /// Execution methods: public: - void exec_one( action_trace& trace ); - void exec( action_trace& trace ); + void exec_one(); + void exec(); void execute_inline( action&& a ); void execute_context_free_inline( action&& a ); void schedule_deferred_transaction( const uint128_t& sender_id, account_name payer, transaction&& trx, bool replace_existing ); bool cancel_deferred_transaction( const uint128_t& sender_id, account_name sender ); bool cancel_deferred_transaction( const uint128_t& sender_id ) { return cancel_deferred_transaction(sender_id, receiver); } + protected: + uint32_t schedule_action( uint32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free ); + uint32_t schedule_action( action&& act_to_schedule, account_name receiver, bool context_free ); + /// Authorization methods: public: @@ -517,23 +505,8 @@ class apply_context { /// Console methods: public: - void reset_console(); - std::ostringstream& get_console_stream() { return _pending_console_output; } - const std::ostringstream& 
get_console_stream()const { return _pending_console_output; } - - template - void console_append(T val) { - _pending_console_output << val; - } - - template - void console_append(T val, Ts ...rest) { - console_append(val); - console_append(rest...); - }; - - inline void console_append_formatted(const string& fmt, const variant_object& vo) { - console_append(fc::format_string(fmt, vo)); + void console_append( const string& val ) { + _pending_console_output += val; } /// Database methods: @@ -570,26 +543,37 @@ class apply_context { bytes get_packed_transaction(); uint64_t next_global_sequence(); - uint64_t next_recv_sequence( account_name receiver ); + uint64_t next_recv_sequence( const account_metadata_object& receiver_account ); uint64_t next_auth_sequence( account_name actor ); void add_ram_usage( account_name account, int64_t ram_delta ); void finalize_trace( action_trace& trace, const fc::time_point& start ); + bool is_context_free()const { return context_free; } + bool is_privileged()const { return privileged; } + action_name get_receiver()const { return receiver; } + const action& get_action()const { return *act; } + + action_name get_sender() const; + /// Fields: public: controller& control; chainbase::database& db; ///< database where state is stored transaction_context& trx_context; ///< transaction context in which the action is running - const action& act; ///< message being applied + + private: + const action* act = nullptr; ///< action being applied + // act pointer may be invalidated on call to trx_context.schedule_action account_name receiver; ///< the code that is currently running - vector used_authorizations; ///< Parallel to act.authorization; tracks which permissions have been used while processing the message uint32_t recurse_depth; ///< how deep inline actions can recurse + uint32_t first_receiver_action_ordinal = 0; + uint32_t action_ordinal = 0; bool privileged = false; bool context_free = false; - bool used_context_free_api = false; + public: generic_index idx64; generic_index idx128; generic_index idx256; @@ -599,10 +583,10 @@ class apply_context { private: iterator_cache keyval_cache; - vector _notified; ///< keeps track of new accounts to be notifed of current message - vector _inline_actions; ///< queued inline messages - vector _cfa_inline_actions; ///< queued inline messages - std::ostringstream _pending_console_output; + vector< std::pair > _notified; ///< keeps track of new accounts to be notifed of current message + vector _inline_actions; ///< action_ordinals of queued inline actions + vector _cfa_inline_actions; ///< action_ordinals of queued inline context-free actions + std::string _pending_console_output; flat_set _account_ram_deltas; ///< flat_set of account_delta so json is an array of objects //bytes _cached_trx; diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index bf9cf0bedb8..fc751826d95 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -1,24 +1,41 @@ #pragma once #include #include +#include + +#include namespace eosio { namespace chain { + namespace detail { + template + struct block_header_extension_types { + using block_header_extensions_t = fc::static_variant< Ts... >; + using decompose_t = decompose< Ts... 
>; + }; + } + + using block_header_extension_types = detail::block_header_extension_types< + protocol_feature_activation + >; + + using block_header_extensions = block_header_extension_types::block_header_extensions_t; + struct block_header { block_timestamp_type timestamp; account_name producer; /** - * By signing this block this producer is confirming blocks [block_num() - confirmed, blocknum()) + * By signing this block this producer is confirming blocks [block_num() - confirmed, blocknum()) * as being the best blocks for that range and that he has not signed any other - * statements that would contradict. + * statements that would contradict. * * No producer should sign a block with overlapping ranges or it is proof of byzantine * behavior. When producing a block a producer is always confirming at least the block he * is building off of. A producer cannot confirm "this" block, only prior blocks. */ - uint16_t confirmed = 1; + uint16_t confirmed = 1; block_id_type previous; @@ -35,10 +52,14 @@ namespace eosio { namespace chain { extensions_type header_extensions; + block_header() = default; + digest_type digest()const; block_id_type id() const; uint32_t block_num() const { return num_from_id(previous) + 1; } static uint32_t num_from_id(const block_id_type& id); + + vector validate_and_extract_header_extensions()const; }; @@ -47,18 +68,11 @@ namespace eosio { namespace chain { signature_type producer_signature; }; - struct header_confirmation { - block_id_type block_id; - account_name producer; - signature_type producer_signature; - }; - } } /// namespace eosio::chain -FC_REFLECT(eosio::chain::block_header, +FC_REFLECT(eosio::chain::block_header, (timestamp)(producer)(confirmed)(previous) (transaction_mroot)(action_mroot) (schedule_version)(new_producers)(header_extensions)) FC_REFLECT_DERIVED(eosio::chain::signed_block_header, (eosio::chain::block_header), (producer_signature)) -FC_REFLECT(eosio::chain::header_confirmation, (block_id)(producer)(producer_signature) ) diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index c318843d5df..41e19253138 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -5,63 +5,130 @@ namespace eosio { namespace chain { +struct block_header_state; + +namespace detail { + struct block_header_state_common { + uint32_t block_num = 0; + uint32_t dpos_proposed_irreversible_blocknum = 0; + uint32_t dpos_irreversible_blocknum = 0; + producer_schedule_type active_schedule; + incremental_merkle blockroot_merkle; + flat_map producer_to_last_produced; + flat_map producer_to_last_implied_irb; + public_key_type block_signing_key; + vector confirm_count; + }; + + struct schedule_info { + uint32_t schedule_lib_num = 0; /// last irr block num + digest_type schedule_hash; + producer_schedule_type schedule; + }; +} + +struct pending_block_header_state : public detail::block_header_state_common { + protocol_feature_activation_set_ptr prev_activated_protocol_features; + detail::schedule_info prev_pending_schedule; + bool was_pending_promoted = false; + block_id_type previous; + account_name producer; + block_timestamp_type timestamp; + uint32_t active_schedule_version = 0; + uint16_t confirmed = 1; + + signed_block_header make_block_header( const checksum256_type& transaction_mroot, + const checksum256_type& action_mroot, + optional&& new_producers, + vector&& new_protocol_feature_activations )const; + + 
block_header_state finish_next( const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee = false )&&; + + block_header_state finish_next( signed_block_header& h, + const std::function&, + const vector& )>& validator, + const std::function& signer )&&; + +protected: + block_header_state _finish_next( const signed_block_header& h, + const std::function&, + const vector& )>& validator )&&; +}; + + /** * @struct block_header_state * @brief defines the minimum state necessary to validate transaction headers */ -struct block_header_state { - block_id_type id; - uint32_t block_num = 0; - signed_block_header header; - uint32_t dpos_proposed_irreversible_blocknum = 0; - uint32_t dpos_irreversible_blocknum = 0; - uint32_t bft_irreversible_blocknum = 0; - uint32_t pending_schedule_lib_num = 0; /// last irr block num - digest_type pending_schedule_hash; - producer_schedule_type pending_schedule; - producer_schedule_type active_schedule; - incremental_merkle blockroot_merkle; - flat_map producer_to_last_produced; - flat_map producer_to_last_implied_irb; - public_key_type block_signing_key; - vector confirm_count; - vector confirmations; - - block_header_state next( const signed_block_header& h, bool trust = false )const; - block_header_state generate_next( block_timestamp_type when )const; - - void set_new_producers( producer_schedule_type next_pending ); - void set_confirmed( uint16_t num_prev_blocks ); - void add_confirmation( const header_confirmation& c ); - bool maybe_promote_pending(); - - - bool has_pending_producers()const { return pending_schedule.producers.size(); } - uint32_t calc_dpos_last_irreversible()const; - bool is_active_producer( account_name n )const; - - /* - block_timestamp_type get_slot_time( uint32_t slot_num )const; - uint32_t get_slot_at_time( block_timestamp_type t )const; - producer_key get_scheduled_producer( uint32_t slot_num )const; - uint32_t producer_participation_rate()const; - */ - - producer_key get_scheduled_producer( block_timestamp_type t )const; - const block_id_type& prev()const { return header.previous; } - digest_type sig_digest()const; - void sign( const std::function& signer ); - public_key_type signee()const; - void verify_signee(const public_key_type& signee)const; -}; +struct block_header_state : public detail::block_header_state_common { + block_id_type id; + signed_block_header header; + detail::schedule_info pending_schedule; + protocol_feature_activation_set_ptr activated_protocol_features; + /// this data is redundant with the data stored in header, but it acts as a cache that avoids + /// duplication of work + vector header_exts; + block_header_state() = default; + + explicit block_header_state( detail::block_header_state_common&& base ) + :detail::block_header_state_common( std::move(base) ) + {} + + pending_block_header_state next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; + + block_header_state next( const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee = false )const; + + bool has_pending_producers()const { return pending_schedule.schedule.producers.size(); } + uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; + bool is_active_producer( account_name n )const; + + producer_key get_scheduled_producer( block_timestamp_type t )const; + const block_id_type& prev()const { return header.previous; } + digest_type sig_digest()const; + void sign( const std::function& signer ); + 
public_key_type signee()const; + void verify_signee(const public_key_type& signee)const; + + const vector& get_new_protocol_feature_activations()const; +}; + +using block_header_state_ptr = std::shared_ptr; } } /// namespace eosio::chain -FC_REFLECT( eosio::chain::block_header_state, - (id)(block_num)(header)(dpos_proposed_irreversible_blocknum)(dpos_irreversible_blocknum)(bft_irreversible_blocknum) - (pending_schedule_lib_num)(pending_schedule_hash) - (pending_schedule)(active_schedule)(blockroot_merkle) - (producer_to_last_produced)(producer_to_last_implied_irb)(block_signing_key) - (confirm_count)(confirmations) ) +FC_REFLECT( eosio::chain::detail::block_header_state_common, + (block_num) + (dpos_proposed_irreversible_blocknum) + (dpos_irreversible_blocknum) + (active_schedule) + (blockroot_merkle) + (producer_to_last_produced) + (producer_to_last_implied_irb) + (block_signing_key) + (confirm_count) +) + +FC_REFLECT( eosio::chain::detail::schedule_info, + (schedule_lib_num) + (schedule_hash) + (schedule) +) + +FC_REFLECT_DERIVED( eosio::chain::block_header_state, (eosio::chain::detail::block_header_state_common), + (id) + (header) + (pending_schedule) + (activated_protocol_features) +) diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 2292392ade4..e91161cf716 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -12,15 +12,39 @@ namespace eosio { namespace chain { struct block_state : public block_header_state { - explicit block_state( const block_header_state& cur ):block_header_state(cur){} - block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ); - block_state( const block_header_state& prev, block_timestamp_type when ); + block_state( const block_header_state& prev, + signed_block_ptr b, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee + ); + + block_state( pending_block_header_state&& cur, + signed_block_ptr&& b, // unsigned block + vector&& trx_metas, + const std::function&, + const vector& )>& validator, + const std::function& signer + ); + + block_state( pending_block_header_state&& cur, + const signed_block_ptr& b, // signed block + vector&& trx_metas, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee + ); + block_state() = default; - /// weak_ptr prev_block_state.... + bool is_valid()const { return validated; } + + signed_block_ptr block; bool validated = false; - bool in_current_chain = false; /// this data is redundant with the data stored in block, but facilitates /// recapturing transactions when we pop a block @@ -31,4 +55,4 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain -FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated)(in_current_chain) ) +FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated) ) diff --git a/libraries/chain/include/eosio/chain/chain_id_type.hpp b/libraries/chain/include/eosio/chain/chain_id_type.hpp index a16fc143ae6..59ab8f248b0 100644 --- a/libraries/chain/include/eosio/chain/chain_id_type.hpp +++ b/libraries/chain/include/eosio/chain/chain_id_type.hpp @@ -47,8 +47,6 @@ namespace chain { friend class eosio::net_plugin_impl; friend struct eosio::handshake_message; - - friend struct ::hello; // TODO: Rushed hack to support bnet_plugin. Need a better solution. 
}; } } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp index 3b3e64f264f..5546b301999 100644 --- a/libraries/chain/include/eosio/chain/chain_snapshot.hpp +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -12,10 +12,13 @@ struct chain_snapshot_header { /** * Version history * 1: initial version + * 2: Updated chain snapshot for v1.8.0 initial protocol features release: + * - Incompatible with version 1. + * - Adds new indices for: protocol_state_object and account_ram_correction_object */ - static constexpr uint32_t minimum_compatible_version = 1; - static constexpr uint32_t current_version = 1; + static constexpr uint32_t minimum_compatible_version = 2; + static constexpr uint32_t current_version = 2; uint32_t version = current_version; @@ -31,4 +34,4 @@ struct chain_snapshot_header { } } -FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) \ No newline at end of file +FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) diff --git a/libraries/chain/include/eosio/chain/code_object.hpp b/libraries/chain/include/eosio/chain/code_object.hpp new file mode 100644 index 00000000000..3309704a335 --- /dev/null +++ b/libraries/chain/include/eosio/chain/code_object.hpp @@ -0,0 +1,44 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once +#include +#include + +#include "multi_index_includes.hpp" + +namespace eosio { namespace chain { + + class code_object : public chainbase::object { + OBJECT_CTOR(code_object, (code)) + + id_type id; + digest_type code_hash; //< code_hash should not be changed within a chainbase modifier lambda + shared_blob code; + uint64_t code_ref_count; + uint32_t first_block_used; + uint8_t vm_type = 0; //< vm_type should not be changed within a chainbase modifier lambda + uint8_t vm_version = 0; //< vm_version should not be changed within a chainbase modifier lambda + }; + + struct by_code_hash; + using code_index = chainbase::shared_multi_index_container< + code_object, + indexed_by< + ordered_unique, member>, + ordered_unique, + composite_key< code_object, + member, + member, + member + > + > + > + >; + +} } // eosio::chain + +CHAINBASE_SET_INDEX_TYPE(eosio::chain::code_object, eosio::chain::code_index) + +FC_REFLECT(eosio::chain::code_object, (code_hash)(code)(code_ref_count)(first_block_used)(vm_type)(vm_version)) diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index 0d5ff9e9469..a780cf42832 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -18,7 +18,7 @@ const static auto default_reversible_cache_size = 340*1024*1024ll;/// 1MB * 340 const static auto default_reversible_guard_size = 2*1024*1024ll;/// 1MB * 340 blocks based on 21 producer BFT delay const static auto default_state_dir_name = "state"; -const static auto forkdb_filename = "forkdb.dat"; +const static auto forkdb_filename = "fork_db.dat"; const static auto default_state_size = 1*1024*1024*1024ll; const static auto default_state_guard_size = 128*1024*1024ll; diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index dc6b25b0501..bc58cb3e6d9 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -20,9 +20,9 @@ namespace eosio { namespace chain { 
OBJECT_CTOR(table_id_object) id_type id; - account_name code; - scope_name scope; - table_name table; + account_name code; //< code should not be changed within a chainbase modifier lambda + scope_name scope; //< scope should not be changed within a chainbase modifier lambda + table_name table; //< table should not be changed within a chainbase modifier lambda account_name payer; uint32_t count = 0; /// the number of elements in the table }; @@ -59,8 +59,8 @@ namespace eosio { namespace chain { static const int number_of_keys = 1; id_type id; - table_id t_id; - uint64_t primary_key; + table_id t_id; //< t_id should not be changed within a chainbase modifier lambda + uint64_t primary_key; //< primary_key should not be changed within a chainbase modifier lambda account_name payer = 0; shared_blob value; }; @@ -90,10 +90,10 @@ namespace eosio { namespace chain { typedef SecondaryKey secondary_key_type; typename chainbase::object::id_type id; - table_id t_id; - uint64_t primary_key; + table_id t_id; //< t_id should not be changed within a chainbase modifier lambda + uint64_t primary_key; //< primary_key should not be changed within a chainbase modifier lambda account_name payer = 0; - SecondaryKey secondary_key; + SecondaryKey secondary_key; //< secondary_key should not be changed within a chainbase modifier lambda }; diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f4aa46fa0dd..c7702d09414 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -2,11 +2,13 @@ #include #include #include +#include #include #include #include #include +#include namespace chainbase { class database; @@ -25,6 +27,7 @@ namespace eosio { namespace chain { struct controller_impl; using chainbase::database; + using chainbase::pinnable_mapped_file; using boost::signals2::signal; class dynamic_global_property_object; @@ -33,7 +36,7 @@ namespace eosio { namespace chain { class account_object; using resource_limits::resource_limits_manager; using apply_handler = std::function; - using unapplied_transactions_type = map; + using unapplied_transactions_type = map; class fork_database; @@ -73,6 +76,7 @@ namespace eosio { namespace chain { bool disable_replay_opts = false; bool contracts_console = false; bool allow_ram_billing_in_notify = false; + bool disable_all_subjective_mitigations = false; //< for testing purposes only genesis_state genesis; wasm_interface::vm_type wasm_runtime = chain::config::default_wasm_runtime; @@ -80,6 +84,9 @@ namespace eosio { namespace chain { db_read_mode read_mode = db_read_mode::SPECULATIVE; validation_mode block_validation_mode = validation_mode::FULL; + pinnable_mapped_file::map_mode db_map_mode = pinnable_mapped_file::map_mode::mapped; + vector db_hugepage_paths; + flat_set resource_greylist; flat_set trusted_producers; }; @@ -92,16 +99,33 @@ namespace eosio { namespace chain { }; explicit controller( const config& cfg ); + controller( const config& cfg, protocol_feature_set&& pfs ); ~controller(); void add_indices(); void startup( std::function shutdown, const snapshot_reader_ptr& snapshot = nullptr ); + void preactivate_feature( const digest_type& feature_digest ); + + vector get_preactivated_protocol_features()const; + + void validate_protocol_features( const vector& features_to_activate )const; + + /** + * Starts a new pending block session upon which new transactions can + * be pushed. 
+ * + * Will only activate protocol features that have been pre-activated. + */ + void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0 ); + /** * Starts a new pending block session upon which new transactions can * be pushed. */ - void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0 ); + void start_block( block_timestamp_type time, + uint16_t confirm_block_count, + const vector& new_protocol_feature_activations ); void abort_block(); @@ -127,7 +151,7 @@ namespace eosio { namespace chain { */ transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 ); - void finalize_block(); + block_state_ptr finalize_block( const std::function& signer_callback ); void sign_block( const std::function& signer_callback ); void commit_block(); void pop_block(); @@ -135,7 +159,7 @@ namespace eosio { namespace chain { std::future create_block_state_future( const signed_block_ptr& b ); void push_block( std::future& block_state_future ); - boost::asio::thread_pool& get_thread_pool(); + boost::asio::io_context& get_thread_pool(); const chainbase::database& db()const; @@ -148,6 +172,7 @@ namespace eosio { namespace chain { resource_limits_manager& get_mutable_resource_limits_manager(); const authorization_manager& get_authorization_manager()const; authorization_manager& get_mutable_authorization_manager(); + const protocol_feature_manager& get_protocol_feature_manager()const; const flat_set& get_actor_whitelist() const; const flat_set& get_actor_blacklist() const; @@ -175,10 +200,18 @@ namespace eosio { namespace chain { time_point fork_db_head_block_time()const; account_name fork_db_head_block_producer()const; + uint32_t fork_db_pending_head_block_num()const; + block_id_type fork_db_pending_head_block_id()const; + time_point fork_db_pending_head_block_time()const; + account_name fork_db_pending_head_block_producer()const; + time_point pending_block_time()const; - block_state_ptr pending_block_state()const; + account_name pending_block_producer()const; + public_key_type pending_block_signing_key()const; optional pending_producer_block_id()const; + const vector& get_pending_trx_receipts()const; + const producer_schedule_type& active_producers()const; const producer_schedule_type& pending_producers()const; optional proposed_producers()const; @@ -202,6 +235,7 @@ namespace eosio { namespace chain { void check_contract_list( account_name code )const; void check_action_list( account_name code, action_name action )const; void check_key_list( const public_key_type& key )const; + bool is_building_block()const; bool is_producing_block()const; bool is_ram_billing_in_notify_allowed()const; @@ -216,6 +250,9 @@ namespace eosio { namespace chain { void validate_db_available_size() const; void validate_reversible_available_size() const; + bool is_protocol_feature_activated( const digest_type& feature_digest )const; + bool is_builtin_activated( builtin_protocol_feature_t f )const; + bool is_known_unexpired_transaction( const transaction_id_type& id) const; int64_t set_proposed_producers( vector producers ); @@ -235,12 +272,17 @@ namespace eosio { namespace chain { void set_subjective_cpu_leeway(fc::microseconds leeway); + void add_to_ram_correction( account_name account, uint64_t ram_bytes ); + bool all_subjective_mitigations_disabled()const; + + static fc::optional convert_exception_to_error_code( const fc::exception& e ); + signal 
pre_accepted_block; signal accepted_block_header; signal accepted_block; signal irreversible_block; signal accepted_transaction; - signal applied_transaction; + signal)> applied_transaction; signal bad_alloc; /* diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 6c3e504d349..f67e9c85a7c 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -68,10 +68,52 @@ { throw( effect_type( e.what(), e.get_log() ) ); } +#define FC_DECLARE_DERIVED_EXCEPTION_WITH_ERROR_CODE( TYPE, BASE, CODE, WHAT ) \ + class TYPE : public BASE \ + { \ + public: \ + enum code_enum { \ + code_value = CODE, \ + }; \ + explicit TYPE( int64_t code, const std::string& name_value, const std::string& what_value ) \ + :BASE( code, name_value, what_value ){} \ + explicit TYPE( fc::log_message&& m, int64_t code, const std::string& name_value, const std::string& what_value ) \ + :BASE( std::move(m), code, name_value, what_value ){} \ + explicit TYPE( fc::log_messages&& m, int64_t code, const std::string& name_value, const std::string& what_value )\ + :BASE( std::move(m), code, name_value, what_value ){}\ + explicit TYPE( const fc::log_messages& m, int64_t code, const std::string& name_value, const std::string& what_value )\ + :BASE( m, code, name_value, what_value ){}\ + TYPE( const std::string& what_value, const fc::log_messages& m ) \ + :BASE( m, CODE, BOOST_PP_STRINGIZE(TYPE), what_value ){} \ + TYPE( fc::log_message&& m ) \ + :BASE( fc::move(m), CODE, BOOST_PP_STRINGIZE(TYPE), WHAT ){}\ + TYPE( fc::log_messages msgs ) \ + :BASE( fc::move( msgs ), CODE, BOOST_PP_STRINGIZE(TYPE), WHAT ) {} \ + TYPE( const TYPE& c ) \ + :BASE(c),error_code(c.error_code) {} \ + TYPE( const BASE& c ) \ + :BASE(c){} \ + TYPE():BASE(CODE, BOOST_PP_STRINGIZE(TYPE), WHAT){}\ + \ + virtual std::shared_ptr dynamic_copy_exception()const\ + { return std::make_shared( *this ); } \ + virtual NO_RETURN void dynamic_rethrow_exception()const \ + { if( code() == CODE ) throw *this;\ + else fc::exception::dynamic_rethrow_exception(); \ + } \ + fc::optional error_code; \ + }; + namespace eosio { namespace chain { - FC_DECLARE_EXCEPTION( chain_exception, - 3000000, "blockchain exception" ) + enum class system_error_code : uint64_t { + generic_system_error = 10000000000000000000ULL, + contract_restricted_error_code, //< contract used an error code reserved for system usage + }; + + + FC_DECLARE_DERIVED_EXCEPTION_WITH_ERROR_CODE( chain_exception, fc::exception, + 3000000, "blockchain exception" ) /** * chain_exception * |- chain_type_exception @@ -158,7 +200,10 @@ namespace eosio { namespace chain { 3030008, "Block is not signed with expected key" ) FC_DECLARE_DERIVED_EXCEPTION( wrong_producer, block_validate_exception, 3030009, "Block is not signed by expected producer" ) - + FC_DECLARE_DERIVED_EXCEPTION( invalid_block_header_extension, block_validate_exception, + 3030010, "Invalid block header extension" ) + FC_DECLARE_DERIVED_EXCEPTION( ill_formed_protocol_feature_activation, block_validate_exception, + 3030011, "Block includes an ill-formed protocol feature activation extension" ) @@ -193,6 +238,12 @@ namespace eosio { namespace chain { 3040013, "Transaction is too big" ) FC_DECLARE_DERIVED_EXCEPTION( unknown_transaction_compression, transaction_exception, 3040014, "Unknown transaction compression" ) + FC_DECLARE_DERIVED_EXCEPTION( invalid_transaction_extension, transaction_exception, + 3040015, "Invalid transaction 
extension" ) + FC_DECLARE_DERIVED_EXCEPTION( ill_formed_deferred_transaction_generation_context, transaction_exception, + 3040016, "Transaction includes an ill-formed deferred transaction generation context extension" ) + FC_DECLARE_DERIVED_EXCEPTION( disallowed_transaction_extensions_bad_block_exception, transaction_exception, + 3040017, "Transaction includes disallowed extensions (invalid block)" ) FC_DECLARE_DERIVED_EXCEPTION( action_validate_exception, chain_exception, @@ -216,6 +267,10 @@ namespace eosio { namespace chain { 3050008, "Abort Called" ) FC_DECLARE_DERIVED_EXCEPTION( inline_action_too_big, action_validate_exception, 3050009, "Inline Action exceeds maximum size limit" ) + FC_DECLARE_DERIVED_EXCEPTION( unauthorized_ram_usage_increase, action_validate_exception, + 3050010, "Action attempts to increase RAM usage of account without authorization" ) + FC_DECLARE_DERIVED_EXCEPTION( restricted_error_code_exception, action_validate_exception, + 3050011, "eosio_assert_code assertion failure uses restricted error code value" ) FC_DECLARE_DERIVED_EXCEPTION( database_exception, chain_exception, 3060000, "Database exception" ) @@ -270,6 +325,7 @@ namespace eosio { namespace chain { 3080007, "Transaction exceeded the current greylisted account network usage limit" ) FC_DECLARE_DERIVED_EXCEPTION( greylist_cpu_usage_exceeded, resource_exhausted_exception, 3080008, "Transaction exceeded the current greylisted account CPU usage limit" ) + FC_DECLARE_DERIVED_EXCEPTION( leeway_deadline_exception, deadline_exception, 3081001, "Transaction reached the deadline set due to leeway on account CPU limits" ) @@ -460,6 +516,10 @@ namespace eosio { namespace chain { 3170007, "The configured snapshot directory does not exist" ) FC_DECLARE_DERIVED_EXCEPTION( snapshot_exists_exception, producer_exception, 3170008, "The requested snapshot already exists" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_finalization_exception, producer_exception, + 3170009, "Snapshot Finalization Exception" ) + FC_DECLARE_DERIVED_EXCEPTION( invalid_protocol_features_to_activate, producer_exception, + 3170010, "The protocol features to be activated were not valid" ) FC_DECLARE_DERIVED_EXCEPTION( reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) @@ -519,4 +579,13 @@ namespace eosio { namespace chain { 3240000, "Snapshot exception" ) FC_DECLARE_DERIVED_EXCEPTION( snapshot_validation_exception, snapshot_exception, 3240001, "Snapshot Validation Exception" ) + + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_exception, chain_exception, + 3250000, "Protocol feature exception" ) + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_validation_exception, protocol_feature_exception, + 3250001, "Protocol feature validation exception" ) + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_bad_block_exception, protocol_feature_exception, + 3250002, "Protocol feature exception (invalid block)" ) + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_iterator_exception, protocol_feature_exception, + 3250003, "Protocol feature iterator exception" ) } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 998157ab41a..e225d0fdcd1 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -22,56 +22,78 @@ namespace eosio { namespace chain { class fork_database { public: - fork_database( const fc::path& data_dir ); + explicit fork_database( const fc::path& data_dir ); 
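
A usage sketch of the reworked interface declared below; illustrative only (data_dir and validator are assumed to exist, error handling is elided):

    fork_database fdb( data_dir );
    fdb.open( validator );                       // loads fork_db.dat, validating stored block states
    auto lib    = fdb.root();                    // the irreversible root block state
    auto head   = fdb.pending_head();            // best candidate head, validated or not
    auto branch = fdb.fetch_branch( head->id );  // branch from root (exclusive) to head, descending by block number
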
~fork_database(); + void open( const std::function&, + const vector& )>& validator ); void close(); - block_state_ptr get_block(const block_id_type& id)const; - block_state_ptr get_block_in_current_chain_by_num( uint32_t n )const; -// vector get_blocks_by_number(uint32_t n)const; + block_header_state_ptr get_block_header( const block_id_type& id )const; + block_state_ptr get_block( const block_id_type& id )const; /** - * Provides a "valid" blockstate upon which other forks may build. + * Purges any existing blocks from the fork database and resets the root block_header_state to the provided value. + * The head will also be reset to point to the root. */ - void set( block_state_ptr s ); + void reset( const block_header_state& root_bhs ); - /** this method will attempt to append the block to an existing - * block_state and will return a pointer to the new block state or - * throw on error. + /** + * Removes validated flag from all blocks in fork database and resets head to point to the root. */ - block_state_ptr add( signed_block_ptr b, bool skip_validate_signee ); - block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous ); - void remove( const block_id_type& id ); + void rollback_head_to_root(); - void add( const header_confirmation& c ); + /** + * Advance root block forward to some other block in the tree. + */ + void advance_root( const block_id_type& id ); + /** + * Add block state to fork database. + * Must link to existing block in fork database or the root. + */ + void add( const block_state_ptr& next_block, bool ignore_duplicate = false ); + + void remove( const block_id_type& id ); + + const block_state_ptr& root()const; const block_state_ptr& head()const; + block_state_ptr pending_head()const; /** - * Given two head blocks, return two branches of the fork graph that - * end with a common ancestor (same prior block) + * Returns the sequence of block states resulting from trimming the branch from the + * root block (exclusive) to the block with an id of `h` (inclusive) by removing any + * block states corresponding to block numbers greater than `trim_after_block_num`. + * + * The order of the sequence is in descending block number order. + * A block with an id of `h` must exist in the fork database otherwise this method will throw an exception. */ - pair< branch_type, branch_type > fetch_branch_from( const block_id_type& first, - const block_id_type& second )const; + branch_type fetch_branch( const block_id_type& h, uint32_t trim_after_block_num = std::numeric_limits::max() )const; /** - * If the block is invalid, it will be removed. If it is valid, then blocks older - * than the LIB are pruned after emitting irreversible signal. + * Returns the block state with a block number of `block_num` that is on the branch that + * contains a block with an id of`h`, or the empty shared pointer if no such block can be found. */ - void set_validity( const block_state_ptr& h, bool valid ); - void mark_in_current_chain( const block_state_ptr& h, bool in_current_chain ); - void prune( const block_state_ptr& h ); + block_state_ptr search_on_branch( const block_id_type& h, uint32_t block_num )const; /** - * This signal is emited when a block state becomes irreversible, once irreversible - * it is removed unless it is the head block. 
+ * Given two head blocks, return two branches of the fork graph that + * end with a common ancestor (same prior block) */ - signal irreversible; + pair< branch_type, branch_type > fetch_branch_from( const block_id_type& first, + const block_id_type& second )const; + + + void mark_valid( const block_state_ptr& h ); + + static const uint32_t magic_number; + + static const uint32_t min_supported_version; + static const uint32_t max_supported_version; private: - void set_bft_irreversible( block_id_type id ); unique_ptr my; }; diff --git a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp index 7c3da995e65..74e07a4d9e9 100644 --- a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp @@ -28,9 +28,9 @@ namespace eosio { namespace chain { OBJECT_CTOR(generated_transaction_object, (packed_trx) ) id_type id; - transaction_id_type trx_id; - account_name sender; - uint128_t sender_id = 0; /// ID given this transaction by the sender + transaction_id_type trx_id; //< trx_id should not be changed within a chainbase modifier lambda + account_name sender; //< sender should not be changed within a chainbase modifier lambda + uint128_t sender_id = 0; /// ID given this transaction by the sender (should not be changed within a chainbase modifier lambda) account_name payer; time_point delay_until; /// this generated transaction will not be applied until the specified time time_point expiration; /// this generated transaction will not be applied after this time diff --git a/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp b/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp new file mode 100644 index 00000000000..bd736d6a285 --- /dev/null +++ b/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp @@ -0,0 +1,14 @@ + +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { namespace chain { + +extern const std::vector genesis_intrinsics; + +} } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index 7f3c09cccf5..14ed594c0bd 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -18,32 +18,36 @@ namespace eosio { namespace chain { /** * @class global_property_object - * @brief Maintains global state information (committee_member list, current fees) + * @brief Maintains global state information about block producer schedules and chain configuration parameters * @ingroup object * @ingroup implementation - * - * This is an implementation detail. The values here are set by committee_members to tune the blockchain parameters. 
 */
    class global_property_object : public chainbase::object<global_property_object_type, global_property_object>
    {
       OBJECT_CTOR(global_property_object, (proposed_schedule))
 
-      id_type                             id;
-      optional<block_num_type>            proposed_schedule_block_num;
-      shared_producer_schedule_type       proposed_schedule;
-      chain_config                        configuration;
+      public:
+         id_type                             id;
+         optional<block_num_type>            proposed_schedule_block_num;
+         shared_producer_schedule_type       proposed_schedule;
+         chain_config                        configuration;
    };
 
+   using global_property_multi_index = chainbase::shared_multi_index_container<
+      global_property_object,
+      indexed_by<
+         ordered_unique<tag<by_id>,
+            BOOST_MULTI_INDEX_MEMBER(global_property_object, global_property_object::id_type, id)
+         >
+      >
+   >;
 
    /**
     * @class dynamic_global_property_object
-    * @brief Maintains global state information (committee_member list, current fees)
+    * @brief Maintains global state information that frequently changes
     * @ingroup object
     * @ingroup implementation
-    *
-    * This is an implementation detail. The values here are calculated during normal chain operations and reflect the
-    * current values of global blockchain properties.
     */
    class dynamic_global_property_object : public chainbase::object<dynamic_global_property_object_type, dynamic_global_property_object>
    {
@@ -53,15 +57,6 @@ namespace eosio { namespace chain {
         uint64_t              global_action_sequence = 0;
    };
 
-   using global_property_multi_index = chainbase::shared_multi_index_container<
-      global_property_object,
-      indexed_by<
-         ordered_unique<tag<by_id>,
-            BOOST_MULTI_INDEX_MEMBER(global_property_object, global_property_object::id_type, id)
-         >
-      >
-   >;
-
    using dynamic_global_property_multi_index = chainbase::shared_multi_index_container<
       dynamic_global_property_object,
       indexed_by<
@@ -77,10 +72,10 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::glo
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index)
 
-FC_REFLECT(eosio::chain::dynamic_global_property_object,
-           (global_action_sequence)
+FC_REFLECT(eosio::chain::global_property_object,
+           (proposed_schedule_block_num)(proposed_schedule)(configuration)
           )
 
-FC_REFLECT(eosio::chain::global_property_object,
-           (proposed_schedule_block_num)(proposed_schedule)(configuration)
+FC_REFLECT(eosio::chain::dynamic_global_property_object,
+           (global_action_sequence)
           )
diff --git a/libraries/chain/include/eosio/chain/permission_link_object.hpp b/libraries/chain/include/eosio/chain/permission_link_object.hpp
index b7b4ea76f57..ee5b1287b18 100644
--- a/libraries/chain/include/eosio/chain/permission_link_object.hpp
+++ b/libraries/chain/include/eosio/chain/permission_link_object.hpp
@@ -39,6 +39,7 @@ namespace eosio { namespace chain {
          /// May be empty; if so, it sets a default @ref required_permission for all messages to @ref code
          action_name       message_type;
          /// The permission level which @ref account requires for the specified message types
+         /// all of the above fields should not be changed within a chainbase modifier lambda
          permission_name   required_permission;
    };
@@ -61,8 +62,7 @@ namespace eosio { namespace chain {
            composite_key > >
diff --git a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp
index 7db580b74d8..335ce754907 100644
--- a/libraries/chain/include/eosio/chain/permission_object.hpp
+++ b/libraries/chain/include/eosio/chain/permission_object.hpp
@@ -32,8 +32,8 @@ namespace eosio { namespace chain {
          id_type                           id;
          permission_usage_object::id_type  usage_id;
          id_type                           parent; ///< parent permission
-         account_name                      owner; ///< the account this permission belongs to
-         permission_name                   name; ///< human-readable name
for the permission + account_name owner; ///< the account this permission belongs to (should not be changed within a chainbase modifier lambda) + permission_name name; ///< human-readable name for the permission (should not be changed within a chainbase modifier lambda) time_point last_updated; ///< the last time this authority was updated shared_authority auth; ///< authority required to execute this permission diff --git a/libraries/chain/include/eosio/chain/producer_object.hpp b/libraries/chain/include/eosio/chain/producer_object.hpp deleted file mode 100644 index 61d23870280..00000000000 --- a/libraries/chain/include/eosio/chain/producer_object.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ -#pragma once -#include -#include - -#include "multi_index_includes.hpp" - -namespace eosio { namespace chain { -class producer_object : public chainbase::object { - OBJECT_CTOR(producer_object) - - id_type id; - account_name owner; - uint64_t last_aslot = 0; - public_key_type signing_key; - int64_t total_missed = 0; - uint32_t last_confirmed_block_num = 0; - - - /// The blockchain configuration values this producer recommends - chain_config configuration; -}; - -struct by_key; -struct by_owner; -using producer_multi_index = chainbase::shared_multi_index_container< - producer_object, - indexed_by< - ordered_unique, member>, - ordered_unique, member>, - ordered_unique, - composite_key, - member - > - > - > ->; - -} } // eosio::chain - -CHAINBASE_SET_INDEX_TYPE(eosio::chain::producer_object, eosio::chain::producer_multi_index) - -FC_REFLECT(eosio::chain::producer_object::id_type, (_id)) -FC_REFLECT(eosio::chain::producer_object, (id)(owner)(last_aslot)(signing_key)(total_missed)(last_confirmed_block_num) - (configuration)) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp new file mode 100644 index 00000000000..03ab31be131 --- /dev/null +++ b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp @@ -0,0 +1,49 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { namespace chain { + +struct protocol_feature_activation : fc::reflect_init { + static constexpr uint16_t extension_id() { return 0; } + static constexpr bool enforce_unique() { return true; } + + protocol_feature_activation() = default; + + protocol_feature_activation( const vector& pf ) + :protocol_features( pf ) + {} + + protocol_feature_activation( vector&& pf ) + :protocol_features( std::move(pf) ) + {} + + void reflector_init(); + + vector protocol_features; +}; + +struct protocol_feature_activation_set; + +using protocol_feature_activation_set_ptr = std::shared_ptr; + +struct protocol_feature_activation_set { + flat_set protocol_features; + + protocol_feature_activation_set() = default; + + protocol_feature_activation_set( const protocol_feature_activation_set& orig_pfa_set, + vector additional_features, + bool enforce_disjoint = true + ); +}; + + +} } // namespace eosio::chain + +FC_REFLECT(eosio::chain::protocol_feature_activation, (protocol_features)) +FC_REFLECT(eosio::chain::protocol_feature_activation_set, (protocol_features)) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp new file mode 100644 index 00000000000..c2f0140433f --- /dev/null +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -0,0 +1,380 @@ 
+/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include +#include + +namespace eosio { namespace chain { + +enum class protocol_feature_t : uint32_t { + builtin +}; + +enum class builtin_protocol_feature_t : uint32_t { + preactivate_feature, + only_link_to_existing_permission, + replace_deferred, + no_duplicate_deferred_id, + fix_linkauth_restriction, + disallow_empty_producer_schedule, + restrict_action_to_self, + only_bill_first_authorizer, + forward_setcode, + get_sender, + ram_restrictions +}; + +struct protocol_feature_subjective_restrictions { + time_point earliest_allowed_activation_time; + bool preactivation_required = false; + bool enabled = false; +}; + +struct builtin_protocol_feature_spec { + const char* codename = nullptr; + digest_type description_digest; + flat_set builtin_dependencies; + protocol_feature_subjective_restrictions subjective_restrictions{time_point{}, true, true}; +}; + +extern const std::unordered_map> builtin_protocol_feature_codenames; + +const char* builtin_protocol_feature_codename( builtin_protocol_feature_t ); + +class protocol_feature_base : public fc::reflect_init { +public: + protocol_feature_base() = default; + + protocol_feature_base( protocol_feature_t feature_type, + const digest_type& description_digest, + flat_set&& dependencies, + const protocol_feature_subjective_restrictions& restrictions ); + + void reflector_init(); + + protocol_feature_t get_type()const { return _type; } + +public: + std::string protocol_feature_type; + digest_type description_digest; + flat_set dependencies; + protocol_feature_subjective_restrictions subjective_restrictions; +protected: + protocol_feature_t _type; +}; + +class builtin_protocol_feature : public protocol_feature_base { +public: + static const char* feature_type_string; + + builtin_protocol_feature() = default; + + builtin_protocol_feature( builtin_protocol_feature_t codename, + const digest_type& description_digest, + flat_set&& dependencies, + const protocol_feature_subjective_restrictions& restrictions ); + + void reflector_init(); + + digest_type digest()const; + + builtin_protocol_feature_t get_codename()const { return _codename; } + + friend class protocol_feature_set; + +public: + std::string builtin_feature_codename; +protected: + builtin_protocol_feature_t _codename; +}; + +struct protocol_feature { + digest_type feature_digest; + digest_type description_digest; + flat_set dependencies; + time_point earliest_allowed_activation_time; + bool preactivation_required = false; + bool enabled = false; + optional builtin_feature; + + fc::variant to_variant( bool include_subjective_restrictions = true, + fc::mutable_variant_object* additional_fields = nullptr )const; + + friend bool operator <( const protocol_feature& lhs, const protocol_feature& rhs ) { + return lhs.feature_digest < rhs.feature_digest; + } + + friend bool operator <( const digest_type& lhs, const protocol_feature& rhs ) { + return lhs < rhs.feature_digest; + } + + friend bool operator <( const protocol_feature& lhs, const digest_type& rhs ) { + return lhs.feature_digest < rhs; + } +}; + +class protocol_feature_set { +protected: + using protocol_feature_set_type = std::set< protocol_feature, std::less<> >; + +public: + protocol_feature_set(); + + enum class recognized_t { + unrecognized, + disabled, + too_early, + ready + }; + + recognized_t is_recognized( const digest_type& feature_digest, time_point now )const; + + optional get_builtin_digest( builtin_protocol_feature_t feature_codename )const; + + const 
protocol_feature& get_protocol_feature( const digest_type& feature_digest )const; + + bool validate_dependencies( const digest_type& feature_digest, + const std::function& validator )const; + + static builtin_protocol_feature + make_default_builtin_protocol_feature( + builtin_protocol_feature_t codename, + const std::function& handle_dependency + ); + + const protocol_feature& add_feature( const builtin_protocol_feature& f ); + + class const_iterator : public std::iterator { + protected: + protocol_feature_set_type::const_iterator _itr; + + protected: + explicit const_iterator( protocol_feature_set_type::const_iterator itr ) + :_itr(itr) + {} + + const protocol_feature* get_pointer()const { return &*_itr; } + + friend class protocol_feature_set; + + public: + const_iterator() = default; + + friend bool operator == ( const const_iterator& lhs, const const_iterator& rhs ) { + return (lhs._itr == rhs._itr); + } + + friend bool operator != ( const const_iterator& lhs, const const_iterator& rhs ) { + return (lhs._itr != rhs._itr); + } + + const protocol_feature& operator*()const { + return *get_pointer(); + } + + const protocol_feature* operator->()const { + return get_pointer(); + } + + const_iterator& operator++() { + ++_itr; + return *this; + } + + const_iterator& operator--() { + --_itr; + return *this; + } + + const_iterator operator++(int) { + const_iterator result(*this); + ++(*this); + return result; + } + + const_iterator operator--(int) { + const_iterator result(*this); + --(*this); + return result; + } + }; + + using const_reverse_iterator = std::reverse_iterator; + + const_iterator cbegin()const { return const_iterator( _recognized_protocol_features.cbegin() ); } + const_iterator begin()const { return cbegin(); } + + const_iterator cend()const { return const_iterator( _recognized_protocol_features.cend() ); } + const_iterator end()const { return cend(); } + + const_reverse_iterator crbegin()const { return std::make_reverse_iterator( cend() ); } + const_reverse_iterator rbegin()const { return crbegin(); } + + const_reverse_iterator crend()const { return std::make_reverse_iterator( cbegin() ); } + const_reverse_iterator rend()const { return crend(); } + + bool empty()const { return _recognized_protocol_features.empty(); } + std::size_t size()const { return _recognized_protocol_features.size(); } + std::size_t max_size()const { return _recognized_protocol_features.max_size(); } + + template + const_iterator find( const K& x )const { + return const_iterator( _recognized_protocol_features.find( x ) ); + } + + template + const_iterator lower_bound( const K& x )const { + return const_iterator( _recognized_protocol_features.lower_bound( x ) ); + } + + template + const_iterator upper_bound( const K& x )const { + return const_iterator( _recognized_protocol_features.upper_bound( x ) ); + } + + friend class protocol_feature_manager; + +protected: + protocol_feature_set_type _recognized_protocol_features; + vector _recognized_builtin_protocol_features; +}; + + +class protocol_feature_manager { +public: + + protocol_feature_manager( protocol_feature_set&& pfs ); + + class const_iterator : public std::iterator { + protected: + const protocol_feature_manager* _pfm = nullptr; + std::size_t _index = 0; + + protected: + static constexpr std::size_t end_index = std::numeric_limits::max(); + + explicit const_iterator( const protocol_feature_manager* pfm, std::size_t i = end_index ) + :_pfm(pfm) + ,_index(i) + {} + + const protocol_feature* get_pointer()const; + + friend class protocol_feature_manager; + 
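
A sketch of how a protocol_feature_set might be populated with the helpers declared above; illustrative only, and it assumes each feature's dependencies are added before the feature itself:

    protocol_feature_set pfs;
    auto add_builtin = [&]( builtin_protocol_feature_t codename ) -> digest_type {
       auto f = protocol_feature_set::make_default_builtin_protocol_feature( codename,
          [&]( builtin_protocol_feature_t dependency ) -> digest_type {
             return *pfs.get_builtin_digest( dependency ); // dependency must already be present
          } );
       return pfs.add_feature( f ).feature_digest;
    };
    add_builtin( builtin_protocol_feature_t::preactivate_feature );
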
+ public: + const_iterator() = default; + + friend bool operator == ( const const_iterator& lhs, const const_iterator& rhs ) { + return std::tie( lhs._pfm, lhs._index ) == std::tie( rhs._pfm, rhs._index ); + } + + friend bool operator != ( const const_iterator& lhs, const const_iterator& rhs ) { + return !(lhs == rhs); + } + + uint32_t activation_ordinal()const; + + uint32_t activation_block_num()const; + + const protocol_feature& operator*()const { + return *get_pointer(); + } + + const protocol_feature* operator->()const { + return get_pointer(); + } + + const_iterator& operator++(); + + const_iterator& operator--(); + + const_iterator operator++(int) { + const_iterator result(*this); + ++(*this); + return result; + } + + const_iterator operator--(int) { + const_iterator result(*this); + --(*this); + return result; + } + }; + + friend class const_iterator; + + using const_reverse_iterator = std::reverse_iterator; + + void init( chainbase::database& db ); + + bool is_initialized()const { return _initialized; } + + const protocol_feature_set& get_protocol_feature_set()const { return _protocol_feature_set; } + + optional get_builtin_digest( builtin_protocol_feature_t feature_codename )const { + return _protocol_feature_set.get_builtin_digest( feature_codename ); + } + + // All methods below require is_initialized() as a precondition. + + const_iterator cbegin()const; + const_iterator begin()const { return cbegin(); } + + const_iterator cend()const { return const_iterator( this ); } + const_iterator end()const { return cend(); } + + const_reverse_iterator crbegin()const { return std::make_reverse_iterator( cend() ); } + const_reverse_iterator rbegin()const { return crbegin(); } + + const_reverse_iterator crend()const { return std::make_reverse_iterator( cbegin() ); } + const_reverse_iterator rend()const { return crend(); } + + const_iterator at_activation_ordinal( uint32_t activation_ordinal )const; + + const_iterator lower_bound( uint32_t block_num )const; + + const_iterator upper_bound( uint32_t block_num )const; + + + bool is_builtin_activated( builtin_protocol_feature_t feature_codename, uint32_t current_block_num )const; + + void activate_feature( const digest_type& feature_digest, uint32_t current_block_num ); + void popped_blocks_to( uint32_t block_num ); + +protected: + + struct protocol_feature_entry { + protocol_feature_set::const_iterator iterator_to_protocol_feature; + uint32_t activation_block_num; + }; + + struct builtin_protocol_feature_entry { + static constexpr size_t no_previous = std::numeric_limits::max(); + static constexpr uint32_t not_active = std::numeric_limits::max(); + + size_t previous = no_previous; + uint32_t activation_block_num = not_active; + }; + +protected: + protocol_feature_set _protocol_feature_set; + vector _activated_protocol_features; + vector _builtin_protocol_features; + size_t _head_of_builtin_activation_list = builtin_protocol_feature_entry::no_previous; + bool _initialized = false; +}; + +} } // namespace eosio::chain + +FC_REFLECT(eosio::chain::protocol_feature_subjective_restrictions, + (earliest_allowed_activation_time)(preactivation_required)(enabled)) + +FC_REFLECT(eosio::chain::protocol_feature_base, + (protocol_feature_type)(dependencies)(description_digest)(subjective_restrictions)) + +FC_REFLECT_DERIVED(eosio::chain::builtin_protocol_feature, (eosio::chain::protocol_feature_base), + (builtin_feature_codename)) diff --git a/libraries/chain/include/eosio/chain/protocol_state_object.hpp 
b/libraries/chain/include/eosio/chain/protocol_state_object.hpp new file mode 100644 index 00000000000..dfbb4373a8a --- /dev/null +++ b/libraries/chain/include/eosio/chain/protocol_state_object.hpp @@ -0,0 +1,95 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include +#include +#include +#include +#include "multi_index_includes.hpp" + +namespace eosio { namespace chain { + + /** + * @class protocol_state_object + * @brief Maintains global state information about consensus protocol rules + * @ingroup object + * @ingroup implementation + */ + class protocol_state_object : public chainbase::object + { + OBJECT_CTOR(protocol_state_object, (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)) + + public: + struct activated_protocol_feature { + digest_type feature_digest; + uint32_t activation_block_num = 0; + + activated_protocol_feature() = default; + + activated_protocol_feature( const digest_type& feature_digest, uint32_t activation_block_num ) + :feature_digest( feature_digest ) + ,activation_block_num( activation_block_num ) + {} + + bool operator==(const activated_protocol_feature& rhs) const { + return feature_digest == rhs.feature_digest && activation_block_num == rhs.activation_block_num; + } + }; + + public: + id_type id; + shared_vector activated_protocol_features; + shared_vector preactivated_protocol_features; + whitelisted_intrinsics_type whitelisted_intrinsics; + uint32_t num_supported_key_types = 0; + }; + + using protocol_state_multi_index = chainbase::shared_multi_index_container< + protocol_state_object, + indexed_by< + ordered_unique, + BOOST_MULTI_INDEX_MEMBER(protocol_state_object, protocol_state_object::id_type, id) + > + > + >; + + struct snapshot_protocol_state_object { + vector activated_protocol_features; + vector preactivated_protocol_features; + std::set whitelisted_intrinsics; + uint32_t num_supported_key_types = 0; + }; + + namespace detail { + template<> + struct snapshot_row_traits { + using value_type = protocol_state_object; + using snapshot_type = snapshot_protocol_state_object; + + static snapshot_protocol_state_object to_snapshot_row( const protocol_state_object& value, + const chainbase::database& db ); + + static void from_snapshot_row( snapshot_protocol_state_object&& row, + protocol_state_object& value, + chainbase::database& db ); + }; + } + +}} + +CHAINBASE_SET_INDEX_TYPE(eosio::chain::protocol_state_object, eosio::chain::protocol_state_multi_index) + +FC_REFLECT(eosio::chain::protocol_state_object::activated_protocol_feature, + (feature_digest)(activation_block_num) + ) + +FC_REFLECT(eosio::chain::protocol_state_object, + (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)(num_supported_key_types) + ) + +FC_REFLECT(eosio::chain::snapshot_protocol_state_object, + (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)(num_supported_key_types) + ) diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index 4b0c58beeb0..08d1d1f7c3b 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -12,6 +12,14 @@ namespace eosio { namespace chain { namespace resource_limits { static_assert(std::is_integral::value, "ratios must have integral types"); T numerator; T denominator; + + friend inline bool operator ==( const ratio& lhs, const ratio& rhs ) { + return std::tie(lhs.numerator, 
lhs.denominator) == std::tie(rhs.numerator, rhs.denominator); + } + + friend inline bool operator !=( const ratio& lhs, const ratio& rhs ) { + return !(lhs == rhs); + } }; } @@ -27,6 +35,15 @@ namespace eosio { namespace chain { namespace resource_limits { ratio expand_rate; // the rate at which an uncongested resource expands its limits void validate()const; // throws if the parameters do not satisfy basic sanity checks + + friend inline bool operator ==( const elastic_limit_parameters& lhs, const elastic_limit_parameters& rhs ) { + return std::tie(lhs.target, lhs.max, lhs.periods, lhs.max_multiplier, lhs.contract_rate, lhs.expand_rate) + == std::tie(rhs.target, rhs.max, rhs.periods, rhs.max_multiplier, rhs.contract_rate, rhs.expand_rate); + } + + friend inline bool operator !=( const elastic_limit_parameters& lhs, const elastic_limit_parameters& rhs ) { + return !(lhs == rhs); + } }; struct account_resource_limit { diff --git a/libraries/chain/include/eosio/chain/resource_limits_private.hpp b/libraries/chain/include/eosio/chain/resource_limits_private.hpp index dc5b26008bd..e3ac7346263 100644 --- a/libraries/chain/include/eosio/chain/resource_limits_private.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits_private.hpp @@ -133,8 +133,8 @@ namespace eosio { namespace chain { namespace resource_limits { OBJECT_CTOR(resource_limits_object) id_type id; - account_name owner; - bool pending = false; + account_name owner; //< owner should not be changed within a chainbase modifier lambda + bool pending = false; //< pending should not be changed within a chainbase modifier lambda int64_t net_weight = -1; int64_t cpu_weight = -1; @@ -162,7 +162,7 @@ namespace eosio { namespace chain { namespace resource_limits { OBJECT_CTOR(resource_usage_object) id_type id; - account_name owner; + account_name owner; //< owner should not be changed within a chainbase modifier lambda usage_accumulator net_usage; usage_accumulator cpu_usage; diff --git a/libraries/chain/include/eosio/chain/reversible_block_object.hpp b/libraries/chain/include/eosio/chain/reversible_block_object.hpp index ea9a4c9e122..c493a4915e2 100644 --- a/libraries/chain/include/eosio/chain/reversible_block_object.hpp +++ b/libraries/chain/include/eosio/chain/reversible_block_object.hpp @@ -17,7 +17,7 @@ namespace eosio { namespace chain { OBJECT_CTOR(reversible_block_object,(packedblock) ) id_type id; - uint32_t blocknum = 0; + uint32_t blocknum = 0; //< blocknum should not be changed within a chainbase modifier lambda shared_string packedblock; void set_block( const signed_block_ptr& b ) { @@ -32,6 +32,15 @@ namespace eosio { namespace chain { fc::raw::unpack( ds, *result ); return result; } + + block_id_type get_block_id()const { + fc::datastream ds( packedblock.data(), packedblock.size() ); + block_header h; + fc::raw::unpack( ds, h ); + // Only need the block id to then look up the block state in fork database, so just unpack the block_header from the stored packed data. + // Avoid calling get_block() since that constructs a new signed_block in heap memory and unpacks the full signed_block from the stored packed data. 
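+         // (fc::raw serializes a derived type as its base followed by its own members, so
+         //  unpacking a block_header here consumes exactly the header prefix of the packed
+         //  signed_block and simply ignores the trailing bytes)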
+ return h.id(); + } }; struct by_num; diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index 31b32cbd91f..b3aea3085f5 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -4,6 +4,8 @@ */ #pragma once +#include +#include #include #include #include @@ -11,9 +13,36 @@ namespace eosio { namespace chain { + /** + * Wrapper class for boost asio thread pool and io_context run. + * Also names threads so that tools like htop can see thread name. + */ + class named_thread_pool { + public: + // name_prefix is name appended with -## of thread. + // short name_prefix (6 chars or under) is recommended as console_appender uses 9 chars for thread name + named_thread_pool( std::string name_prefix, size_t num_threads ); + + // calls stop() + ~named_thread_pool(); + + boost::asio::io_context& get_executor() { return _ioc; } + + // destroy work guard, stop io_context, join thread_pool, and stop thread_pool + void stop(); + + private: + using ioc_work_t = boost::asio::executor_work_guard; + + boost::asio::thread_pool _thread_pool; + boost::asio::io_context _ioc; + fc::optional _ioc_work; + }; + + // async on thread_pool and return future template - auto async_thread_pool( boost::asio::thread_pool& thread_pool, F&& f ) { + auto async_thread_pool( boost::asio::io_context& thread_pool, F&& f ) { auto task = std::make_shared>( std::forward( f ) ); boost::asio::post( thread_pool, [task]() { (*task)(); } ); return task->get_future(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 58de120bdd8..0db1be762ff 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -20,33 +20,36 @@ namespace eosio { namespace chain { friend bool operator<( const account_delta& lhs, const account_delta& rhs ) { return lhs.account < rhs.account; } }; - struct base_action_trace { - base_action_trace( const action_receipt& r ):receipt(r){} - base_action_trace(){} - - action_receipt receipt; - action act; - bool context_free = false; - fc::microseconds elapsed; - string console; + struct transaction_trace; + using transaction_trace_ptr = std::shared_ptr; - transaction_id_type trx_id; ///< the transaction that generated this action - uint32_t block_num = 0; - block_timestamp_type block_time; + struct action_trace { + action_trace( const transaction_trace& trace, const action& act, account_name receiver, bool context_free, + uint32_t action_ordinal, uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ); + action_trace( const transaction_trace& trace, action&& act, account_name receiver, bool context_free, + uint32_t action_ordinal, uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ); + action_trace(){} + + fc::unsigned_int action_ordinal; + fc::unsigned_int creator_action_ordinal; + fc::unsigned_int closest_unnotified_ancestor_action_ordinal; + fc::optional receipt; + action_name receiver; + action act; + bool context_free = false; + fc::microseconds elapsed; + string console; + transaction_id_type trx_id; ///< the transaction that generated this action + uint32_t block_num = 0; + block_timestamp_type block_time; fc::optional producer_block_id; flat_set account_ram_deltas; fc::optional except; + fc::optional error_code; }; - struct action_trace : public base_action_trace { - using 
base_action_trace::base_action_trace; - - vector inline_traces; - }; - - struct transaction_trace; - using transaction_trace_ptr = std::shared_ptr; - struct transaction_trace { transaction_id_type id; uint32_t block_num = 0; @@ -56,10 +59,12 @@ namespace eosio { namespace chain { fc::microseconds elapsed; uint64_t net_usage = 0; bool scheduled = false; - vector action_traces; ///< disposable + vector action_traces; + fc::optional account_ram_delta; transaction_trace_ptr failed_dtrx_trace; fc::optional except; + fc::optional error_code; std::exception_ptr except_ptr; }; @@ -68,13 +73,11 @@ namespace eosio { namespace chain { FC_REFLECT( eosio::chain::account_delta, (account)(delta) ) -FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(context_free)(elapsed)(console)(trx_id) - (block_num)(block_time)(producer_block_id)(account_ram_deltas)(except) ) - -FC_REFLECT_DERIVED( eosio::chain::action_trace, - (eosio::chain::base_action_trace), (inline_traces) ) +FC_REFLECT( eosio::chain::action_trace, + (action_ordinal)(creator_action_ordinal)(closest_unnotified_ancestor_action_ordinal)(receipt) + (receiver)(act)(context_free)(elapsed)(console)(trx_id)(block_num)(block_time) + (producer_block_id)(account_ram_deltas)(except)(error_code) ) FC_REFLECT( eosio::chain::transaction_trace, (id)(block_num)(block_time)(producer_block_id) (receipt)(elapsed)(net_usage)(scheduled) - (action_traces)(failed_dtrx_trace)(except) ) + (action_traces)(account_ram_delta)(failed_dtrx_trace)(except)(error_code) ) diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index db61e5b17cb..8f103eb6601 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -9,6 +9,39 @@ namespace eosio { namespace chain { + struct deferred_transaction_generation_context : fc::reflect_init { + static constexpr uint16_t extension_id() { return 0; } + static constexpr bool enforce_unique() { return true; } + + deferred_transaction_generation_context() = default; + + deferred_transaction_generation_context( const transaction_id_type& sender_trx_id, uint128_t sender_id, account_name sender ) + :sender_trx_id( sender_trx_id ) + ,sender_id( sender_id ) + ,sender( sender ) + {} + + void reflector_init(); + + transaction_id_type sender_trx_id; + uint128_t sender_id; + account_name sender; + }; + + namespace detail { + template + struct transaction_extension_types { + using transaction_extensions_t = fc::static_variant< Ts... >; + using decompose_t = decompose< Ts... >; + }; + } + + using transaction_extension_types = detail::transaction_extension_types< + deferred_transaction_generation_context + >; + + using transaction_extensions = transaction_extension_types::transaction_extensions_t; + /** * The transaction header contains the fixed-sized data * associated with each transaction. 
   /**
    *  The transaction header contains the fixed-sized data
    *  associated with each transaction. It is separated from
@@ -66,7 +99,8 @@ namespace eosio { namespace chain {
                           bool allow_duplicate_keys = false) const;

      uint32_t total_actions()const { return context_free_actions.size() + actions.size(); }
-      account_name first_authorizor()const {
+
+      account_name first_authorizer()const {
         for( const auto& a : actions ) {
            for( const auto& u : a.authorization )
               return u.actor;
@@ -74,6 +108,7 @@ namespace eosio { namespace chain {
         return account_name();
      }

+      vector<transaction_extensions> validate_and_extract_extensions()const;
   };

   struct signed_transaction : public transaction
@@ -173,47 +208,11 @@ namespace eosio { namespace chain {

   using packed_transaction_ptr = std::shared_ptr<packed_transaction>;

-   /**
-    *  When a transaction is generated it can be scheduled to occur
-    *  in the future. It may also fail to execute for some reason in
-    *  which case the sender needs to be notified. When the sender
-    *  sends a transaction they will assign it an ID which will be
-    *  passed back to the sender if the transaction fails for some
-    *  reason.
-    */
-   struct deferred_transaction : public signed_transaction
-   {
-      uint128_t      sender_id; /// ID assigned by sender of generated, accessible via WASM api when executing normal or error
-      account_name   sender; /// receives error handler callback
-      account_name   payer;
-      time_point_sec execute_after; /// delayed execution
-
-      deferred_transaction() = default;
-
-      deferred_transaction(uint128_t sender_id, account_name sender, account_name payer, time_point_sec execute_after,
-                           const signed_transaction& txn)
-      : signed_transaction(txn),
-        sender_id(sender_id),
-        sender(sender),
-        payer(payer),
-        execute_after(execute_after)
-      {}
-   };
-
-   struct deferred_reference {
-      deferred_reference(){}
-      deferred_reference( const account_name& sender, const uint128_t& sender_id)
-      :sender(sender),sender_id(sender_id)
-      {}
-
-      account_name sender;
-      uint128_t    sender_id;
-   };
-
   uint128_t transaction_id_to_sender_id( const transaction_id_type& tid );

} } /// namespace eosio::chain

+FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) )
FC_REFLECT( eosio::chain::transaction_header, (expiration)(ref_block_num)(ref_block_prefix)
                                              (max_net_usage_words)(max_cpu_usage_ms)(delay_sec) )
FC_REFLECT_DERIVED( eosio::chain::transaction, (eosio::chain::transaction_header), (context_free_actions)(actions)(transaction_extensions) )
@@ -221,5 +220,3 @@ FC_REFLECT_DERIVED( eosio::chain::signed_transaction, (eosio::chain::transaction
FC_REFLECT_ENUM( eosio::chain::packed_transaction::compression_type, (none)(zlib))
// @ignore unpacked_trx
FC_REFLECT( eosio::chain::packed_transaction, (signatures)(compression)(packed_context_free_data)(packed_trx) )
-FC_REFLECT_DERIVED( eosio::chain::deferred_transaction, (eosio::chain::signed_transaction), (sender_id)(sender)(payer)(execute_after) )
-FC_REFLECT( eosio::chain::deferred_reference, (sender)(sender_id) )
diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp
index b0327dafb18..a799d677144 100644
--- a/libraries/chain/include/eosio/chain/transaction_context.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_context.hpp
@@ -64,15 +64,30 @@ namespace eosio { namespace chain {

         void add_ram_usage( account_name account, int64_t ram_delta );

-         void dispatch_action( action_trace& trace, const action& a, account_name receiver, bool context_free = false, uint32_t recurse_depth = 0 );
-         inline void dispatch_action( action_trace& trace, const action& a, bool context_free = false )
         {
-            dispatch_action(trace, a, a.account, context_free);
-         };
+         action_trace& get_action_trace( uint32_t action_ordinal );
+         const action_trace& get_action_trace( uint32_t action_ordinal )const;
+
+         /** invalidates any action_trace references returned by get_action_trace */
+         uint32_t schedule_action( const action& act, account_name receiver, bool context_free,
+                                   uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal );
+
+         /** invalidates any action_trace references returned by get_action_trace */
+         uint32_t schedule_action( action&& act, account_name receiver, bool context_free,
+                                   uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal );
+
+         /** invalidates any action_trace references returned by get_action_trace */
+         uint32_t schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free,
+                                   uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal );
+
+         void execute_action( uint32_t action_ordinal, uint32_t recurse_depth );
+
         void schedule_transaction();
         void record_transaction( const transaction_id_type& id, fc::time_point_sec expire );

         void validate_cpu_usage_to_bill( int64_t u, bool check_minimum = true )const;

+         void disallow_transaction_extensions( const char* error_msg )const;
+
      /// Fields:
      public:
diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
index 0847159e6de..bba816ab651 100644
--- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
@@ -5,6 +5,7 @@
 #pragma once
 #include
 #include
+#include
 #include

namespace boost { namespace asio {
@@ -53,13 +54,11 @@ class transaction_metadata {

      // must be called from main application thread
      static signing_keys_future_type
-      start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool,
+      start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::io_context& thread_pool,
                          const chain_id_type& chain_id, fc::microseconds time_limit );

      // start_recover_keys must be called first
      recovery_keys_type recover_keys( const chain_id_type& chain_id );
-
-
};

} } // eosio::chain
diff --git a/libraries/chain/include/eosio/chain/transaction_object.hpp b/libraries/chain/include/eosio/chain/transaction_object.hpp
index 50a7eb62cdd..cf87e11b5e9 100644
--- a/libraries/chain/include/eosio/chain/transaction_object.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_object.hpp
@@ -27,7 +27,7 @@ namespace eosio { namespace chain {

         id_type             id;
         time_point_sec      expiration;
-         transaction_id_type trx_id;
+         transaction_id_type trx_id; //< trx_id should not be changed within a chainbase modifier lambda
   };

   struct by_expiration;
diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp
index 21fbf216c43..d6323fbd23d 100644
--- a/libraries/chain/include/eosio/chain/types.hpp
+++ b/libraries/chain/include/eosio/chain/types.hpp
@@ -8,7 +8,7 @@

 #include

-#include
+#include
 #include
 #include
 #include
@@ -95,6 +95,8 @@ namespace eosio { namespace chain {
   using shared_vector = boost::interprocess::vector<T, allocator<T>>;
   template<typename T>
   using shared_set = boost::interprocess::set<T, std::less<T>, allocator<T>>;
+   template<typename K, typename V>
+   using shared_flat_multimap = boost::interprocess::flat_multimap< K, V, std::less<K>, allocator< std::pair<K,V> > >;

   /**
    * For bugs in boost interprocess we moved our blob data to shared_string
@@ -151,7 +153,7 @@
namespace eosio { namespace
chain { { null_object_type = 0, account_object_type, - account_sequence_object_type, + account_metadata_object_type, permission_object_type, permission_usage_object_type, permission_link_object_type, @@ -167,7 +169,7 @@ namespace eosio { namespace chain { block_summary_object_type, transaction_object_type, generated_transaction_object_type, - producer_object_type, + UNUSED_producer_object_type, UNUSED_chain_property_object_type, account_control_history_object_type, ///< Defined by history_plugin UNUSED_account_transaction_history_object_type, @@ -187,11 +189,60 @@ namespace eosio { namespace chain { account_history_object_type, ///< Defined by history_plugin action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, + protocol_state_object_type, + account_ram_correction_object_type, + code_object_type, OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types }; + /** + * Important notes on using chainbase objects in EOSIO code: + * + * There are several constraints that need to be followed when using chainbase objects. + * Some of these constraints are due to the requirements imposed by the chainbase library, + * others are due to requirements to ensure determinism in the EOSIO chain library. + * + * Before listing the constraints, the "restricted field set" must be defined. + * + * Every chainbase object includes a field called id which has the type id_type. + * The id field is always included in the restricted field set. + * + * A field of a chainbase object is considered to be in the restricted field set if it is involved in the + * derivation of the key used for one of the indices in the chainbase multi-index unless its only involvement + * is through being included in composite_keys that end with the id field. + * + * So if the multi-index includes an index like the following + * ``` + * ordered_unique< tag, + * composite_key< generated_transaction_object, + * BOOST_MULTI_INDEX_MEMBER( generated_transaction_object, account_name, sender), + * BOOST_MULTI_INDEX_MEMBER( generated_transaction_object, uint128_t, sender_id) + * > + * > + * ``` + * both `sender` and `sender_id` fields are part of the restricted field set. + * + * On the other hand, an index like the following + * ``` + * ordered_unique< tag, + * composite_key< generated_transaction_object, + * BOOST_MULTI_INDEX_MEMBER( generated_transaction_object, time_point, expiration), + * BOOST_MULTI_INDEX_MEMBER( generated_transaction_object, generated_transaction_object::id_type, id) + * > + * > + * ``` + * would not by itself require the `expiration` field to be part of the restricted field set. + * + * The restrictions on usage of the chainbase objects within this code base are: + * + The chainbase object includes the id field discussed above. + * + The multi-index must include an ordered_unique index tagged with by_id that is based on the id field as the sole key. + * + No other types of indices other than ordered_unique are allowed. + * If an index is desired that does not enforce uniqueness, then use a composite key that ends with the id field. + * + When creating a chainbase object, the constructor lambda should never mutate the id field. + * + When modifying a chainbase object, the modifier lambda should never mutate any fields in the restricted field set. 
+    */
+
    class account_object;
-   class producer_object;

    using block_id_type       = fc::sha256;
    using checksum_type       = fc::sha256;
@@ -207,6 +258,14 @@ namespace eosio { namespace chain {
    using uint128_t           = unsigned __int128;
    using bytes               = vector<char>;

+   struct sha256_less {
+      bool operator()( const fc::sha256& lhs, const fc::sha256& rhs ) const {
+         return
+            std::tie(lhs._hash[0], lhs._hash[1], lhs._hash[2], lhs._hash[3]) <
+            std::tie(rhs._hash[0], rhs._hash[1], rhs._hash[2], rhs._hash[3]);
+      }
+   };
+

   /**
    *  Extensions are prefixed with type and are a buffer that can be
@@ -215,6 +274,104 @@ namespace eosio { namespace chain {
    */
   typedef vector<std::pair<uint16_t,vector<char>>> extensions_type;

+   template<typename Container>
+   class end_insert_iterator : public std::iterator< std::output_iterator_tag, void, void, void, void >
+   {
+   protected:
+      Container* container;
+
+   public:
+      using container_type = Container;
+
+      explicit end_insert_iterator( Container& c )
+      :container(&c)
+      {}
+
+      end_insert_iterator& operator=( typename Container::const_reference value ) {
+         container->insert( container->cend(), value );
+         return *this;
+      }
+
+      end_insert_iterator& operator*() { return *this; }
+      end_insert_iterator& operator++() { return *this; }
+      end_insert_iterator  operator++(int) { return *this; }
+   };
+
+   template<typename Container>
+   inline end_insert_iterator<Container> end_inserter( Container& c ) {
+      return end_insert_iterator<Container>( c );
+   }
+
+   template<typename T>
+   struct enum_hash
+   {
+      static_assert( std::is_enum<T>::value, "enum_hash can only be used on enumeration types" );
+
+      using underlying_type = typename std::underlying_type<T>::type;
+
+      std::size_t operator()(T t) const
+      {
+         return std::hash<underlying_type>{}( static_cast<underlying_type>(t) );
+      }
+   };
+   // enum_hash needed to support old gcc compiler of Ubuntu 16.04
+
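`end_insert_iterator` exists because `std::back_inserter` requires `push_back`, which `flat_set`-style ordered containers lack; inserting with a `cend()` hint is the cheap position when the algorithm writes in sorted order, as in the `std::set_union` and `std::set_difference` call sites later in this diff. A small standalone sketch of the idea, assuming only that `eosio/chain/types.hpp` provides `end_inserter` as declared above:

```
#include <algorithm>
#include <boost/container/flat_set.hpp>
#include <eosio/chain/types.hpp>

int main() {
   boost::container::flat_set<int> a{1, 3, 5};
   boost::container::flat_set<int> b{2, 3, 4};

   boost::container::flat_set<int> u;
   u.reserve( a.size() + b.size() );

   // set_union emits values in sorted order, so inserting with a cend() hint
   // is amortized O(1) per element
   std::set_union( a.begin(), a.end(), b.begin(), b.end(),
                   eosio::chain::end_inserter( u ) );

   return u.size() == 5 ? 0 : 1;   // {1,2,3,4,5}
}
```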
+   namespace detail {
+      struct extract_match {
+         bool enforce_unique = false;
+      };
+
+      template<typename... Ts>
+      struct decompose;
+
+      template<>
+      struct decompose<> {
+         template<typename ResultVariant>
+         static auto extract( uint16_t id, const vector<char>& data, ResultVariant& result )
+         -> fc::optional<extract_match>
+         {
+            return {};
+         }
+      };
+
+      template<typename T, typename... Rest>
+      struct decompose<T, Rest...> {
+         using head_t = T;
+         using tail_t = decompose< Rest... >;
+
+         template<typename ResultVariant>
+         static auto extract( uint16_t id, const vector<char>& data, ResultVariant& result )
+         -> fc::optional<extract_match>
+         {
+            if( id == head_t::extension_id() ) {
+               result = fc::raw::unpack<head_t>( data );
+               return { extract_match{ head_t::enforce_unique() } };
+            }
+
+            return tail_t::template extract<ResultVariant>( id, data, result );
+         }
+      };
+   }
+
+   template<typename E, typename F>
+   static inline auto has_field( F flags, E field )
+   -> std::enable_if_t< std::is_integral<F>::value && std::is_unsigned<F>::value &&
+                        std::is_enum<E>::value && std::is_same< F, std::underlying_type_t<E> >::value, bool>
+   {
+      return ( (flags & static_cast<F>(field)) != 0 );
+   }
+
+   template<typename E, typename F>
+   static inline auto set_field( F flags, E field, bool value = true )
+   -> std::enable_if_t< std::is_integral<F>::value && std::is_unsigned<F>::value &&
+                        std::is_enum<E>::value && std::is_same< F, std::underlying_type_t<E> >::value, F >
+   {
+      if( value )
+         return ( flags | static_cast<F>(field) );
+      else
+         return ( flags & ~static_cast<F>(field) );
+   }
+
} } // eosio::chain

FC_REFLECT( eosio::chain::void_t, )
diff --git a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
index d59e81f9ba7..2d93bad31d8 100644
--- a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
@@ -55,11 +55,6 @@ namespace eosio { namespace chain { namespace wasm_injections {

         // get the next available index that is greater than the last exported function
         static void get_next_indices( Module& module, int& next_function_index, int& next_actual_index ) {
-            int exports = 0;
-            for ( auto exp : module.exports )
-               if ( exp.kind == IR::ObjectKind::function )
-                  exports++;
-
            next_function_index = module.functions.imports.size() + module.functions.defs.size() + registered_injected.size();
            next_actual_index = next_injected_index++;
         }
diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp
index 7e6991996af..341331989e5 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp
@@ -1,5 +1,7 @@
 #pragma once
+#include
 #include
+#include
 #include
 #include "Runtime/Linker.h"
 #include "Runtime/Runtime.h"
@@ -17,31 +19,49 @@ namespace eosio { namespace chain {

   namespace webassembly { namespace common {
      class intrinsics_accessor;

-      struct root_resolver : Runtime::Resolver {
-         //when validating is true; only allow "env" imports. Otherwise allow any imports. This resolver is used
-         //in two cases: once by the generic validating code where we only want "env" to pass; and then second in the
-         //wavm runtime where we need to allow linkage to injected functions
-         root_resolver(bool validating = false) : validating(validating) {}
-         bool validating;
+      class root_resolver : public Runtime::Resolver {
+      public:
+         // The non-default constructor puts root_resolver in a mode where it does validation, i.e. only allows "env" imports.
+         // This mode is used by the generic validating code that runs during setcode, where we only want "env" to pass.
+         // The default constructor is used when no validation is required such as when the wavm runtime needs to
+         // allow linkage to the intrinsics and the injected functions.
+ + root_resolver() {} + + root_resolver( const whitelisted_intrinsics_type& whitelisted_intrinsics ) + :whitelisted_intrinsics(&whitelisted_intrinsics) + {} bool resolve(const string& mod_name, const string& export_name, IR::ObjectType type, - Runtime::ObjectInstance*& out) override { - try { - //protect access to "private" injected functions; so for now just simply allow "env" since injected functions - // are in a different module - if(validating && mod_name != "env") - EOS_ASSERT( false, wasm_exception, "importing from module that is not 'env': ${module}.${export}", ("module",mod_name)("export",export_name) ); - - // Try to resolve an intrinsic first. - if(Runtime::IntrinsicResolver::singleton.resolve(mod_name,export_name,type, out)) { - return true; - } - - EOS_ASSERT( false, wasm_exception, "${module}.${export} unresolveable", ("module",mod_name)("export",export_name) ); - return false; - } FC_CAPTURE_AND_RETHROW( (mod_name)(export_name) ) } + Runtime::ObjectInstance*& out) override + { try { + bool fail = false; + + if( whitelisted_intrinsics != nullptr ) { + // Protect access to "private" injected functions; so for now just simply allow "env" since injected + // functions are in a different module. + EOS_ASSERT( mod_name == "env", wasm_exception, + "importing from module that is not 'env': ${module}.${export}", + ("module",mod_name)("export",export_name) ); + + // Only consider imports that are in the whitelisted set of intrinsics + fail = !is_intrinsic_whitelisted( *whitelisted_intrinsics, export_name ); + } + + // Try to resolve an intrinsic first. + if( !fail && Runtime::IntrinsicResolver::singleton.resolve( mod_name, export_name, type, out ) ) { + return true; + } + + EOS_THROW( wasm_exception, "${module}.${export} unresolveable", + ("module",mod_name)("export",export_name) ); + return false; + } FC_CAPTURE_AND_RETHROW( (mod_name)(export_name) ) } + + protected: + const whitelisted_intrinsics_type* whitelisted_intrinsics = nullptr; }; } } @@ -56,14 +76,23 @@ namespace eosio { namespace chain { wabt }; - wasm_interface(vm_type vm); + wasm_interface(vm_type vm, const chainbase::database& db); ~wasm_interface(); + //call before dtor to skip what can be minutes of dtor overhead with some runtimes; can cause leaks + void indicate_shutting_down(); + //validates code -- does a WASM validation pass and checks the wasm against EOSIO specific constraints static void validate(const controller& control, const bytes& code); + //indicate that a particular code probably won't be used after given block_num + void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num); + + //indicate the current LIB. evicts old cache entries + void current_lib(const uint32_t lib); + //Calls apply or error on a given code - void apply(const digest_type& code_id, const shared_string& code, apply_context& context); + void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); //Immediately exits currently running wasm. 
UB is called when no wasm running
      void exit();

diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
index c3af34d79ea..45f70460b02 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -19,11 +20,24 @@
using namespace fc;
using namespace eosio::chain::webassembly;
using namespace IR;
using namespace Runtime;
+using boost::multi_index_container;

namespace eosio { namespace chain {

   struct wasm_interface_impl {
-      wasm_interface_impl(wasm_interface::vm_type vm) {
+      struct wasm_cache_entry {
+         digest_type                                          code_hash;
+         uint32_t                                             first_block_num_used;
+         uint32_t                                             last_block_num_used;
+         std::unique_ptr<wasm_instantiated_module_interface>  module;
+         uint8_t                                              vm_type = 0;
+         uint8_t                                              vm_version = 0;
+      };
+      struct by_hash;
+      struct by_first_block_num;
+      struct by_last_block_num;
+
+      wasm_interface_impl(wasm_interface::vm_type vm, const chainbase::database& d) : db(d) {
         if(vm == wasm_interface::vm_type::wavm)
            runtime_interface = std::make_unique<webassembly::wavm::wavm_runtime>();
         else if(vm == wasm_interface::vm_type::wabt)
@@ -32,6 +46,14 @@
            EOS_THROW(wasm_exception, "wasm_interface_impl fall through");
      }

+      ~wasm_interface_impl() {
+         if(is_shutting_down)
+            for(wasm_cache_index::iterator it = wasm_instantiation_cache.begin(); it != wasm_instantiation_cache.end(); ++it)
+               wasm_instantiation_cache.modify(it, [](wasm_cache_entry& e) {
+                  e.module.release();
+               });
+      }
+
      std::vector<uint8_t> parse_initial_memory(const Module& module) {
         std::vector<uint8_t> mem_image;
@@ -50,19 +72,49 @@
         return mem_image;
      }

-      std::unique_ptr<wasm_instantiated_module_interface>& get_instantiated_module( const digest_type& code_id,
-                                                                                    const shared_string& code,
-                                                                                    transaction_context& trx_context )
+      void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num) {
+         wasm_cache_index::iterator it = wasm_instantiation_cache.find(boost::make_tuple(code_hash, vm_type, vm_version));
+         if(it != wasm_instantiation_cache.end())
+            wasm_instantiation_cache.modify(it, [block_num](wasm_cache_entry& e) {
+               e.last_block_num_used = block_num;
+            });
+      }
+
+      void current_lib(uint32_t lib) {
+         //anything last used before or on the LIB can be evicted
+         wasm_instantiation_cache.get<by_last_block_num>().erase(wasm_instantiation_cache.get<by_last_block_num>().begin(), wasm_instantiation_cache.get<by_last_block_num>().upper_bound(lib));
+      }
+
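The eviction in `current_lib()` above leans on a secondary ordered index: cache entries are keyed uniquely by (code_hash, vm_type, vm_version), while a non-unique index over `last_block_num_used` allows every entry whose last use is at or below the LIB to be dropped in a single range erase. A minimal standalone model of the same idiom, with simplified types that are not the chain's own:

```
#include <cstdint>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/member.hpp>

struct entry {
   int      id;
   uint32_t last_block_num_used;
};
struct by_last_use;

using cache_t = boost::multi_index_container<
   entry,
   boost::multi_index::indexed_by<
      boost::multi_index::ordered_unique<
         boost::multi_index::member<entry, int, &entry::id> >,
      boost::multi_index::ordered_non_unique<
         boost::multi_index::tag<by_last_use>,
         boost::multi_index::member<entry, uint32_t, &entry::last_block_num_used> >
   >
>;

int main() {
   cache_t cache;
   cache.insert( {1, 90} );
   cache.insert( {2, 100} );
   cache.insert( {3, 110} );

   uint32_t lib = 100;
   auto& idx = cache.get<by_last_use>();
   idx.erase( idx.begin(), idx.upper_bound( lib ) );  // evicts ids 1 and 2

   return cache.size() == 1 ? 0 : 1;
}
```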
+      const std::unique_ptr<wasm_instantiated_module_interface>& get_instantiated_module( const digest_type& code_hash, const uint8_t& vm_type,
+                                                                                          const uint8_t& vm_version, transaction_context& trx_context )
      {
-         auto it = instantiation_cache.find(code_id);
-         if(it == instantiation_cache.end()) {
+         wasm_cache_index::iterator it = wasm_instantiation_cache.find(
+                                             boost::make_tuple(code_hash, vm_type, vm_version) );
+         const code_object* codeobject = nullptr;
+         if(it == wasm_instantiation_cache.end()) {
+            codeobject = &db.get<code_object,by_code_hash>(boost::make_tuple(code_hash, vm_type, vm_version));
+
+            it = wasm_instantiation_cache.emplace( wasm_interface_impl::wasm_cache_entry{
+                                                      .code_hash = code_hash,
+                                                      .first_block_num_used = codeobject->first_block_used,
+                                                      .last_block_num_used = UINT32_MAX,
+                                                      .module = nullptr,
+                                                      .vm_type = vm_type,
+                                                      .vm_version = vm_version
+                                                   } ).first;
+         }
+
+         if(!it->module) {
+            if(!codeobject)
+               codeobject = &db.get<code_object,by_code_hash>(boost::make_tuple(code_hash, vm_type, vm_version));
+
            auto timer_pause = fc::make_scoped_exit([&](){
               trx_context.resume_billing_timer();
            });
            trx_context.pause_billing_timer();
            IR::Module module;
            try {
-               Serialization::MemoryInputStream stream((const U8*)code.data(), code.size());
+               Serialization::MemoryInputStream stream((const U8*)codeobject->code.data(), codeobject->code.size());
               WASM::serialize(stream, module);
               module.userSections.clear();
            } catch(const Serialization::FatalSerializationException& e) {
@@ -84,13 +136,34 @@
            } catch(const IR::ValidationException& e) {
               EOS_ASSERT(false, wasm_serialization_error, e.message.c_str());
            }
-            it = instantiation_cache.emplace(code_id, runtime_interface->instantiate_module((const char*)bytes.data(), bytes.size(), parse_initial_memory(module))).first;
+
+            wasm_instantiation_cache.modify(it, [&](auto& c) {
+               c.module = runtime_interface->instantiate_module((const char*)bytes.data(), bytes.size(), parse_initial_memory(module));
+            });
         }
-         return it->second;
+         return it->module;
      }

+      bool is_shutting_down = false;
      std::unique_ptr<wasm_runtime_interface> runtime_interface;
-      map<digest_type, std::unique_ptr<wasm_instantiated_module_interface>> instantiation_cache;
+
+      typedef boost::multi_index_container<
+         wasm_cache_entry,
+         indexed_by<
+            ordered_unique< tag<by_hash>,
+               composite_key< wasm_cache_entry,
+                  member<wasm_cache_entry, digest_type, &wasm_cache_entry::code_hash>,
+                  member<wasm_cache_entry, uint8_t,     &wasm_cache_entry::vm_type>,
+                  member<wasm_cache_entry, uint8_t,     &wasm_cache_entry::vm_version>
+               >
+            >,
+            ordered_non_unique< tag<by_first_block_num>, member<wasm_cache_entry, uint32_t, &wasm_cache_entry::first_block_num_used> >,
+            ordered_non_unique< tag<by_last_block_num>,  member<wasm_cache_entry, uint32_t, &wasm_cache_entry::last_block_num_used> >
+         >
+      > wasm_cache_index;
+      wasm_cache_index wasm_instantiation_cache;
+
+      const chainbase::database& db;
   };

#define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\
diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp
index f619e318b3f..5bce9db8b40 100644
--- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp
+++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp
@@ -23,14 +23,6 @@ class wavm_runtime : public eosio::chain::wasm_runtime_interface {
      std::unique_ptr<wasm_instantiated_module_interface> instantiate_module(const char* code_bytes, size_t code_size, std::vector<uint8_t> initial_memory) override;

      void immediately_exit_currently_running_module() override;
-
-      struct runtime_guard {
-         runtime_guard();
-         ~runtime_guard();
-      };
-
-   private:
-      std::shared_ptr<runtime_guard> _runtime_guard;
};

//This is a temporary hack for the single threaded implementation
@@ -312,7 +304,12 @@ struct intrinsic_invoker_impl<Ret, std::tuple<>, std::tuple<Translated...>> {
   template<next_method_type Method>
   static native_to_wasm_t<Ret> invoke(Translated... translated) {
-      return convert_native_to_wasm(the_running_instance_context, Method(the_running_instance_context, translated...));
+      try {
+         return convert_native_to_wasm(the_running_instance_context, Method(the_running_instance_context, translated...));
+      }
+      catch(...) {
+         Platform::immediately_exit(std::current_exception());
+      }
   }

   template
@@ -331,7 +328,12 @@ struct intrinsic_invoker_impl<void_type, std::tuple<>, std::tuple<Translated...
   template<next_method_type Method>
   static void invoke(Translated... translated) {
-      Method(the_running_instance_context, translated...);
+      try {
+         Method(the_running_instance_context, translated...);
+      }
+      catch(...) {
+         Platform::immediately_exit(std::current_exception());
+      }
   }
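The catch-all wrappers added above exist because a C++ exception must not unwind through WAVM-generated stack frames; instead, the in-flight exception is captured and the module is exited immediately, with the exception rethrown once control is back on the native side. A standalone model of that capture-and-rethrow pattern; the global and function names here are illustrative stand-ins, not the runtime's API:

```
#include <exception>
#include <stdexcept>
#include <cstdio>

static std::exception_ptr g_pending;   // stands in for the state the real exit path carries

void intrinsic_body() { throw std::runtime_error("intrinsic failed"); }

void invoked_from_wasm() {
   try {
      intrinsic_body();
   }
   catch(...) {
      // capture instead of unwinding through the "wasm" frames;
      // real code: Platform::immediately_exit(std::current_exception());
      g_pending = std::current_exception();
   }
}

int main() {
   invoked_from_wasm();
   if( g_pending ) {
      try { std::rethrow_exception( g_pending ); }              // rethrown on the native side
      catch( const std::exception& e ) { std::puts( e.what() ); }
   }
   return 0;
}
```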
   template

diff --git a/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp
new file mode 100644
index 00000000000..96fbf2e2195
--- /dev/null
+++ b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp
@@ -0,0 +1,27 @@
+
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE
+ */
+#pragma once
+
+#include <eosio/chain/types.hpp>
+
+namespace eosio { namespace chain {
+
+   using whitelisted_intrinsics_type = shared_flat_multimap<uint64_t, shared_string>;
+
+   // TODO: Improve performance by using std::string_view when we switch to C++17.
+
+   bool is_intrinsic_whitelisted( const whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name );
+
+   void add_intrinsic_to_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name );
+
+   void remove_intrinsic_from_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name );
+
+   void reset_intrinsic_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics,
+                                   const std::set<std::string>& s );
+
+   std::set<std::string> convert_intrinsic_whitelist_to_set( const whitelisted_intrinsics_type& whitelisted_intrinsics );
+
+} } // namespace eosio::chain
diff --git a/libraries/chain/protocol_feature_activation.cpp b/libraries/chain/protocol_feature_activation.cpp
new file mode 100644
index 00000000000..b0b7a563073
--- /dev/null
+++ b/libraries/chain/protocol_feature_activation.cpp
@@ -0,0 +1,55 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE
+ */
+
+#include
+#include
+
+#include
+
+namespace eosio { namespace chain {
+
+   void protocol_feature_activation::reflector_init() {
+      static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types,
+                     "protocol_feature_activation expects FC to support reflector_init" );
+
+      EOS_ASSERT( protocol_features.size() > 0, ill_formed_protocol_feature_activation,
+                  "Protocol feature activation extension must have at least one protocol feature digest"
+      );
+
+      set<digest_type> s;
+
+      for( const auto& d : protocol_features ) {
+         auto res = s.insert( d );
+         EOS_ASSERT( res.second, ill_formed_protocol_feature_activation,
+                     "Protocol feature digest ${d} was repeated in the protocol feature activation extension",
+                     ("d", d)
+         );
+      }
+   }
+
+   protocol_feature_activation_set::protocol_feature_activation_set(
+                                       const protocol_feature_activation_set& orig_pfa_set,
+                                       vector<digest_type> additional_features,
+                                       bool enforce_disjoint
+   )
+   {
+      std::sort( additional_features.begin(), additional_features.end() );
+
+      const auto& s1 = orig_pfa_set.protocol_features;
+      const auto& s2 = additional_features;
+
+      auto expected_size = s1.size() + s2.size();
+      protocol_features.reserve( expected_size );
+
+      std::set_union( s1.cbegin(), s1.cend(), s2.cbegin(), s2.cend(), end_inserter( protocol_features ) );
+
+      EOS_ASSERT( !enforce_disjoint || protocol_features.size() == expected_size,
+                  invalid_block_header_extension,
+                  "duplication of protocol feature digests"
+      );
+   }
+
+} } // eosio::chain
diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp
new file mode 100644
index 00000000000..ca0457ce11d
--- /dev/null
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -0,0 +1,672 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE
+ */
+
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+namespace eosio { namespace chain {
+
+   const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec, enum_hash<builtin_protocol_feature_t>>
+   builtin_protocol_feature_codenames =
+      boost::assign::map_list_of
+      (
builtin_protocol_feature_t::preactivate_feature, builtin_protocol_feature_spec{ + "PREACTIVATE_FEATURE", + fc::variant("64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: PREACTIVATE_FEATURE + +Adds privileged intrinsic to enable a contract to pre-activate a protocol feature specified by its digest. +Pre-activated protocol features must be activated in the next block. +*/ + {}, + {time_point{}, false, true} // enabled without preactivation and ready to go at any time + } ) + ( builtin_protocol_feature_t::only_link_to_existing_permission, builtin_protocol_feature_spec{ + "ONLY_LINK_TO_EXISTING_PERMISSION", + fc::variant("f3c3d91c4603cde2397268bfed4e662465293aab10cd9416db0d442b8cec2949").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: ONLY_LINK_TO_EXISTING_PERMISSION + +Disallows linking an action to a non-existing permission. +*/ + {} + } ) + ( builtin_protocol_feature_t::replace_deferred, builtin_protocol_feature_spec{ + "REPLACE_DEFERRED", + fc::variant("9908b3f8413c8474ab2a6be149d3f4f6d0421d37886033f27d4759c47a26d944").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: REPLACE_DEFERRED + +Fix the problems associated with replacing an existing deferred transaction. +Also corrects the RAM usage of accounts affected by the replace deferred transaction bug. +*/ + {} + } ) + ( builtin_protocol_feature_t::no_duplicate_deferred_id, builtin_protocol_feature_spec{ + "NO_DUPLICATE_DEFERRED_ID", + fc::variant("45967387ee92da70171efd9fefd1ca8061b5efe6f124d269cd2468b47f1575a0").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: NO_DUPLICATE_DEFERRED_ID +Depends on: REPLACE_DEFERRED + +Ensures transactions generated by contracts for deferred execution are adjusted to avoid transaction ID conflicts. +Also allows a contract to send a deferred transaction in a manner that enables the contract to know the transaction ID ahead of time. +*/ + {builtin_protocol_feature_t::replace_deferred} + } ) + ( builtin_protocol_feature_t::fix_linkauth_restriction, builtin_protocol_feature_spec{ + "FIX_LINKAUTH_RESTRICTION", + fc::variant("a98241c83511dc86c857221b9372b4aa7cea3aaebc567a48604e1d3db3557050").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: FIX_LINKAUTH_RESTRICTION + +Removes the restriction on eosio::linkauth for non-native actions named one of the five special action names: +updateauth, deleteauth, linkauth, unlinkauth, or canceldelay. +*/ + {} + } ) + ( builtin_protocol_feature_t::disallow_empty_producer_schedule, builtin_protocol_feature_spec{ + "DISALLOW_EMPTY_PRODUCER_SCHEDULE", + fc::variant("2853617cec3eabd41881eb48882e6fc5e81a0db917d375057864b3befbe29acd").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: DISALLOW_EMPTY_PRODUCER_SCHEDULE + +Disallows proposing an empty producer schedule. 
+*/ + {} + } ) + ( builtin_protocol_feature_t::restrict_action_to_self, builtin_protocol_feature_spec{ + "RESTRICT_ACTION_TO_SELF", + fc::variant("e71b6712188391994c78d8c722c1d42c477cf091e5601b5cf1befd05721a57f3").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: RESTRICT_ACTION_TO_SELF + +Disallows bypass of authorization checks by unprivileged contracts when sending inline actions or deferred transactions. +The original protocol rules allow a bypass of authorization checks for actions sent by a contract to itself. +This protocol feature removes that bypass. +*/ + {} + } ) + ( builtin_protocol_feature_t::only_bill_first_authorizer, builtin_protocol_feature_spec{ + "ONLY_BILL_FIRST_AUTHORIZER", + fc::variant("2f1f13e291c79da5a2bbad259ed7c1f2d34f697ea460b14b565ac33b063b73e2").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: ONLY_BILL_FIRST_AUTHORIZER + +Adds CPU and network bandwidth usage to only the first authorizer of a transaction. +*/ + {} + } ) + ( builtin_protocol_feature_t::forward_setcode, builtin_protocol_feature_spec{ + "FORWARD_SETCODE", + fc::variant("898082c59f921d0042e581f00a59d5ceb8be6f1d9c7a45b6f07c0e26eaee0222").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: FORWARD_SETCODE + +Forward eosio::setcode actions to the WebAssembly code deployed on the eosio account. +*/ + {} + } ) + ( builtin_protocol_feature_t::get_sender, builtin_protocol_feature_spec{ + "GET_SENDER", + fc::variant("1eab748b95a2e6f4d7cb42065bdee5566af8efddf01a55a0a8d831b823f8828a").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: GET_SENDER + +Allows contracts to determine which account is the sender of an inline action. +*/ + {} + } ) + ( builtin_protocol_feature_t::ram_restrictions, builtin_protocol_feature_spec{ + "RAM_RESTRICTIONS", + fc::variant("1812fdb5096fd854a4958eb9d53b43219d114de0e858ce00255bd46569ad2c68").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: RAM_RESTRICTIONS + +Modifies the restrictions on operations within actions that increase RAM usage of accounts other than the receiver. + +An unprivileged contract responding to a notification: +is not allowed to schedule a deferred transaction in which the RAM costs are paid by an account other than the receiver; +but is allowed to execute database operations that increase RAM usage of an account other than the receiver as long as +the action's net effect on RAM usage for the account is to not increase it. + +An unprivileged contract executing an action (but not as a response to a notification): +is not allowed to schedule a deferred transaction in which the RAM costs are paid by an account other than the receiver +unless that account authorized the action; +but is allowed to execute database operations that increase RAM usage of an account other than the receiver as long as +either the account authorized the action or the action's net effect on RAM usage for the account is to not increase it. 
+*/ + {} + } ) + ; + + + const char* builtin_protocol_feature_codename( builtin_protocol_feature_t codename ) { + auto itr = builtin_protocol_feature_codenames.find( codename ); + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Unsupported builtin_protocol_feature_t passed to builtin_protocol_feature_codename: ${codename}", + ("codename", static_cast(codename)) ); + + return itr->second.codename; + } + + protocol_feature_base::protocol_feature_base( protocol_feature_t feature_type, + const digest_type& description_digest, + flat_set&& dependencies, + const protocol_feature_subjective_restrictions& restrictions ) + :description_digest( description_digest ) + ,dependencies( std::move(dependencies) ) + ,subjective_restrictions( restrictions ) + ,_type( feature_type ) + { + switch( feature_type ) { + case protocol_feature_t::builtin: + protocol_feature_type = builtin_protocol_feature::feature_type_string; + break; + default: + { + EOS_THROW( protocol_feature_validation_exception, + "Unsupported protocol_feature_t passed to constructor: ${type}", + ("type", static_cast(feature_type)) ); + } + break; + } + } + + void protocol_feature_base::reflector_init() { + static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, + "protocol_feature_activation expects FC to support reflector_init" ); + + if( protocol_feature_type == builtin_protocol_feature::feature_type_string ) { + _type = protocol_feature_t::builtin; + } else { + EOS_THROW( protocol_feature_validation_exception, + "Unsupported protocol feature type: ${type}", ("type", protocol_feature_type) ); + } + } + + const char* builtin_protocol_feature::feature_type_string = "builtin"; + + builtin_protocol_feature::builtin_protocol_feature( builtin_protocol_feature_t codename, + const digest_type& description_digest, + flat_set&& dependencies, + const protocol_feature_subjective_restrictions& restrictions ) + :protocol_feature_base( protocol_feature_t::builtin, description_digest, std::move(dependencies), restrictions ) + ,_codename(codename) + { + auto itr = builtin_protocol_feature_codenames.find( codename ); + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Unsupported builtin_protocol_feature_t passed to constructor: ${codename}", + ("codename", static_cast(codename)) ); + + builtin_feature_codename = itr->second.codename; + } + + void builtin_protocol_feature::reflector_init() { + protocol_feature_base::reflector_init(); + + for( const auto& p : builtin_protocol_feature_codenames ) { + if( builtin_feature_codename.compare( p.second.codename ) == 0 ) { + _codename = p.first; + return; + } + } + + EOS_THROW( protocol_feature_validation_exception, + "Unsupported builtin protocol feature codename: ${codename}", + ("codename", builtin_feature_codename) ); + } + + + digest_type builtin_protocol_feature::digest()const { + digest_type::encoder enc; + fc::raw::pack( enc, _type ); + fc::raw::pack( enc, description_digest ); + fc::raw::pack( enc, dependencies ); + fc::raw::pack( enc, _codename ); + + return enc.result(); + } + + fc::variant protocol_feature::to_variant( bool include_subjective_restrictions, + fc::mutable_variant_object* additional_fields )const + { + EOS_ASSERT( builtin_feature, protocol_feature_exception, "not a builtin protocol feature" ); + + fc::mutable_variant_object mvo; + + mvo( "feature_digest", feature_digest ); + + if( additional_fields ) { + for( const auto& e : *additional_fields ) { + if( 
e.key().compare( "feature_digest" ) != 0 ) + mvo( e.key(), e.value() ); + } + } + + if( include_subjective_restrictions ) { + fc::mutable_variant_object subjective_restrictions; + + subjective_restrictions( "enabled", enabled ); + subjective_restrictions( "preactivation_required", preactivation_required ); + subjective_restrictions( "earliest_allowed_activation_time", earliest_allowed_activation_time ); + + mvo( "subjective_restrictions", std::move( subjective_restrictions ) ); + } + + mvo( "description_digest", description_digest ); + mvo( "dependencies", dependencies ); + mvo( "protocol_feature_type", builtin_protocol_feature::feature_type_string ); + + fc::variants specification; + auto add_to_specification = [&specification]( const char* key_name, auto&& value ) { + fc::mutable_variant_object obj; + obj( "name", key_name ); + obj( "value", std::forward( value ) ); + specification.emplace_back( std::move(obj) ); + }; + + + add_to_specification( "builtin_feature_codename", builtin_protocol_feature_codename( *builtin_feature ) ); + + mvo( "specification", std::move( specification ) ); + + return fc::variant( std::move(mvo) ); + } + + protocol_feature_set::protocol_feature_set() + { + _recognized_builtin_protocol_features.reserve( builtin_protocol_feature_codenames.size() ); + } + + + protocol_feature_set::recognized_t + protocol_feature_set::is_recognized( const digest_type& feature_digest, time_point now )const { + auto itr = _recognized_protocol_features.find( feature_digest ); + + if( itr == _recognized_protocol_features.end() ) + return recognized_t::unrecognized; + + if( !itr->enabled ) + return recognized_t::disabled; + + if( itr->earliest_allowed_activation_time > now ) + return recognized_t::too_early; + + return recognized_t::ready; + } + + optional protocol_feature_set::get_builtin_digest( builtin_protocol_feature_t feature_codename )const { + uint32_t indx = static_cast( feature_codename ); + + if( indx >= _recognized_builtin_protocol_features.size() ) + return {}; + + if( _recognized_builtin_protocol_features[indx] == _recognized_protocol_features.end() ) + return {}; + + return _recognized_builtin_protocol_features[indx]->feature_digest; + } + + const protocol_feature& protocol_feature_set::get_protocol_feature( const digest_type& feature_digest )const { + auto itr = _recognized_protocol_features.find( feature_digest ); + + EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, + "unrecognized protocol feature with digest: ${digest}", + ("digest", feature_digest) + ); + + return *itr; + } + + bool protocol_feature_set::validate_dependencies( + const digest_type& feature_digest, + const std::function& validator + )const { + auto itr = _recognized_protocol_features.find( feature_digest ); + + if( itr == _recognized_protocol_features.end() ) return false; + + for( const auto& d : itr->dependencies ) { + if( !validator(d) ) return false; + } + + return true; + } + + builtin_protocol_feature + protocol_feature_set::make_default_builtin_protocol_feature( + builtin_protocol_feature_t codename, + const std::function& handle_dependency + ) { + auto itr = builtin_protocol_feature_codenames.find( codename ); + + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Unsupported builtin_protocol_feature_t: ${codename}", + ("codename", static_cast(codename)) ); + + flat_set dependencies; + dependencies.reserve( itr->second.builtin_dependencies.size() ); + + for( const auto& d : itr->second.builtin_dependencies ) 
{ + dependencies.insert( handle_dependency( d ) ); + } + + return {itr->first, itr->second.description_digest, std::move(dependencies), itr->second.subjective_restrictions}; + } + + const protocol_feature& protocol_feature_set::add_feature( const builtin_protocol_feature& f ) { + auto builtin_itr = builtin_protocol_feature_codenames.find( f._codename ); + EOS_ASSERT( builtin_itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Builtin protocol feature has unsupported builtin_protocol_feature_t: ${codename}", + ("codename", static_cast( f._codename )) ); + + uint32_t indx = static_cast( f._codename ); + + if( indx < _recognized_builtin_protocol_features.size() ) { + EOS_ASSERT( _recognized_builtin_protocol_features[indx] == _recognized_protocol_features.end(), + protocol_feature_exception, + "builtin protocol feature with codename '${codename}' already added", + ("codename", f.builtin_feature_codename) ); + } + + auto feature_digest = f.digest(); + + const auto& expected_builtin_dependencies = builtin_itr->second.builtin_dependencies; + flat_set satisfied_builtin_dependencies; + satisfied_builtin_dependencies.reserve( expected_builtin_dependencies.size() ); + + for( const auto& d : f.dependencies ) { + auto itr = _recognized_protocol_features.find( d ); + EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, + "builtin protocol feature with codename '${codename}' and digest of ${digest} has a dependency on a protocol feature with digest ${dependency_digest} that is not recognized", + ("codename", f.builtin_feature_codename) + ("digest", feature_digest) + ("dependency_digest", d ) + ); + + if( itr->builtin_feature + && expected_builtin_dependencies.find( *itr->builtin_feature ) + != expected_builtin_dependencies.end() ) + { + satisfied_builtin_dependencies.insert( *itr->builtin_feature ); + } + } + + if( expected_builtin_dependencies.size() > satisfied_builtin_dependencies.size() ) { + flat_set missing_builtins; + missing_builtins.reserve( expected_builtin_dependencies.size() - satisfied_builtin_dependencies.size() ); + std::set_difference( expected_builtin_dependencies.begin(), expected_builtin_dependencies.end(), + satisfied_builtin_dependencies.begin(), satisfied_builtin_dependencies.end(), + end_inserter( missing_builtins ) + ); + + vector missing_builtins_with_names; + missing_builtins_with_names.reserve( missing_builtins.size() ); + for( const auto& builtin_codename : missing_builtins ) { + auto itr = builtin_protocol_feature_codenames.find( builtin_codename ); + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), + protocol_feature_exception, + "Unexpected error" + ); + missing_builtins_with_names.emplace_back( itr->second.codename ); + } + + EOS_THROW( protocol_feature_validation_exception, + "Not all the builtin dependencies of the builtin protocol feature with codename '${codename}' and digest of ${digest} were satisfied.", + ("missing_dependencies", missing_builtins_with_names) + ); + } + + auto res = _recognized_protocol_features.insert( protocol_feature{ + feature_digest, + f.description_digest, + f.dependencies, + f.subjective_restrictions.earliest_allowed_activation_time, + f.subjective_restrictions.preactivation_required, + f.subjective_restrictions.enabled, + f._codename + } ); + + EOS_ASSERT( res.second, protocol_feature_exception, + "builtin protocol feature with codename '${codename}' has a digest of ${digest} but another protocol feature with the same digest has already been added", + 
("codename", f.builtin_feature_codename)("digest", feature_digest) ); + + if( indx >= _recognized_builtin_protocol_features.size() ) { + for( auto i =_recognized_builtin_protocol_features.size(); i <= indx; ++i ) { + _recognized_builtin_protocol_features.push_back( _recognized_protocol_features.end() ); + } + } + + _recognized_builtin_protocol_features[indx] = res.first; + return *res.first; + } + + + + protocol_feature_manager::protocol_feature_manager( protocol_feature_set&& pfs ) + :_protocol_feature_set( std::move(pfs) ) + { + _builtin_protocol_features.resize( _protocol_feature_set._recognized_builtin_protocol_features.size() ); + } + + void protocol_feature_manager::init( chainbase::database& db ) { + EOS_ASSERT( !is_initialized(), protocol_feature_exception, "cannot initialize protocol_feature_manager twice" ); + + + auto reset_initialized = fc::make_scoped_exit( [this]() { _initialized = false; } ); + _initialized = true; + + for( const auto& f : db.get().activated_protocol_features ) { + activate_feature( f.feature_digest, f.activation_block_num ); + } + + reset_initialized.cancel(); + } + + const protocol_feature* protocol_feature_manager::const_iterator::get_pointer()const { + //EOS_ASSERT( _pfm, protocol_feature_iterator_exception, "cannot dereference singular iterator" ); + //EOS_ASSERT( _index != end_index, protocol_feature_iterator_exception, "cannot dereference end iterator" ); + return &*(_pfm->_activated_protocol_features[_index].iterator_to_protocol_feature); + } + + uint32_t protocol_feature_manager::const_iterator::activation_ordinal()const { + EOS_ASSERT( _pfm, + protocol_feature_iterator_exception, + "called activation_ordinal() on singular iterator" + ); + EOS_ASSERT( _index != end_index, + protocol_feature_iterator_exception, + "called activation_ordinal() on end iterator" + ); + + return _index; + } + + uint32_t protocol_feature_manager::const_iterator::activation_block_num()const { + EOS_ASSERT( _pfm, + protocol_feature_iterator_exception, + "called activation_block_num() on singular iterator" + ); + EOS_ASSERT( _index != end_index, + protocol_feature_iterator_exception, + "called activation_block_num() on end iterator" + ); + + return _pfm->_activated_protocol_features[_index].activation_block_num; + } + + protocol_feature_manager::const_iterator& protocol_feature_manager::const_iterator::operator++() { + EOS_ASSERT( _pfm, protocol_feature_iterator_exception, "cannot increment singular iterator" ); + EOS_ASSERT( _index != end_index, protocol_feature_iterator_exception, "cannot increment end iterator" ); + + ++_index; + if( _index >= _pfm->_activated_protocol_features.size() ) { + _index = end_index; + } + + return *this; + } + + protocol_feature_manager::const_iterator& protocol_feature_manager::const_iterator::operator--() { + EOS_ASSERT( _pfm, protocol_feature_iterator_exception, "cannot decrement singular iterator" ); + if( _index == end_index ) { + EOS_ASSERT( _pfm->_activated_protocol_features.size() > 0, + protocol_feature_iterator_exception, + "cannot decrement end iterator when no protocol features have been activated" + ); + _index = _pfm->_activated_protocol_features.size() - 1; + } else { + EOS_ASSERT( _index > 0, + protocol_feature_iterator_exception, + "cannot decrement iterator at the beginning of protocol feature activation list" ) + ; + --_index; + } + return *this; + } + + protocol_feature_manager::const_iterator protocol_feature_manager::cbegin()const { + if( _activated_protocol_features.size() == 0 ) { + return cend(); + } else { + return 
const_iterator( this, 0 );
      }
   }

   protocol_feature_manager::const_iterator
   protocol_feature_manager::at_activation_ordinal( uint32_t activation_ordinal )const {
      if( activation_ordinal >= _activated_protocol_features.size() ) {
         return cend();
      }

      return const_iterator{this, static_cast<std::size_t>(activation_ordinal)};
   }

   protocol_feature_manager::const_iterator
   protocol_feature_manager::lower_bound( uint32_t block_num )const {
      const auto begin = _activated_protocol_features.cbegin();
      const auto end   = _activated_protocol_features.cend();
      auto itr = std::lower_bound( begin, end, block_num, []( const protocol_feature_entry& lhs, uint32_t rhs ) {
         return lhs.activation_block_num < rhs;
      } );

      if( itr == end ) {
         return cend();
      }

      return const_iterator{this, static_cast<std::size_t>(itr - begin)};
   }

   protocol_feature_manager::const_iterator
   protocol_feature_manager::upper_bound( uint32_t block_num )const {
      const auto begin = _activated_protocol_features.cbegin();
      const auto end   = _activated_protocol_features.cend();
      auto itr = std::upper_bound( begin, end, block_num, []( uint32_t lhs, const protocol_feature_entry& rhs ) {
         return lhs < rhs.activation_block_num;
      } );

      if( itr == end ) {
         return cend();
      }

      return const_iterator{this, static_cast<std::size_t>(itr - begin)};
   }

   bool protocol_feature_manager::is_builtin_activated( builtin_protocol_feature_t feature_codename,
                                                        uint32_t current_block_num )const
   {
      uint32_t indx = static_cast<uint32_t>( feature_codename );

      if( indx >= _builtin_protocol_features.size() ) return false;

      return (_builtin_protocol_features[indx].activation_block_num <= current_block_num);
   }

   void protocol_feature_manager::activate_feature( const digest_type& feature_digest,
                                                    uint32_t current_block_num )
   {
      EOS_ASSERT( is_initialized(), protocol_feature_exception, "protocol_feature_manager is not yet initialized" );

      auto itr = _protocol_feature_set.find( feature_digest );

      EOS_ASSERT( itr != _protocol_feature_set.end(), protocol_feature_exception,
                  "unrecognized protocol feature digest: ${digest}", ("digest", feature_digest) );

      if( _activated_protocol_features.size() > 0 ) {
         const auto& last = _activated_protocol_features.back();
         EOS_ASSERT( last.activation_block_num <= current_block_num,
                     protocol_feature_exception,
                     "last protocol feature activation block num is ${last_activation_block_num} yet "
                     "attempting to activate protocol feature with a current block num of ${current_block_num}",
                     ("current_block_num", current_block_num)
                     ("last_activation_block_num", last.activation_block_num)
         );
      }

      EOS_ASSERT( itr->builtin_feature,
                  protocol_feature_exception,
                  "invariant failure: encountered non-builtin protocol feature which is not yet supported"
      );

      uint32_t indx = static_cast<uint32_t>( *itr->builtin_feature );

      EOS_ASSERT( indx < _builtin_protocol_features.size(), protocol_feature_exception,
                  "invariant failure while trying to activate feature with digest '${digest}': "
                  "unsupported builtin_protocol_feature_t ${codename}",
                  ("digest", feature_digest)
                  ("codename", indx)
      );

      EOS_ASSERT( _builtin_protocol_features[indx].activation_block_num == builtin_protocol_feature_entry::not_active,
                  protocol_feature_exception,
                  "cannot activate already activated builtin feature with digest: ${digest}",
                  ("digest", feature_digest)
      );

      _activated_protocol_features.push_back( protocol_feature_entry{itr, current_block_num} );
_builtin_protocol_features[indx].previous = _head_of_builtin_activation_list; + _builtin_protocol_features[indx].activation_block_num = current_block_num; + _head_of_builtin_activation_list = indx; + } + + void protocol_feature_manager::popped_blocks_to( uint32_t block_num ) { + EOS_ASSERT( is_initialized(), protocol_feature_exception, "protocol_feature_manager is not yet initialized" ); + + while( _head_of_builtin_activation_list != builtin_protocol_feature_entry::no_previous ) { + auto& e = _builtin_protocol_features[_head_of_builtin_activation_list]; + if( e.activation_block_num <= block_num ) break; + + _head_of_builtin_activation_list = e.previous; + e.previous = builtin_protocol_feature_entry::no_previous; + e.activation_block_num = builtin_protocol_feature_entry::not_active; + } + + while( _activated_protocol_features.size() > 0 + && block_num < _activated_protocol_features.back().activation_block_num ) + { + _activated_protocol_features.pop_back(); + } + } + +} } // eosio::chain diff --git a/libraries/chain/protocol_state_object.cpp b/libraries/chain/protocol_state_object.cpp new file mode 100644 index 00000000000..7009ee57874 --- /dev/null +++ b/libraries/chain/protocol_state_object.cpp @@ -0,0 +1,58 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include + +namespace eosio { namespace chain { + + namespace detail { + + snapshot_protocol_state_object + snapshot_row_traits::to_snapshot_row( const protocol_state_object& value, + const chainbase::database& db ) + { + snapshot_protocol_state_object res; + + res.activated_protocol_features.reserve( value.activated_protocol_features.size() ); + for( const auto& v : value.activated_protocol_features ) { + res.activated_protocol_features.emplace_back( v ); + } + + res.preactivated_protocol_features.reserve( value.preactivated_protocol_features.size() ); + for( const auto& v : value.preactivated_protocol_features ) { + res.preactivated_protocol_features.emplace_back( v ); + } + + res.whitelisted_intrinsics = convert_intrinsic_whitelist_to_set( value.whitelisted_intrinsics ); + + res.num_supported_key_types = value.num_supported_key_types; + + return res; + } + + void + snapshot_row_traits::from_snapshot_row( snapshot_protocol_state_object&& row, + protocol_state_object& value, + chainbase::database& db ) + { + value.activated_protocol_features.clear(); + value.activated_protocol_features.reserve( row.activated_protocol_features.size() ); + for( const auto& v : row.activated_protocol_features ) { + value.activated_protocol_features.emplace_back( v ); + } + + value.preactivated_protocol_features.clear(); + value.preactivated_protocol_features.reserve( row.preactivated_protocol_features.size() ); + for( const auto& v : row.preactivated_protocol_features ) { + value.preactivated_protocol_features.emplace_back( v ); + } + + reset_intrinsic_whitelist( value.whitelisted_intrinsics, row.whitelisted_intrinsics ); + + value.num_supported_key_types = row.num_supported_key_types; + } + + } + +}} diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 43ced268542..d98ca79fbc8 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -102,6 +102,8 @@ void resource_limits_manager::set_block_parameters(const elastic_limit_parameter cpu_limit_parameters.validate(); net_limit_parameters.validate(); const auto& config = _db.get(); + if( config.cpu_limit_parameters == cpu_limit_parameters && config.net_limit_parameters == net_limit_parameters ) + return; _db.modify(config, 
[&](resource_limits_config_object& c){ c.cpu_limit_parameters = cpu_limit_parameters; c.net_limit_parameters = net_limit_parameters; diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp new file mode 100644 index 00000000000..1d8a2707c14 --- /dev/null +++ b/libraries/chain/thread_utils.cpp @@ -0,0 +1,40 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ + +#include +#include + +namespace eosio { namespace chain { + + +// +// named_thread_pool +// +named_thread_pool::named_thread_pool( std::string name_prefix, size_t num_threads ) +: _thread_pool( num_threads ) +{ + _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) ); + for( size_t i = 0; i < num_threads; ++i ) { + boost::asio::post( _thread_pool, [&ioc = _ioc, name_prefix, i]() { + std::string tn = name_prefix + "-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc.run(); + } ); + } +} + +named_thread_pool::~named_thread_pool() { + stop(); +} + +void named_thread_pool::stop() { + _ioc_work.reset(); + _ioc.stop(); + _thread_pool.join(); + _thread_pool.stop(); +} + + +} } // eosio::chain \ No newline at end of file diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp new file mode 100644 index 00000000000..44a7ba94cfd --- /dev/null +++ b/libraries/chain/trace.cpp @@ -0,0 +1,41 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include + +namespace eosio { namespace chain { + +action_trace::action_trace( + const transaction_trace& trace, const action& act, account_name receiver, bool context_free, + uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal +) +:action_ordinal( action_ordinal ) +,creator_action_ordinal( creator_action_ordinal ) +,closest_unnotified_ancestor_action_ordinal( closest_unnotified_ancestor_action_ordinal ) +,receiver( receiver ) +,act( act ) +,context_free( context_free ) +,trx_id( trace.id ) +,block_num( trace.block_num ) +,block_time( trace.block_time ) +,producer_block_id( trace.producer_block_id ) +{} + +action_trace::action_trace( + const transaction_trace& trace, action&& act, account_name receiver, bool context_free, + uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal +) +:action_ordinal( action_ordinal ) +,creator_action_ordinal( creator_action_ordinal ) +,closest_unnotified_ancestor_action_ordinal( closest_unnotified_ancestor_action_ordinal ) +,receiver( receiver ) +,act( std::move(act) ) +,context_free( context_free ) +,trx_id( trace.id ) +,block_num( trace.block_num ) +,block_time( trace.block_time ) +,producer_block_id( trace.producer_block_id ) +{} + +} } // eosio::chain diff --git a/libraries/chain/transaction.cpp b/libraries/chain/transaction.cpp index e1910ce02eb..1ebdfeccc01 100644 --- a/libraries/chain/transaction.cpp +++ b/libraries/chain/transaction.cpp @@ -50,6 +50,16 @@ typedef multi_index_container< > > recovery_cache_type; +void deferred_transaction_generation_context::reflector_init() { + static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, + "deferred_transaction_generation_context expects FC to support reflector_init" ); + + + EOS_ASSERT( sender != account_name(), ill_formed_deferred_transaction_generation_context, + "Deferred transaction generation context extension must have a non-empty sender account", + ); +} + void transaction_header::set_reference_block( const block_id_type& reference_block ) { ref_block_num = fc::endian_reverse_u32(reference_block._hash[0]); 
ref_block_prefix = reference_block._hash[1]; @@ -134,6 +144,45 @@ fc::microseconds transaction::get_signature_keys( const vector& return sig_cpu_usage; } FC_CAPTURE_AND_RETHROW() } +vector transaction::validate_and_extract_extensions()const { + using transaction_extensions_t = transaction_extension_types::transaction_extensions_t; + using decompose_t = transaction_extension_types::decompose_t; + + static_assert( std::is_same::value, + "transaction_extensions is not setup as expected" ); + + vector results; + + uint16_t id_type_lower_bound = 0; + + for( size_t i = 0; i < transaction_extensions.size(); ++i ) { + const auto& e = transaction_extensions[i]; + auto id = e.first; + + EOS_ASSERT( id >= id_type_lower_bound, invalid_transaction_extension, + "Transaction extensions are not in the correct order (ascending id types required)" + ); + + results.emplace_back(); + + auto match = decompose_t::extract( id, e.second, results.back() ); + EOS_ASSERT( match, invalid_transaction_extension, + "Transaction extension with id type ${id} is not supported", + ("id", id) + ); + + if( match->enforce_unique ) { + EOS_ASSERT( i == 0 || id > id_type_lower_bound, invalid_transaction_extension, + "Transaction extension with id type ${id} is not allowed to repeat", + ("id", id) + ); + } + + id_type_lower_bound = id; + } + + return results; +} const signature_type& signed_transaction::sign(const private_key_type& key, const chain_id_type& chain_id) { signatures.push_back(key.sign(sig_digest(chain_id, context_free_data))); diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index e2c8cd56c3b..8ec0d36e918 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -163,11 +163,18 @@ namespace bacc = boost::accumulators; undo_session = c.mutable_db().start_undo_session(true); } trace->id = id; - trace->block_num = c.pending_block_state()->block_num; + trace->block_num = c.head_block_num() + 1; trace->block_time = c.pending_block_time(); trace->producer_block_id = c.pending_producer_block_id(); executed.reserve( trx.total_actions() ); - EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, "we don't support any extensions yet" ); + } + + void transaction_context::disallow_transaction_extensions( const char* error_msg )const { + if( control.is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, error_msg ); + } else { + EOS_THROW( disallowed_transaction_extensions_bad_block_exception, error_msg ); + } } void transaction_context::init(uint64_t initial_net_usage) @@ -219,9 +226,13 @@ namespace bacc = boost::accumulators; validate_cpu_usage_to_bill( billed_cpu_time_us, false ); // Fail early if the amount to be billed is too high // Record accounts to be billed for network and CPU usage - for( const auto& act : trx.actions ) { - for( const auto& auth : act.authorization ) { - bill_to_accounts.insert( auth.actor ); + if( control.is_builtin_activated(builtin_protocol_feature_t::only_bill_first_authorizer) ) { + bill_to_accounts.insert( trx.first_authorizer() ); + } else { + for( const auto& act : trx.actions ) { + for( const auto& auth : act.authorization ) { + bill_to_accounts.insert( auth.actor ); + } } } validate_ram_usage.reserve( bill_to_accounts.size() ); @@ -279,6 +290,10 @@ namespace bacc = boost::accumulators; void transaction_context::init_for_implicit_trx( uint64_t initial_net_usage ) { + if( trx.transaction_extensions.size() > 0 ) { + disallow_transaction_extensions( "no transaction 
extensions supported yet for implicit transactions" ); + } + published = control.pending_block_time(); init( initial_net_usage); } @@ -287,6 +302,10 @@ namespace bacc = boost::accumulators; uint64_t packed_trx_prunable_size, bool skip_recording ) { + if( trx.transaction_extensions.size() > 0 ) { + disallow_transaction_extensions( "no transaction extensions supported yet for input transactions" ); + } + const auto& cfg = control.get_global_properties().configuration; uint64_t discounted_size_for_pruned_data = packed_trx_prunable_size; @@ -323,6 +342,14 @@ namespace bacc = boost::accumulators; void transaction_context::init_for_deferred_trx( fc::time_point p ) { + if( (trx.expiration.sec_since_epoch() != 0) && (trx.transaction_extensions.size() > 0) ) { + disallow_transaction_extensions( "no transaction extensions supported yet for deferred transactions" ); + } + // If (trx.expiration.sec_since_epoch() == 0) then it was created after NO_DUPLICATE_DEFERRED_ID activation, + // and so validation of its extensions was done either in: + // * apply_context::schedule_deferred_transaction for contract-generated transactions; + // * or transaction_context::init_for_input_trx for delayed input transactions. + published = p; trace->scheduled = true; apply_context_free = false; @@ -334,17 +361,23 @@ namespace bacc = boost::accumulators; if( apply_context_free ) { for( const auto& act : trx.context_free_actions ) { - trace->action_traces.emplace_back(); - dispatch_action( trace->action_traces.back(), act, true ); + schedule_action( act, act.account, true, 0, 0 ); } } if( delay == fc::microseconds() ) { for( const auto& act : trx.actions ) { - trace->action_traces.emplace_back(); - dispatch_action( trace->action_traces.back(), act ); + schedule_action( act, act.account, false, 0, 0 ); } - } else { + } + + auto& action_traces = trace->action_traces; + uint32_t num_original_actions_to_execute = action_traces.size(); + for( uint32_t i = 1; i <= num_original_actions_to_execute; ++i ) { + execute_action( i, 0 ); + } + + if( delay != fc::microseconds() ) { schedule_transaction(); } } @@ -567,14 +600,75 @@ namespace bacc = boost::accumulators; return std::make_tuple(account_net_limit, account_cpu_limit, greylisted_net, greylisted_cpu); } - void transaction_context::dispatch_action( action_trace& trace, const action& a, account_name receiver, bool context_free, uint32_t recurse_depth ) { - apply_context acontext( control, *this, a, recurse_depth ); - acontext.context_free = context_free; - acontext.receiver = receiver; + action_trace& transaction_context::get_action_trace( uint32_t action_ordinal ) { + EOS_ASSERT( 0 < action_ordinal && action_ordinal <= trace->action_traces.size() , + transaction_exception, + "action_ordinal ${ordinal} is outside allowed range [1,${max}]", + ("ordinal", action_ordinal)("max", trace->action_traces.size()) + ); + return trace->action_traces[action_ordinal-1]; + } - acontext.exec( trace ); + const action_trace& transaction_context::get_action_trace( uint32_t action_ordinal )const { + EOS_ASSERT( 0 < action_ordinal && action_ordinal <= trace->action_traces.size() , + transaction_exception, + "action_ordinal ${ordinal} is outside allowed range [1,${max}]", + ("ordinal", action_ordinal)("max", trace->action_traces.size()) + ); + return trace->action_traces[action_ordinal-1]; } + uint32_t transaction_context::schedule_action( const action& act, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ) + { + 
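All three transaction_context init paths funnel unexpected extensions through disallow_transaction_extensions, which picks the exception type by context: while producing, rejecting the transaction is a subjective call; during validation or replay the same condition condemns the whole block. The split in miniature (exception types illustrative, not the project's):

    #include <stdexcept>

    struct subjective_production_error : std::runtime_error
    { using std::runtime_error::runtime_error; };
    struct bad_block_error : std::runtime_error
    { using std::runtime_error::runtime_error; };

    void disallow_extensions( bool is_producing_block, const char* msg ) {
       if( is_producing_block )
          throw subjective_production_error( msg );   // producer just drops the trx
       throw bad_block_error( msg );                  // validator rejects the block
    }
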
uint32_t new_action_ordinal = trace->action_traces.size() + 1; + + trace->action_traces.emplace_back( *trace, act, receiver, context_free, + new_action_ordinal, creator_action_ordinal, + closest_unnotified_ancestor_action_ordinal ); + + return new_action_ordinal; + } + + uint32_t transaction_context::schedule_action( action&& act, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ) + { + uint32_t new_action_ordinal = trace->action_traces.size() + 1; + + trace->action_traces.emplace_back( *trace, std::move(act), receiver, context_free, + new_action_ordinal, creator_action_ordinal, + closest_unnotified_ancestor_action_ordinal ); + + return new_action_ordinal; + } + + uint32_t transaction_context::schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ) + { + uint32_t new_action_ordinal = trace->action_traces.size() + 1; + + trace->action_traces.reserve( new_action_ordinal ); + + const action& provided_action = get_action_trace( action_ordinal ).act; + + // The reserve above is required so that the emplace_back below does not invalidate the provided_action reference. + + trace->action_traces.emplace_back( *trace, provided_action, receiver, context_free, + new_action_ordinal, creator_action_ordinal, + closest_unnotified_ancestor_action_ordinal ); + + return new_action_ordinal; + } + + void transaction_context::execute_action( uint32_t action_ordinal, uint32_t recurse_depth ) { + apply_context acontext( control, *this, action_ordinal, recurse_depth ); + acontext.exec(); + } + + void transaction_context::schedule_transaction() { // Charge ahead of time for the additional net usage needed to retire the delayed transaction // whether that be by successfully executing, soft failure, hard failure, or expiration. @@ -584,7 +678,7 @@ namespace bacc = boost::accumulators; + static_cast(config::transaction_id_net_usage) ); // Will exit early if net usage cannot be payed. 
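The comment in the third schedule_action overload above is worth dwelling on: provided_action refers into the very vector the subsequent emplace_back grows, so without the reserve a reallocation could leave the reference dangling mid-copy. The invariant in miniature:

    #include <string>
    #include <vector>

    std::vector<std::string> traces{ "alpha", "beta" };

    void append_copy_of( std::size_t index ) {
       traces.reserve( traces.size() + 1 );       // capacity secured first...
       const std::string& prior = traces[index];
       traces.emplace_back( prior );              // ...so no reallocation can
                                                  // invalidate 'prior' mid-copy
    }
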
} - auto first_auth = trx.first_authorizor(); + auto first_auth = trx.first_authorizer(); uint32_t trx_size = 0; const auto& cgto = control.mutable_db().create( [&]( auto& gto ) { @@ -598,7 +692,9 @@ namespace bacc = boost::accumulators; trx_size = gto.set( trx ); }); - add_ram_usage( cgto.payer, (config::billable_size_v + trx_size) ); + int64_t ram_delta = (config::billable_size_v + trx_size); + add_ram_usage( cgto.payer, ram_delta ); + trace->account_ram_delta = account_delta( cgto.payer, ram_delta ); } void transaction_context::record_transaction( const transaction_id_type& id, fc::time_point_sec expire ) { diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 9c33121a5a6..ddcbd2d934e 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -26,7 +26,7 @@ recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chai } signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, + boost::asio::io_context& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index df0ce578b0e..6ad6a9cb131 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -28,7 +29,7 @@ namespace eosio { namespace chain { using namespace webassembly; using namespace webassembly::common; - wasm_interface::wasm_interface(vm_type vm) : my( new wasm_interface_impl(vm) ) {} + wasm_interface::wasm_interface(vm_type vm, const chainbase::database& d) : my( new wasm_interface_impl(vm, d) ) {} wasm_interface::~wasm_interface() {} @@ -46,7 +47,9 @@ namespace eosio { namespace chain { wasm_validations::wasm_binary_validation validator(control, module); validator.validate(); - root_resolver resolver(true); + const auto& pso = control.db().get(); + + root_resolver resolver( pso.whitelisted_intrinsics ); LinkResult link_result = linkModule(module, resolver); //there are a couple opportunties for improvement here-- @@ -54,8 +57,20 @@ namespace eosio { namespace chain { //Hard: Kick off instantiation in a separate thread at this location } - void wasm_interface::apply( const digest_type& code_id, const shared_string& code, apply_context& context ) { - my->get_instantiated_module(code_id, code, context.trx_context)->apply(context); + void wasm_interface::indicate_shutting_down() { + my->is_shutting_down = true; + } + + void wasm_interface::code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num) { + my->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + } + + void wasm_interface::current_lib(const uint32_t lib) { + my->current_lib(lib); + } + + void wasm_interface::apply( const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context ) { + my->get_instantiated_module(code_hash, vm_type, vm_version, context.trx_context)->apply(context); } void wasm_interface::exit() { @@ -74,9 +89,8 @@ class context_aware_api { context_aware_api(apply_context& ctx, bool context_free = false ) :context(ctx) { - if( context.context_free ) + if( context.is_context_free() ) EOS_ASSERT( context_free, unaccessible_api, "only context free api's can be used in this context" ); - context.used_context_free_api 
|= !context_free; } void checktime() { @@ -93,7 +107,7 @@ class context_free_api : public context_aware_api { context_free_api( apply_context& ctx ) :context_aware_api(ctx, true) { /* the context_free_data is not available during normal application because it is prunable */ - EOS_ASSERT( context.context_free, unaccessible_api, "this API may only be called from context_free apply" ); + EOS_ASSERT( context.is_context_free(), unaccessible_api, "this API may only be called from context_free apply" ); } int get_context_free_data( uint32_t index, array_ptr buffer, size_t buffer_size )const { @@ -106,7 +120,7 @@ class privileged_api : public context_aware_api { privileged_api( apply_context& ctx ) :context_aware_api(ctx) { - EOS_ASSERT( context.privileged, unaccessible_api, "${code} does not have permission to call this API", ("code",context.receiver) ); + EOS_ASSERT( context.is_privileged(), unaccessible_api, "${code} does not have permission to call this API", ("code",context.get_receiver()) ); } /** @@ -130,6 +144,15 @@ class privileged_api : public context_aware_api { EOS_ASSERT( false, unsupported_feature, "Unsupported Hardfork Detected" ); } + /** + * Pre-activates the specified protocol feature. + * Fails if the feature is unrecognized, disabled, or not allowed to be activated at the current time. + * Also fails if the feature was already activated or pre-activated. + */ + void preactivate_feature( const digest_type& feature_digest ) { + context.control.preactivate_feature( feature_digest ); + } + /** * update the resource limits associated with an account. Note these new values will not take effect until the * next resource "tick" which is currently defined as a cycle boundary inside a block. @@ -156,6 +179,13 @@ class privileged_api : public context_aware_api { datastream ds( packed_producer_schedule, datalen ); vector producers; fc::raw::unpack(ds, producers); + EOS_ASSERT( producers.size() > 0 + || !context.control.is_builtin_activated( + builtin_protocol_feature_t::disallow_empty_producer_schedule + ), + wasm_execution_error, + "Producer schedule cannot be empty" + ); EOS_ASSERT(producers.size() <= config::max_producers, wasm_execution_error, "Producer schedule exceeds the maximum producer count for this chain"); // check that producers are unique std::set unique_producers; @@ -194,13 +224,13 @@ class privileged_api : public context_aware_api { } bool is_privileged( account_name n )const { - return context.db.get( n ).privileged; + return context.db.get( n ).is_privileged(); } void set_privileged( account_name n, bool is_priv ) { - const auto& a = context.db.get( n ); + const auto& a = context.db.get( n ); context.db.modify( a, [&]( auto& ma ){ - ma.privileged = is_priv; + ma.set_privileged( is_priv ); }); } @@ -903,6 +933,16 @@ class system_api : public context_aware_api { return static_cast( context.trx_context.published.time_since_epoch().count() ); } + /** + * Returns true if the specified protocol feature is activated, false if not. 
+ */ + bool is_feature_activated( const digest_type& feature_digest ) { + return context.control.is_protocol_feature_activated( feature_digest ); + } + + name get_sender() { + return context.get_sender(); + } }; constexpr size_t max_assert_message = 1024; @@ -935,8 +975,23 @@ class context_free_system_api : public context_aware_api { void eosio_assert_code( bool condition, uint64_t error_code ) { if( BOOST_UNLIKELY( !condition ) ) { - EOS_THROW( eosio_assert_code_exception, - "assertion failure with error code: ${error_code}", ("error_code", error_code) ); + if( error_code >= static_cast(system_error_code::generic_system_error) ) { + restricted_error_code_exception e( FC_LOG_MESSAGE( + error, + "eosio_assert_code called with reserved error code: ${error_code}", + ("error_code", error_code) + ) ); + e.error_code = static_cast(system_error_code::contract_restricted_error_code); + throw e; + } else { + eosio_assert_code_exception e( FC_LOG_MESSAGE( + error, + "assertion failure with error code: ${error_code}", + ("error_code", error_code) + ) ); + e.error_code = error_code; + throw e; + } } } @@ -952,21 +1007,21 @@ class action_api : public context_aware_api { :context_aware_api(ctx,true){} int read_action_data(array_ptr memory, size_t buffer_size) { - auto s = context.act.data.size(); + auto s = context.get_action().data.size(); if( buffer_size == 0 ) return s; auto copy_size = std::min( buffer_size, s ); - memcpy( memory, context.act.data.data(), copy_size ); + memcpy( memory, context.get_action().data.data(), copy_size ); return copy_size; } int action_data_size() { - return context.act.data.size(); + return context.get_action().data.size(); } name current_receiver() { - return context.receiver; + return context.get_receiver(); } }; @@ -979,7 +1034,7 @@ class console_api : public context_aware_api { // Kept as intrinsic rather than implementing on WASM side (using prints_l and strlen) because strlen is faster on native side. 
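The rewritten eosio_assert_code above partitions the 64-bit code space: values in the reserved system range are collapsed to a single restricted code so a contract cannot impersonate system-generated errors, while ordinary codes pass through on the exception's error_code field. A sketch, assuming the reserved range starts at the top bit (the actual boundaries come from system_error_code):

    #include <cstdint>

    constexpr uint64_t generic_system_error           = 1ull << 63;               // assumed
    constexpr uint64_t contract_restricted_error_code = generic_system_error + 1; // assumed

    uint64_t effective_error_code( uint64_t contract_code ) {
       return contract_code >= generic_system_error
                 ? contract_restricted_error_code   // collapse the reserved range
                 : contract_code;                   // contract codes pass through
    }
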
void prints(null_terminated_ptr str) { if ( !ignore ) { - context.console_append(str); + context.console_append( static_cast(str) ); } } @@ -991,13 +1046,17 @@ class console_api : public context_aware_api { void printi(int64_t val) { if ( !ignore ) { - context.console_append(val); + std::ostringstream oss; + oss << val; + context.console_append( oss.str() ); } } void printui(uint64_t val) { if ( !ignore ) { - context.console_append(val); + std::ostringstream oss; + oss << val; + context.console_append( oss.str() ); } } @@ -1013,11 +1072,13 @@ class console_api : public context_aware_api { fc::uint128_t v(val_magnitude>>64, static_cast(val_magnitude) ); + string s; if( is_negative ) { - context.console_append("-"); + s += '-'; } + s += fc::variant(v).get_string(); - context.console_append(fc::variant(v).get_string()); + context.console_append( s ); } } @@ -1031,26 +1092,22 @@ class console_api : public context_aware_api { void printsf( float val ) { if ( !ignore ) { // Assumes float representation on native side is the same as on the WASM side - auto& console = context.get_console_stream(); - auto orig_prec = console.precision(); - - console.precision( std::numeric_limits::digits10 ); - context.console_append(val); - - console.precision( orig_prec ); + std::ostringstream oss; + oss.setf( std::ios::scientific, std::ios::floatfield ); + oss.precision( std::numeric_limits::digits10 ); + oss << val; + context.console_append( oss.str() ); } } void printdf( double val ) { if ( !ignore ) { // Assumes double representation on native side is the same as on the WASM side - auto& console = context.get_console_stream(); - auto orig_prec = console.precision(); - - console.precision( std::numeric_limits::digits10 ); - context.console_append(val); - - console.precision( orig_prec ); + std::ostringstream oss; + oss.setf( std::ios::scientific, std::ios::floatfield ); + oss.precision( std::numeric_limits::digits10 ); + oss << val; + context.console_append( oss.str() ); } } @@ -1068,23 +1125,23 @@ class console_api : public context_aware_api { */ if ( !ignore ) { - auto& console = context.get_console_stream(); - auto orig_prec = console.precision(); + std::ostringstream oss; + oss.setf( std::ios::scientific, std::ios::floatfield ); #ifdef __x86_64__ - console.precision( std::numeric_limits::digits10 ); + oss.precision( std::numeric_limits::digits10 ); extFloat80_t val_approx; f128M_to_extF80M(&val, &val_approx); #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" - context.console_append( *(long double*)(&val_approx) ); + oss << *(long double*)(&val_approx); #pragma GCC diagnostic pop #else - console.precision( std::numeric_limits::digits10 ); + oss.precision( std::numeric_limits::digits10 ); double val_approx = from_softfloat64( f128M_to_f64(&val) ); - context.console_append(val_approx); + oss << val_approx; #endif - console.precision( orig_prec ); + context.console_append( oss.str() ); } } @@ -1700,6 +1757,7 @@ REGISTER_INTRINSICS(privileged_api, (set_blockchain_parameters_packed, void(int,int) ) (is_privileged, int(int64_t) ) (set_privileged, void(int64_t, int) ) + (preactivate_feature, void(int) ) ); REGISTER_INJECTED_INTRINSICS(transaction_context, @@ -1776,8 +1834,10 @@ REGISTER_INTRINSICS(permission_api, REGISTER_INTRINSICS(system_api, - (current_time, int64_t() ) - (publication_time, int64_t() ) + (current_time, int64_t() ) + (publication_time, int64_t() ) + (is_feature_activated, int(int) ) + (get_sender, int64_t() ) ); REGISTER_INTRINSICS(context_free_system_api, diff 
--git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index 2d45fa4ee01..a23919e0ec6 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -28,7 +28,7 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { continue; _initial_globals.emplace_back(_env->GetGlobal(i), _env->GetGlobal(i)->typed_value); } - + if(_env->GetMemoryCount()) _initial_memory_configuration = _env->GetMemory(0)->page_limits; } @@ -50,9 +50,9 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { memcpy(memory->data.data(), _initial_memory.data(), _initial_memory.size()); } - _params[0].set_i64(uint64_t(context.receiver)); - _params[1].set_i64(uint64_t(context.act.account)); - _params[2].set_i64(uint64_t(context.act.name)); + _params[0].set_i64(uint64_t(context.get_receiver())); + _params[1].set_i64(uint64_t(context.get_action().account)); + _params[2].set_i64(uint64_t(context.get_action().name)); ExecResult res = _executor.RunStartFunction(_instatiated_module); EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt start function failure (${s})", ("s", ResultToString(res.result)) ); @@ -92,7 +92,7 @@ std::unique_ptr wabt_runtime::instantiate_mo wabt::Result res = ReadBinaryInterp(env.get(), code_bytes, code_size, read_binary_options, &errors, &instantiated_module); EOS_ASSERT( Succeeded(res), wasm_execution_error, "Error building wabt interp: ${e}", ("e", wabt::FormatErrorsToString(errors, Location::Type::Binary)) ); - + return std::make_unique(std::move(env), initial_memory, instantiated_module); } diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp index e614398c74e..e006d237c5c 100644 --- a/libraries/chain/webassembly/wavm.cpp +++ b/libraries/chain/webassembly/wavm.cpp @@ -12,7 +12,8 @@ #include "Runtime/Linker.h" #include "Runtime/Intrinsics.h" -#include +#include +#include using namespace IR; using namespace Runtime; @@ -21,18 +22,61 @@ namespace eosio { namespace chain { namespace webassembly { namespace wavm { running_instance_context the_running_instance_context; +namespace detail { +struct wavm_runtime_initializer { + wavm_runtime_initializer() { + Runtime::init(); + } +}; + +using live_module_ref = std::list::iterator; + +struct wavm_live_modules { + live_module_ref add_live_module(ModuleInstance* module_instance) { + return live_modules.insert(live_modules.begin(), asObject(module_instance)); + } + + void remove_live_module(live_module_ref it) { + live_modules.erase(it); + run_wavm_garbage_collection(); + } + + void run_wavm_garbage_collection() { + //need to pass in a mutable list of root objects we want the garbage collector to retain + std::vector root; + std::copy(live_modules.begin(), live_modules.end(), std::back_inserter(root)); + Runtime::freeUnreferencedObjects(std::move(root)); + } + + std::list live_modules; +}; + +static wavm_live_modules the_wavm_live_modules; + +} + class wavm_instantiated_module : public wasm_instantiated_module_interface { public: wavm_instantiated_module(ModuleInstance* instance, std::unique_ptr module, std::vector initial_mem) : _initial_memory(initial_mem), _instance(instance), - _module(std::move(module)) - {} + _module_ref(detail::the_wavm_live_modules.add_live_module(instance)) + { + //The memory instance is reused across all wavm_instantiated_modules, but for wasm instances + // that didn't declare "memory", getDefaultMemory() won't see it. 
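detail::wavm_live_modules above replaces the old shared runtime_guard: every instantiated module registers itself in a list, and removal triggers a collection in which WAVM frees whatever is unreachable from the remaining entries, because the list itself is handed over as the root set. A sketch of that flow; Runtime::Object and freeUnreferencedObjects are WAVM's, the rest is illustrative:

    #include <list>
    #include <vector>

    std::list<Runtime::Object*> live_modules;

    void release_module( std::list<Runtime::Object*>::iterator ref ) {
       live_modules.erase( ref );
       // Everything unreachable from the surviving modules is freed.
       std::vector<Runtime::Object*> roots( live_modules.begin(), live_modules.end() );
       Runtime::freeUnreferencedObjects( std::move( roots ) );
    }
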
It would also be possible + // to say something like if(module->memories.size()) here I believe + if(getDefaultMemory(_instance)) + _initial_memory_config = module->memories.defs.at(0).type; + } + + ~wavm_instantiated_module() { + detail::the_wavm_live_modules.remove_live_module(_module_ref); + } void apply(apply_context& context) override { - vector args = {Value(uint64_t(context.receiver)), - Value(uint64_t(context.act.account)), - Value(uint64_t(context.act.name))}; + vector args = {Value(uint64_t(context.get_receiver())), + Value(uint64_t(context.get_action().account)), + Value(uint64_t(context.get_action().name))}; call("apply", args, context); } @@ -52,7 +96,7 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface { if(default_mem) { //reset memory resizes the sandbox'ed memory to the module's init memory size and then // (effectively) memzeros it all - resetMemory(default_mem, _module->memories.defs[0].type); + resetMemory(default_mem, _initial_memory_config); char* memstart = &memoryRef(getDefaultMemory(_instance), 0); memcpy(memstart, _initial_memory.data(), _initial_memory.size()); @@ -78,31 +122,12 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface { //naked pointer because ModuleInstance is opaque //_instance is deleted via WAVM's object garbage collection when wavm_rutime is deleted ModuleInstance* _instance; - std::unique_ptr _module; + detail::live_module_ref _module_ref; + MemoryType _initial_memory_config; }; - -wavm_runtime::runtime_guard::runtime_guard() { - // TODO clean this up - //check_wasm_opcode_dispositions(); - Runtime::init(); -} - -wavm_runtime::runtime_guard::~runtime_guard() { - Runtime::freeUnreferencedObjects({}); -} - -static weak_ptr __runtime_guard_ptr; -static std::mutex __runtime_guard_lock; - wavm_runtime::wavm_runtime() { - std::lock_guard l(__runtime_guard_lock); - if (__runtime_guard_ptr.use_count() == 0) { - _runtime_guard = std::make_shared(); - __runtime_guard_ptr = _runtime_guard; - } else { - _runtime_guard = __runtime_guard_ptr.lock(); - } + static detail::wavm_runtime_initializer the_wavm_runtime_initializer; } wavm_runtime::~wavm_runtime() { @@ -131,7 +156,7 @@ void wavm_runtime::immediately_exit_currently_running_module() { #ifdef _WIN32 throw wasm_exit(); #else - Platform::immediately_exit(); + Platform::immediately_exit(nullptr); #endif } diff --git a/libraries/chain/whitelisted_intrinsics.cpp b/libraries/chain/whitelisted_intrinsics.cpp new file mode 100644 index 00000000000..6a4756bf502 --- /dev/null +++ b/libraries/chain/whitelisted_intrinsics.cpp @@ -0,0 +1,108 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include +#include + +namespace eosio { namespace chain { + + template + bool find_intrinsic_helper( uint64_t h, const std::string& name, Iterator& itr, const Iterator& end ) { + for( ; itr != end && itr->first == h; ++itr ) { + if( itr->second.compare( 0, itr->second.size(), name.c_str(), name.size() ) == 0 ) { + return true; + } + } + + return false; + } + + whitelisted_intrinsics_type::iterator + find_intrinsic( whitelisted_intrinsics_type& whitelisted_intrinsics, uint64_t h, const std::string& name ) + { + auto itr = whitelisted_intrinsics.lower_bound( h ); + const auto end = whitelisted_intrinsics.end(); + + if( !find_intrinsic_helper( h, name, itr, end ) ) + return end; + + return itr; + } + + whitelisted_intrinsics_type::const_iterator + find_intrinsic( const whitelisted_intrinsics_type& whitelisted_intrinsics, uint64_t h, const std::string& name ) + { + auto 
itr = whitelisted_intrinsics.lower_bound( h ); + const auto end = whitelisted_intrinsics.end(); + + if( !find_intrinsic_helper( h, name, itr, end ) ) + return end; + + return itr; + } + + bool is_intrinsic_whitelisted( const whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ) + { + uint64_t h = static_cast( std::hash{}( name ) ); + auto itr = whitelisted_intrinsics.lower_bound( h ); + const auto end = whitelisted_intrinsics.end(); + + return find_intrinsic_helper( h, name, itr, end ); + } + + + void add_intrinsic_to_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ) + { + uint64_t h = static_cast( std::hash{}( name ) ); + auto itr = find_intrinsic( whitelisted_intrinsics, h, name ); + EOS_ASSERT( itr == whitelisted_intrinsics.end(), database_exception, + "cannot add intrinsic '${name}' since it already exists in the whitelist", + ("name", name) + ); + + whitelisted_intrinsics.emplace( std::piecewise_construct, + std::forward_as_tuple( h ), + std::forward_as_tuple( name.c_str(), name.size(), + whitelisted_intrinsics.get_allocator() ) + ); + } + + void remove_intrinsic_from_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ) + { + uint64_t h = static_cast( std::hash{}( name ) ); + auto itr = find_intrinsic( whitelisted_intrinsics, h, name ); + EOS_ASSERT( itr != whitelisted_intrinsics.end(), database_exception, + "cannot remove intrinsic '${name}' since it does not exist in the whitelist", + ("name", name) + ); + + whitelisted_intrinsics.erase( itr ); + } + + void reset_intrinsic_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, + const std::set& s ) + { + whitelisted_intrinsics.clear(); + + for( const auto& name : s ) { + uint64_t h = static_cast( std::hash{}( name ) ); + whitelisted_intrinsics.emplace( std::piecewise_construct, + std::forward_as_tuple( h ), + std::forward_as_tuple( name.c_str(), name.size(), + whitelisted_intrinsics.get_allocator() ) + ); + } + } + + std::set convert_intrinsic_whitelist_to_set( const whitelisted_intrinsics_type& whitelisted_intrinsics ) { + std::set s; + + for( const auto& p : whitelisted_intrinsics ) { + s.emplace( p.second.c_str(), p.second.size() ); + } + + return s; + } + +} } diff --git a/libraries/chainbase b/libraries/chainbase index 02c1ea29133..b769749d533 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 02c1ea2913358959a26036779b512432f036946e +Subproject commit b769749d53303ec1a037d483c631d02268cbf012 diff --git a/libraries/fc b/libraries/fc index f0ca2761421..6593ef120a7 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit f0ca276142159206e0dcdff096bd0a1548114a7a +Subproject commit 6593ef120a79d97e3274a986acc84b3741a15de9 diff --git a/libraries/softfloat b/libraries/softfloat index 203b6df7ded..138dda49cea 160000 --- a/libraries/softfloat +++ b/libraries/softfloat @@ -1 +1 @@ -Subproject commit 203b6df7dedc5bae1b2a7b1b23562335a6344578 +Subproject commit 138dda49cead84a93d052a241807694f5d1b0750 diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 4328bda7ee8..cda857ff2c2 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -56,6 +56,14 @@ namespace boost { namespace test_tools { namespace tt_detail { } } } namespace eosio { namespace testing { + enum class setup_policy { + none, + old_bios_only, + preactivate_feature_only, + 
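Back to whitelisted_intrinsics.cpp above: the whitelist is a multimap in shared memory keyed by the 64-bit std::hash of the intrinsic name, so every lookup walks the equal range and compares full names to survive hash collisions. The same logic over plain std types:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <string>

    using whitelist_t = std::multimap<uint64_t, std::string>;

    bool is_whitelisted( const whitelist_t& w, const std::string& name ) {
       const uint64_t h = std::hash<std::string>{}( name );
       for( auto itr = w.lower_bound( h ); itr != w.end() && itr->first == h; ++itr )
          if( itr->second == name )      // resolve collisions by full compare
             return true;
       return false;
    }
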
preactivate_feature_and_new_bios, + full + }; + std::vector read_wasm( const char* fn ); std::vector read_abi( const char* fn ); std::string read_wast( const char* fn ); @@ -67,6 +75,10 @@ namespace eosio { namespace testing { bool expect_assert_message(const fc::exception& ex, string expected); + using subjective_restriction_map = std::map; + + protocol_feature_set make_protocol_feature_set(const subjective_restriction_map& custom_subjective_restrictions = {}); + /** * @class tester * @brief provides utility function to simplify the creation of unit tests @@ -82,15 +94,18 @@ namespace eosio { namespace testing { virtual ~base_tester() {}; - void init(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE); + void init(const setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE); void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); + void init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot = nullptr); + void execute_setup_policy(const setup_policy policy); void close(); - void open( const snapshot_reader_ptr& snapshot ); + void open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot); + void open( const snapshot_reader_ptr& snapshot); bool is_same_chain( base_tester& other ); - virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; - virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; + virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; + virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; virtual signed_block_ptr finish_block() = 0; void produce_blocks( uint32_t n = 1, bool empty = false ); void produce_blocks_until_end_of_round(); @@ -111,7 +126,7 @@ namespace eosio { namespace testing { vector get_scheduled_transactions() const; transaction_trace_ptr push_transaction( packed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); - transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); + transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US, bool no_throw = false ); action_result push_action(action&& cert_act, uint64_t authorizer); // TODO/QUESTION: Is this needed? 
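The setup_policy ladder above (implemented by execute_setup_policy later in this diff) lets a test halt the bootstrap at any stage: none is bare genesis, old_bios_only installs the pre-PREACTIVATE_FEATURE bios, preactivate_feature_only additionally activates PREACTIVATE_FEATURE, preactivate_feature_and_new_bios then swaps in the bios that carries the activate action, and full finishes by activating every remaining builtin feature. A plausible usage sketch (the feature chosen is illustrative):

    // Stop after PREACTIVATE_FEATURE plus the new bios, then opt in to exactly
    // one further builtin feature instead of all of them.
    tester t( setup_policy::preactivate_feature_and_new_bios );
    const auto& pfm = t.control->get_protocol_feature_manager();
    auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_bill_first_authorizer );
    t.preactivate_protocol_features( { *d } );
    t.produce_block();   // the preactivated feature activates in this block
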
transaction_trace_ptr push_action( const account_name& code, @@ -152,7 +167,8 @@ namespace eosio { namespace testing { return traces; } - void push_genesis_block(); + void set_before_preactivate_bios_contract(); + void set_bios_contract(); vector get_producer_keys( const vector& producer_names )const; transaction_trace_ptr set_producers(const vector& producer_names); @@ -280,8 +296,12 @@ namespace eosio { namespace testing { return cfg; } + void schedule_protocol_features_wo_preactivation(const vector feature_digests); + void preactivate_protocol_features(const vector feature_digests); + void preactivate_all_builtin_protocol_features(); + protected: - signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); + signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false ); void _start_block(fc::time_point block_time); signed_block_ptr _finish_block(); @@ -296,25 +316,31 @@ namespace eosio { namespace testing { controller::config cfg; map chain_transactions; map last_produced_block; + public: + vector protocol_features_to_be_activated_wo_preactivation; }; class tester : public base_tester { public: - tester(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE ) { - init(push_genesis, read_mode); + tester(setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE) { + init(policy, read_mode); } tester(controller::config config) { init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { - return _produce_block(skip_time, false, skip_flag); + tester(controller::config config, protocol_feature_set&& pfs) { + init(config, std::move(pfs)); + } + + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - return _produce_block(skip_time, true, skip_flag); + return _produce_block(skip_time, true); } signed_block_ptr finish_block()override { @@ -327,6 +353,10 @@ namespace eosio { namespace testing { class validating_tester : public base_tester { public: virtual ~validating_tester() { + if( !validating_node ) { + elog( "~validating_tester() called with empty validating_node; likely in the middle of failure" ); + return; + } try { if( num_blocks_to_producer_before_shutdown > 0 ) produce_blocks( num_blocks_to_producer_before_shutdown ); @@ -366,11 +396,11 @@ namespace eosio { namespace testing { vcfg.trusted_producers = trusted_producers; - validating_node = std::make_unique(vcfg); + validating_node = std::make_unique(vcfg, make_protocol_feature_set()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); - init(true); + init(); } validating_tester(controller::config config) { @@ -381,23 +411,23 @@ namespace eosio { namespace testing { vcfg.blocks_dir = vcfg.blocks_dir.parent_path() / std::string("v_").append( vcfg.blocks_dir.filename().generic_string() ); vcfg.state_dir = vcfg.state_dir.parent_path() / std::string("v_").append( vcfg.state_dir.filename().generic_string() 
); - validating_node = std::make_unique(vcfg); + validating_node = std::make_unique(vcfg, make_protocol_feature_set()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { - auto sb = _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + auto sb = _produce_block(skip_time, false); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); return sb; } - signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ ) { - return _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) { + return _produce_block(skip_time, false); } void validate_push_block(const signed_block_ptr& sb) { @@ -405,9 +435,9 @@ namespace eosio { namespace testing { validating_node->push_block( bs ); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - auto sb = _produce_block(skip_time, true, skip_flag | 2); + auto sb = _produce_block(skip_time, true); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); @@ -431,7 +461,7 @@ namespace eosio { namespace testing { hbh.producer == vn_hbh.producer; validating_node.reset(); - validating_node = std::make_unique(vcfg); + validating_node = std::make_unique(vcfg, make_protocol_feature_set()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 63a0788931f..a9a442d8380 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -79,11 +79,50 @@ namespace eosio { namespace testing { memcpy( data.data(), obj.value.data(), obj.value.size() ); } + protocol_feature_set make_protocol_feature_set(const subjective_restriction_map& custom_subjective_restrictions) { + protocol_feature_set pfs; + + map< builtin_protocol_feature_t, optional > visited_builtins; + + std::function add_builtins = + [&pfs, &visited_builtins, &add_builtins, &custom_subjective_restrictions] + ( builtin_protocol_feature_t codename ) -> digest_type { + auto res = visited_builtins.emplace( codename, optional() ); + if( !res.second ) { + EOS_ASSERT( res.first->second, protocol_feature_exception, + "invariant failure: cycle found in builtin protocol feature dependencies" + ); + return *res.first->second; + } + + auto f = protocol_feature_set::make_default_builtin_protocol_feature( codename, + [&add_builtins]( builtin_protocol_feature_t d ) { + return add_builtins( d ); + } ); + + const auto itr = custom_subjective_restrictions.find(codename); + if( itr != custom_subjective_restrictions.end() ) { + f.subjective_restrictions = itr->second; + } + + const auto& pf = pfs.add_feature( f ); + res.first->second = pf.feature_digest; + + return pf.feature_digest; + }; + + for( const auto& p : 
builtin_protocol_feature_codenames ) { + add_builtins( p.first ); + } + + return pfs; + } + bool base_tester::is_same_chain( base_tester& other ) { return control->head_block_id() == other.control->head_block_id(); } - void base_tester::init(bool push_genesis, db_read_mode read_mode) { + void base_tester::init(const setup_policy policy, db_read_mode read_mode) { cfg.blocks_dir = tempdir.path() / config::default_blocks_dir_name; cfg.state_dir = tempdir.path() / config::default_state_dir_name; cfg.state_size = 1024*1024*8; @@ -104,26 +143,69 @@ namespace eosio { namespace testing { } open(nullptr); - - if (push_genesis) - push_genesis_block(); + execute_setup_policy(policy); } - void base_tester::init(controller::config config, const snapshot_reader_ptr& snapshot) { cfg = config; open(snapshot); } + void base_tester::init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot) { + cfg = config; + open(std::move(pfs), snapshot); + } + + void base_tester::execute_setup_policy(const setup_policy policy) { + const auto& pfm = control->get_protocol_feature_manager(); + + auto schedule_preactivate_protocol_feature = [&]() { + auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + FC_ASSERT( preactivate_feature_digest, "PREACTIVATE_FEATURE not found" ); + schedule_protocol_features_wo_preactivation( { *preactivate_feature_digest } ); + }; + + switch (policy) { + case setup_policy::old_bios_only: { + set_before_preactivate_bios_contract(); + break; + } + case setup_policy::preactivate_feature_only: { + schedule_preactivate_protocol_feature(); + produce_block(); // block production is required to activate protocol feature + break; + } + case setup_policy::preactivate_feature_and_new_bios: { + schedule_preactivate_protocol_feature(); + produce_block(); + set_bios_contract(); + break; + } + case setup_policy::full: { + schedule_preactivate_protocol_feature(); + produce_block(); + set_bios_contract(); + preactivate_all_builtin_protocol_features(); + produce_block(); + break; + } + case setup_policy::none: + default: + break; + }; + } void base_tester::close() { control.reset(); chain_transactions.clear(); } + void base_tester::open( const snapshot_reader_ptr& snapshot ) { + open( make_protocol_feature_set(), snapshot ); + } - void base_tester::open( const snapshot_reader_ptr& snapshot) { - control.reset( new controller(cfg) ); + void base_tester::open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot ) { + control.reset( new controller(cfg, std::move(pfs)) ); control->add_indices(); control->startup( []() { return false; }, snapshot); chain_transactions.clear(); @@ -154,12 +236,12 @@ namespace eosio { namespace testing { return b; } - signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs, uint32_t skip_flag) { + signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs) { auto head = control->head_block_state(); auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; - if( !control->pending_block_state() || control->pending_block_state()->header.timestamp != next_time ) { + if( !control->is_building_block() || control->pending_block_time() != next_time ) { _start_block( next_time ); } @@ -200,11 +282,30 @@ namespace eosio { namespace testing { } control->abort_block(); - control->start_block( block_time, head_block_number - last_produced_block_num ); + + vector feature_to_be_activated; + // 
First add protocol features to be activated WITHOUT preactivation + feature_to_be_activated.insert( + feature_to_be_activated.end(), + protocol_features_to_be_activated_wo_preactivation.begin(), + protocol_features_to_be_activated_wo_preactivation.end() + ); + // Then add protocol features to be activated WITH preactivation + const auto preactivated_protocol_features = control->get_preactivated_protocol_features(); + feature_to_be_activated.insert( + feature_to_be_activated.end(), + preactivated_protocol_features.begin(), + preactivated_protocol_features.end() + ); + + control->start_block( block_time, head_block_number - last_produced_block_num, feature_to_be_activated ); + + // Clear the list, if start block finishes successfuly, the protocol features should be assumed to be activated + protocol_features_to_be_activated_wo_preactivation.clear(); } signed_block_ptr base_tester::_finish_block() { - FC_ASSERT( control->pending_block_state(), "must first start a block before it can be finished" ); + FC_ASSERT( control->is_building_block(), "must first start a block before it can be finished" ); auto producer = control->head_block_state()->get_scheduled_producer( control->pending_block_time() ); private_key_type priv_key; @@ -217,10 +318,9 @@ namespace eosio { namespace testing { priv_key = private_key_itr->second; } - control->finalize_block(); - control->sign_block( [&]( digest_type d ) { + control->finalize_block( [&]( digest_type d ) { return priv_key.sign(d); - }); + } ); control->commit_block(); last_produced_block[control->head_block_state()->header.producer] = control->head_block_state()->id; @@ -344,7 +444,7 @@ namespace eosio { namespace testing { uint32_t billed_cpu_time_us ) { try { - if( !control->pending_block_state() ) + if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); auto mtrx = std::make_shared( std::make_shared(trx) ); @@ -360,10 +460,11 @@ namespace eosio { namespace testing { transaction_trace_ptr base_tester::push_transaction( signed_transaction& trx, fc::time_point deadline, - uint32_t billed_cpu_time_us + uint32_t billed_cpu_time_us, + bool no_throw ) { try { - if( !control->pending_block_state() ) + if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); auto c = packed_transaction::none; @@ -377,6 +478,7 @@ namespace eosio { namespace testing { auto mtrx = std::make_shared(trx, c); transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); + if (no_throw) return r; if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; return r; @@ -838,11 +940,17 @@ namespace eosio { namespace testing { sync_dbs(other, *this); } - void base_tester::push_genesis_block() { + void base_tester::set_before_preactivate_bios_contract() { + set_code(config::system_account_name, contracts::before_preactivate_eosio_bios_wasm()); + set_abi(config::system_account_name, contracts::before_preactivate_eosio_bios_abi().data()); + } + + void base_tester::set_bios_contract() { set_code(config::system_account_name, contracts::eosio_bios_wasm()); set_abi(config::system_account_name, contracts::eosio_bios_abi().data()); } + vector base_tester::get_producer_keys( const vector& producer_names )const { // Create producer schedule vector schedule; @@ -865,6 +973,58 @@ namespace eosio { namespace 
testing { return tid; } + void base_tester::schedule_protocol_features_wo_preactivation(const vector feature_digests) { + protocol_features_to_be_activated_wo_preactivation.insert( + protocol_features_to_be_activated_wo_preactivation.end(), + feature_digests.begin(), + feature_digests.end() + ); + } + + void base_tester::preactivate_protocol_features(const vector feature_digests) { + for( const auto& feature_digest: feature_digests ) { + push_action( config::system_account_name, N(activate), config::system_account_name, + fc::mutable_variant_object()("feature_digest", feature_digest) ); + } + } + + void base_tester::preactivate_all_builtin_protocol_features() { + const auto& pfm = control->get_protocol_feature_manager(); + const auto& pfs = pfm.get_protocol_feature_set(); + const auto current_block_num = control->head_block_num() + (control->is_building_block() ? 1 : 0); + const auto current_block_time = ( control->is_building_block() ? control->pending_block_time() + : control->head_block_time() + fc::milliseconds(config::block_interval_ms) ); + + set preactivation_set; + vector preactivations; + + std::function add_digests = + [&pfm, &pfs, current_block_num, current_block_time, &preactivation_set, &preactivations, &add_digests] + ( const digest_type& feature_digest ) { + const auto& pf = pfs.get_protocol_feature( feature_digest ); + FC_ASSERT( pf.builtin_feature, "called add_digests on a non-builtin protocol feature" ); + if( !pf.enabled || pf.earliest_allowed_activation_time > current_block_time + || pfm.is_builtin_activated( *pf.builtin_feature, current_block_num ) ) return; + + auto res = preactivation_set.emplace( feature_digest ); + if( !res.second ) return; + + for( const auto& dependency : pf.dependencies ) { + add_digests( dependency ); + } + + preactivations.emplace_back( feature_digest ); + }; + + for( const auto& f : builtin_protocol_feature_codenames ) { + auto digest = pfs.get_builtin_digest( f.first ); + if( !digest ) continue; + add_digests( *digest ); + } + + preactivate_protocol_features( preactivations ); + } + bool fc_exception_message_is::operator()( const fc::exception& ex ) { auto message = ex.get_log().at( 0 ).get_message(); bool match = (message == expected); diff --git a/libraries/wabt b/libraries/wabt index bf353aa719c..ce5c90e456f 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit bf353aa719c88b7152ee09e7f877a507cb7df27b +Subproject commit ce5c90e456f004558e123d606a6d1601587f00bc diff --git a/libraries/wasm-jit/CMakeLists.txt b/libraries/wasm-jit/CMakeLists.txt index fc691f83a95..8fcdec4e9ce 100644 --- a/libraries/wasm-jit/CMakeLists.txt +++ b/libraries/wasm-jit/CMakeLists.txt @@ -36,6 +36,7 @@ include_directories(${WAVM_INCLUDE_DIR}) # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") # endif() +set(CMAKE_CXX_STANDARD 11) option(WAVM_METRICS_OUTPUT "controls printing the timings of some operations to stdout" OFF) if(WAVM_METRICS_OUTPUT) add_definitions("-DWAVM_METRICS_OUTPUT=1") diff --git a/libraries/wasm-jit/Include/Platform/Platform.h b/libraries/wasm-jit/Include/Platform/Platform.h index 8d8769d4834..edaeef071d8 100644 --- a/libraries/wasm-jit/Include/Platform/Platform.h +++ b/libraries/wasm-jit/Include/Platform/Platform.h @@ -134,7 +134,7 @@ namespace Platform Uptr& outTrapOperand, const std::function& thunk ); - PLATFORM_API void immediately_exit(); + PLATFORM_API [[noreturn]] void immediately_exit(std::exception_ptr except); // // Threading diff --git a/libraries/wasm-jit/Source/Platform/POSIX.cpp 
b/libraries/wasm-jit/Source/Platform/POSIX.cpp index 4305381b39f..b6810e97480 100644 --- a/libraries/wasm-jit/Source/Platform/POSIX.cpp +++ b/libraries/wasm-jit/Source/Platform/POSIX.cpp @@ -176,6 +176,7 @@ namespace Platform THREAD_LOCAL Uptr* signalOperand = nullptr; THREAD_LOCAL bool isReentrantSignal = false; THREAD_LOCAL bool isCatchingSignals = false; + thread_local std::exception_ptr thrown_exception; void signalHandler(int signalNumber,siginfo_t* signalInfo,void*) { @@ -252,6 +253,7 @@ namespace Platform jmp_buf oldSignalReturnEnv; memcpy(&oldSignalReturnEnv,&signalReturnEnv,sizeof(jmp_buf)); const bool oldIsCatchingSignals = isCatchingSignals; + thrown_exception = nullptr; // Use setjmp to allow signals to jump back to this point. bool isReturningFromSignalHandler = sigsetjmp(signalReturnEnv,1); @@ -273,10 +275,14 @@ namespace Platform signalCallStack = nullptr; signalOperand = nullptr; + if(thrown_exception) + std::rethrow_exception(thrown_exception); + return signalType; } - void immediately_exit() { + void immediately_exit(std::exception_ptr except) { + thrown_exception = except; siglongjmp(signalReturnEnv,1); } diff --git a/libraries/wasm-jit/Source/Platform/Windows.cpp b/libraries/wasm-jit/Source/Platform/Windows.cpp index bc3c30fc46a..2c34c613b64 100644 --- a/libraries/wasm-jit/Source/Platform/Windows.cpp +++ b/libraries/wasm-jit/Source/Platform/Windows.cpp @@ -359,6 +359,10 @@ namespace Platform { errorUnless(SetEvent(reinterpret_cast(event))); } + + void immediately_exit(std::exception_ptr except) { + std::rethrow_exception(except); + } } #endif diff --git a/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp b/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp index 5ca01bad510..2c002c497ea 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp +++ b/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp @@ -27,12 +27,6 @@ namespace LLVMJIT llvm::Constant* defaultTableMaxElementIndex; llvm::Constant* defaultMemoryBase; llvm::Constant* defaultMemoryEndOffset; - - llvm::DIBuilder diBuilder; - llvm::DICompileUnit* diCompileUnit; - llvm::DIFile* diModuleScope; - - llvm::DIType* diValueTypes[(Uptr)ValueType::num]; llvm::MDNode* likelyFalseBranchWeights; llvm::MDNode* likelyTrueBranchWeights; @@ -41,20 +35,7 @@ namespace LLVMJIT : module(inModule) , moduleInstance(inModuleInstance) , llvmModule(new llvm::Module("",context)) - , diBuilder(*llvmModule) - { - diModuleScope = diBuilder.createFile("unknown","unknown"); - diCompileUnit = diBuilder.createCompileUnit(0xffff,diModuleScope,"WAVM",true,"",0); - - diValueTypes[(Uptr)ValueType::any] = nullptr; - diValueTypes[(Uptr)ValueType::i32] = diBuilder.createBasicType("i32",32,llvm::dwarf::DW_ATE_signed); - diValueTypes[(Uptr)ValueType::i64] = diBuilder.createBasicType("i64",64,llvm::dwarf::DW_ATE_signed); - diValueTypes[(Uptr)ValueType::f32] = diBuilder.createBasicType("f32",32,llvm::dwarf::DW_ATE_float); - diValueTypes[(Uptr)ValueType::f64] = diBuilder.createBasicType("f64",64,llvm::dwarf::DW_ATE_float); - #if ENABLE_SIMD_PROTOTYPE - diValueTypes[(Uptr)ValueType::v128] = diBuilder.createBasicType("v128",128,llvm::dwarf::DW_ATE_signed); - #endif - + { auto zeroAsMetadata = llvm::ConstantAsMetadata::get(emitLiteral(I32(0))); auto i32MaxAsMetadata = llvm::ConstantAsMetadata::get(emitLiteral(I32(INT32_MAX))); likelyFalseBranchWeights = llvm::MDTuple::getDistinct(context,{llvm::MDString::get(context,"branch_weights"),zeroAsMetadata,i32MaxAsMetadata}); @@ -1481,22 +1462,6 @@ namespace LLVMJIT void EmitFunctionContext::emit() { - // Create 
debug info for the function. - llvm::SmallVector diFunctionParameterTypes; - for(auto parameterType : functionType->parameters) { diFunctionParameterTypes.push_back(moduleContext.diValueTypes[(Uptr)parameterType]); } - auto diFunctionType = moduleContext.diBuilder.createSubroutineType(moduleContext.diBuilder.getOrCreateTypeArray(diFunctionParameterTypes)); - diFunction = moduleContext.diBuilder.createFunction( - moduleContext.diModuleScope, - functionInstance->debugName, - llvmFunction->getName(), - moduleContext.diModuleScope, - 0, - diFunctionType, - false, - true, - 0); - llvmFunction->setSubprogram(diFunction); - // Create the return basic block, and push the root control context for the function. auto returnBlock = llvm::BasicBlock::Create(context,"return",llvmFunction); auto returnPHI = createPHI(returnBlock,functionType->ret); @@ -1544,10 +1509,8 @@ namespace LLVMJIT OperatorDecoderStream decoder(functionDef.code); UnreachableOpVisitor unreachableOpVisitor(*this); OperatorPrinter operatorPrinter(module,functionDef); - Uptr opIndex = 0; while(decoder && controlStack.size()) { - irBuilder.SetCurrentDebugLocation(llvm::DILocation::get(context,(unsigned int)opIndex++,0,diFunction)); if(ENABLE_LOGGING) { logOperator(decoder.decodeOpWithoutConsume(operatorPrinter)); @@ -1624,9 +1587,6 @@ namespace LLVMJIT // Compile each function in the module. for(Uptr functionDefIndex = 0;functionDefIndex < module.functions.defs.size();++functionDefIndex) { EmitFunctionContext(*this,module,module.functions.defs[functionDefIndex],moduleInstance->functionDefs[functionDefIndex],functionDefs[functionDefIndex]).emit(); } - - // Finalize the debug info. - diBuilder.finalize(); Timing::logRatePerSecond("Emitted LLVM IR",emitTimer,(F64)llvmModule->size(),"functions"); diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp index ba5354c5d22..18cf2f4cfb1 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp +++ b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp @@ -112,15 +112,9 @@ namespace LLVMJIT void registerEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override { - llvm::RTDyldMemoryManager::registerEHFrames(addr,loadAddr,numBytes); - hasRegisteredEHFrames = true; - ehFramesAddr = addr; - ehFramesLoadAddr = loadAddr; - ehFramesNumBytes = numBytes; } void deregisterEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override { - llvm::RTDyldMemoryManager::deregisterEHFrames(addr,loadAddr,numBytes); } virtual bool needsToReserveAllocationSpace() override { return true; } @@ -296,13 +290,6 @@ namespace LLVMJIT JITModule(ModuleInstance* inModuleInstance): moduleInstance(inModuleInstance) {} ~JITModule() override { - // Delete the module's symbols, and remove them from the global address-to-symbol map. 
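Circling back to the Platform changes above (POSIX.cpp and Windows.cpp): a C++ exception cannot propagate across siglongjmp, so immediately_exit now parks the exception_ptr in a thread_local and the signal-catching path rethrows it after control returns to the sigsetjmp point. A reduced model of that handoff:

    #include <csetjmp>
    #include <exception>

    thread_local std::exception_ptr parked;
    thread_local std::jmp_buf return_env;

    [[noreturn]] void immediately_exit( std::exception_ptr e ) {
       parked = e;
       std::longjmp( return_env, 1 );
    }

    void guarded_call( void (*fn)() ) {
       parked = nullptr;
       if( setjmp( return_env ) ) {       // re-entered via longjmp
          if( parked )
             std::rethrow_exception( parked );
          return;                          // exited without an exception
       }
       fn();                               // may call immediately_exit
    }
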
-            Platform::Lock addressToSymbolMapLock(addressToSymbolMapMutex);
-            for(auto symbol : functionDefSymbols)
-            {
-               addressToSymbolMap.erase(addressToSymbolMap.find(symbol->baseAddress + symbol->numBytes));
-               delete symbol;
-            }
         }

         void notifySymbolLoaded(const char* name,Uptr baseAddress,Uptr numBytes,std::map&& offsetToOpIndexMap) override
@@ -314,14 +301,7 @@ namespace LLVMJIT
               WAVM_ASSERT_THROW(moduleInstance);
               WAVM_ASSERT_THROW(functionDefIndex < moduleInstance->functionDefs.size());
               FunctionInstance* functionInstance = moduleInstance->functionDefs[functionDefIndex];
-               auto symbol = new JITSymbol(functionInstance,baseAddress,numBytes,std::move(offsetToOpIndexMap));
-               functionDefSymbols.push_back(symbol);
               functionInstance->nativeFunction = reinterpret_cast(baseAddress);
-
-               {
-                  Platform::Lock addressToSymbolMapLock(addressToSymbolMapMutex);
-                  addressToSymbolMap[baseAddress + numBytes] = symbol;
-               }
            }
         }
      };
@@ -498,9 +478,6 @@ namespace LLVMJIT
         llvm::object::ObjectFile* object = jitUnit->loadedObjects[objectIndex].object;
         llvm::RuntimeDyld::LoadedObjectInfo* loadedObject = jitUnit->loadedObjects[objectIndex].loadedObject;

-        // Create a DWARF context to interpret the debug information in this compilation unit.
-        auto dwarfContext = llvm::make_unique(*object,loadedObject);
-
        // Iterate over the functions in the loaded object.
        for(auto symbolSizePair : llvm::object::computeSymbolSizes(*object))
        {
@@ -520,16 +497,12 @@ namespace LLVMJIT
              loadedAddress += (Uptr)loadedObject->getSectionLoadAddress(*symbolSection.get());
           }

-           // Get the DWARF line info for this symbol, which maps machine code addresses to WebAssembly op indices.
-           llvm::DILineInfoTable lineInfoTable = dwarfContext->getLineInfoForAddressRange(loadedAddress,symbolSizePair.second);
-           std::map offsetToOpIndexMap;
-           for(auto lineInfo : lineInfoTable) { offsetToOpIndexMap.emplace(U32(lineInfo.first - loadedAddress),lineInfo.second.Line); }
-
           #if PRINT_DISASSEMBLY
           Log::printf(Log::Category::error,"Disassembly for function %s\n",name.get().data());
           disassembleFunction(reinterpret_cast(loadedAddress),Uptr(symbolSizePair.second));
           #endif
+           std::map offsetToOpIndexMap;

           // Notify the JIT unit that the symbol was loaded.
           WAVM_ASSERT_THROW(symbolSizePair.second <= UINTPTR_MAX);
           jitUnit->notifySymbolLoaded(
diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.h b/libraries/wasm-jit/Source/Runtime/LLVMJIT.h
index afadb6053e4..cad02a101ed 100644
--- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.h
+++ b/libraries/wasm-jit/Source/Runtime/LLVMJIT.h
@@ -49,8 +49,6 @@
 #include "llvm/Support/DynamicLibrary.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/IR/DIBuilder.h"
-#include "llvm/DebugInfo/DIContext.h"
-#include "llvm/DebugInfo/DWARF/DWARFContext.h"
 #include
 #include
 #include
diff --git a/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp b/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp
index a8d1fe80c71..9e38eeebea0 100644
--- a/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp
+++ b/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp
@@ -8,6 +8,16 @@ namespace Runtime
 {
+   static void causeIntrinsicException(Exception::Cause cause) {
+      try {
+         Platform::immediately_exit(std::make_exception_ptr(Exception{cause, std::vector()}));
+      }
+      catch (...) {
+         Platform::immediately_exit(std::current_exception());
+      }
+      __builtin_unreachable();
+   }
+
   template
   Float quietNaN(Float value)
   {
@@ -104,11 +114,11 @@ namespace Runtime
   {
      if(sourceValue != sourceValue)
      {
-         causeException(Exception::Cause::invalidFloatOperation);
+         causeIntrinsicException(Exception::Cause::invalidFloatOperation);
      }
      else if(sourceValue >= maxValue || (isMinInclusive ? sourceValue <= minValue : sourceValue < minValue))
      {
-         causeException(Exception::Cause::integerDivideByZeroOrIntegerOverflow);
+         causeIntrinsicException(Exception::Cause::integerDivideByZeroOrIntegerOverflow);
      }
      return (Dest)sourceValue;
   }
@@ -125,45 +135,51 @@ namespace Runtime
   DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,divideByZeroOrIntegerOverflowTrap,divideByZeroOrIntegerOverflowTrap,none)
   {
-      causeException(Exception::Cause::integerDivideByZeroOrIntegerOverflow);
+      causeIntrinsicException(Exception::Cause::integerDivideByZeroOrIntegerOverflow);
   }

   DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,unreachableTrap,unreachableTrap,none)
   {
-      causeException(Exception::Cause::reachedUnreachable);
+      causeIntrinsicException(Exception::Cause::reachedUnreachable);
   }

   DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,accessViolationTrap,accessViolationTrap,none)
   {
-      causeException(Exception::Cause::accessViolation);
+      causeIntrinsicException(Exception::Cause::accessViolation);
   }

   DEFINE_INTRINSIC_FUNCTION3(wavmIntrinsics,indirectCallSignatureMismatch,indirectCallSignatureMismatch,none,i32,index,i64,expectedSignatureBits,i64,tableBits)
   {
-      TableInstance* table = reinterpret_cast(tableBits);
-      void* elementValue = table->baseAddress[index].value;
-      const FunctionType* actualSignature = table->baseAddress[index].type;
-      const FunctionType* expectedSignature = reinterpret_cast((Uptr)expectedSignatureBits);
-      std::string ipDescription = "";
-      LLVMJIT::describeInstructionPointer(reinterpret_cast(elementValue),ipDescription);
-      Log::printf(Log::Category::debug,"call_indirect signature mismatch: expected %s at index %u but got %s (%s)\n",
-         asString(expectedSignature).c_str(),
-         index,
-         actualSignature ? asString(actualSignature).c_str() : "nullptr",
-         ipDescription.c_str()
-         );
-      causeException(elementValue == nullptr ? Exception::Cause::undefinedTableElement : Exception::Cause::indirectCallSignatureMismatch);
+      try {
+         TableInstance* table = reinterpret_cast(tableBits);
+         void* elementValue = table->baseAddress[index].value;
+         const FunctionType* actualSignature = table->baseAddress[index].type;
+         const FunctionType* expectedSignature = reinterpret_cast((Uptr)expectedSignatureBits);
+         std::string ipDescription = "";
+         LLVMJIT::describeInstructionPointer(reinterpret_cast(elementValue),ipDescription);
+         Log::printf(Log::Category::debug,"call_indirect signature mismatch: expected %s at index %u but got %s (%s)\n",
+            asString(expectedSignature).c_str(),
+            index,
+            actualSignature ? asString(actualSignature).c_str() : "nullptr",
+            ipDescription.c_str()
+            );
+         causeIntrinsicException(elementValue == nullptr ? Exception::Cause::undefinedTableElement : Exception::Cause::indirectCallSignatureMismatch);
+      }
+      catch (...) {
+         Platform::immediately_exit(std::current_exception());
+      }
   }

   DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,indirectCallIndexOutOfBounds,indirectCallIndexOutOfBounds,none)
   {
-      causeException(Exception::Cause::undefinedTableElement);
+      causeIntrinsicException(Exception::Cause::undefinedTableElement);
   }

   DEFINE_INTRINSIC_FUNCTION2(wavmIntrinsics,_growMemory,growMemory,i32,i32,deltaPages,i64,memoryBits)
   {
      MemoryInstance* memory = reinterpret_cast(memoryBits);
-      WAVM_ASSERT_THROW(memory);
+      if(!memory)
+         causeIntrinsicException(Exception::Cause::outOfMemory);
      const Iptr numPreviousMemoryPages = growMemory(memory,(Uptr)deltaPages);
      if(numPreviousMemoryPages + (Uptr)deltaPages > IR::maxMemoryPages) { return -1; }
      else { return (I32)numPreviousMemoryPages; }
@@ -172,7 +188,8 @@ namespace Runtime
   DEFINE_INTRINSIC_FUNCTION1(wavmIntrinsics,_currentMemory,currentMemory,i32,i64,memoryBits)
   {
      MemoryInstance* memory = reinterpret_cast(memoryBits);
-      WAVM_ASSERT_THROW(memory);
+      if(!memory)
+         causeIntrinsicException(Exception::Cause::outOfMemory);
      Uptr numMemoryPages = getMemoryNumPages(memory);
      if(numMemoryPages > UINT32_MAX) { numMemoryPages = UINT32_MAX; }
      return (U32)numMemoryPages;
diff --git a/libraries/yubihsm b/libraries/yubihsm
index e1922fffc15..9189fdb92cc 160000
--- a/libraries/yubihsm
+++ b/libraries/yubihsm
@@ -1 +1 @@
-Subproject commit e1922fffc15d0720ba08f110a66b9c752774e107
+Subproject commit 9189fdb92cc90840e51760de5f297ac7d908b3cd
diff --git a/pipeline.jsonc b/pipeline.jsonc
index 3e190708f4f..b9430524cf4 100644
--- a/pipeline.jsonc
+++ b/pipeline.jsonc
@@ -1,14 +1,36 @@
 {
-    "eosio-lrt":
+    "eos-multiversion-tests":
     {
-        "pipeline-branch": "legacy-os"
+        "environment":
+        {
+            "IMAGE_TAG": "_1-8-0-rc2"
+        },
+        "configuration":
+        [
+            "170=v1.7.0"
+        ]
     },
-    "eosio-nightly-builds":
+    "eosio-docker-builds":
     {
-        "pipeline-branch": "legacy-os"
+        "environment":
+        {
+            "BUILDER_TAG": "v1.8.0"
+        }
     },
-    "eosio-base-images":
+    "eosio-sync-tests":
     {
-        "pipeline-branch": "release/1.7.x"
+        "environment":
+        {
+            "SKIP_PRE_V180": "true",
+            "SKIP_V180": "false"
+        }
+    },
+    "eosio-replay-tests":
+    {
+        "environment":
+        {
+            "SKIP_PRE_V180": "true",
+            "SKIP_V180": "false"
+        }
     }
 }
\ No newline at end of file
diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt
index 8c93df9c48e..e07a10c5b8d 100644
--- a/plugins/CMakeLists.txt
+++ b/plugins/CMakeLists.txt
@@ -1,4 +1,3 @@
-add_subdirectory(bnet_plugin)
 add_subdirectory(net_plugin)
 add_subdirectory(net_api_plugin)
 add_subdirectory(http_plugin)
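Note: the wasm-jit hunks above rework how a runtime trap reaches the embedder. Rather than letting `causeException` throw straight out of an intrinsic (the EH-frame registration such unwinding relied on is gutted in LLVMJIT.cpp above), the intrinsics now park an exception in the new thread-local `thrown_exception`, and `immediately_exit()` `siglongjmp`s back to the `sigsetjmp` point in POSIX.cpp, which rethrows once control is back on ordinary C++ frames. The following is a minimal, self-contained sketch of that pattern; it is POSIX-only, and every name in it is illustrative rather than WAVM's actual API.

    // Park-and-longjmp trap propagation: a sketch under the assumptions above.
    #include <setjmp.h>   // sigsetjmp/siglongjmp (POSIX)
    #include <cstdio>
    #include <exception>
    #include <stdexcept>

    static thread_local sigjmp_buf signal_return_env;
    static thread_local std::exception_ptr parked_exception;

    // Analogue of the reworked Platform::immediately_exit(): park the
    // exception, then jump out instead of unwinding through JIT'd frames.
    [[noreturn]] static void park_and_exit(std::exception_ptr except) {
       parked_exception = except;
       siglongjmp(signal_return_env, 1);
    }

    // Stand-in for an intrinsic such as wavmIntrinsics.unreachableTrap.
    static void trapping_intrinsic() {
       park_and_exit(std::make_exception_ptr(std::runtime_error("reached unreachable")));
    }

    int main() {
       parked_exception = nullptr;
       if(sigsetjmp(signal_return_env, 1) == 0) {
          trapping_intrinsic();   // in WAVM this would be a call into JIT'd code
       }
       // Back on ordinary C++ frames: rethrowing is safe here.
       if(parked_exception) {
          try { std::rethrow_exception(parked_exception); }
          catch(const std::exception& e) { std::printf("recovered: %s\n", e.what()); }
       }
       return 0;
    }

The Windows.cpp hunk needs no such detour: its `immediately_exit()` simply rethrows, and the catch-all blocks added to the intrinsics funnel any escaping exception through the same entry point on either platform.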
diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md
index 55cce74a33b..f47cefa8e63 100644
--- a/plugins/COMMUNITY.md
+++ b/plugins/COMMUNITY.md
@@ -1,8 +1,8 @@
 # Community Plugin List

-This file contains a list of community authored plugins for `nodeos`, acting as a directory of the plugins that are available.
+This file contains a list of community-authored plugins for `nodeos`, along with APIs and tools associated with those plugins, acting as a directory of the community-authored resources that are available.

-Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new plugins.
+Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new related projects.
 | Description | URL |
 | ----------- | --- |
@@ -17,7 +17,9 @@ Third parties are encouraged to make pull requests to this file (`develop` branc
 | Chintai ZMQ Watcher | https://github.com/acoutts/chintai-zeromq-watcher-plugin |
 | Mongo History API | https://github.com/CryptoLions/EOS-mongo-history-API |
 | State History API | https://github.com/acoutts/EOS-state-history-API |
+| Hyperion History API | https://github.com/eosrio/Hyperion-History-API |
+| Chronicle | https://github.com/EOSChronicleProject/eos-chronicle |

 ## DISCLAIMER:
-The fact that a plugin is listed in this file does not mean the plugin has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them.
+The resources listed here are developed, offered and maintained by third parties, not by block.one. Providing information, material or commentary about such third-party resources does not mean we endorse or recommend any of these resources. We are not responsible, and disclaim any responsibility or liability, for your use of or reliance on any of these resources. Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate. USAGE AND RELIANCE IS ENTIRELY AT YOUR OWN RISK.
diff --git a/plugins/bnet_plugin/CMakeLists.txt b/plugins/bnet_plugin/CMakeLists.txt
deleted file mode 100644
index d49438298cf..00000000000
--- a/plugins/bnet_plugin/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-file(GLOB HEADERS "include/eosio/bnet_plugin/*.hpp")
-add_library( bnet_plugin
-             bnet_plugin.cpp
-             ${HEADERS} )
-
-target_link_libraries( bnet_plugin chain_plugin eosio_chain appbase )
-target_include_directories( bnet_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp
deleted file mode 100644
index b788d833503..00000000000
--- a/plugins/bnet_plugin/bnet_plugin.cpp
+++ /dev/null
@@ -1,1562 +0,0 @@
-/**
- *  The purpose of this protocol is to synchronize (and keep synchronized) two
- *  blockchains using a very simple algorithm:
- *
- *  1. find the last block id on our local chain that the remote peer knows about
- *  2. if we have the next block send it to them
- *  3. if we don't have the next block send them a the oldest unexpired transaction
- *
- *  There are several input events:
- *
- *  1. new block accepted by local chain
- *  2. block deemed irreversible by local chain
- *  3. new block header accepted by local chain
- *  4. transaction accepted by local chain
- *  5. block received from remote peer
- *  6. transaction received from remote peer
- *  7. socket ready for next write
- *
- *  Each session is responsible for maintaining the following
- *
- *  1. the most recent block on our current best chain which we know
- *     with certainty that the remote peer has.
- *      - this could be the peers last irreversible block
- *      - a block ID after the LIB that the peer has notified us of
- *      - a block which we have sent to the remote peer
- *      - a block which the peer has sent us
- *  2. the block IDs we have received from the remote peer so that
- *     we can disconnect peer if one of those blocks is deemed invalid
- *      - we can clear these IDs once the block becomes reversible
- *  3. the transactions we have received from the remote peer so that
- *     we do not send them something that they already know.
- * - this includes transactions sent as part of blocks - * - we clear this cache after we have applied a block that - * includes the transactions because we know the controller - * should not notify us again (they would be dupe) - * - * Assumptions: - * 1. all blocks we send the peer are valid and will be held in the - * peers fork database until they become irreversible or are replaced - * by an irreversible alternative. - * 2. we don't care what fork the peer is on, so long as we know they have - * the block prior to the one we want to send. The peer will sort it out - * with its fork database and hopfully come to our conclusion. - * 3. the peer will send us blocks on the same basis - * - */ - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -using tcp = boost::asio::ip::tcp; -namespace ws = boost::beast::websocket; - -namespace eosio { - using namespace chain; - - static appbase::abstract_plugin& _bnet_plugin = app().register_plugin(); - -} /// namespace eosio - -namespace fc { - extern std::unordered_map& get_logger_map(); -} - -const fc::string logger_name("bnet_plugin"); -fc::logger plugin_logger; -std::string peer_log_format; - -#define peer_dlog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::debug ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( debug, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant()) ) ); \ - FC_MULTILINE_MACRO_END - -#define peer_ilog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::info ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( info, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant()) ) ); \ - FC_MULTILINE_MACRO_END - -#define peer_wlog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::warn ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( warn, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant()) ) ); \ - FC_MULTILINE_MACRO_END - -#define peer_elog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::error ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( error, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant())) ); \ - FC_MULTILINE_MACRO_END - - -using eosio::public_key_type; -using eosio::chain_id_type; -using eosio::block_id_type; -using eosio::block_timestamp_type; -using std::string; -using eosio::sha256; -using eosio::signed_block_ptr; -using eosio::packed_transaction_ptr; -using std::vector; - -struct hello { - public_key_type peer_id; - string network_version; - string agent; - string protocol_version = "1.0.1"; - string user; - string password; - chain_id_type chain_id; - bool request_transactions = false; - uint32_t last_irr_block_num = 0; - vector pending_block_ids; -}; -// @swap user, password -FC_REFLECT( hello, (peer_id)(network_version)(user)(password)(agent)(protocol_version)(chain_id)(request_transactions)(last_irr_block_num)(pending_block_ids) ) - -struct hello_extension_irreversible_only {}; - -FC_REFLECT( hello_extension_irreversible_only, BOOST_PP_SEQ_NIL ) - -using hello_extension = fc::static_variant; - -/** - * This message is sent upon successful speculative application of a transaction - * and informs a peer not to send this message. 
- */ -struct trx_notice { - vector signed_trx_id; ///< hash of trx + sigs -}; - -FC_REFLECT( trx_notice, (signed_trx_id) ) - -/** - * This message is sent upon successfully adding a transaction to the fork database - * and informs the remote peer that there is no need to send this block. - */ -struct block_notice { - vector block_ids; -}; - -FC_REFLECT( block_notice, (block_ids) ); - -struct ping { - fc::time_point sent; - fc::sha256 code; - uint32_t lib; ///< the last irreversible block -}; -FC_REFLECT( ping, (sent)(code)(lib) ) - -struct pong { - fc::time_point sent; - fc::sha256 code; -}; -FC_REFLECT( pong, (sent)(code) ) - -using bnet_message = fc::static_variant; - - -struct by_id; -struct by_num; -struct by_received; -struct by_expired; - -namespace eosio { - using namespace chain::plugin_interface; - - class bnet_plugin_impl; - - template - void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { - if( !strand.running_in_this_thread() ) { - elog( "wrong strand: ${f} : line ${n}, exiting", ("f", func)("n", line) ); - app().quit(); - } - } - - /** - * Each session is presumed to operate in its own strand so that - * operations can execute in parallel. - */ - class session : public std::enable_shared_from_this - { - public: - enum session_state { - hello_state, - sending_state, - idle_state - }; - - struct block_status { - block_status( block_id_type i, bool kby_peer, bool rfrom_peer) - { - known_by_peer = kby_peer; - received_from_peer = rfrom_peer; - id = i; - } - - bool known_by_peer = false; ///< we sent block to peer or peer sent us notice - bool received_from_peer = false; ///< peer sent us this block and considers full block valid - block_id_type id; ///< the block id; - // block_id_type prev; ///< the prev block id - - // shared_ptr< vector > block_msg; ///< packed bnet_message for this block - - uint32_t block_num()const { return block_header::num_from_id(id); } - }; - - typedef boost::multi_index_container, member >, - ordered_non_unique< tag, const_mem_fun > - > - > block_status_index; - - - struct transaction_status { - time_point received; - time_point expired; /// 5 seconds from last accepted - transaction_id_type id; - transaction_metadata_ptr trx; - - void mark_known_by_peer() { received = fc::time_point::maximum(); trx.reset(); } - bool known_by_peer()const { return received == fc::time_point::maximum(); } - }; - - typedef boost::multi_index_container, member >, - ordered_non_unique< tag, member >, - ordered_non_unique< tag, member > - > - > transaction_status_index; - - block_status_index _block_status; - transaction_status_index _transaction_status; - const uint32_t _max_block_status_range = 2048; // limit tracked block_status known_by_peer - - public_key_type _local_peer_id; - uint32_t _local_lib = 0; - block_id_type _local_lib_id; - uint32_t _local_head_block_num = 0; - block_id_type _local_head_block_id; /// the last block id received on local channel - - - public_key_type _remote_peer_id; - uint32_t _remote_lib = 0; - block_id_type _remote_lib_id; - bool _remote_request_trx = false; - bool _remote_request_irreversible_only = false; - - uint32_t _last_sent_block_num = 0; - block_id_type _last_sent_block_id; /// the id of the last block sent - bool _recv_remote_hello = false; - bool _sent_remote_hello = false; - - - fc::sha256 _current_code; - fc::time_point _last_recv_ping_time = fc::time_point::now(); - ping _last_recv_ping; - ping _last_sent_ping; - - - int _session_num = 0; - session_state _state = hello_state; - tcp::resolver 
_resolver; - bnet_ptr _net_plugin; - boost::asio::io_service& _ios; - unique_ptr> _ws; - boost::asio::strand< boost::asio::io_context::executor_type> _strand; - - methods::get_block_by_number::method_type& _get_block_by_number; - - - string _peer; - string _remote_host; - string _remote_port; - - vector _out_buffer; - //boost::beast::multi_buffer _in_buffer; - boost::beast::flat_buffer _in_buffer; - flat_set _block_header_notices; - fc::optional _logger_variant; - - - int next_session_id()const { - static std::atomic session_count(0); - return ++session_count; - } - - /** - * Creating session from server socket acceptance - */ - explicit session( tcp::socket socket, bnet_ptr net_plug ) - :_resolver(socket.get_io_service()), - _net_plugin( std::move(net_plug) ), - _ios(socket.get_io_service()), - _ws( new ws::stream(move(socket)) ), - _strand(_ws->get_executor() ), - _get_block_by_number( app().get_method() ) - { - _session_num = next_session_id(); - set_socket_options(); - _ws->binary(true); - wlog( "open session ${n}",("n",_session_num) ); - } - - - /** - * Creating outgoing session - */ - explicit session( boost::asio::io_context& ioc, bnet_ptr net_plug ) - :_resolver(ioc), - _net_plugin( std::move(net_plug) ), - _ios(ioc), - _ws( new ws::stream(ioc) ), - _strand( _ws->get_executor() ), - _get_block_by_number( app().get_method() ) - { - _session_num = next_session_id(); - _ws->binary(true); - wlog( "open session ${n}",("n",_session_num) ); - } - - ~session(); - - - void set_socket_options() { - try { - /** to minimize latency when sending short messages */ - _ws->next_layer().set_option( boost::asio::ip::tcp::no_delay(true) ); - - /** to minimize latency when sending large 1MB blocks, the send buffer should not have to - * wait for an "ack", making this larger could result in higher latency for smaller urgent - * messages. - */ - _ws->next_layer().set_option( boost::asio::socket_base::send_buffer_size( 1024*1024 ) ); - _ws->next_layer().set_option( boost::asio::socket_base::receive_buffer_size( 1024*1024 ) ); - } catch ( ... 
) { - elog( "uncaught exception on set socket options" ); - } - } - - void run() { - _ws->async_accept( boost::asio::bind_executor( - _strand, - std::bind( &session::on_accept, - shared_from_this(), - std::placeholders::_1) ) ); - } - - void run( const string& peer ) { - auto c = peer.find(':'); - auto host = peer.substr( 0, c ); - auto port = peer.substr( c+1, peer.size() ); - - _peer = peer; - _remote_host = host; - _remote_port = port; - - _resolver.async_resolve( _remote_host, _remote_port, - boost::asio::bind_executor( _strand, - std::bind( &session::on_resolve, - shared_from_this(), - std::placeholders::_1, - std::placeholders::_2 ) ) ); - } - - void on_resolve( boost::system::error_code ec, - tcp::resolver::results_type results ) { - if( ec ) return on_fail( ec, "resolve" ); - - boost::asio::async_connect( _ws->next_layer(), - results.begin(), results.end(), - boost::asio::bind_executor( _strand, - std::bind( &session::on_connect, - shared_from_this(), - std::placeholders::_1 ) ) ); - } - - void on_connect( boost::system::error_code ec ) { - if( ec ) return on_fail( ec, "connect" ); - - set_socket_options(); - - _ws->async_handshake( _remote_host, "/", - boost::asio::bind_executor( _strand, - std::bind( &session::on_handshake, - shared_from_this(), - std::placeholders::_1 ) ) ); - } - - void on_handshake( boost::system::error_code ec ) { - if( ec ) return on_fail( ec, "handshake" ); - - do_hello(); - do_read(); - } - - /** - * This will be called "every time" a the transaction is accepted which happens - * on the speculative block (potentially several such blocks) and when a block - * that contains the transaction is applied and/or when switching forks. - * - * We will add it to the transaction status table as "received now" to be the - * basis of sending it to the peer. When we send it to the peer "received now" - * will be set to the infinite future to mark it as sent so we don't resend it - * when it is accepted again. - * - * Each time the transaction is "accepted" we extend the time we cache it by - * 5 seconds from now. Every time a block is applied we purge all accepted - * transactions that have reached 5 seconds without a new "acceptance". - */ - void on_accepted_transaction( transaction_metadata_ptr t ) { - //ilog( "accepted ${t}", ("t",t->id) ); - auto itr = _transaction_status.find( t->id ); - if( itr != _transaction_status.end() ) { - if( !itr->known_by_peer() ) { - _transaction_status.modify( itr, [&]( auto& stat ) { - stat.expired = std::min( fc::time_point::now() + fc::seconds(5), t->packed_trx->expiration() ); - }); - } - return; - } - - transaction_status stat; - stat.received = fc::time_point::now(); - stat.expired = stat.received + fc::seconds(5); - stat.id = t->id; - stat.trx = t; - _transaction_status.insert( stat ); - - maybe_send_next_message(); - } - - /** - * Remove all transactions that expired from cache prior to now - */ - void purge_transaction_cache() { - auto& idx = _transaction_status.get(); - auto itr = idx.begin(); - auto now = fc::time_point::now(); - while( itr != idx.end() && itr->expired < now ) { - idx.erase(itr); - itr = idx.begin(); - } - } - - /** - * When our local LIB advances we can purge our known history up to - * the LIB or up to the last block known by the remote peer. 
- */ - void on_new_lib( block_state_ptr s ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - _local_lib = s->block_num; - _local_lib_id = s->id; - - auto purge_to = std::min( _local_lib, _last_sent_block_num ); - - auto& idx = _block_status.get(); - auto itr = idx.begin(); - while( itr != idx.end() && itr->block_num() < purge_to ) { - idx.erase(itr); - itr = idx.begin(); - } - - if( _remote_request_irreversible_only ) { - auto bitr = _block_status.find(s->id); - if ( bitr == _block_status.end() || !bitr->received_from_peer ) { - _block_header_notices.insert(s->id); - } - } - - maybe_send_next_message(); - } - - - void on_bad_block( signed_block_ptr b ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - try { - auto id = b->id(); - auto itr = _block_status.find( id ); - if( itr == _block_status.end() ) return; - if( itr->received_from_peer ) { - peer_elog(this, "bad signed_block_ptr : unknown" ); - elog( "peer sent bad block #${b} ${i}, disconnect", ("b", b->block_num())("i",b->id()) ); - _ws->next_layer().close(); - } - } catch ( ... ) { - elog( "uncaught exception" ); - } - } - - void on_accepted_block_header( const block_state_ptr& s ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - // ilog( "accepted block header ${n}", ("n",s->block_num) ); - const auto& id = s->id; - - if( fc::time_point::now() - s->block->timestamp < fc::seconds(6) ) { - // ilog( "queue notice to peer that we have this block so hopefully they don't send it to us" ); - auto itr = _block_status.find( id ); - if( !_remote_request_irreversible_only && ( itr == _block_status.end() || !itr->received_from_peer ) ) { - _block_header_notices.insert( id ); - } - if( itr == _block_status.end() ) { - _block_status.insert( block_status(id, false, false) ); - } - } - } - - void on_accepted_block( const block_state_ptr& s ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - //idump((_block_status.size())(_transaction_status.size())); - //ilog( "accepted block ${n}", ("n",s->block_num) ); - - const auto& id = s->id; - - _local_head_block_id = id; - _local_head_block_num = block_header::num_from_id(id); - - if( _local_head_block_num < _last_sent_block_num ) { - _last_sent_block_num = _local_lib; - _last_sent_block_id = _local_lib_id; - } - - purge_transaction_cache(); - - /** purge all transactions from cache, I will send them as part of a block - * in the future unless peer tells me they already have block. 
- */ - for( const auto& receipt : s->block->transactions ) { - if( receipt.trx.which() == 1 ) { - const auto& pt = receipt.trx.get(); - const auto& tid = pt.id(); - auto itr = _transaction_status.find( tid ); - if( itr != _transaction_status.end() ) - _transaction_status.erase(itr); - } - } - - maybe_send_next_message(); /// attempt to send if we are idle - } - - - template - void async_get_pending_block_ids( L&& callback ) { - /// send peer my head block status which is read from chain plugin - app().post(priority::low, [self = shared_from_this(),callback]{ - auto& control = app().get_plugin().chain(); - auto lib = control.last_irreversible_block_num(); - auto head = control.fork_db_head_block_id(); - auto head_num = block_header::num_from_id(head); - - - std::vector ids; - if( lib > 0 ) { - ids.reserve((head_num-lib)+1); - for( auto i = lib; i <= head_num; ++i ) { - ids.emplace_back(control.get_block_id_for_num(i)); - } - } - self->_ios.post( boost::asio::bind_executor( - self->_strand, - [callback,ids,lib](){ - callback(ids,lib); - } - )); - }); - } - - template - void async_get_block_num( uint32_t blocknum, L&& callback ) { - app().post(priority::low, [self = shared_from_this(), blocknum, callback]{ - auto& control = app().get_plugin().chain(); - signed_block_ptr sblockptr; - try { - //ilog( "fetch block ${n}", ("n",blocknum) ); - sblockptr = control.fetch_block_by_number( blocknum ); - } catch ( const fc::exception& e ) { - edump((e.to_detail_string())); - } - - self->_ios.post( boost::asio::bind_executor( - self->_strand, - [callback,sblockptr](){ - callback(sblockptr); - } - )); - }); - } - - void do_hello(); - - - void send( const bnet_message& msg ) { try { - auto ps = fc::raw::pack_size(msg); - _out_buffer.resize(ps); - fc::datastream ds(_out_buffer.data(), ps); - fc::raw::pack(ds, msg); - send(); - } FC_LOG_AND_RETHROW() } - - template - void send( const bnet_message& msg, const T& ex ) { try { - auto ex_size = fc::raw::pack_size(ex); - auto ps = fc::raw::pack_size(msg) + fc::raw::pack_size(unsigned_int(ex_size)) + ex_size; - _out_buffer.resize(ps); - fc::datastream ds(_out_buffer.data(), ps); - fc::raw::pack( ds, msg ); - fc::raw::pack( ds, unsigned_int(ex_size) ); - fc::raw::pack( ds, ex ); - send(); - } FC_LOG_AND_RETHROW() } - - void send() { try { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - - _state = sending_state; - _ws->async_write( boost::asio::buffer(_out_buffer), - boost::asio::bind_executor( - _strand, - std::bind( &session::on_write, - shared_from_this(), - std::placeholders::_1, - std::placeholders::_2 ) ) ); - } FC_LOG_AND_RETHROW() } - - void mark_block_status( const block_id_type& id, bool known_by_peer, bool recv_from_peer ) { - auto itr = _block_status.find(id); - if( itr == _block_status.end() ) { - // optimization to avoid sending blocks to nodes that already know about them - // to avoid unbounded memory growth limit number tracked - const auto min_block_num = std::min( _local_lib, _last_sent_block_num ); - const auto max_block_num = min_block_num + _max_block_status_range; - const auto block_num = block_header::num_from_id( id ); - if( block_num > min_block_num && block_num < max_block_num && _block_status.size() < _max_block_status_range ) - _block_status.insert( block_status( id, known_by_peer, recv_from_peer ) ); - } else { - _block_status.modify( itr, [&]( auto& item ) { - item.known_by_peer = known_by_peer; - if (recv_from_peer) item.received_from_peer = true; - }); - } - } - - /** - * This method will determine whether there is a 
message in the - * out queue, if so it returns. Otherwise it determines the best - * message to send. - */ - void maybe_send_next_message() { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - if( _state == sending_state ) return; /// in process of sending - if( _out_buffer.size() ) return; /// in process of sending - if( !_recv_remote_hello || !_sent_remote_hello ) return; - - clear_expired_trx(); - - if( send_block_notice() ) return; - if( send_pong() ) return; - if( send_ping() ) return; - - /// we don't know where we are (waiting on accept block localhost) - if( _local_head_block_id == block_id_type() ) return ; - if( send_next_block() ) return; - if( send_next_trx() ) return; - } - - bool send_block_notice() { - if( _block_header_notices.size() == 0 ) - return false; - - block_notice notice; - notice.block_ids.reserve( _block_header_notices.size() ); - for( auto& id : _block_header_notices ) - notice.block_ids.emplace_back(id); - send(notice); - _block_header_notices.clear(); - return true; - } - - bool send_pong() { - if( _last_recv_ping.code == fc::sha256() ) - return false; - - send( pong{ fc::time_point::now(), _last_recv_ping.code } ); - _last_recv_ping.code = fc::sha256(); - return true; - } - - bool send_ping() { - auto delta_t = fc::time_point::now() - _last_sent_ping.sent; - if( delta_t < fc::seconds(3) ) return false; - - if( _last_sent_ping.code == fc::sha256() ) { - _last_sent_ping.sent = fc::time_point::now(); - _last_sent_ping.code = fc::sha256::hash(_last_sent_ping.sent); /// TODO: make this more random - _last_sent_ping.lib = _local_lib; - send( _last_sent_ping ); - } - - /// we expect the peer to send us a ping every 3 seconds, so if we haven't gotten one - /// in the past 6 seconds then the connection is likely hung. Unfortunately, we cannot - /// use the round-trip time of ping/pong to measure latency because during syncing the - /// remote peer can be stuck doing CPU intensive tasks that block its reading of the - /// buffer. This buffer gets filled with perhaps 100 blocks taking .1 seconds each for - /// a total processing time of 10+ seconds. That said, the peer should come up for air - /// every .1 seconds so should still be able to send out a ping every 3 seconds. - // - // We don't want to wait a RTT for each block because that could also slow syncing for - // empty blocks... - // - //if( fc::time_point::now() - _last_recv_ping_time > fc::seconds(6) ) { - // do_goodbye( "no ping from peer in last 6 seconds...." 
); - //} - return true; - } - - bool is_known_by_peer( block_id_type id ) { - auto itr = _block_status.find(id); - if( itr == _block_status.end() ) return false; - return itr->known_by_peer; - } - - void clear_expired_trx() { - auto& idx = _transaction_status.get(); - auto itr = idx.begin(); - while( itr != idx.end() && itr->expired < fc::time_point::now() ) { - idx.erase(itr); - itr = idx.begin(); - } - } - - bool send_next_trx() { try { - if( !_remote_request_trx ) return false; - - auto& idx = _transaction_status.get(); - auto start = idx.begin(); - if( start == idx.end() || start->known_by_peer() ) - return false; - - - auto ptrx_ptr = start->trx->packed_trx; - - idx.modify( start, [&]( auto& stat ) { - stat.mark_known_by_peer(); - }); - - // wlog("sending trx ${id}", ("id",start->id) ); - send(ptrx_ptr); - - return true; - - } FC_LOG_AND_RETHROW() } - - void on_async_get_block( const signed_block_ptr& nextblock ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - if( !nextblock) { - _state = idle_state; - maybe_send_next_message(); - return; - } - - /// if something changed, the next block doesn't link to the last - /// block we sent, local chain must have switched forks - if( nextblock->previous != _last_sent_block_id && _last_sent_block_id != block_id_type() ) { - if( !is_known_by_peer( nextblock->previous ) ) { - _last_sent_block_id = _local_lib_id; - _last_sent_block_num = _local_lib; - _state = idle_state; - maybe_send_next_message(); - return; - } - } - - /// at this point we know the peer can link this block - - auto next_id = nextblock->id(); - - /// if the peer already knows about this block, great no need to - /// send it, mark it as 'sent' and move on. - if( is_known_by_peer( next_id ) ) { - _last_sent_block_id = next_id; - _last_sent_block_num = nextblock->block_num(); - - _state = idle_state; - maybe_send_next_message(); - return; - } - - mark_block_status( next_id, true, false ); - - _last_sent_block_id = next_id; - _last_sent_block_num = nextblock->block_num(); - - send( nextblock ); - status( "sending block " + std::to_string( block_header::num_from_id(next_id) ) ); - - if( nextblock->timestamp > (fc::time_point::now() - fc::seconds(5)) ) { - mark_block_transactions_known_by_peer( nextblock ); - } - } - - /** - * Send the next block after the last block in our current fork that - * we know the remote peer knows. - */ - bool send_next_block() { - - if ( _remote_request_irreversible_only && _last_sent_block_id == _local_lib_id ) { - return false; - } - - if( _last_sent_block_id == _local_head_block_id ) /// we are caught up - return false; - - ///< set sending state because this callback may result in sending a message - _state = sending_state; - async_get_block_num( _last_sent_block_num + 1, - [self=shared_from_this()]( auto sblockptr ) { - self->on_async_get_block( sblockptr ); - }); - - return true; - } - - void on_fail( boost::system::error_code ec, const char* what ) { - try { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - elog( "${w}: ${m}", ("w", what)("m", ec.message() ) ); - _ws->next_layer().close(); - } catch ( ... 
) { - elog( "uncaught exception on close" ); - } - } - - void on_accept( boost::system::error_code ec ) { - if( ec ) { - return on_fail( ec, "accept" ); - } - - do_hello(); - do_read(); - } - - void do_read() { - _ws->async_read( _in_buffer, - boost::asio::bind_executor( - _strand, - std::bind( &session::on_read, - shared_from_this(), - std::placeholders::_1, - std::placeholders::_2))); - } - - void on_read( boost::system::error_code ec, std::size_t bytes_transferred ) { - boost::ignore_unused(bytes_transferred); - - if( ec == ws::error::closed ) - return on_fail( ec, "close on read" ); - - if( ec ) { - return on_fail( ec, "read" );; - } - - try { - auto d = boost::asio::buffer_cast(boost::beast::buffers_front(_in_buffer.data())); - auto s = boost::asio::buffer_size(_in_buffer.data()); - fc::datastream ds(d,s); - - bnet_message msg; - fc::raw::unpack( ds, msg ); - on_message( msg, ds ); - _in_buffer.consume( ds.tellp() ); - - wait_on_app(); - return; - - } catch ( ... ) { - wlog( "close bad payload" ); - } - try { - _ws->close( boost::beast::websocket::close_code::bad_payload ); - } catch ( ... ) { - elog( "uncaught exception on close" ); - } - } - - /** if we just call do_read here then this thread might run ahead of - * the main thread, instead we post an event to main which will then - * post a new read event when ready. - * - * This also keeps the "shared pointer" alive in the callback preventing - * the connection from being closed. - */ - void wait_on_app() { - app().post( priority::medium, [self = shared_from_this()]() { - app().get_io_service().post( boost::asio::bind_executor( self->_strand, [self] { self->do_read(); } ) ); - } ); - } - - void on_message( const bnet_message& msg, fc::datastream& ds ) { - try { - switch( msg.which() ) { - case bnet_message::tag::value: - on( msg.get(), ds ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - default: - wlog( "bad message received" ); - _ws->close( boost::beast::websocket::close_code::bad_payload ); - return; - } - maybe_send_next_message(); - } catch( const fc::exception& e ) { - elog( "${e}", ("e",e.to_detail_string())); - _ws->close( boost::beast::websocket::close_code::bad_payload ); - } - } - - void on( const block_notice& notice ) { - peer_ilog(this, "received block_notice"); - for( const auto& id : notice.block_ids ) { - status( "received notice " + std::to_string( block_header::num_from_id(id) ) ); - mark_block_status( id, true, false ); - } - } - - void on( const hello& hi, fc::datastream& ds ); - - void on( const ping& p ) { - peer_ilog(this, "received ping"); - _last_recv_ping = p; - _remote_lib = p.lib; - _last_recv_ping_time = fc::time_point::now(); - } - - void on( const pong& p ) { - peer_ilog(this, "received pong"); - if( p.code != _last_sent_ping.code ) { - peer_elog(this, "bad ping : invalid pong code"); - return do_goodbye( "invalid pong code" ); - } - _last_sent_ping.code = fc::sha256(); - } - - void do_goodbye( const string& reason ) { - try { - status( "goodbye - " + reason ); - _ws->next_layer().close(); - } catch ( ... 
) { - elog( "uncaught exception on close" ); - } - } - - void check_for_redundant_connection(); - - void on( const signed_block_ptr& b ) { - peer_ilog(this, "received signed_block_ptr"); - if (!b) { - peer_elog(this, "bad signed_block_ptr : null pointer"); - EOS_THROW(block_validate_exception, "bad block" ); - } - status( "received block " + std::to_string(b->block_num()) ); - //ilog( "recv block ${n}", ("n", b->block_num()) ); - auto id = b->id(); - mark_block_status( id, true, true ); - - app().get_channel().publish(priority::high, b); - - mark_block_transactions_known_by_peer( b ); - } - - void mark_block_transactions_known_by_peer( const signed_block_ptr& b ) { - for( const auto& receipt : b->transactions ) { - if( receipt.trx.which() == 1 ) { - const auto& pt = receipt.trx.get(); - const auto& id = pt.id(); - mark_transaction_known_by_peer(id); - } - } - } - - /** - * @return true if trx is known by local host, false if new to this host - */ - bool mark_transaction_known_by_peer( const transaction_id_type& id ) { - auto itr = _transaction_status.find( id ); - if( itr != _transaction_status.end() ) { - _transaction_status.modify( itr, [&]( auto& stat ) { - stat.mark_known_by_peer(); - }); - return true; - } else { - transaction_status stat; - stat.id = id; - stat.mark_known_by_peer(); - stat.expired = fc::time_point::now()+fc::seconds(5); - _transaction_status.insert(stat); - } - return false; - } - - void on( const packed_transaction_ptr& p ); - - void on_write( boost::system::error_code ec, std::size_t bytes_transferred ) { - boost::ignore_unused(bytes_transferred); - verify_strand_in_this_thread(_strand, __func__, __LINE__); - if( ec ) { - _ws->next_layer().close(); - return on_fail( ec, "write" ); - } - _state = idle_state; - _out_buffer.resize(0); - maybe_send_next_message(); - } - - void status( const string& msg ) { - // ilog( "${remote_peer}: ${msg}", ("remote_peer",fc::variant(_remote_peer_id).as_string().substr(3,5) )("msg",msg) ); - } - - const fc::variant_object& get_logger_variant() { - if (!_logger_variant) { - boost::system::error_code ec; - auto rep = _ws->lowest_layer().remote_endpoint(ec); - string ip = ec ? "" : rep.address().to_string(); - string port = ec ? "" : std::to_string(rep.port()); - - auto lep = _ws->lowest_layer().local_endpoint(ec); - string lip = ec ? "" : lep.address().to_string(); - string lport = ec ? 
"" : std::to_string(lep.port()); - - _logger_variant.emplace(fc::mutable_variant_object() - ("_name", _peer) - ("_id", _remote_peer_id) - ("_ip", ip) - ("_port", port) - ("_lip", lip) - ("_lport", lport) - ); - } - return *_logger_variant; - } - }; - - - /** - * Accepts incoming connections and launches the sessions - */ - class listener : public std::enable_shared_from_this { - private: - tcp::acceptor _acceptor; - tcp::socket _socket; - bnet_ptr _net_plugin; - - public: - listener( boost::asio::io_context& ioc, tcp::endpoint endpoint, bnet_ptr np ) - :_acceptor(ioc), _socket(ioc), _net_plugin(std::move(np)) - { - boost::system::error_code ec; - - _acceptor.open( endpoint.protocol(), ec ); - if( ec ) { on_fail( ec, "open" ); return; } - - _acceptor.set_option( boost::asio::socket_base::reuse_address(true) ); - - _acceptor.bind( endpoint, ec ); - if( ec ) { on_fail( ec, "bind" ); return; } - - _acceptor.listen( boost::asio::socket_base::max_listen_connections, ec ); - if( ec ) on_fail( ec, "listen" ); - } - - void run() { - EOS_ASSERT( _acceptor.is_open(), plugin_exception, "unable top open listen socket" ); - do_accept(); - } - - void do_accept() { - _acceptor.async_accept( _socket, [self=shared_from_this()]( auto ec ){ self->on_accept(ec); } ); - } - - void on_fail( boost::system::error_code ec, const char* what ) { - elog( "${w}: ${m}", ("w", what)("m", ec.message() ) ); - } - - void on_accept( boost::system::error_code ec ); - }; - - - class bnet_plugin_impl : public std::enable_shared_from_this { - public: - bnet_plugin_impl() = default; - - const private_key_type _peer_pk = fc::crypto::private_key::generate(); /// one time random key to identify this process - public_key_type _peer_id = _peer_pk.get_public_key(); - string _bnet_endpoint_address = "0.0.0.0"; - uint16_t _bnet_endpoint_port = 4321; - bool _request_trx = true; - bool _follow_irreversible = false; - - std::vector _connect_to_peers; /// list of peers to connect to - std::vector _socket_threads; - int32_t _num_threads = 1; - - std::unique_ptr _ioc; // lifetime guarded by shared_ptr of bnet_plugin_impl - std::shared_ptr _listener; - std::shared_ptr _timer; // only access on app io_service - std::map > _sessions; // only access on app io_service - - channels::irreversible_block::channel_type::handle _on_irb_handle; - channels::accepted_block::channel_type::handle _on_accepted_block_handle; - channels::accepted_block_header::channel_type::handle _on_accepted_block_header_handle; - channels::rejected_block::channel_type::handle _on_bad_block_handle; - channels::accepted_transaction::channel_type::handle _on_appled_trx_handle; - - void async_add_session( std::weak_ptr wp ) { - app().post(priority::low, [wp,this]{ - if( auto l = wp.lock() ) { - _sessions[l.get()] = wp; - } - }); - } - - void on_session_close( const session* s ) { - auto itr = _sessions.find(s); - if( _sessions.end() != itr ) - _sessions.erase(itr); - } - - template - void for_each_session( Call callback ) { - app().post(priority::low, [this, callback = callback] { - for (const auto& item : _sessions) { - if (auto ses = item.second.lock()) { - ses->_ios.post(boost::asio::bind_executor( - ses->_strand, - [ses, cb = callback]() { cb(ses); } - )); - } - } - }); - } - - void on_accepted_transaction( transaction_metadata_ptr trx ) { - if( trx->implicit || trx->scheduled ) return; - for_each_session( [trx]( auto ses ){ ses->on_accepted_transaction( trx ); } ); - } - - /** - * Notify all active connection of the new irreversible block so they - * can purge their block 
cache - */ - void on_irreversible_block( block_state_ptr s ) { - for_each_session( [s]( auto ses ){ ses->on_new_lib( s ); } ); - } - - /** - * Notify all active connections of the new accepted block so - * they can relay it. This method also pre-packages the block - * as a packed bnet_message so the connections can simply relay - * it on. - */ - void on_accepted_block( block_state_ptr s ) { - _ioc->post( [s,this] { /// post this to the thread pool because packing can be intensive - for_each_session( [s]( auto ses ){ ses->on_accepted_block( s ); } ); - }); - } - - void on_accepted_block_header( block_state_ptr s ) { - _ioc->post( [s,this] { /// post this to the thread pool because packing can be intensive - for_each_session( [s]( auto ses ){ ses->on_accepted_block_header( s ); } ); - }); - } - - /** - * We received a bad block which either - * 1. didn't link to known chain - * 2. violated the consensus rules - * - * Any peer which sent us this block (not noticed) - * should be disconnected as they are objectively bad - */ - void on_bad_block( signed_block_ptr s ) { - for_each_session( [s]( auto ses ) { ses->on_bad_block(s); } ); - }; - - void on_reconnect_peers() { - for( const auto& peer : _connect_to_peers ) { - bool found = false; - for( const auto& con : _sessions ) { - auto ses = con.second.lock(); - if( ses && (ses->_peer == peer) ) { - found = true; - break; - } - } - - if( !found ) { - wlog( "attempt to connect to ${p}", ("p",peer) ); - auto s = std::make_shared( *_ioc, shared_from_this() ); - s->_local_peer_id = _peer_id; - _sessions[s.get()] = s; - s->run( peer ); - } - } - - start_reconnect_timer(); - } - - - void start_reconnect_timer() { - /// add some random delay so that all my peers don't attempt to reconnect to me - /// at the same time after shutting down.. - _timer->expires_from_now( boost::posix_time::microseconds( 1000000*(10+rand()%5) ) ); - _timer->async_wait(app().get_priority_queue().wrap(priority::low, [=](const boost::system::error_code& ec) { - if( ec ) { return; } - on_reconnect_peers(); - })); - } - }; - - - void listener::on_accept( boost::system::error_code ec ) { - if( ec ) { - if( ec == boost::system::errc::too_many_files_open ) - do_accept(); - return; - } - std::shared_ptr newsession; - try { - newsession = std::make_shared( move( _socket ), _net_plugin ); - } - catch( std::exception& e ) { - //making a session creates an instance of std::random_device which may open /dev/urandom - // for example. 
Unfortuately the only defined error is a std::exception derivative - _socket.close(); - } - if( newsession ) { - _net_plugin->async_add_session( newsession ); - newsession->_local_peer_id = _net_plugin->_peer_id; - newsession->run(); - } - do_accept(); - } - - - bnet_plugin::bnet_plugin() - :my(std::make_shared()) { - } - - bnet_plugin::~bnet_plugin() { - } - - void bnet_plugin::set_program_options(options_description& cli, options_description& cfg) { - cfg.add_options() - ("bnet-endpoint", bpo::value()->default_value("0.0.0.0:4321"), "the endpoint upon which to listen for incoming connections" ) - ("bnet-follow-irreversible", bpo::value()->default_value(false), "this peer will request only irreversible blocks from other nodes" ) - ("bnet-threads", bpo::value(), "the number of threads to use to process network messages" ) - ("bnet-connect", bpo::value>()->composing(), "remote endpoint of other node to connect to; Use multiple bnet-connect options as needed to compose a network" ) - ("bnet-no-trx", bpo::bool_switch()->default_value(false), "this peer will request no pending transactions from other nodes" ) - ("bnet-peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ), - "The string used to format peers when logging messages about them. Variables are escaped with ${}.\n" - "Available Variables:\n" - " _name \tself-reported name\n\n" - " _id \tself-reported ID (Public Key)\n\n" - " _ip \tremote IP address of peer\n\n" - " _port \tremote port number of peer\n\n" - " _lip \tlocal IP address connected to peer\n\n" - " _lport \tlocal port number connected to peer\n\n") - ; - } - - void bnet_plugin::plugin_initialize(const variables_map& options) { - ilog( "Initialize bnet plugin" ); - - try { - peer_log_format = options.at( "bnet-peer-log-format" ).as(); - - if( options.count( "bnet-endpoint" )) { - auto ip_port = options.at( "bnet-endpoint" ).as(); - - //auto host = boost::asio::ip::host_name(ip_port); - auto port = ip_port.substr( ip_port.find( ':' ) + 1, ip_port.size()); - auto host = ip_port.substr( 0, ip_port.find( ':' )); - my->_bnet_endpoint_address = host; - my->_bnet_endpoint_port = std::stoi( port ); - idump((ip_port)( host )( port )( my->_follow_irreversible )); - } - if( options.count( "bnet-follow-irreversible" )) { - my->_follow_irreversible = options.at( "bnet-follow-irreversible" ).as(); - } - - - if( options.count( "bnet-connect" )) { - my->_connect_to_peers = options.at( "bnet-connect" ).as>(); - } - if( options.count( "bnet-threads" )) { - my->_num_threads = options.at( "bnet-threads" ).as(); - if( my->_num_threads > 8 ) - my->_num_threads = 8; - } - my->_request_trx = !options.at( "bnet-no-trx" ).as(); - - } FC_LOG_AND_RETHROW() - } - - void bnet_plugin::plugin_startup() { - handle_sighup(); // Sets logger - - wlog( "bnet startup " ); - - auto& chain = app().get_plugin().chain(); - FC_ASSERT ( chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, "bnet is not compatible with \"irreversible\" read_mode"); - - my->_on_appled_trx_handle = app().get_channel() - .subscribe( [this]( transaction_metadata_ptr t ){ - my->on_accepted_transaction(t); - }); - - my->_on_irb_handle = app().get_channel() - .subscribe( [this]( block_state_ptr s ){ - my->on_irreversible_block(s); - }); - - my->_on_accepted_block_handle = app().get_channel() - .subscribe( [this]( block_state_ptr s ){ - my->on_accepted_block(s); - }); - - my->_on_accepted_block_header_handle = app().get_channel() - .subscribe( [this]( block_state_ptr s ){ - my->on_accepted_block_header(s); - 
}); - - my->_on_bad_block_handle = app().get_channel() - .subscribe( [this]( signed_block_ptr b ){ - my->on_bad_block(b); - }); - - - if( app().get_plugin().chain().get_read_mode() == chain::db_read_mode::READ_ONLY ) { - if (my->_request_trx) { - my->_request_trx = false; - ilog( "forced bnet-no-trx to true since in read-only mode" ); - } - } - - const auto address = boost::asio::ip::make_address( my->_bnet_endpoint_address ); - my->_ioc.reset( new boost::asio::io_context{my->_num_threads} ); - - - auto& ioc = *my->_ioc; - my->_timer = std::make_shared( app().get_io_service() ); - - my->start_reconnect_timer(); - - my->_listener = std::make_shared( ioc, - tcp::endpoint{ address, my->_bnet_endpoint_port }, - my ); - my->_listener->run(); - - my->_socket_threads.reserve( my->_num_threads ); - for( auto i = 0; i < my->_num_threads; ++i ) { - my->_socket_threads.emplace_back( [&ioc]{ wlog( "start thread" ); ioc.run(); wlog( "end thread" ); } ); - } - - for( const auto& peer : my->_connect_to_peers ) { - auto s = std::make_shared( ioc, my ); - s->_local_peer_id = my->_peer_id; - my->_sessions[s.get()] = s; - s->run( peer ); - } - } - - void bnet_plugin::plugin_shutdown() { - try { - my->_timer->cancel(); - my->_timer.reset(); - } catch ( ... ) { - elog( "exception thrown on timer shutdown" ); - } - - /// shut down all threads and close all connections - - my->for_each_session([](auto ses){ - ses->do_goodbye( "shutting down" ); - }); - - my->_listener.reset(); - my->_ioc->stop(); - - wlog( "joining bnet threads" ); - for( auto& t : my->_socket_threads ) { - t.join(); - } - wlog( "done joining threads" ); - - my->for_each_session([](auto ses){ - EOS_ASSERT( false, plugin_exception, "session ${ses} still active", ("ses", ses->_session_num) ); - }); - - // lifetime of _ioc is guarded by shared_ptr of bnet_plugin_impl - } - - void bnet_plugin::handle_sighup() { - if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end()) - plugin_logger = fc::get_logger_map()[logger_name]; - } - - - session::~session() { - wlog( "close session ${n}",("n",_session_num) ); - std::weak_ptr netp = _net_plugin; - app().post(priority::low, [netp,ses=this]{ - if( auto net = netp.lock() ) - net->on_session_close(ses); - }); - } - - void session::do_hello() { - /// TODO: find more effecient way to move large array of ids in event of fork - async_get_pending_block_ids( [self = shared_from_this() ]( const vector& ids, uint32_t lib ){ - hello hello_msg; - hello_msg.peer_id = self->_local_peer_id; - hello_msg.last_irr_block_num = lib; - hello_msg.pending_block_ids = ids; - hello_msg.request_transactions = self->_net_plugin->_request_trx; - hello_msg.chain_id = app().get_plugin().get_chain_id(); // TODO: Quick fix in a rush. Maybe a better solution is needed. - - self->_local_lib = lib; - if ( self->_net_plugin->_follow_irreversible ) { - self->send( hello_msg, hello_extension(hello_extension_irreversible_only()) ); - } else { - self->send( hello_msg ); - } - self->_sent_remote_hello = true; - }); - } - - void session::check_for_redundant_connection() { - app().post(priority::low, [self=shared_from_this()]{ - self->_net_plugin->for_each_session( [self]( auto ses ){ - if( ses != self && ses->_remote_peer_id == self->_remote_peer_id ) { - self->do_goodbye( "redundant connection" ); - } - }); - }); - } - - void session::on( const hello& hi, fc::datastream& ds ) { - peer_ilog(this, "received hello"); - _recv_remote_hello = true; - - if( hi.chain_id != app().get_plugin().get_chain_id() ) { // TODO: Quick fix in a rush. 
Maybe a better solution is needed. - peer_elog(this, "bad hello : wrong chain id"); - return do_goodbye( "disconnecting due to wrong chain id" ); - } - - if( hi.peer_id == _local_peer_id ) { - return do_goodbye( "connected to self" ); - } - - if ( _net_plugin->_follow_irreversible && hi.protocol_version <= "1.0.0") { - return do_goodbye( "need newer protocol version that supports sending only irreversible blocks" ); - } - - if ( hi.protocol_version >= "1.0.1" ) { - //optional extensions - while ( 0 < ds.remaining() ) { - unsigned_int size; - fc::raw::unpack( ds, size ); // next extension size - auto ex_start = ds.pos(); - fc::datastream dsw( ex_start, size ); - unsigned_int wich; - fc::raw::unpack( dsw, wich ); - hello_extension ex; - if ( wich < ex.count() ) { //know extension - fc::datastream dsx( ex_start, size ); //unpack needs to read static_variant _tag again - fc::raw::unpack( dsx, ex ); - if ( ex.which() == hello_extension::tag::value ) { - _remote_request_irreversible_only = true; - } - } else { - //unsupported extension, we just ignore it - //another side does know our protocol version, i.e. it know which extensions we support - //so, it some extensions were crucial, another side will close the connection - } - ds.skip(size); //move to next extension - } - } - - _last_sent_block_num = hi.last_irr_block_num; - _remote_request_trx = hi.request_transactions; - _remote_peer_id = hi.peer_id; - _remote_lib = hi.last_irr_block_num; - - for( const auto& id : hi.pending_block_ids ) - mark_block_status( id, true, false ); - - check_for_redundant_connection(); - - } - - void session::on( const packed_transaction_ptr& p ) { - peer_ilog(this, "received packed_transaction_ptr"); - if (!p) { - peer_elog(this, "bad packed_transaction_ptr : null pointer"); - EOS_THROW(transaction_exception, "bad transaction"); - } - if( !_net_plugin->_request_trx ) - return; - - // ilog( "recv trx ${n}", ("n", id) ); - if( p->expiration() < fc::time_point::now() ) return; - - const auto& id = p->id(); - - if( mark_transaction_known_by_peer( id ) ) - return; - - auto ptr = std::make_shared(p); - - app().get_channel().publish(priority::low, ptr); - } -} /// namespace eosio diff --git a/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp b/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp deleted file mode 100644 index 5874f2a28ba..00000000000 --- a/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp +++ /dev/null @@ -1,55 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ -#pragma once -#include - -#include - -namespace fc { class variant; } - -namespace eosio { - using chain::transaction_id_type; - using std::shared_ptr; - using namespace appbase; - using chain::name; - using fc::optional; - using chain::uint128_t; - - typedef shared_ptr bnet_ptr; - typedef shared_ptr bnet_const_ptr; - - - -/** - * This plugin tracks all actions and keys associated with a set of configured accounts. It enables - * wallets to paginate queries for bnet. 
- * - * An action will be included in the account's bnet if any of the following: - * - receiver - * - any account named in auth list - * - * A key will be linked to an account if the key is referneced in authorities of updateauth or newaccount - */ -class bnet_plugin : public plugin { - public: - APPBASE_PLUGIN_REQUIRES((chain_plugin)) - - bnet_plugin(); - virtual ~bnet_plugin(); - - virtual void set_program_options(options_description& cli, options_description& cfg) override; - - void plugin_initialize(const variables_map& options); - void plugin_startup(); - void plugin_shutdown(); - void handle_sighup() override; - - private: - bnet_ptr my; -}; - -} /// namespace eosio - - diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 8b9fd3f843c..3a591eedbb5 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -28,10 +28,10 @@ chain_api_plugin::~chain_api_plugin(){} void chain_api_plugin::set_program_options(options_description&, options_description&) {} void chain_api_plugin::plugin_initialize(const variables_map&) {} -struct async_result_visitor : public fc::visitor { +struct async_result_visitor : public fc::visitor { template - std::string operator()(const T& v) const { - return fc::json::to_string(v); + fc::variant operator()(const T& v) const { + return fc::variant(v); } }; @@ -41,8 +41,8 @@ struct async_result_visitor : public fc::visitor { api_handle.validate(); \ try { \ if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ + fc::variant result( api_handle.call_name(fc::json::from_string(body).as()) ); \ + cb(http_response_code, std::move(result)); \ } catch (...) 
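// Aside (not part of the patch): the macro above now builds an fc::variant and
// moves it into the callback; rendering to JSON happens later on http_plugin's
// thread pool (see the url_response_callback change in http_plugin.hpp further
// below). A minimal sketch of a handler under the new signature; handle_get_info
// and the explicit parameter spelling are illustrative assumptions, not code
// from this patch:
void handle_get_info( eosio::chain_apis::read_only& api, std::string body,
                      eosio::url_response_callback cb ) {
   if( body.empty() ) body = "{}";
   auto params = fc::json::from_string( body ).as<eosio::chain_apis::read_only::get_info_params>();
   fc::variant result( api.get_info( params ) );
   cb( 200, std::move( result ) );  // serialized to JSON off the main thread
}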
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ @@ -84,6 +84,7 @@ void chain_api_plugin::plugin_startup() { _http_plugin.add_api({ CHAIN_RO_CALL(get_info, 200l), + CHAIN_RO_CALL(get_activated_protocol_features, 200), CHAIN_RO_CALL(get_block, 200), CHAIN_RO_CALL(get_block_header_state, 200), CHAIN_RO_CALL(get_account, 200), @@ -105,7 +106,8 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_transaction_id, 200), CHAIN_RW_CALL_ASYNC(push_block, chain_apis::read_write::push_block_results, 202), CHAIN_RW_CALL_ASYNC(push_transaction, chain_apis::read_write::push_transaction_results, 202), - CHAIN_RW_CALL_ASYNC(push_transactions, chain_apis::read_write::push_transactions_results, 202) + CHAIN_RW_CALL_ASYNC(push_transactions, chain_apis::read_write::push_transactions_results, 202), + CHAIN_RW_CALL_ASYNC(send_transaction, chain_apis::read_write::send_transaction_results, 202) }); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 2551548f294..85dc3a7f6bf 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include @@ -199,6 +199,7 @@ chain_plugin::chain_plugin() :my(new chain_plugin_impl()) { app().register_config_type(); app().register_config_type(); + app().register_config_type(); } chain_plugin::~chain_plugin(){} @@ -208,6 +209,8 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip cfg.add_options() ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the blocks directory (absolute path or relative to application data dir)") + ("protocol-features-dir", bpo::value()->default_value("protocol_features"), + "the location of the protocol_features directory (absolute path or relative to application config dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") ("wasm-runtime", bpo::value()->value_name("wavm/wabt"), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), @@ -237,12 +240,12 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("sender-bypass-whiteblacklist", boost::program_options::value>()->composing()->multitoken(), "Deferred transactions sent by accounts in this list do not have any of the subjective whitelist/blacklist checks applied to them (may specify multiple times)") ("read-mode", boost::program_options::value()->default_value(eosio::chain::db_read_mode::SPECULATIVE), - "Database read mode (\"speculative\", \"head\", or \"read-only\").\n"// or \"irreversible\").\n" + "Database read mode (\"speculative\", \"head\", \"read-only\", \"irreversible\").\n" "In \"speculative\" mode database contains changes done up to the head block plus changes made by transactions not yet included to the blockchain.\n" "In \"head\" mode database contains changes done up to the current head block.\n" - "In \"read-only\" mode database contains incoming block changes but no speculative transaction processing.\n" + "In \"read-only\" mode database contains changes done up to the current head block and transactions cannot be pushed to the chain API.\n" + "In \"irreversible\" mode database contains changes done up to the last irreversible block and transactions cannot be pushed to the chain API.\n" ) - //"In \"irreversible\" mode database contains changes done up the current 
irreversible block.\n") ("validation-mode", boost::program_options::value()->default_value(eosio::chain::validation_mode::FULL), "Chain validation mode (\"full\" or \"light\").\n" "In \"full\" mode all incoming blocks will be fully validated.\n" @@ -250,6 +253,19 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("disable-ram-billing-notify-checks", bpo::bool_switch()->default_value(false), "Disable the check which subjectively fails a transaction if a contract bills more RAM to another account within the context of a notification handler (i.e. when the receiver is not the code of the action).") ("trusted-producer", bpo::value>()->composing(), "Indicate a producer whose blocks headers signed by it will be fully validated, but transactions in those validated blocks will be trusted.") + ("database-map-mode", bpo::value()->default_value(chainbase::pinnable_mapped_file::map_mode::mapped), + "Database map mode (\"mapped\", \"heap\", or \"locked\").\n" + "In \"mapped\" mode database is memory mapped as a file.\n" + "In \"heap\" mode database is preloaded into swappable memory.\n" +#ifdef __linux__ + "In \"locked\" mode database is preloaded, locked into memory, and optionally can use huge pages.\n" +#else + "In \"locked\" mode database is preloaded and locked into memory.\n" +#endif + ) +#ifdef __linux__ + ("database-hugepage-path", bpo::value>()->composing(), "Optional path for database hugepages when in \"locked\" mode (may specify multiple times)") +#endif ; // TODO: rate limiting @@ -329,6 +345,213 @@ void clear_directory_contents( const fc::path& p ) { } } +void clear_chainbase_files( const fc::path& p ) { + if( !fc::is_directory( p ) ) + return; + + fc::remove( p / "shared_memory.bin" ); + fc::remove( p / "shared_memory.meta" ); +} + +optional read_builtin_protocol_feature( const fc::path& p ) { + try { + return fc::json::from_file( p ); + } catch( const fc::exception& e ) { + wlog( "problem encountered while reading '${path}':\n${details}", + ("path", p.generic_string())("details",e.to_detail_string()) ); + } catch( ...
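// Aside (not part of the patch): the three database-map-mode values added above
// correspond roughly to: "mapped" = mmap the chainbase file, "heap" = preload it
// into anonymous (swappable) memory, "locked" = preload and pin it so it cannot
// be swapped out. A Linux-only illustration of the "locked" idea using mmap and
// mlock; this is an analogy, not chainbase's pinnable_mapped_file implementation:
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
   int fd = open( "shared_memory.bin", O_RDONLY );
   if( fd < 0 ) { perror( "open" ); return 1; }
   const off_t len = lseek( fd, 0, SEEK_END );
   void* p = mmap( nullptr, (size_t)len, PROT_READ, MAP_PRIVATE, fd, 0 );
   if( p == MAP_FAILED ) { perror( "mmap" ); return 1; }
   if( mlock( p, (size_t)len ) != 0 )   // pin pages in RAM; the "locked" analogue
      perror( "mlock" );                // usually requires a raised RLIMIT_MEMLOCK
   /* ... use the mapping ... */
   munlock( p, (size_t)len );
   munmap( p, (size_t)len );
   close( fd );
   return 0;
}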
) { + dlog( "unknown problem encountered while reading '${path}'", + ("path", p.generic_string()) ); + } + return {}; +} + +protocol_feature_set initialize_protocol_features( const fc::path& p, bool populate_missing_builtins = true ) { + using boost::filesystem::directory_iterator; + + protocol_feature_set pfs; + + bool directory_exists = true; + + if( fc::exists( p ) ) { + EOS_ASSERT( fc::is_directory( p ), plugin_exception, + "Path to protocol-features is not a directory: ${path}", + ("path", p.generic_string()) + ); + } else { + if( populate_missing_builtins ) + bfs::create_directories( p ); + else + directory_exists = false; + } + + auto log_recognized_protocol_feature = []( const builtin_protocol_feature& f, const digest_type& feature_digest ) { + if( f.subjective_restrictions.enabled ) { + if( f.subjective_restrictions.preactivation_required ) { + if( f.subjective_restrictions.earliest_allowed_activation_time == time_point{} ) { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled with preactivation required", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ); + } else { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled with preactivation required and with an earliest allowed activation time of ${earliest_time}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("earliest_time", f.subjective_restrictions.earliest_allowed_activation_time) + ); + } + } else { + if( f.subjective_restrictions.earliest_allowed_activation_time == time_point{} ) { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled without activation restrictions", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ); + } else { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled without preactivation required but with an earliest allowed activation time of ${earliest_time}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("earliest_time", f.subjective_restrictions.earliest_allowed_activation_time) + ); + } + } + } else { + ilog( "Recognized builtin protocol feature '${codename}' (with digest of '${digest}') but support for it is not enabled", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ); + } + }; + + map found_builtin_protocol_features; + map > builtin_protocol_features_to_add; + // The bool in the pair is set to true if the builtin protocol feature has already been visited to add + map< builtin_protocol_feature_t, optional > visited_builtins; + + // Read all builtin protocol features + if( directory_exists ) { + for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) { + auto file_path = itr->path(); + if( !fc::is_regular_file( file_path ) || file_path.extension().generic_string().compare( ".json" ) != 0 ) + continue; + + auto f = read_builtin_protocol_feature( file_path ); + + if( !f ) continue; + + auto res = found_builtin_protocol_features.emplace( f->get_codename(), file_path ); + + EOS_ASSERT( res.second, plugin_exception, + "Builtin protocol feature '${codename}' was already included from a previous_file", + ("codename", builtin_protocol_feature_codename(f->get_codename())) + ("current_file", file_path.generic_string()) + ("previous_file", 
res.first->second.generic_string()) + ); + + const auto feature_digest = f->digest(); + + builtin_protocol_features_to_add.emplace( std::piecewise_construct, + std::forward_as_tuple( feature_digest ), + std::forward_as_tuple( *f, false ) ); + } + } + + // Add builtin protocol features to the protocol feature manager in the right order (to satisfy dependencies) + using itr_type = map>::iterator; + std::function add_protocol_feature = + [&pfs, &builtin_protocol_features_to_add, &visited_builtins, &log_recognized_protocol_feature, &add_protocol_feature]( const itr_type& itr ) -> void { + if( itr->second.second ) { + return; + } else { + itr->second.second = true; + visited_builtins.emplace( itr->second.first.get_codename(), itr->first ); + } + + for( const auto& d : itr->second.first.dependencies ) { + auto itr2 = builtin_protocol_features_to_add.find( d ); + if( itr2 != builtin_protocol_features_to_add.end() ) { + add_protocol_feature( itr2 ); + } + } + + pfs.add_feature( itr->second.first ); + + log_recognized_protocol_feature( itr->second.first, itr->first ); + }; + + for( auto itr = builtin_protocol_features_to_add.begin(); itr != builtin_protocol_features_to_add.end(); ++itr ) { + add_protocol_feature( itr ); + } + + auto output_protocol_feature = [&p]( const builtin_protocol_feature& f, const digest_type& feature_digest ) { + static constexpr int max_tries = 10; + + string filename( "BUILTIN-" ); + filename += builtin_protocol_feature_codename( f.get_codename() ); + filename += ".json"; + + auto file_path = p / filename; + + EOS_ASSERT( !fc::exists( file_path ), plugin_exception, + "Could not save builtin protocol feature with codename '${codename}' because a file at the following path already exists: ${path}", + ("codename", builtin_protocol_feature_codename( f.get_codename() )) + ("path", file_path.generic_string()) + ); + + if( fc::json::save_to_file( f, file_path ) ) { + ilog( "Saved default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("path", file_path.generic_string()) + ); + } else { + elog( "Error occurred while writing default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("path", file_path.generic_string()) + ); + } + }; + + std::function add_missing_builtins = + [&pfs, &visited_builtins, &output_protocol_feature, &log_recognized_protocol_feature, &add_missing_builtins, populate_missing_builtins] + ( builtin_protocol_feature_t codename ) -> digest_type { + auto res = visited_builtins.emplace( codename, optional() ); + if( !res.second ) { + EOS_ASSERT( res.first->second, protocol_feature_exception, + "invariant failure: cycle found in builtin protocol feature dependencies" + ); + return *res.first->second; + } + + auto f = protocol_feature_set::make_default_builtin_protocol_feature( codename, + [&add_missing_builtins]( builtin_protocol_feature_t d ) { + return add_missing_builtins( d ); + } ); + + if( !populate_missing_builtins ) + f.subjective_restrictions.enabled = false; + + const auto& pf = pfs.add_feature( f ); + res.first->second = pf.feature_digest; + + log_recognized_protocol_feature( f, pf.feature_digest ); + + if( populate_missing_builtins ) + output_protocol_feature( f, pf.feature_digest ); + + return pf.feature_digest; + }; + + for( const auto& p : 
builtin_protocol_feature_codenames ) { + auto itr = found_builtin_protocol_features.find( p.first ); + if( itr != found_builtin_protocol_features.end() ) continue; + + add_missing_builtins( p.first ); + } + + return pfs; +} + void chain_plugin::plugin_initialize(const variables_map& options) { ilog("initializing chain plugin"); @@ -379,6 +602,18 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->blocks_dir = bld; } + protocol_feature_set pfs; + { + fc::path protocol_features_dir; + auto pfd = options.at( "protocol-features-dir" ).as(); + if( pfd.is_relative()) + protocol_features_dir = app().config_dir() / pfd; + else + protocol_features_dir = pfd; + + pfs = initialize_protocol_features( protocol_features_dir ); + } + if( options.count("checkpoint") ) { auto cps = options.at("checkpoint").as>(); my->loaded_checkpoints.reserve(cps.size()); @@ -460,8 +695,13 @@ void chain_plugin::plugin_initialize(const variables_map& options) { p = bfs::current_path() / p; } - fc::json::save_to_file( gs, p, true ); - ilog( "Saved genesis JSON to '${path}'", ("path", p.generic_string())); + EOS_ASSERT( fc::json::save_to_file( gs, p, true ), + misc_exception, + "Error occurred while writing genesis JSON to '${path}'", + ("path", p.generic_string()) + ); + + ilog( "Saved genesis JSON to '${path}'", ("path", p.generic_string()) ); } EOS_THROW( extract_genesis_state_exception, "extracted genesis state from blocks.log" ); @@ -504,15 +744,13 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->blocks_dir / config::reversible_blocks_dir_name ); fc::copy( backup_dir / config::reversible_blocks_dir_name / "shared_memory.bin", my->chain_config->blocks_dir / config::reversible_blocks_dir_name / "shared_memory.bin" ); - fc::copy( backup_dir / config::reversible_blocks_dir_name / "shared_memory.meta", - my->chain_config->blocks_dir / config::reversible_blocks_dir_name / "shared_memory.meta" ); } } } else if( options.at( "replay-blockchain" ).as()) { ilog( "Replay requested: deleting state database" ); if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not work for a regular replay of the blockchain." 
); - clear_directory_contents( my->chain_config->state_dir ); + clear_chainbase_files( my->chain_config->state_dir ); if( options.at( "fix-reversible-blocks" ).as()) { if( !recover_reversible_blocks( my->chain_config->blocks_dir / config::reversible_blocks_dir_name, my->chain_config->reversible_cache_size )) { @@ -560,10 +798,25 @@ void chain_plugin::plugin_initialize(const variables_map& options) { }); infile.close(); - EOS_ASSERT( options.count( "genesis-json" ) == 0 && options.count( "genesis-timestamp" ) == 0, + EOS_ASSERT( options.count( "genesis-timestamp" ) == 0, plugin_config_exception, - "--snapshot is incompatible with --genesis-json and --genesis-timestamp as the snapshot contains genesis information"); + "--snapshot is incompatible with --genesis-timestamp as the snapshot contains genesis information"); + if( options.count( "genesis-json" )) { + auto genesis_path = options.at( "genesis-json" ).as(); + if( genesis_path.is_relative() ) { + genesis_path = bfs::current_path() / genesis_path; + } + EOS_ASSERT( fc::is_regular_file( genesis_path ), + plugin_config_exception, + "Specified genesis file '${genesis}' does not exist.", + ("genesis", genesis_path.generic_string())); + auto genesis_file = fc::json::from_file( genesis_path ).as(); + EOS_ASSERT( my->chain_config->genesis == genesis_file, plugin_config_exception, + "Genesis state provided via command line arguments does not match the existing genesis state in the snapshot. " + "It is not necessary to provide a genesis state argument when loading a snapshot." + ); + } auto shared_mem_path = my->chain_config->state_dir / "shared_memory.bin"; EOS_ASSERT( !fc::exists(shared_mem_path), plugin_config_exception, @@ -629,14 +882,19 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if ( options.count("read-mode") ) { my->chain_config->read_mode = options.at("read-mode").as(); - EOS_ASSERT( my->chain_config->read_mode != db_read_mode::IRREVERSIBLE, plugin_config_exception, "irreversible mode not currently supported." 
); } if ( options.count("validation-mode") ) { my->chain_config->block_validation_mode = options.at("validation-mode").as(); } - my->chain.emplace( *my->chain_config ); + my->chain_config->db_map_mode = options.at("database-map-mode").as(); +#ifdef __linux__ + if( options.count("database-hugepage-path") ) + my->chain_config->db_hugepage_paths = options.at("database-hugepage-path").as>(); +#endif + + my->chain.emplace( *my->chain_config, std::move(pfs) ); my->chain_id.emplace( my->chain->get_chain_id()); // set up method providers @@ -692,8 +950,8 @@ void chain_plugin::plugin_initialize(const variables_map& options) { } ); my->applied_transaction_connection = my->chain->applied_transaction.connect( - [this]( const transaction_trace_ptr& trace ) { - my->applied_transaction_channel.publish( priority::low, trace ); + [this]( std::tuple t ) { + my->applied_transaction_channel.publish( priority::low, std::get<0>(t) ); } ); my->chain->add_indices(); @@ -737,8 +995,8 @@ void chain_plugin::plugin_shutdown() { my->irreversible_block_connection.reset(); my->accepted_transaction_connection.reset(); my->applied_transaction_connection.reset(); - my->chain->get_thread_pool().stop(); - my->chain->get_thread_pool().join(); + if(app().is_quiting()) + my->chain->get_wasm_interface().indicate_shutting_down(); my->chain.reset(); } @@ -1024,12 +1282,12 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params return { itoh(static_cast(app().version())), db.get_chain_id(), - db.fork_db_head_block_num(), + db.head_block_num(), db.last_irreversible_block_num(), db.last_irreversible_block_id(), - db.fork_db_head_block_id(), - db.fork_db_head_block_time(), - db.fork_db_head_block_producer(), + db.head_block_id(), + db.head_block_time(), + db.head_block_producer(), rm.get_virtual_block_cpu_limit(), rm.get_virtual_block_net_limit(), rm.get_block_cpu_limit(), @@ -1037,7 +1295,77 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params //std::bitset<64>(db.get_dynamic_global_properties().recent_slots_filled).to_string(), //__builtin_popcountll(db.get_dynamic_global_properties().recent_slots_filled) / 64.0, app().version_string(), + db.fork_db_pending_head_block_num(), + db.fork_db_pending_head_block_id() + }; +} + +read_only::get_activated_protocol_features_results +read_only::get_activated_protocol_features( const read_only::get_activated_protocol_features_params& params )const { + read_only::get_activated_protocol_features_results result; + const auto& pfm = db.get_protocol_feature_manager(); + + uint32_t lower_bound_value = std::numeric_limits::lowest(); + uint32_t upper_bound_value = std::numeric_limits::max(); + + if( params.lower_bound ) { + lower_bound_value = *params.lower_bound; + } + + if( params.upper_bound ) { + upper_bound_value = *params.upper_bound; + } + + if( upper_bound_value < lower_bound_value ) + return result; + + auto walk_range = [&]( auto itr, auto end_itr, auto&& convert_iterator ) { + fc::mutable_variant_object mvo; + mvo( "activation_ordinal", 0 ); + mvo( "activation_block_num", 0 ); + + auto& activation_ordinal_value = mvo["activation_ordinal"]; + auto& activation_block_num_value = mvo["activation_block_num"]; + + auto cur_time = fc::time_point::now(); + auto end_time = cur_time + fc::microseconds(1000 * 10); /// 10ms max time + for( unsigned int count = 0; + cur_time <= end_time && count < params.limit && itr != end_itr; + ++itr, cur_time = fc::time_point::now() ) + { + const auto& conv_itr = convert_iterator( itr ); + 
activation_ordinal_value = conv_itr.activation_ordinal(); + activation_block_num_value = conv_itr.activation_block_num(); + + result.activated_protocol_features.emplace_back( conv_itr->to_variant( false, &mvo ) ); + ++count; + } + if( itr != end_itr ) { + result.more = convert_iterator( itr ).activation_ordinal() ; + } + }; + + auto get_next_if_not_end = [&pfm]( auto&& itr ) { + if( itr == pfm.cend() ) return itr; + + ++itr; + return itr; }; + + auto lower = ( params.search_by_block_num ? pfm.lower_bound( lower_bound_value ) + : pfm.at_activation_ordinal( lower_bound_value ) ); + + auto upper = ( params.search_by_block_num ? pfm.upper_bound( upper_bound_value ) + : get_next_if_not_end( pfm.at_activation_ordinal( upper_bound_value ) ) ); + + if( params.reverse ) { + walk_range( std::make_reverse_iterator(upper), std::make_reverse_iterator(lower), + []( auto&& ritr ) { return --(ritr.base()); } ); + } else { + walk_range( lower, upper, []( auto&& itr ) { return itr; } ); + } + + return result; } uint64_t read_only::get_table_index_name(const read_only::get_table_rows_params& p, bool& primary) { @@ -1091,7 +1419,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { try { return boost::lexical_cast(str.c_str(), str.size()); } catch( ... ) { } - + try { auto trimmed_str = str; boost::trim(trimmed_str); @@ -1105,7 +1433,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { return symb.value(); } catch( ... ) { } } - + try { return ( eosio::chain::string_to_symbol( 0, str.c_str() ) >> 8 ); } catch( ... ) { @@ -1496,14 +1824,24 @@ read_only::get_scheduled_transactions( const read_only::get_scheduled_transactio fc::variant read_only::get_block(const read_only::get_block_params& params) const { signed_block_ptr block; - EOS_ASSERT(!params.block_num_or_id.empty() && params.block_num_or_id.size() <= 64, chain::block_id_type_exception, "Invalid Block number or ID, must be greater than 0 and less than 64 characters" ); + optional block_num; + + EOS_ASSERT( !params.block_num_or_id.empty() && params.block_num_or_id.size() <= 64, + chain::block_id_type_exception, + "Invalid Block number or ID, must be greater than 0 and less than 64 characters" + ); + + try { + block_num = fc::to_uint64(params.block_num_or_id); + } catch( ...
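// Aside (not part of the patch): walk_range above is a time-boxed pagination
// loop. It emits at most params.limit entries, gives up after roughly 10ms of
// wall time, and records the next activation ordinal in result.more so a client
// can resume where the previous call stopped. The same pattern reduced to
// standard C++; all names here are illustrative:
#include <chrono>
#include <optional>
#include <vector>

struct page { std::vector<int> items; std::optional<size_t> more; };

page walk( const std::vector<int>& all, size_t lower_bound, unsigned limit ) {
   using clock = std::chrono::steady_clock;
   const auto deadline = clock::now() + std::chrono::milliseconds( 10 ); // same 10ms budget as above
   page result;
   size_t i = lower_bound;
   for( unsigned count = 0; i < all.size() && count < limit && clock::now() <= deadline; ++i, ++count )
      result.items.push_back( all[i] );
   if( i < all.size() )
      result.more = i; // cursor: the caller passes *more as the next lower_bound
   return result;
}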
) {} - } EOS_RETHROW_EXCEPTIONS(chain::block_id_type_exception, "Invalid block ID: ${block_num_or_id}", ("block_num_or_id", params.block_num_or_id)) + if( block_num.valid() ) { + block = db.fetch_block_by_number( *block_num ); + } else { + try { + block = db.fetch_block_by_id( fc::variant(params.block_num_or_id).as() ); + } EOS_RETHROW_EXCEPTIONS(chain::block_id_type_exception, "Invalid block ID: ${block_num_or_id}", ("block_num_or_id", params.block_num_or_id)) + } EOS_ASSERT( block, unknown_block_exception, "Could not find block: ${block}", ("block", params.block_num_or_id)); @@ -1551,7 +1889,6 @@ void read_write::push_block(read_write::push_block_params&& params, next_functio } void read_write::push_transaction(const read_write::push_transaction_params& params, next_function next) { - try { auto pretty_input = std::make_shared(); auto resolver = make_resolver(this, abi_serializer_max_time); @@ -1571,6 +1908,53 @@ void read_write::push_transaction(const read_write::push_transaction_params& par fc::variant output; try { output = db.to_variant_with_abi( *trx_trace_ptr, abi_serializer_max_time ); + + // Create map of (closest_unnotified_ancestor_action_ordinal, global_sequence) with action trace + std::map< std::pair, fc::mutable_variant_object > act_traces_map; + for( const auto& act_trace : output["action_traces"].get_array() ) { + if (act_trace["receipt"].is_null() && act_trace["except"].is_null()) continue; + auto closest_unnotified_ancestor_action_ordinal = + act_trace["closest_unnotified_ancestor_action_ordinal"].as().value; + auto global_sequence = act_trace["receipt"].is_null() ? + std::numeric_limits::max() : + act_trace["receipt"]["global_sequence"].as(); + act_traces_map.emplace( std::make_pair( closest_unnotified_ancestor_action_ordinal, + global_sequence ), + act_trace.get_object() ); + } + + std::function(uint32_t)> convert_act_trace_to_tree_struct = + [&](uint32_t closest_unnotified_ancestor_action_ordinal) { + vector restructured_act_traces; + auto it = act_traces_map.lower_bound( + std::make_pair( closest_unnotified_ancestor_action_ordinal, 0) + ); + for( ; + it != act_traces_map.end() && it->first.first == closest_unnotified_ancestor_action_ordinal; ++it ) + { + auto& act_trace_mvo = it->second; + + auto action_ordinal = act_trace_mvo["action_ordinal"].as().value; + act_trace_mvo["inline_traces"] = convert_act_trace_to_tree_struct(action_ordinal); + if (act_trace_mvo["receipt"].is_null()) { + act_trace_mvo["receipt"] = fc::mutable_variant_object() + ("abi_sequence", 0) + ("act_digest", digest_type::hash(trx_trace_ptr->action_traces[action_ordinal-1].act)) + ("auth_sequence", flat_map()) + ("code_sequence", 0) + ("global_sequence", 0) + ("receiver", act_trace_mvo["receiver"]) + ("recv_sequence", 0); + } + restructured_act_traces.push_back( std::move(act_trace_mvo) ); + } + return restructured_act_traces; + }; + + fc::mutable_variant_object output_mvo(output); + output_mvo["action_traces"] = convert_act_trace_to_tree_struct(0); + + output = output_mvo; } catch( chain::abi_exception& ) { output = *trx_trace_ptr; } @@ -1580,8 +1964,6 @@ void read_write::push_transaction(const read_write::push_transaction_params& par } CATCH_AND_CALL(next); } }); - - } catch ( boost::interprocess::bad_alloc& ) { chain_plugin::handle_db_exhaustion(); } CATCH_AND_CALL(next); @@ -1616,7 +1998,43 @@ void read_write::push_transactions(const read_write::push_transactions_params& p result->reserve(params.size()); push_recurse(this, 0, params_copy, result, next); + } catch ( 
boost::interprocess::bad_alloc& ) { + chain_plugin::handle_db_exhaustion(); + } CATCH_AND_CALL(next); +} + +void read_write::send_transaction(const read_write::send_transaction_params& params, next_function next) { + + try { + auto pretty_input = std::make_shared(); + auto resolver = make_resolver(this, abi_serializer_max_time); + transaction_metadata_ptr ptrx; + try { + abi_serializer::from_variant(params, *pretty_input, resolver, abi_serializer_max_time); + ptrx = std::make_shared( pretty_input ); + } EOS_RETHROW_EXCEPTIONS(chain::packed_transaction_type_exception, "Invalid packed transaction") + app().get_method()(ptrx, true, [this, next](const fc::static_variant& result) -> void{ + if (result.contains()) { + next(result.get()); + } else { + auto trx_trace_ptr = result.get(); + + try { + fc::variant output; + try { + output = db.to_variant_with_abi( *trx_trace_ptr, abi_serializer_max_time ); + } catch( chain::abi_exception& ) { + output = *trx_trace_ptr; + } + + const chain::transaction_id_type& id = trx_trace_ptr->id; + next(read_write::send_transaction_results{id, output}); + } CATCH_AND_CALL(next); + } + }); + } catch ( boost::interprocess::bad_alloc& ) { + chain_plugin::handle_db_exhaustion(); } CATCH_AND_CALL(next); } @@ -1638,17 +2056,19 @@ read_only::get_code_results read_only::get_code( const get_code_params& params ) get_code_results result; result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get( params.account_name ); + const auto& accnt_obj = d.get( params.account_name ); + const auto& accnt_metadata_obj = d.get( params.account_name ); EOS_ASSERT( params.code_as_wasm, unsupported_feature, "Returning WAST from get_code is no longer supported" ); - if( accnt.code.size() ) { - result.wasm = string(accnt.code.begin(), accnt.code.end()); - result.code_hash = accnt.code_version; + if( accnt_metadata_obj.code_hash != digest_type() ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_hash); + result.wasm = string(code_obj.code.begin(), code_obj.code.end()); + result.code_hash = code_obj.code_hash; } abi_def abi; - if( abi_serializer::to_abi(accnt.abi, abi) ) { + if( abi_serializer::to_abi(accnt_obj.abi, abi) ) { result.abi = std::move(abi); } @@ -1659,11 +2079,10 @@ read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_p get_code_hash_results result; result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get( params.account_name ); + const auto& accnt = d.get( params.account_name ); - if( accnt.code.size() ) { - result.code_hash = accnt.code_version; - } + if( accnt.code_hash != digest_type() ) + result.code_hash = accnt.code_hash; return result; } @@ -1673,9 +2092,13 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const g result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get(params.account_name); - result.wasm = blob{{accnt.code.begin(), accnt.code.end()}}; - result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}}; + const auto& accnt_obj = d.get(params.account_name); + const auto& accnt_metadata_obj = d.get(params.account_name); + if( accnt_metadata_obj.code_hash != digest_type() ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_hash); + result.wasm = blob{{code_obj.code.begin(), code_obj.code.end()}}; + } + result.abi = blob{{accnt_obj.abi.begin(), accnt_obj.abi.end()}}; return result; } @@ -1685,11 +2108,13 @@ read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& 
result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get(params.account_name); - result.abi_hash = fc::sha256::hash( accnt.abi.data(), accnt.abi.size() ); - result.code_hash = accnt.code_version; + const auto& accnt_obj = d.get(params.account_name); + const auto& accnt_metadata_obj = d.get(params.account_name); + result.abi_hash = fc::sha256::hash( accnt_obj.abi.data(), accnt_obj.abi.size() ); + if( accnt_metadata_obj.code_hash != digest_type() ) + result.code_hash = accnt_metadata_obj.code_hash; if( !params.abi_hash || *params.abi_hash != result.abi_hash ) - result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}}; + result.abi = blob{{accnt_obj.abi.begin(), accnt_obj.abi.end()}}; return result; } @@ -1706,11 +2131,12 @@ read_only::get_account_results read_only::get_account( const get_account_params& rm.get_account_limits( result.account_name, result.ram_quota, result.net_weight, result.cpu_weight ); - const auto& a = db.get_account(result.account_name); + const auto& accnt_obj = db.get_account( result.account_name ); + const auto& accnt_metadata_obj = db.db().get( result.account_name ); - result.privileged = a.privileged; - result.last_code_update = a.last_code_update; - result.created = a.creation_date; + result.privileged = accnt_metadata_obj.is_privileged(); + result.last_code_update = accnt_metadata_obj.last_code_update; + result.created = accnt_obj.creation_date; bool grelisted = db.is_resource_greylisted(result.account_name); result.net_limit = rm.get_account_net_limit_ex( result.account_name, !grelisted); diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 22d36b63f43..6c85c354481 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -104,9 +104,26 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; + optional fork_db_head_block_num; + optional fork_db_head_block_id; }; get_info_results get_info(const get_info_params&) const; + struct get_activated_protocol_features_params { + optional lower_bound; + optional upper_bound; + uint32_t limit = 10; + bool search_by_block_num = false; + bool reverse = false; + }; + + struct get_activated_protocol_features_results { + fc::variants activated_protocol_features; + optional more; + }; + + get_activated_protocol_features_results get_activated_protocol_features( const get_activated_protocol_features_params& params )const; + struct producer_info { name producer_name; }; @@ -583,6 +600,10 @@ class read_write { using push_transactions_results = vector; void push_transactions(const push_transactions_params& params, chain::plugin_interface::next_function next); + using send_transaction_params = push_transaction_params; + using send_transaction_results = push_transaction_results; + void send_transaction(const send_transaction_params& params, chain::plugin_interface::next_function next); + friend resolver_factory; }; @@ -708,7 +729,9 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, 
-(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(fork_db_head_block_num)(fork_db_head_block_id) ) +FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_params, (lower_bound)(upper_bound)(limit)(search_by_block_num)(reverse) ) +FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_results, (activated_protocol_features)(more) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) diff --git a/plugins/db_size_api_plugin/db_size_api_plugin.cpp b/plugins/db_size_api_plugin/db_size_api_plugin.cpp index 8eed8b388ed..8c6df9566fe 100644 --- a/plugins/db_size_api_plugin/db_size_api_plugin.cpp +++ b/plugins/db_size_api_plugin/db_size_api_plugin.cpp @@ -18,7 +18,7 @@ using namespace eosio; try { \ if (body.empty()) body = "{}"; \ INVOKE \ - cb(http_response_code, fc::json::to_string(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp b/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp index d6f8f53e7b3..32db7146f6d 100644 --- a/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp +++ b/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp @@ -60,7 +60,7 @@ using results_pair = std::pair; try { \ if (body.empty()) body = "{}"; \ const auto result = api_handle->invoke_cb(body); \ - response_cb(result.first, fc::json::to_string(result.second)); \ + response_cb(result.first, fc::variant(result.second)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, response_cb); \ } \ diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index f9030d8c91c..d76dd7fd44b 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -24,8 +24,8 @@ void history_api_plugin::plugin_initialize(const variables_map&) {} [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(200, fc::json::to_string(result)); \ + fc::variant result( api_handle.call_name(fc::json::from_string(body).as()) ); \ + cb(200, std::move(result)); \ } catch (...) 
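// Aside (not part of the patch): the push_transaction hunk in chain_plugin.cpp
// above rebuilds the pre-1.8 nested trace shape from the now-flat action trace
// list: traces are grouped by (closest_unnotified_ancestor_action_ordinal,
// global_sequence) and reattached as inline_traces. The core idea reduced to
// standard C++; types and names are illustrative:
#include <map>
#include <vector>

struct trace { unsigned ordinal; unsigned parent; std::vector<trace> inline_traces; };

std::vector<trace> build_tree( const std::multimap<unsigned, trace>& by_parent, unsigned parent ) {
   std::vector<trace> out;
   auto range = by_parent.equal_range( parent );  // children of this ancestor, in sequence order
   for( auto it = range.first; it != range.second; ++it ) {
      trace t = it->second;
      t.inline_traces = build_tree( by_parent, t.ordinal ); // recurse one level deeper
      out.push_back( std::move( t ) );
   }
   return out;
}
// build_tree( by_parent, 0 ) yields the top-level traces, since ordinal 0 marks "no ancestor".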
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 0be3d9f11ca..12cd3a5e731 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -148,34 +148,34 @@ namespace eosio { if (bypass_filter) { pass_on = true; } - if (filter_on.find({ act.receipt.receiver, 0, 0 }) != filter_on.end()) { + if (filter_on.find({ act.receiver, 0, 0 }) != filter_on.end()) { pass_on = true; } - if (filter_on.find({ act.receipt.receiver, act.act.name, 0 }) != filter_on.end()) { + if (filter_on.find({ act.receiver, act.act.name, 0 }) != filter_on.end()) { pass_on = true; } for (const auto& a : act.act.authorization) { - if (filter_on.find({ act.receipt.receiver, 0, a.actor }) != filter_on.end()) { + if (filter_on.find({ act.receiver, 0, a.actor }) != filter_on.end()) { pass_on = true; } - if (filter_on.find({ act.receipt.receiver, act.act.name, a.actor }) != filter_on.end()) { + if (filter_on.find({ act.receiver, act.act.name, a.actor }) != filter_on.end()) { pass_on = true; } } if (!pass_on) { return false; } - if (filter_out.find({ act.receipt.receiver, 0, 0 }) != filter_out.end()) { + if (filter_out.find({ act.receiver, 0, 0 }) != filter_out.end()) { return false; } - if (filter_out.find({ act.receipt.receiver, act.act.name, 0 }) != filter_out.end()) { + if (filter_out.find({ act.receiver, act.act.name, 0 }) != filter_out.end()) { return false; } for (const auto& a : act.act.authorization) { - if (filter_out.find({ act.receipt.receiver, 0, a.actor }) != filter_out.end()) { + if (filter_out.find({ act.receiver, 0, a.actor }) != filter_out.end()) { return false; } - if (filter_out.find({ act.receipt.receiver, act.act.name, a.actor }) != filter_out.end()) { + if (filter_out.find({ act.receiver, act.act.name, a.actor }) != filter_out.end()) { return false; } } @@ -186,17 +186,17 @@ namespace eosio { set account_set( const action_trace& act ) { set result; - result.insert( act.receipt.receiver ); + result.insert( act.receiver ); for( const auto& a : act.act.authorization ) { if( bypass_filter || - filter_on.find({ act.receipt.receiver, 0, 0}) != filter_on.end() || - filter_on.find({ act.receipt.receiver, 0, a.actor}) != filter_on.end() || - filter_on.find({ act.receipt.receiver, act.act.name, 0}) != filter_on.end() || - filter_on.find({ act.receipt.receiver, act.act.name, a.actor }) != filter_on.end() ) { - if ((filter_out.find({ act.receipt.receiver, 0, 0 }) == filter_out.end()) && - (filter_out.find({ act.receipt.receiver, 0, a.actor }) == filter_out.end()) && - (filter_out.find({ act.receipt.receiver, act.act.name, 0 }) == filter_out.end()) && - (filter_out.find({ act.receipt.receiver, act.act.name, a.actor }) == filter_out.end())) { + filter_on.find({ act.receiver, 0, 0}) != filter_on.end() || + filter_on.find({ act.receiver, 0, a.actor}) != filter_on.end() || + filter_on.find({ act.receiver, act.act.name, 0}) != filter_on.end() || + filter_on.find({ act.receiver, act.act.name, a.actor }) != filter_on.end() ) { + if ((filter_out.find({ act.receiver, 0, 0 }) == filter_out.end()) && + (filter_out.find({ act.receiver, 0, a.actor }) == filter_out.end()) && + (filter_out.find({ act.receiver, act.act.name, 0 }) == filter_out.end()) && + (filter_out.find({ act.receiver, act.act.name, a.actor }) == filter_out.end())) { result.insert( a.actor ); } } @@ -204,7 +204,7 @@ namespace eosio { return result; } - void record_account_action( account_name n, 
const base_action_trace& act ) { + void record_account_action( account_name n, const action_trace& act ) { auto& chain = chain_plug->chain(); chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) @@ -216,13 +216,11 @@ namespace eosio { if( itr->account == n ) asn = itr->account_sequence_num + 1; - //idump((n)(act.receipt.global_sequence)(asn)); const auto& a = db.create( [&]( auto& aho ) { aho.account = n; - aho.action_sequence_num = act.receipt.global_sequence; + aho.action_sequence_num = act.receipt->global_sequence; aho.account_sequence_num = asn; }); - //idump((a.account)(a.action_sequence_num)(a.action_sequence_num)); } void on_system_action( const action_trace& at ) { @@ -263,8 +261,8 @@ namespace eosio { aho.packed_action_trace.resize(ps); datastream ds( aho.packed_action_trace.data(), ps ); fc::raw::pack( ds, at ); - aho.action_sequence_num = at.receipt.global_sequence; - aho.block_num = chain.pending_block_state()->block_num; + aho.action_sequence_num = at.receipt->global_sequence; + aho.block_num = chain.head_block_num() + 1; aho.block_time = chain.pending_block_time(); aho.trx_id = at.trx_id; }); @@ -274,11 +272,8 @@ namespace eosio { record_account_action( a, at ); } } - if( at.receipt.receiver == chain::config::system_account_name ) + if( at.receiver == chain::config::system_account_name ) on_system_action( at ); - for( const auto& iline : at.inline_traces ) { - on_action_trace( iline ); - } } void on_applied_transaction( const transaction_trace_ptr& trace ) { @@ -286,6 +281,7 @@ namespace eosio { trace->receipt->status != transaction_receipt_header::soft_fail) ) return; for( const auto& atrace : trace->action_traces ) { + if( !atrace.receipt ) continue; on_action_trace( atrace ); } } @@ -348,15 +344,15 @@ namespace eosio { auto& chain = my->chain_plug->chain(); chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) - // TODO: Use separate chainbase database for managing the state of the history_plugin (or remove deprecated history_plugin entirely) + // TODO: Use separate chainbase database for managing the state of the history_plugin (or remove deprecated history_plugin entirely) db.add_index(); db.add_index(); db.add_index(); db.add_index(); my->applied_transaction_connection.emplace( - chain.applied_transaction.connect( [&]( const transaction_trace_ptr& p ) { - my->on_applied_transaction( p ); + chain.applied_transaction.connect( [&]( std::tuple t ) { + my->on_applied_transaction( std::get<0>(t) ); } )); } FC_LOG_AND_RETHROW() } @@ -494,14 +490,9 @@ namespace eosio { } auto blk = chain.fetch_block_by_number( result.block_num ); - if( blk == nullptr ) { // still in pending - auto blk_state = chain.pending_block_state(); - if( blk_state != nullptr ) { - blk = blk_state->block; - } - } - if( blk != nullptr ) { - for (const auto &receipt: blk->transactions) { + if( blk || chain.is_building_block() ) { + const vector& receipts = blk ? 
blk->transactions : chain.get_pending_trx_receipts(); + for (const auto &receipt: receipts) { if (receipt.trx.contains()) { auto &pt = receipt.trx.get(); if (pt.id() == result.id) { @@ -518,7 +509,7 @@ namespace eosio { break; } } - } + } } } else { auto blk = chain.fetch_block_by_number(*p.block_num_hint); diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 3bb5b530d5f..cb3b663f226 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,11 @@ namespace eosio { using std::shared_ptr; using websocketpp::connection_hdl; + enum https_ecdh_curve_t { + SECP384R1, + PRIME256V1 + }; + static http_plugin_defaults current_http_plugin_defaults; void http_plugin::set_defaults(const http_plugin_defaults config) { @@ -123,7 +129,6 @@ namespace eosio { using websocket_local_server_type = websocketpp::server; using websocket_server_tls_type = websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; - using io_work_t = boost::asio::executor_work_guard; static bool verbose_http_errors = false; @@ -140,15 +145,14 @@ namespace eosio { websocket_server_type server; uint16_t thread_pool_size = 2; - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; - std::atomic bytes_in_flight{0}; + optional thread_pool; + std::atomic bytes_in_flight{0}; size_t max_bytes_in_flight = 0; optional https_listen_endpoint; string https_cert_chain; string https_key; + https_ecdh_curve_t https_ecdh_curve = SECP384R1; websocket_server_tls_type https_server; @@ -193,7 +197,7 @@ namespace eosio { //going for the A+! Do a few more things on the native context to get ECDH in use - fc::ec_key ecdh = EC_KEY_new_by_curve_name(NID_secp384r1); + fc::ec_key ecdh = EC_KEY_new_by_curve_name(https_ecdh_curve == SECP384R1 ? NID_secp384r1 : NID_X9_62_prime256v1); if (!ecdh) EOS_THROW(chain::http_exception, "Failed to set NID_secp384r1"); if(SSL_CTX_set_tmp_ecdh(ctx->native_handle(), (EC_KEY*)ecdh) != 1) @@ -301,21 +305,23 @@ namespace eosio { con->defer_http_response(); bytes_in_flight += body.size(); app().post( appbase::priority::low, - [ioc = this->server_ioc, &bytes_in_flight = this->bytes_in_flight, handler_itr, + [&ioc = thread_pool->get_executor(), &bytes_in_flight = this->bytes_in_flight, handler_itr, resource{std::move( resource )}, body{std::move( body )}, con]() { try { - bytes_in_flight -= body.size(); handler_itr->second( resource, body, - [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, std::string response_body ) { - bytes_in_flight += response_body.size(); - boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() { - size_t body_size = response_body.size(); - con->set_body( std::move( response_body ) ); + [&ioc, &bytes_in_flight, con]( int code, fc::variant response_body ) { + boost::asio::post( ioc, [response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { + std::string json = fc::json::to_string( response_body ); + response_body.clear(); + const size_t json_size = json.size(); + bytes_in_flight += json_size; + con->set_body( std::move( json ) ); con->set_status( websocketpp::http::status_code::value( code ) ); con->send_http_response(); - bytes_in_flight -= body_size; + bytes_in_flight -= json_size; } ); }); + bytes_in_flight -= body.size(); } catch( ... 
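// Aside (not part of the patch): the reworked handler above keeps a running byte
// count across the thread hop. bytes_in_flight grows before work is posted and
// shrinks only after the JSON has been handed to the connection, so
// max_bytes_in_flight can bound memory no matter which thread currently owns
// the data. The accounting pattern in isolation; names are illustrative:
#include <atomic>
#include <cstddef>
#include <string>
#include <utility>

std::atomic<std::size_t> bytes_in_flight{ 0 };

template<typename Post>  // Post: any callable that runs a closure on the io thread
void queue_response( std::string json, Post post ) {
   const std::size_t n = json.size();
   bytes_in_flight += n;                      // account before crossing threads
   post( [json = std::move( json ), n]() {
      /* write json to the connection here */
      bytes_in_flight -= n;                   // release once the response is sent
   } );
}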
) { handle_exception( con ); con->send_http_response(); @@ -338,11 +344,11 @@ namespace eosio { void create_server_for_endpoint(const tcp::endpoint& ep, websocketpp::server>& ws) { try { ws.clear_access_channels(websocketpp::log::alevel::all); - ws.init_asio(&(*server_ioc)); + ws.init_asio( &thread_pool->get_executor() ); ws.set_reuse_addr(true); ws.set_max_http_body_size(max_body_size); // capture server_ioc shared_ptr in http handler to keep it alive while in use - ws.set_http_handler([&, ioc = this->server_ioc](connection_hdl hdl) { + ws.set_http_handler([&](connection_hdl hdl) { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ @@ -366,7 +372,9 @@ namespace eosio { return true; } - http_plugin::http_plugin():my(new http_plugin_impl()){} + http_plugin::http_plugin():my(new http_plugin_impl()){ + app().register_config_type(); + } http_plugin::~http_plugin(){} void http_plugin::set_program_options(options_description&, options_description& cfg) { @@ -394,6 +402,11 @@ namespace eosio { ("https-private-key-file", bpo::value(), "Filename with https private key in PEM format. Required for https") + ("https-ecdh-curve", bpo::value()->notifier([this](https_ecdh_curve_t c) { + my->https_ecdh_curve = c; + })->default_value(SECP384R1), + "Configure https ECDH curve to use: secp384r1 or prime256v1") + ("access-control-allow-origin", bpo::value()->notifier([this](const string& v) { my->access_control_allow_origin = v; ilog("configured http with Access-Control-Allow-Origin: ${o}", ("o", my->access_control_allow_origin)); @@ -516,12 +529,7 @@ namespace eosio { void http_plugin::plugin_startup() { - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); - } + my->thread_pool.emplace( "http", my->thread_pool_size ); if(my->listen_endpoint) { try { @@ -545,10 +553,10 @@ namespace eosio { if(my->unix_endpoint) { try { my->unix_server.clear_access_channels(websocketpp::log::alevel::all); - my->unix_server.init_asio(&(*my->server_ioc)); + my->unix_server.init_asio( &my->thread_pool->get_executor() ); my->unix_server.set_max_http_body_size(my->max_body_size); my->unix_server.listen(*my->unix_endpoint); - my->unix_server.set_http_handler([&, ioc = my->server_ioc](connection_hdl hdl) { + my->unix_server.set_http_handler([&, &ioc = my->thread_pool->get_executor()](connection_hdl hdl) { my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); }); my->unix_server.start_accept(); @@ -592,7 +600,7 @@ namespace eosio { try { if (body.empty()) body = "{}"; auto result = (*this).get_supported_apis(); - cb(200, fc::json::to_string(result)); + cb(200, fc::variant(result)); } catch (...) 
{ handle_exception("node", "get_supported_apis", body, cb); } @@ -608,12 +616,7 @@ namespace eosio { if(my->unix_server.is_listening()) my->unix_server.stop_listening(); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } } @@ -629,21 +632,21 @@ namespace eosio { throw; } catch (chain::unknown_block_exception& e) { error_results results{400, "Unknown Block", error_results::error_info(e, verbose_http_errors)}; - cb( 400, fc::json::to_string( results )); + cb( 400, fc::variant( results )); } catch (chain::unsatisfied_authorization& e) { error_results results{401, "UnAuthorized", error_results::error_info(e, verbose_http_errors)}; - cb( 401, fc::json::to_string( results )); + cb( 401, fc::variant( results )); } catch (chain::tx_duplicate& e) { error_results results{409, "Conflict", error_results::error_info(e, verbose_http_errors)}; - cb( 409, fc::json::to_string( results )); + cb( 409, fc::variant( results )); } catch (fc::eof_exception& e) { error_results results{422, "Unprocessable Entity", error_results::error_info(e, verbose_http_errors)}; - cb( 422, fc::json::to_string( results )); + cb( 422, fc::variant( results )); elog( "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name )); dlog("Bad arguments: ${args}", ("args", body)); } catch (fc::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(e, verbose_http_errors)}; - cb( 500, fc::json::to_string( results )); + cb( 500, fc::variant( results )); if (e.code() != chain::greylist_net_usage_exceeded::code_value && e.code() != chain::greylist_cpu_usage_exceeded::code_value) { elog( "FC Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name )); @@ -651,14 +654,14 @@ namespace eosio { } } catch (std::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors)}; - cb( 500, fc::json::to_string( results )); + cb( 500, fc::variant( results )); elog( "STD Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name )); dlog( "Exception Details: ${e}", ("e", e.what())); } catch (...) 
{ error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Exception" )), verbose_http_errors)}; - cb( 500, fc::json::to_string( results )); + cb( 500, fc::variant( results )); elog( "Unknown Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name )); } @@ -689,4 +692,26 @@ namespace eosio { return result; } + + std::istream& operator>>(std::istream& in, https_ecdh_curve_t& curve) { + std::string s; + in >> s; + if (s == "secp384r1") + curve = SECP384R1; + else if (s == "prime256v1") + curve = PRIME256V1; + else + in.setstate(std::ios_base::failbit); + return in; + } + + std::ostream& operator<<(std::ostream& osm, https_ecdh_curve_t curve) { + if (curve == SECP384R1) { + osm << "secp384r1"; + } else if (curve == PRIME256V1) { + osm << "prime256v1"; + } + + return osm; + } } diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index a522b2b1739..eaa132ce0e4 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -17,7 +17,7 @@ namespace eosio { * * Arguments: response_code, response_body */ - using url_response_callback = std::function; + using url_response_callback = std::function; /** * @brief Callback type for a URL handler diff --git a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp index 4664d1d378a..3f646237e9f 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp @@ -82,8 +82,7 @@ class local_connection : public lib::enable_shared_from_this { return socket::make_error_code(socket::error::invalid_state); } - m_socket = lib::make_shared( - lib::ref(*service)); + m_socket = lib::make_shared(*service); m_state = READY; @@ -266,8 +265,7 @@ class local_endpoint : public config::socket_type { m_alog->write(log::alevel::devel,"asio::init_asio"); m_io_service = ptr; - m_acceptor = lib::make_shared( - lib::ref(*m_io_service)); + m_acceptor = lib::make_shared(*m_io_service); m_state = READY; ec = lib::error_code(); diff --git a/plugins/login_plugin/login_plugin.cpp b/plugins/login_plugin/login_plugin.cpp index 0aeac67dce4..374a04b25dc 100644 --- a/plugins/login_plugin/login_plugin.cpp +++ b/plugins/login_plugin/login_plugin.cpp @@ -68,8 +68,8 @@ void login_plugin::plugin_initialize(const variables_map& options) { try { \ if (body.empty()) \ body = "{}"; \ - auto result = call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ + fc::variant result( call_name(fc::json::from_string(body).as()) ); \ + cb(http_response_code, std::move(result)); \ } catch (...) 
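// Aside (not part of the patch): the operator>>/operator<< pair defined above is
// what lets app().register_config_type<https_ecdh_curve_t>() work:
// boost::program_options parses option values through operator>> and prints the
// default through operator<<. The pattern in miniature, standalone and
// illustrative:
#include <iostream>
#include <sstream>
#include <string>

enum curve_t { SECP384R1, PRIME256V1 };

std::istream& operator>>( std::istream& in, curve_t& c ) {
   std::string s;
   in >> s;
   if( s == "secp384r1" )       c = SECP384R1;
   else if( s == "prime256v1" ) c = PRIME256V1;
   else in.setstate( std::ios_base::failbit ); // unknown value fails config parsing
   return in;
}

int main() {
   std::istringstream cfg( "prime256v1" );
   curve_t c;
   cfg >> c;
   std::cout << ( cfg && c == PRIME256V1 ) << "\n"; // prints 1
   return 0;
}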
{ \ http_plugin::handle_exception("login", #call_name, body, cb); \ } \ diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt index dc76525f3a2..29f45bd6808 100644 --- a/plugins/mongo_db_plugin/CMakeLists.txt +++ b/plugins/mongo_db_plugin/CMakeLists.txt @@ -4,46 +4,13 @@ if(BUILD_MONGO_DB_PLUGIN) if (libmongoc-1.0_FOUND) - # EOS has no direct dependencies on libmongoc but its shared libraries - # will need to be present at runtime for the C++ libraries we use: - # libbsoncxx & libmongocxx (both from github.com/mongodb/mongo-cxx-driver) - - # The *.cmake package files provided by mongo-cxx-driver don't give us the - # absolute path to the libraries, which is needed whenever they are not - # installed in system-known locations. CMake requires the absolute paths - # in target_link_libraries() since we are builiding an archive and the - # link step for all executables using this archive must include the - # mongo-cxx-driver libraries libmongocxx and libbsoncxx. - find_package(libbsoncxx-static REQUIRED) - message(STATUS "Found bsoncxx headers: ${LIBBSONCXX_STATIC_INCLUDE_DIRS}") - - # mongo-cxx-driver 3.2 release altered LIBBSONCXX_LIBRARIES semantics. Instead of library names, - # it now hold library paths. - if((LIBBSONCXX_STATIC_VERSION_MAJOR LESS 3) OR ((LIBBSONCXX_STATIC_VERSION_MAJOR EQUAL 3) AND (LIBBSONCXX_STATIC_VERSION_MINOR LESS 2))) - find_library(EOS_LIBBSONCXX ${LIBBSONCXX_STATIC_LIBRARIES} - PATHS ${LIBBSONCXX_STATIC_LIBRARY_DIRS} NO_DEFAULT_PATH) - else() - set(EOS_LIBBSONCXX ${LIBBSONCXX_STATIC_LIBRARIES}) - endif() - - message(STATUS "Found bsoncxx library: ${EOS_LIBBSONCXX}") - find_package(libmongocxx-static REQUIRED) - message(STATUS "Found mongocxx headers: ${LIBMONGOCXX_STATIC_INCLUDE_DIRS}") - - # mongo-cxx-driver 3.2 release altered LIBBSONCXX_LIBRARIES semantics. Instead of library names, - # it now hold library paths. - if((LIBMONGOCXX_STATIC_VERSION_MAJOR LESS 3) OR ((LIBMONGOCXX_STATIC_VERSION_MAJOR EQUAL 3) AND (LIBMONGOCXX_STATIC_VERSION_MINOR LESS 2))) - find_library(EOS_LIBMONGOCXX ${LIBMONGOCXX_STATIC_LIBRARIES} - PATHS ${LIBMONGOCXX_STATIC_LIBRARY_DIRS} NO_DEFAULT_PATH) - else() - set(EOS_LIBMONGOCXX ${LIBMONGOCXX_STATIC_LIBRARIES}) - endif() + find_package(libmongoc-static-1.0 REQUIRED) + find_package(libbson-static-1.0 REQUIRED) - message(STATUS "Found mongocxx library: ${EOS_LIBMONGOCXX}") else() - message("Could NOT find MongoDB. mongo_db_plugin with MongoDB support will not be included.") + message(FATAL_ERROR "Could NOT find mongo-c-driver. Disable mongo support or ensure mongo-c-driver and mongo-cxx-driver are built and installed") return() endif() @@ -62,11 +29,35 @@ if(BUILD_MONGO_DB_PLUGIN) PRIVATE ${LIBMONGOCXX_STATIC_DEFINITIONS} ${LIBBSONCXX_STATIC_DEFINITIONS} ) + # We can't just use *_STATIC_LIBRARIES variables to link against because the static + # variants of these may try to static link against libraries we don't want (like a system + # libc/c++). But we need to know if mongo c driver was built with ICU, SASL2, or snappy support + # so that we can continue to link to those.
This certainly is a bit on the fragile side but + # try to parse what is included in MONGOC_STATIC_LIBRARIES to see what we should link to + foreach(MONGO_S_LIB ${MONGOC_STATIC_LIBRARIES}) + string(REGEX MATCH "libsasl2\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB}) + if(REGOUT) + set(LINK_SASL "sasl2") + endif() + + string(REGEX MATCH "libicuuc\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB}) + if(REGOUT) + set(LINK_ICU "icuuc") + endif() + + string(REGEX MATCH "libsnappy\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB}) + if(REGOUT) + set(LINK_SNAPPY "snappy") + endif() + endforeach() + target_link_libraries(mongo_db_plugin PUBLIC chain_plugin eosio_chain appbase - ${EOS_LIBMONGOCXX} ${EOS_LIBBSONCXX} + ${LIBMONGOCXX_STATIC_LIBRARY_PATH} ${LIBBSONCXX_STATIC_LIBRARY_PATH} + ${MONGOC_STATIC_LIBRARY} ${BSON_STATIC_LIBRARY} + resolv ${LINK_SASL} ${LINK_ICU} ${LINK_SNAPPY} ) - + else() message("mongo_db_plugin not selected and will be omitted.") endif() diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index e4feea00187..e05f1a01815 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -117,6 +118,7 @@ class mongo_db_plugin_impl { void init(); void wipe_database(); + void create_expiration_index(mongocxx::collection& collection, uint32_t expire_after_seconds); template void queue(Queue& queue, const Entry& e); @@ -135,6 +137,7 @@ class mongo_db_plugin_impl { bool store_transactions = true; bool store_transaction_traces = true; bool store_action_traces = true; + uint32_t expire_after_seconds = 0; std::string db_name; mongocxx::instance mongo_inst; @@ -748,7 +751,7 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti const signed_transaction& trx = t->packed_trx->get_signed_transaction(); if( !filter_include( trx ) ) return; - + auto trans_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -827,22 +830,20 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces using namespace bsoncxx::types; using bsoncxx::builder::basic::kvp; - if( executed && atrace.receipt.receiver == chain::config::system_account_name ) { + if( executed && atrace.receiver == chain::config::system_account_name ) { update_account( atrace.act ); } bool added = false; const bool in_filter = (store_action_traces || store_transaction_traces) && start_block_reached && - filter_include( atrace.receipt.receiver, atrace.act.name, atrace.act.authorization ); + filter_include( atrace.receiver, atrace.act.name, atrace.act.authorization ); write_ttrace |= in_filter; if( start_block_reached && store_action_traces && in_filter ) { auto action_traces_doc = bsoncxx::builder::basic::document{}; - const chain::base_action_trace& base = atrace; // without inline action traces - // improve data distributivity when using mongodb sharding action_traces_doc.append( kvp( "_id", make_custom_oid() ) ); - auto v = to_variant_with_abi( base ); + auto v = to_variant_with_abi( atrace ); string json = fc::json::to_string( v ); try { const auto& value = bsoncxx::from_json( json ); @@ -868,10 +869,6 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces added = true; } - for( const auto& iline_atrace : atrace.inline_traces ) { - added |= add_action_trace( bulk_action_traces, iline_atrace, t, executed, now, write_ttrace ); - } - return added; } 
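The hunk above reads receiver directly off the trace and drops the recursive walk over atrace.inline_traces: on this branch a transaction trace already carries every action trace, inline actions included, as one flat vector. A minimal sketch of that consumption change, using hypothetical stand-in types rather than the real chain::action_trace:

   #include <functional>
   #include <string>
   #include <vector>

   // Stand-ins for chain::action_trace / chain::transaction_trace (hypothetical).
   struct action_trace {
      std::string receiver;                    // receiver now sits on the trace itself
   };
   struct transaction_trace {
      std::vector<action_trace> action_traces; // already flat, inline actions included
   };

   // Previously the plugin recursed into atrace.inline_traces; now a single
   // flat loop visits every action trace of the transaction.
   void for_each_action(const transaction_trace& t,
                        const std::function<void(const action_trace&)>& f) {
      for (const auto& at : t.action_traces)
         f(at);
   }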
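The next hunk wraps MongoDB TTL indexes in create_expiration_index(): mongod deletes a document once the current time exceeds its createdAt value plus expireAfterSeconds. A standalone sketch of the underlying mongocxx call, assuming a locally reachable mongod; the eosdemo/blocks names are made up for illustration:

   #include <chrono>
   #include <bsoncxx/builder/basic/document.hpp>
   #include <bsoncxx/builder/basic/kvp.hpp>
   #include <mongocxx/client.hpp>
   #include <mongocxx/instance.hpp>
   #include <mongocxx/options/index.hpp>
   #include <mongocxx/uri.hpp>

   int main() {
      mongocxx::instance inst{};                       // one driver instance per process
      mongocxx::client conn{mongocxx::uri{}};          // mongodb://localhost:27017
      auto blocks = conn["eosdemo"]["blocks"];         // hypothetical db/collection

      using bsoncxx::builder::basic::kvp;
      using bsoncxx::builder::basic::make_document;

      mongocxx::options::index opts{};
      opts.expire_after(std::chrono::seconds{86400});  // expireAfterSeconds = 1 day
      opts.background(true);                           // build without locking the collection

      // TTL applies per document, keyed off its createdAt field.
      blocks.create_index(make_document(kvp("createdAt", 1)), opts);
   }

Note that MongoDB will not alter expireAfterSeconds on an existing index, which is why the plugin code that follows drops a mismatched TTL index before recreating it.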
@@ -1396,6 +1393,39 @@ void mongo_db_plugin_impl::wipe_database() { ilog("done wipe_database"); } +void mongo_db_plugin_impl::create_expiration_index(mongocxx::collection& collection, uint32_t expire_after_seconds) { + using bsoncxx::builder::basic::make_document; + using bsoncxx::builder::basic::kvp; + + auto indexes = collection.indexes(); + for( auto& index : indexes.list()) { + auto key = index["key"]; + if( !key ) { + continue; + } + auto field = key["createdAt"]; + if( !field ) { + continue; + } + + auto ttl = index["expireAfterSeconds"]; + if( ttl && ttl.get_int32() == expire_after_seconds ) { + return; + } else { + auto name = index["name"].get_utf8(); + ilog( "mongo db drop ttl index for collection ${collection}", ( "collection", collection.name().to_string())); + indexes.drop_one( name.value ); + break; + } + } + + mongocxx::options::index index_options{}; + index_options.expire_after( std::chrono::seconds( expire_after_seconds )); + index_options.background( true ); + ilog( "mongo db create ttl index for collection ${collection}", ( "collection", collection.name().to_string())); + collection.create_index( make_document( kvp( "createdAt", 1 )), index_options ); +} + void mongo_db_plugin_impl::init() { using namespace bsoncxx::types; using bsoncxx::builder::basic::make_document; @@ -1426,51 +1456,76 @@ void mongo_db_plugin_impl::init() { } try { + // MongoDB administrators (to enable sharding) : + // 1. enableSharding database (default to EOS) + // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces + // 3. Compound index with shard key (default to _id below), to improve query performance. + // blocks indexes auto blocks = mongo_conn[db_name][blocks_col]; - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); auto block_states = mongo_conn[db_name][block_states_col]; - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); // accounts indexes - accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); + accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1, "_id" : 1 })xxx" )); // transactions indexes auto trans = mongo_conn[db_name][trans_col]; - trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); + trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1, "_id" : 1 })xxx" )); auto trans_trace = mongo_conn[db_name][trans_traces_col]; - trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" )); + trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1, "_id" : 1 })xxx" )); // action traces indexes auto action_traces = mongo_conn[db_name][action_traces_col]; - action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); // pub_keys indexes auto pub_keys = mongo_conn[db_name][pub_keys_col]; - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" )); - 
pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1, "_id" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1, "_id" : 1 })xxx" )); // account_controls indexes auto account_controls = mongo_conn[db_name][account_controls_col]; account_controls.create_index( - bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" )); - account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" )); + bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1, "_id" : 1 })xxx" )); + account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1, "_id" : 1 })xxx" )); } catch (...) { handle_mongo_exception( "create indexes", __LINE__ ); } } + + if( expire_after_seconds > 0 ) { + try { + mongocxx::collection block_states = mongo_conn[db_name][block_states_col]; + create_expiration_index( block_states, expire_after_seconds ); + mongocxx::collection blocks = mongo_conn[db_name][blocks_col]; + create_expiration_index( blocks, expire_after_seconds ); + mongocxx::collection trans = mongo_conn[db_name][trans_col]; + create_expiration_index( trans, expire_after_seconds ); + mongocxx::collection trans_traces = mongo_conn[db_name][trans_traces_col]; + create_expiration_index( trans_traces, expire_after_seconds ); + mongocxx::collection action_traces = mongo_conn[db_name][action_traces_col]; + create_expiration_index( action_traces, expire_after_seconds ); + } catch(...) { + handle_mongo_exception( "create expiration indexes", __LINE__ ); + } + } } catch (...) { handle_mongo_exception( "mongo init", __LINE__ ); } ilog("starting db plugin thread"); - consume_thread = std::thread([this] { consume_blocks(); }); + consume_thread = std::thread( [this] { + fc::set_os_thread_name( "mongodb" ); + consume_blocks(); + } ); startup = false; } @@ -1516,6 +1571,8 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc "Enables storing transaction traces in mongodb.") ("mongodb-store-action-traces", bpo::value()->default_value(true), "Enables storing action traces in mongodb.") + ("mongodb-expire-after-seconds", bpo::value()->default_value(0), + "Enables expiring data in mongodb after a specified number of seconds.") ("mongodb-filter-on", bpo::value>()->composing(), "Track actions which match receiver:action:actor. Receiver, Action, & Actor may be blank to include all. i.e. 
eosio:: or :transfer: Use * or leave unspecified to include all.") ("mongodb-filter-out", bpo::value>()->composing(), @@ -1573,6 +1630,9 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) if( options.count( "mongodb-store-action-traces" )) { my->store_action_traces = options.at( "mongodb-store-action-traces" ).as(); } + if( options.count( "mongodb-expire-after-seconds" )) { + my->expire_after_seconds = options.at( "mongodb-expire-after-seconds" ).as(); + } if( options.count( "mongodb-filter-on" )) { auto fo = options.at( "mongodb-filter-on" ).as>(); my->filter_on_star = false; @@ -1635,8 +1695,8 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) my->accepted_transaction( t ); } )); my->applied_transaction_connection.emplace( - chain.applied_transaction.connect( [&]( const chain::transaction_trace_ptr& t ) { - my->applied_transaction( t ); + chain.applied_transaction.connect( [&]( std::tuple t ) { + my->applied_transaction( std::get<0>(t) ); } )); if( my->wipe_database_on_startup ) { diff --git a/plugins/net_api_plugin/net_api_plugin.cpp b/plugins/net_api_plugin/net_api_plugin.cpp index 3b7327c4313..315ea2816e9 100644 --- a/plugins/net_api_plugin/net_api_plugin.cpp +++ b/plugins/net_api_plugin/net_api_plugin.cpp @@ -29,7 +29,7 @@ using namespace eosio; try { \ if (body.empty()) body = "{}"; \ INVOKE \ - cb(http_response_code, fc::json::to_string(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index fb86a8bcd01..106d6094fba 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -28,10 +30,6 @@ using namespace eosio::chain::plugin_interface::compat; -namespace fc { - extern std::unordered_map& get_logger_map(); -} - namespace eosio { static appbase::abstract_plugin& _net_plugin = app().register_plugin(); @@ -45,6 +43,7 @@ namespace eosio { using fc::time_point; using fc::time_point_sec; using eosio::chain::transaction_id_type; + using eosio::chain::sha256_less; class connection; @@ -67,14 +66,6 @@ namespace eosio { struct by_expiry; struct by_block_num; - struct sha256_less { - bool operator()( const sha256& lhs, const sha256& rhs ) const { - return - std::tie(lhs._hash[0], lhs._hash[1], lhs._hash[2], lhs._hash[3]) < - std::tie(rhs._hash[0], rhs._hash[1], rhs._hash[2], rhs._hash[3]); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -157,11 +148,8 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; - + uint16_t thread_pool_size = 1; + optional thread_pool; void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); @@ -496,7 +484,7 @@ namespace eosio { peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us - std::shared_ptr server_ioc; // keep ioc alive + boost::asio::io_context& server_ioc; boost::asio::io_context::strand strand; socket_ptr socket; @@ -579,6 +567,9 @@ namespace eosio { const string peer_name(); + void 
txn_send_pending(const vector& ids); + void txn_send(const vector& txn_lis); + void blk_send_branch(); void blk_send(const block_id_type& blkid); void stop_send(); @@ -684,7 +675,7 @@ namespace eosio { chain_plugin* chain_plug = nullptr; - constexpr auto stage_str(stages s ); + constexpr static auto stage_str(stages s); public: explicit sync_manager(uint32_t span); @@ -727,9 +718,9 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), - socket( std::make_shared( std::ref( *my_impl->server_ioc ))), + socket( std::make_shared( my_impl->thread_pool->get_executor() ) ), node_id(), last_handshake_recv(), last_handshake_sent(), @@ -753,7 +744,7 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), socket( s ), node_id(), @@ -780,8 +771,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + response_expected.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + read_delay_timer.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); } bool connection::connected() { @@ -825,9 +816,29 @@ namespace eosio { if( read_delay_timer ) read_delay_timer->cancel(); } + void connection::txn_send_pending(const vector& ids) { + const std::set known_ids(ids.cbegin(), ids.cend()); + my_impl->expire_local_txns(); + for(auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ){ + const bool found = known_ids.find( tx->id ) != known_ids.cend(); + if( !found ) { + queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } + } + } + + void connection::txn_send(const vector& ids) { + for(const auto& t : ids) { + auto tx = my_impl->local_txns.get().find(t); + if( tx != my_impl->local_txns.end() ) { + queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } + } + } + void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); - uint32_t head_num = cc.fork_db_head_block_num(); + uint32_t head_num = cc.fork_db_pending_head_block_num(); notice_message note; note.known_blocks.mode = normal; note.known_blocks.pending = 0; @@ -848,7 +859,7 @@ namespace eosio { } lib_id = last_handshake_recv.last_irreversible_block_id; - head_id = cc.fork_db_head_block_id(); + head_id = cc.fork_db_pending_head_block_id(); } catch (const assert_exception& ex) { fc_elog( logger, "unable to retrieve block info: ${n} for ${p}",("n",ex.to_string())("p",peer_name()) ); @@ -930,7 +941,7 @@ namespace eosio { void connection::queue_write(const std::shared_ptr>& buff, bool trigger_send, int priority, - std::function callback, + std::function callback, bool to_sync_queue) { if( !buffer_queue.add_write_queue( buff, callback, to_sync_queue )) { fc_wlog( logger, "write_queue full ${s} bytes, giving up on connection ${p}", @@ -1105,7 +1116,8 @@ namespace eosio { connection_ptr conn = weak_this.lock(); if (conn) { if (close_after_send != no_reason) { - elog ("sent a go away message: ${r}, closing connection to ${p}",("r", reason_str(close_after_send))("p", conn->peer_name())); + fc_elog( 
logger, "sent a go away message: ${r}, closing connection to ${p}", + ("r", reason_str(close_after_send))("p", conn->peer_name()) ); my_impl->close(conn); return; } @@ -1171,6 +1183,13 @@ namespace eosio { if( !peer_addr.empty() ) { return peer_addr; } + if( socket != nullptr ) { + boost::system::error_code ec; + auto rep = socket->remote_endpoint(ec); + if( !ec ) { + return rep.address().to_string() + ':' + std::to_string( rep.port() ); + } + } return "connecting client"; } @@ -1244,7 +1263,7 @@ namespace eosio { bool fhset = c->fork_head != block_id_type(); fc_dlog(logger, "fork_head_num = ${fn} fork_head set = ${s}", ("fn", c->fork_head_num)("s", fhset)); - return c->fork_head != block_id_type() && c->fork_head_num < chain_plug->chain().fork_db_head_block_num(); + return c->fork_head != block_id_type() && c->fork_head_num < chain_plug->chain().fork_db_pending_head_block_num(); } return state != in_sync; } @@ -1265,14 +1284,14 @@ namespace eosio { bool sync_manager::sync_required() { fc_dlog(logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_head_block_num())); + ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_pending_head_block_num())); return( sync_last_requested_num < sync_known_lib_num || - chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); + chain_plug->chain().fork_db_pending_head_block_num() < sync_last_requested_num ); } void sync_manager::request_next_chunk( const connection_ptr& conn ) { - uint32_t head_block = chain_plug->chain().fork_db_head_block_num(); + uint32_t head_block = chain_plug->chain().fork_db_pending_head_block_num(); if (head_block < sync_last_requested_num && source && source->current()) { fc_ilog(logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", @@ -1290,43 +1309,46 @@ namespace eosio { source = conn; } else { - if (my_impl->connections.size() == 1) { + if( my_impl->connections.size() == 0 ) { + source.reset(); + } else if( my_impl->connections.size() == 1 ) { if (!source) { source = *my_impl->connections.begin(); } - } - else { + } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); // do we remember the previous source? - if (source) { + if( source ) { //try to find it in the list - cptr = my_impl->connections.find(source); + cptr = my_impl->connections.find( source ); cend = cptr; - if (cptr == my_impl->connections.end()) { + if( cptr == my_impl->connections.end() ) { //not there - must have been closed! cend is now connections.end, so just flatten the ring. source.reset(); cptr = my_impl->connections.begin(); } else { //was found - advance the start to the next. cend is the old source. - if (++cptr == my_impl->connections.end() && cend != my_impl->connections.end() ) { + if( ++cptr == my_impl->connections.end() && cend != my_impl->connections.end() ) { cptr = my_impl->connections.begin(); } } } //scan the list of peers looking for another able to provide sync blocks. - auto cstart_it = cptr; - do { - //select the first one which is current and break out. - if((*cptr)->current()) { - source = *cptr; - break; - } - if(++cptr == my_impl->connections.end()) + if( cptr != my_impl->connections.end() ) { + auto cstart_it = cptr; + do { + //select the first one which is current and break out. 
+ if( (*cptr)->current() ) { + source = *cptr; + break; + } + if( ++cptr == my_impl->connections.end() ) cptr = my_impl->connections.begin(); - } while(cptr != cstart_it); + } while( cptr != cstart_it ); + } // no need to check the result, either source advanced or the whole list was checked and the old source is reused. } } @@ -1370,7 +1392,7 @@ namespace eosio { if (!sync_required()) { uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); - uint32_t hnum = chain_plug->chain().fork_db_head_block_num(); + uint32_t hnum = chain_plug->chain().fork_db_pending_head_block_num(); fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", ("b",bnum)("h",hnum)("t",target)); return; @@ -1417,8 +1439,8 @@ namespace eosio { // //----------------------------- - uint32_t head = cc.fork_db_head_block_num(); - block_id_type head_id = cc.fork_db_head_block_id(); + uint32_t head = cc.fork_db_pending_head_block_num(); + block_id_type head_id = cc.fork_db_pending_head_block_id(); if (head_id == msg.head_id) { fc_dlog(logger, "sync check state 0"); // notify peer of our pending transactions @@ -1502,7 +1524,8 @@ namespace eosio { void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) { fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); if( msg.known_blocks.ids.size() > 1 ) { - fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}", ("s", msg.known_blocks.ids.size()) ); + fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", + ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); my_impl->close(c); return; } @@ -1522,7 +1545,7 @@ namespace eosio { void sync_manager::rejected_block(const connection_ptr& c, uint32_t blk_num) { if (state != in_sync ) { - fc_ilog(logger, "block ${bn} not accepted from ${p}",("bn",blk_num)("p",c->peer_name())); + fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) ); sync_last_requested_num = 0; source.reset(); my_impl->close(c); @@ -1534,7 +1557,8 @@ namespace eosio { fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); if (state == lib_catchup) { if (blk_num != sync_next_expected_num) { - fc_ilog(logger, "expected block ${ne} but got ${bn}",("ne",sync_next_expected_num)("bn",blk_num)); + fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", + ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); my_impl->close(c); return; } @@ -1899,9 +1923,9 @@ namespace eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - app().post( priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { + auto socket = std::make_shared( my_impl->thread_pool->get_executor() ); + acceptor->async_accept( *socket, [socket, this]( boost::system::error_code ec ) { + app().post( priority::low, [socket, this, ec]() { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; @@ -2093,18 +2117,17 @@ namespace eosio { } } catch(const std::exception &ex) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - fc_elog( logger, "Exception in handling read data from ${p} ${s}",("p",pname)("s",ex.what()) ); + fc_elog( logger, "Exception in handling read data from ${p}: ${s}", + ("p",conn->peer_name())("s",ex.what()) ); close( conn ); } catch(const fc::exception &ex) { - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger, "Exception in handling read data ${s}", ("p",pname)("s",ex.to_string()) ); + fc_elog( logger, "Exception in handling read data from ${p}: ${s}", + ("p",conn->peer_name())("s",ex.to_string()) ); close( conn ); } catch (...) { - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception hanlding the read data from connection ${p}",( "p",pname) ); + fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); close( conn ); } }); @@ -2148,7 +2171,8 @@ namespace eosio { msg.visit( m ); } } catch( const fc::exception& e ) { - edump( (e.to_detail_string()) ); + fc_elog( logger, "Exception in handling message from ${p}: ${s}", + ("p", conn->peer_name())("s", e.to_detail_string()) ); close( conn ); return false; } @@ -2312,10 +2336,7 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { - string rsn = reason_str( msg.reason ); - peer_wlog(c, "received go_away_message"); - fc_wlog( logger, "received a go away message from ${p}, reason = ${r}", - ("p", c->peer_name())("r",rsn) ); + peer_wlog(c, "received go_away_message, reason = ${r}", ("r",reason_str( msg.reason )) ); c->no_retry = msg.reason; if(msg.reason == duplicate ) { c->node_id = msg.node_id; @@ -2377,6 +2398,17 @@ namespace eosio { break; } case catch_up : { + if( msg.known_trx.pending > 0) { + // plan to get all except what we already know about. 
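That "all except what we already know" exchange reduces to a set difference: the requester reports every id it holds (the loop just below), and the responder, in txn_send_pending() earlier in this file, forwards only local transactions missing from that set. A simplified sketch with a stand-in id type:

   #include <set>
   #include <string>
   #include <vector>

   using txn_id = std::string;   // stand-in for chain::transaction_id_type

   std::vector<txn_id> txns_to_forward(const std::vector<txn_id>& peer_known,
                                       const std::vector<txn_id>& local) {
      const std::set<txn_id> known(peer_known.begin(), peer_known.end());
      std::vector<txn_id> out;
      for (const auto& id : local)
         if (known.count(id) == 0)      // skip what the peer already has
            out.push_back(id);
      return out;
   }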
+ req.req_trx.mode = catch_up; + send_req = true; + size_t known_sum = local_txns.size(); + if( known_sum ) { + for( const auto& t : local_txns.get() ) { + req.req_trx.ids.push_back( t.id ); + } + } + } break; } case normal: { @@ -2412,7 +2444,8 @@ namespace eosio { void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { if( msg.req_blocks.ids.size() > 1 ) { - fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}", ("s", msg.req_blocks.ids.size()) ); + fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}, closing ${p}", + ("s", msg.req_blocks.ids.size())("p",c->peer_name()) ); close(c); return; } @@ -2434,17 +2467,14 @@ namespace eosio { switch (msg.req_trx.mode) { case catch_up : + c->txn_send_pending(msg.req_trx.ids); + break; + case normal : + c->txn_send(msg.req_trx.ids); break; case none : if(msg.req_blocks.mode == none) c->stop_send(); - // no break - case normal : - if( !msg.req_trx.ids.empty() ) { - elog( "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); - close(c); - return; - } break; default:; } @@ -2623,8 +2653,8 @@ namespace eosio { } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( *server_ioc )); - transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); + connector_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + transaction_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); start_conn_timer(connector_period, std::weak_ptr()); start_txn_timer(); } @@ -2819,7 +2849,7 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); hello.head_id = fc::sha256(); hello.last_irreversible_block_id = fc::sha256(); - hello.head_num = cc.fork_db_head_block_num(); + hello.head_num = cc.fork_db_pending_head_block_num(); hello.last_irreversible_block_num = cc.last_irreversible_block_num(); if( hello.last_irreversible_block_num ) { try { @@ -2973,15 +3003,10 @@ namespace eosio { void net_plugin::plugin_startup() { my->producer_plug = app().find_plugin(); - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); - } + my->thread_pool.emplace( "net", my->thread_pool_size ); - my->resolver = std::make_shared( std::ref( *my->server_ioc )); + my->resolver = std::make_shared( my->thread_pool->get_executor() ); if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); @@ -2990,7 +3015,7 @@ namespace eosio { my->listen_endpoint = *my->resolver->resolve( query ); - my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); + my->acceptor.reset( new tcp::acceptor( my_impl->thread_pool->get_executor() ) ); if( !my->p2p_server_address.empty() ) { my->p2p_address = my->p2p_server_address; @@ -3010,7 +3035,7 @@ namespace eosio { } } - my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) ); my->ticker(); if( my->acceptor ) { @@ -3048,16 +3073,12 @@ namespace eosio { } void net_plugin::handle_sighup() { - 
      if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end())
-         logger = fc::get_logger_map()[logger_name];
+      fc::logger::update( logger_name, logger );
   }

   void net_plugin::plugin_shutdown() {
      try {
         fc_ilog( logger, "shutdown.." );
-        if( my->server_ioc_work )
-           my->server_ioc_work->reset();
-
         if( my->connector_check )
            my->connector_check->cancel();
         if( my->transaction_check )
@@ -3073,15 +3094,13 @@
            fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) );
            for( auto& con : my->connections ) {
+              fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) );
               my->close( con );
            }
            my->connections.clear();
         }

-        if( my->server_ioc )
-           my->server_ioc->stop();
         if( my->thread_pool ) {
-           my->thread_pool->join();
            my->thread_pool->stop();
         }
         fc_ilog( logger, "exit shutdown" );
@@ -3112,6 +3131,7 @@
      for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) {
         if( (*itr)->peer_addr == host ) {
            (*itr)->reset();
+           fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) );
            my->close(*itr);
            my->connections.erase(itr);
            return "connection removed";
diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp
index 7fcde1ac98c..b513ae6a442 100644
--- a/plugins/producer_api_plugin/producer_api_plugin.cpp
+++ b/plugins/producer_api_plugin/producer_api_plugin.cpp
@@ -24,18 +24,44 @@ static appbase::abstract_plugin& _producer_api_plugin = app().register_plugin<producer_api_plugin>();
+struct async_result_visitor : public fc::visitor<fc::variant> {
+   template<typename T>
+   fc::variant operator()(const T& v) const {
+      return fc::variant(v);
+   }
+};
+
 #define CALL(api_name, api_handle, call_name, INVOKE, http_response_code) \
 {std::string("/v1/" #api_name "/" #call_name), \
    [&api_handle](string, string body, url_response_callback cb) mutable { \
          try { \
            if (body.empty()) body = "{}"; \
            INVOKE \
-           cb(http_response_code, fc::json::to_string(result)); \
+           cb(http_response_code, fc::variant(result)); \
         } catch (...) { \
            http_plugin::handle_exception(#api_name, #call_name, body, cb); \
         } \
      }}

+#define CALL_ASYNC(api_name, api_handle, call_name, call_result, INVOKE, http_response_code) \
+{std::string("/v1/" #api_name "/" #call_name), \
+   [&api_handle](string, string body, url_response_callback cb) mutable { \
+      if (body.empty()) body = "{}"; \
+      auto next = [cb, body](const fc::static_variant<fc::exception_ptr, call_result>& result){\
+         if (result.contains<fc::exception_ptr>()) {\
+            try {\
+               result.get<fc::exception_ptr>()->dynamic_rethrow_exception();\
+            } catch (...) {\
+               http_plugin::handle_exception(#api_name, #call_name, body, cb);\
+            }\
+         } else {\
+            cb(http_response_code, result.visit(async_result_visitor()));\
+         }\
+      };\
+      INVOKE\
+   }\
+}
+
 #define INVOKE_R_R(api_handle, call_name, in_param) \
      auto result = api_handle.call_name(fc::json::from_string(body).as<in_param>());
@@ -46,6 +72,9 @@ using namespace eosio;
 #define INVOKE_R_V(api_handle, call_name) \
      auto result = api_handle.call_name();

+#define INVOKE_R_V_ASYNC(api_handle, call_name)\
+     api_handle.call_name(next);
+
 #define INVOKE_V_R(api_handle, call_name, in_param) \
      api_handle.call_name(fc::json::from_string(body).as<in_param>()); \
      eosio::detail::producer_api_plugin_response result{"ok"};
@@ -79,17 +108,26 @@ void producer_api_plugin::plugin_startup() {
       CALL(producer, producer, add_greylist_accounts,
            INVOKE_V_R(producer, add_greylist_accounts, producer_plugin::greylist_params), 201),
       CALL(producer, producer, remove_greylist_accounts,
-           INVOKE_V_R(producer, remove_greylist_accounts, producer_plugin::greylist_params), 201),
+           INVOKE_V_R(producer, remove_greylist_accounts, producer_plugin::greylist_params), 201),
       CALL(producer, producer, get_greylist,
-           INVOKE_R_V(producer, get_greylist), 201),
+           INVOKE_R_V(producer, get_greylist), 201),
       CALL(producer, producer, get_whitelist_blacklist,
            INVOKE_R_V(producer, get_whitelist_blacklist), 201),
-      CALL(producer, producer, set_whitelist_blacklist,
-           INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201),
+      CALL(producer, producer, set_whitelist_blacklist,
+           INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201),
       CALL(producer, producer, get_integrity_hash,
            INVOKE_R_V(producer, get_integrity_hash), 201),
-      CALL(producer, producer, create_snapshot,
-           INVOKE_R_V(producer, create_snapshot), 201),
+      CALL_ASYNC(producer, producer, create_snapshot, producer_plugin::snapshot_information,
+           INVOKE_R_V_ASYNC(producer, create_snapshot), 201),
+      CALL(producer, producer, get_scheduled_protocol_feature_activations,
+           INVOKE_R_V(producer, get_scheduled_protocol_feature_activations), 201),
+      CALL(producer, producer, schedule_protocol_feature_activations,
+           INVOKE_V_R(producer, schedule_protocol_feature_activations, producer_plugin::scheduled_protocol_feature_activations), 201),
+      CALL(producer, producer, get_supported_protocol_features,
+           INVOKE_R_R(producer, get_supported_protocol_features,
+                      producer_plugin::get_supported_protocol_features_params), 201),
+      CALL(producer, producer, get_account_ram_corrections,
+           INVOKE_R_R(producer, get_account_ram_corrections, producer_plugin::get_account_ram_corrections_params), 201),
   });
}
diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
index 66030cc587e..5d1335407ca 100644
--- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
+++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
@@ -51,6 +51,30 @@ class producer_plugin : public appbase::plugin {
          std::string snapshot_name;
       };

+      struct scheduled_protocol_feature_activations {
+         std::vector<chain::digest_type> protocol_features_to_activate;
+      };
+
+      struct get_supported_protocol_features_params {
+         bool exclude_disabled = false;
+         bool exclude_unactivatable = false;
+      };
+
+      struct get_account_ram_corrections_params {
+         optional<account_name> lower_bound;
+         optional<account_name> upper_bound;
+         uint32_t limit = 10;
+         bool reverse = false;
+      };
+
+      struct get_account_ram_corrections_result {
+         std::vector<fc::variant> rows;
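rows above and more below follow the paging convention this diff implements later with walk_range(): return at most limit rows and, when truncated, surface the key to resume from. A miniature of the convention over a std::map, with hypothetical types:

   #include <map>
   #include <optional>
   #include <string>
   #include <vector>

   struct page {                            // miniature of a ..._result struct
      std::vector<int> rows;                // up to `limit` matches
      std::optional<std::string> more;      // resume key when truncated
   };

   page get_page(const std::map<std::string, int>& idx,
                 const std::string& lower_bound, unsigned limit) {
      page out;
      auto it = idx.lower_bound(lower_bound);
      for (unsigned n = 0; n < limit && it != idx.end(); ++n, ++it)
         out.rows.push_back(it->second);
      if (it != idx.end())
         out.more = it->first;              // client passes this back as lower_bound
      return out;
   }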
+ optional more; + }; + + template + using next_function = std::function&)>; + producer_plugin(); virtual ~producer_plugin(); @@ -81,7 +105,14 @@ class producer_plugin : public appbase::plugin { void set_whitelist_blacklist(const whitelist_blacklist& params); integrity_hash_information get_integrity_hash() const; - snapshot_information create_snapshot() const; + void create_snapshot(next_function next); + + scheduled_protocol_feature_activations get_scheduled_protocol_feature_activations() const; + void schedule_protocol_feature_activations(const scheduled_protocol_feature_activations& schedule); + + fc::variants get_supported_protocol_features( const get_supported_protocol_features_params& params ) const; + + get_account_ram_corrections_result get_account_ram_corrections( const get_account_ram_corrections_params& params ) const; signal confirmed_block; private: @@ -95,4 +126,7 @@ FC_REFLECT(eosio::producer_plugin::greylist_params, (accounts)); FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_blacklist)(contract_whitelist)(contract_blacklist)(action_blacklist)(key_blacklist) ) FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash)) FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(snapshot_name)) - +FC_REFLECT(eosio::producer_plugin::scheduled_protocol_feature_activations, (protocol_features_to_activate)) +FC_REFLECT(eosio::producer_plugin::get_supported_protocol_features_params, (exclude_disabled)(exclude_unactivatable)) +FC_REFLECT(eosio::producer_plugin::get_account_ram_corrections_params, (lower_bound)(upper_bound)(limit)(reverse)) +FC_REFLECT(eosio::producer_plugin::get_account_ram_corrections_result, (rows)(more)) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a780ee89711..7f9ae952cdd 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -3,14 +3,15 @@ * @copyright defined in eos/LICENSE */ #include -#include #include #include #include #include +#include #include #include +#include #include #include @@ -42,12 +43,6 @@ using std::vector; using std::deque; using boost::signals2::scoped_connection; -// HACK TO EXPOSE LOGGER MAP - -namespace fc { - extern std::unordered_map& get_logger_map(); -} - const fc::string logger_name("producer_plugin"); fc::logger _log; @@ -86,6 +81,70 @@ using transaction_id_with_expiry_index = multi_index_container< > >; +struct by_height; + +class pending_snapshot { +public: + using next_t = producer_plugin::next_function; + + pending_snapshot(const block_id_type& block_id, next_t& next, std::string pending_path, std::string final_path) + : block_id(block_id) + , next(next) + , pending_path(pending_path) + , final_path(final_path) + {} + + uint32_t get_height() const { + return block_header::num_from_id(block_id); + } + + static bfs::path get_final_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + return snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); + } + + static bfs::path get_pending_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + return snapshots_dir / fc::format_string(".pending-snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); + } + + static bfs::path get_temp_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + return snapshots_dir / fc::format_string(".incomplete-snapshot-${id}.bin", 
fc::mutable_variant_object()("id", block_id)); + } + + producer_plugin::snapshot_information finalize( const chain::controller& chain ) const { + auto in_chain = (bool)chain.fetch_block_by_id( block_id ); + boost::system::error_code ec; + + if (!in_chain) { + bfs::remove(bfs::path(pending_path), ec); + EOS_THROW(snapshot_finalization_exception, + "Snapshotted block was forked out of the chain. ID: ${block_id}", + ("block_id", block_id)); + } + + bfs::rename(bfs::path(pending_path), bfs::path(final_path), ec); + EOS_ASSERT(!ec, snapshot_finalization_exception, + "Unable to finalize valid snapshot of block number ${bn}: [code: ${ec}] ${message}", + ("bn", get_height()) + ("ec", ec.value()) + ("message", ec.message())); + + return {block_id, final_path}; + } + + block_id_type block_id; + next_t next; + std::string pending_path; + std::string final_path; +}; + +using pending_snapshot_index = multi_index_container< + pending_snapshot, + indexed_by< + hashed_unique, BOOST_MULTI_INDEX_MEMBER(pending_snapshot, block_id_type, block_id)>, + ordered_non_unique, BOOST_MULTI_INDEX_CONST_MEM_FUN( pending_snapshot, uint32_t, get_height)> + > +>; + enum class pending_block_mode { producing, speculating @@ -132,7 +191,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _producer_watermarks; pending_block_mode _pending_block_mode; transaction_id_with_expiry_index _persistent_transactions; - fc::optional _thread_pool; + fc::optional _thread_pool; int32_t _max_transaction_time_ms; fc::microseconds _max_irreversible_block_age_us; @@ -142,6 +201,9 @@ class producer_plugin_impl : public std::enable_shared_from_this _protocol_features_to_activate; + bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block + time_point _last_signed_block_time; time_point _start_time = fc::time_point::now(); uint32_t _last_signed_block_num = 0; @@ -158,6 +220,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _accepted_block_connection; fc::optional _irreversible_block_connection; @@ -221,16 +284,13 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto hbn = bsp->block_num; - auto new_block_header = bsp->header; - new_block_header.timestamp = new_block_header.timestamp.next(); - new_block_header.previous = bsp->id; - auto new_bs = bsp->generate_next(new_block_header.timestamp); + auto new_pbhs = bsp->next(bsp->header.timestamp.next(), 0); // for newly installed producers we can set their watermarks to the block they became active - if (new_bs.maybe_promote_pending() && bsp->active_schedule.version != new_bs.active_schedule.version) { + if( bsp->active_schedule.version != new_pbhs.active_schedule.version ) { flat_set new_producers; - new_producers.reserve(new_bs.active_schedule.producers.size()); - for( const auto& p: new_bs.active_schedule.producers) { + new_producers.reserve(new_pbhs.active_schedule.producers.size()); + for( const auto& p: new_pbhs.active_schedule.producers) { if (_producers.count(p.producer_name) > 0) new_producers.insert(p.producer_name); } @@ -247,6 +307,22 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp.to_time_point(); + const chain::controller& chain = chain_plug->chain(); + + // promote any pending snapshots + auto& snapshots_by_height = _pending_snapshot_index.get(); + uint32_t lib_height = lib->block_num(); + + while (!snapshots_by_height.empty() && snapshots_by_height.begin()->get_height() <= lib_height) { + const auto& pending = snapshots_by_height.begin(); + auto 
next = pending->next; + + try { + next(pending->finalize(chain)); + } CATCH_AND_CALL(next); + + snapshots_by_height.erase(snapshots_by_height.begin()); + } } template @@ -351,9 +427,9 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, *_thread_pool, + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _thread_pool->get_executor(), chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, future, trx, persist_until_expired, next]() { + boost::asio::post( _thread_pool->get_executor(), [self = this, future, trx, persist_until_expired, next]() { if( future.valid() ) future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { @@ -364,12 +440,12 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - if (!chain.pending_block_state()) { + if (!chain.is_building_block()) { _pending_incoming_transactions.emplace_back(trx, persist_until_expired, next); return; } - auto block_time = chain.pending_block_state()->header.timestamp.to_time_point(); + auto block_time = chain.pending_block_time(); auto send_response = [this, &trx, &chain, &next](const fc::static_variant& response) { next(response); @@ -378,7 +454,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.producer) + ("prod", chain.pending_block_producer()) ("txid", trx->id) ("why",response.get()->what())); } else { @@ -391,7 +467,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.producer) + ("prod", chain.pending_block_producer()) ("txid", trx->id)); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}", @@ -427,7 +503,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.producer) + ("prod", chain.pending_block_producer()) ("txid", trx->id)); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", @@ -689,7 +765,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ auto thread_pool_size = options.at( "producer-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, plugin_config_exception, "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); - my->_thread_pool.emplace( thread_pool_size ); + my->_thread_pool.emplace( "prod", thread_pool_size ); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); @@ -786,7 +862,6 @@ void producer_plugin::plugin_shutdown() { } if( my->_thread_pool ) { - my->_thread_pool->join(); my->_thread_pool->stop(); } my->_accepted_block_connection.reset(); @@ -794,13 +869,8 @@ void producer_plugin::plugin_shutdown() { } void producer_plugin::handle_sighup() { - auto& logger_map = fc::get_logger_map(); - if(logger_map.find(logger_name) != logger_map.end()) { - _log = logger_map[logger_name]; - } - if( logger_map.find(trx_trace_logger_name) != logger_map.end()) { - _trx_trace_log = logger_map[trx_trace_logger_name]; - } + fc::logger::update( logger_name, _log ); + fc::logger::update( trx_trace_logger_name, _trx_trace_log ); } void producer_plugin::pause() { @@ -927,7 +997,7 @@ producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash( 
my->schedule_production_loop(); }); - if (chain.pending_block_state()) { + if (chain.is_building_block()) { // abort the pending block chain.abort_block(); } else { @@ -937,35 +1007,196 @@ producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash( return {chain.head_block_id(), chain.calculate_integrity_hash()}; } -producer_plugin::snapshot_information producer_plugin::create_snapshot() const { +void producer_plugin::create_snapshot(producer_plugin::next_function next) { chain::controller& chain = my->chain_plug->chain(); - auto reschedule = fc::make_scoped_exit([this](){ - my->schedule_production_loop(); - }); + auto head_id = chain.head_block_id(); + const auto& snapshot_path = pending_snapshot::get_final_path(head_id, my->_snapshots_dir); + const auto& temp_path = pending_snapshot::get_temp_path(head_id, my->_snapshots_dir); + + // maintain legacy exception if the snapshot exists + if( fc::is_regular_file(snapshot_path) ) { + auto ex = snapshot_exists_exception( FC_LOG_MESSAGE( error, "snapshot named ${name} already exists", ("name", snapshot_path.generic_string()) ) ); + next(ex.dynamic_copy_exception()); + return; + } - if (chain.pending_block_state()) { - // abort the pending block - chain.abort_block(); + auto write_snapshot = [&]( const bfs::path& p ) -> void { + auto reschedule = fc::make_scoped_exit([this](){ + my->schedule_production_loop(); + }); + + if (chain.is_building_block()) { + // abort the pending block + chain.abort_block(); + } else { + reschedule.cancel(); + } + + bfs::create_directory( p.parent_path() ); + + // create the snapshot + auto snap_out = std::ofstream(p.generic_string(), (std::ios::out | std::ios::binary)); + auto writer = std::make_shared(snap_out); + chain.write_snapshot(writer); + writer->finalize(); + snap_out.flush(); + snap_out.close(); + }; + + // If in irreversible mode, create snapshot and return path to snapshot immediately. + if( chain.get_read_mode() == db_read_mode::IRREVERSIBLE ) { + try { + write_snapshot( temp_path ); + + boost::system::error_code ec; + bfs::rename(temp_path, snapshot_path, ec); + EOS_ASSERT(!ec, snapshot_finalization_exception, + "Unable to finalize valid snapshot of block number ${bn}: [code: ${ec}] ${message}", + ("bn", chain.head_block_num()) + ("ec", ec.value()) + ("message", ec.message())); + + next( producer_plugin::snapshot_information{head_id, snapshot_path.generic_string()} ); + } CATCH_AND_CALL (next); + return; + } + + // Otherwise, the result will be returned when the snapshot becomes irreversible. 
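Both branches of create_snapshot() publish files the same way: write under a dot-prefixed temporary name, then rename into place, so no reader of the snapshots directory ever observes a partial snapshot-<id>.bin. A sketch of that discipline using std::filesystem in place of boost::filesystem, with a hypothetical payload standing in for the real snapshot writer:

   #include <filesystem>
   #include <fstream>
   #include <string>

   namespace fs = std::filesystem;

   void publish_snapshot(const fs::path& dir, const std::string& id,
                         const std::string& payload) {
      fs::create_directories(dir);
      const fs::path temp = dir / (".incomplete-snapshot-" + id + ".bin");
      const fs::path dest = dir / ("snapshot-" + id + ".bin");

      std::ofstream out(temp, std::ios::binary);
      out << payload;                        // stand-in for chain.write_snapshot(writer)
      out.close();

      fs::rename(temp, dest);                // atomic promotion to visibility
   }

rename() is only atomic within one filesystem, which is why the temporary file lives in the snapshots directory itself rather than in /tmp.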
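Several callers may request a snapshot of the same head block, so the in-flight case below merges handlers instead of writing twice. The merge is plain callback composition; a simplified shape with std::function and a stand-in result type instead of fc::static_variant:

   #include <functional>
   #include <string>
   #include <utility>

   using result_t = std::string;                   // stand-in for the static_variant
   using next_t   = std::function<void(const result_t&)>;

   // Wrap the existing handler so both the earlier requester and the new one
   // are notified when the pending snapshot finalizes.
   next_t chain_handlers(next_t prev, next_t next) {
      return [prev = std::move(prev), next = std::move(next)](const result_t& r) {
         prev(r);
         next(r);
      };
   }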
+ + // determine if this snapshot is already in-flight + auto& pending_by_id = my->_pending_snapshot_index.get(); + auto existing = pending_by_id.find(head_id); + if( existing != pending_by_id.end() ) { + // if a snapshot at this block is already pending, attach this requests handler to it + pending_by_id.modify(existing, [&next]( auto& entry ){ + entry.next = [prev = entry.next, next](const fc::static_variant& res){ + prev(res); + next(res); + }; + }); } else { - reschedule.cancel(); + const auto& pending_path = pending_snapshot::get_pending_path(head_id, my->_snapshots_dir); + + try { + write_snapshot( temp_path ); // create a new pending snapshot + + boost::system::error_code ec; + bfs::rename(temp_path, pending_path, ec); + EOS_ASSERT(!ec, snapshot_finalization_exception, + "Unable to promote temp snapshot to pending for block number ${bn}: [code: ${ec}] ${message}", + ("bn", chain.head_block_num()) + ("ec", ec.value()) + ("message", ec.message())); + + my->_pending_snapshot_index.emplace(head_id, next, pending_path.generic_string(), snapshot_path.generic_string()); + } CATCH_AND_CALL (next); } +} - auto head_id = chain.head_block_id(); - std::string snapshot_path = (my->_snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", head_id))).generic_string(); +producer_plugin::scheduled_protocol_feature_activations +producer_plugin::get_scheduled_protocol_feature_activations()const { + return {my->_protocol_features_to_activate}; +} - EOS_ASSERT( !fc::is_regular_file(snapshot_path), snapshot_exists_exception, - "snapshot named ${name} already exists", ("name", snapshot_path)); +void producer_plugin::schedule_protocol_feature_activations( const scheduled_protocol_feature_activations& schedule ) { + const chain::controller& chain = my->chain_plug->chain(); + std::set set_of_features_to_activate( schedule.protocol_features_to_activate.begin(), + schedule.protocol_features_to_activate.end() ); + EOS_ASSERT( set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), + invalid_protocol_features_to_activate, "duplicate digests" ); + chain.validate_protocol_features( schedule.protocol_features_to_activate ); + const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); + for (auto &feature_digest : set_of_features_to_activate) { + const auto& pf = pfs.get_protocol_feature(feature_digest); + EOS_ASSERT( !pf.preactivation_required, protocol_feature_exception, + "protocol feature requires preactivation: ${digest}", + ("digest", feature_digest)); + } + my->_protocol_features_to_activate = schedule.protocol_features_to_activate; + my->_protocol_features_signaled = false; +} +fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const { + fc::variants results; + const chain::controller& chain = my->chain_plug->chain(); + const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); + const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms); + + flat_map visited_protocol_features; + visited_protocol_features.reserve( pfs.size() ); + + std::function add_feature = + [&results, &pfs, ¶ms, next_block_time, &visited_protocol_features, &add_feature] + ( const protocol_feature& pf ) -> bool { + if( ( params.exclude_disabled || params.exclude_unactivatable ) && !pf.enabled ) return false; + if( params.exclude_unactivatable && ( next_block_time < pf.earliest_allowed_activation_time ) ) return false; + + 
auto res = visited_protocol_features.emplace( pf.feature_digest, false ); + if( !res.second ) return res.first->second; + + const auto original_size = results.size(); + for( const auto& dependency : pf.dependencies ) { + if( !add_feature( pfs.get_protocol_feature( dependency ) ) ) { + results.resize( original_size ); + return false; + } + } + + res.first->second = true; + results.emplace_back( pf.to_variant(true) ); + return true; + }; - auto snap_out = std::ofstream(snapshot_path, (std::ios::out | std::ios::binary)); - auto writer = std::make_shared(snap_out); - chain.write_snapshot(writer); - writer->finalize(); - snap_out.flush(); - snap_out.close(); + for( const auto& pf : pfs ) { + add_feature( pf ); + } - return {head_id, snapshot_path}; + return results; +} + +producer_plugin::get_account_ram_corrections_result +producer_plugin::get_account_ram_corrections( const get_account_ram_corrections_params& params ) const { + get_account_ram_corrections_result result; + const auto& db = my->chain_plug->chain().db(); + + const auto& idx = db.get_index(); + account_name lower_bound_value = std::numeric_limits::lowest(); + account_name upper_bound_value = std::numeric_limits::max(); + + if( params.lower_bound ) { + lower_bound_value = *params.lower_bound; + } + + if( params.upper_bound ) { + upper_bound_value = *params.upper_bound; + } + + if( upper_bound_value < lower_bound_value ) + return result; + + auto walk_range = [&]( auto itr, auto end_itr ) { + for( unsigned int count = 0; + count < params.limit && itr != end_itr; + ++itr ) + { + result.rows.push_back( fc::variant( *itr ) ); + ++count; + } + if( itr != end_itr ) { + result.more = itr->name; + } + }; + + auto lower = idx.lower_bound( lower_bound_value ); + auto upper = idx.upper_bound( upper_bound_value ); + if( params.reverse ) { + walk_range( boost::make_reverse_iterator(upper), boost::make_reverse_iterator(lower) ); + } else { + walk_range( lower, upper ); + } + + return result; } optional producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const { @@ -992,7 +1223,7 @@ optional producer_plugin_impl::calculate_next_block_time(const a if (current_watermark_itr != _producer_watermarks.end()) { auto watermark = current_watermark_itr->second; auto block_num = chain.head_block_state()->block_num; - if (chain.pending_block_state()) { + if (chain.is_building_block()) { ++block_num; } if (watermark > block_num) { @@ -1127,15 +1358,52 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } chain.abort_block(); - chain.start_block(block_time, blocks_to_confirm); + + auto features_to_activate = chain.get_preactivated_protocol_features(); + if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { + bool drop_features_to_activate = false; + try { + chain.validate_protocol_features( _protocol_features_to_activate ); + } catch( const fc::exception& e ) { + wlog( "protocol features to activate are no longer all valid: ${details}", + ("details",e.to_detail_string()) ); + drop_features_to_activate = true; + } + + if( drop_features_to_activate ) { + _protocol_features_to_activate.clear(); + } else { + auto protocol_features_to_activate = _protocol_features_to_activate; // do a copy as pending_block might be aborted + if( features_to_activate.size() > 0 ) { + protocol_features_to_activate.reserve( protocol_features_to_activate.size() + + features_to_activate.size() ); + std::set 
set_of_features_to_activate( protocol_features_to_activate.begin(), + protocol_features_to_activate.end() ); + for( const auto& f : features_to_activate ) { + auto res = set_of_features_to_activate.insert( f ); + if( res.second ) { + protocol_features_to_activate.push_back( f ); + } + } + features_to_activate.clear(); + } + std::swap( features_to_activate, protocol_features_to_activate ); + _protocol_features_signaled = true; + ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); + } + } + + chain.start_block( block_time, blocks_to_confirm, features_to_activate ); } FC_LOG_AND_DROP(); - const auto& pbs = chain.pending_block_state(); - if (pbs) { + if( chain.is_building_block() ) { + auto pending_block_time = chain.pending_block_time(); + auto pending_block_signing_key = chain.pending_block_signing_key(); const fc::time_point preprocess_deadline = calculate_block_deadline(block_time); - if (_pending_block_mode == pending_block_mode::producing && pbs->block_signing_key != scheduled_producer.block_signing_key) { - elog("Block Signing Key is not expected value, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.block_signing_key)("actual", pbs->block_signing_key)); + if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_key != scheduled_producer.block_signing_key) { + elog("Block Signing Key is not expected value, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.block_signing_key)("actual", pending_block_signing_key)); _pending_block_mode = pending_block_mode::speculating; } @@ -1149,7 +1417,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { int num_expired_persistent = 0; int orig_count = _persistent_transactions.size(); - while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) { + while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) { if (preprocess_deadline <= fc::time_point::now()) { exhausted = true; break; @@ -1158,7 +1426,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (_pending_block_mode == pending_block_mode::producing) { fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", ("block_num", chain.head_block_num() + 1) - ("prod", chain.pending_block_state()->header.producer) + ("prod", chain.pending_block_producer()) ("txid", txid)); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", @@ -1198,7 +1466,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { int num_failed = 0; int num_processed = 0; auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) { - if (trx->packed_trx->expiration() < pbs->header.timestamp.to_time_point()) { + if (trx->packed_trx->expiration() < pending_block_time) { return tx_category::EXPIRED; } else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) { return tx_category::PERSISTED; @@ -1454,22 +1722,22 @@ void producer_plugin_impl::schedule_production_loop() { if (deadline > fc::time_point::now()) { // ship this block off no later than its deadline - EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing 
@@ -1454,22 +1722,22 @@ void producer_plugin_impl::schedule_production_loop() {
       if (deadline > fc::time_point::now()) {
          // ship this block off no later than its deadline
-         EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" );
+         EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" );
          _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ));
          fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}",
-                 ("num", chain.pending_block_state()->block_num)("time",deadline));
+                 ("num", chain.head_block_num()+1)("time",deadline));
       } else {
-         EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing without pending_block_state" );
+         EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" );
          auto expect_time = chain.pending_block_time() - fc::microseconds(config::block_interval_us); // ship this block off up to 1 block time earlier or immediately
         if (fc::time_point::now() >= expect_time) {
            _timer.expires_from_now( boost::posix_time::microseconds( 0 ));
            fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} immediately",
-                   ("num", chain.pending_block_state()->block_num));
+                   ("num", chain.head_block_num()+1));
         } else {
            _timer.expires_at(epoch + boost::posix_time::microseconds(expect_time.time_since_epoch().count()));
            fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} at ${time}",
-                   ("num", chain.pending_block_state()->block_num)("time",expect_time));
+                   ("num", chain.head_block_num()+1)("time",expect_time));
         }
       }

@@ -1479,16 +1747,15 @@ void producer_plugin_impl::schedule_production_loop() {
         if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) {
            fc_dlog( _log, "Produce block timer running at ${time}", ("time", fc::time_point::now()) );
            // pending_block_state expected, but can't assert inside async_wait
-           auto block_num = chain.pending_block_state() ? chain.pending_block_state()->block_num : 0;
+           auto block_num = chain.is_building_block() ?
chain.head_block_num() + 1 : 0; auto res = self->maybe_produce_block(); fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); } } ) ); } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){ - fc_dlog(_log, "Specualtive Block Created; Scheduling Speculative/Production Change"); - EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "speculating without pending_block_state" ); - const auto& pbs = chain.pending_block_state(); - schedule_delayed_production_loop(weak_this, pbs->header.timestamp); + fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); + EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" ); + schedule_delayed_production_loop(weak_this, chain.pending_block_time()); } else { fc_dlog(_log, "Speculative Block Created"); } @@ -1570,23 +1837,24 @@ void producer_plugin_impl::produce_block() { //ilog("produce_block ${t}", ("t", fc::time_point::now())); // for testing _produce_time_offset_us EOS_ASSERT(_pending_block_mode == pending_block_mode::producing, producer_exception, "called produce_block while not actually producing"); chain::controller& chain = chain_plug->chain(); - const auto& pbs = chain.pending_block_state(); const auto& hbs = chain.head_block_state(); - EOS_ASSERT(pbs, missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); - auto signature_provider_itr = _signature_providers.find( pbs->block_signing_key ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); + auto signature_provider_itr = _signature_providers.find( chain.pending_block_signing_key() ); EOS_ASSERT(signature_provider_itr != _signature_providers.end(), producer_priv_key_not_found, "Attempting to produce a block for which we don't have the private key"); + if (_protocol_features_signaled) { + _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block + _protocol_features_signaled = false; + } + //idump( (fc::time_point::now() - chain.pending_block_time()) ); - chain.finalize_block(); - chain.sign_block( [&]( const digest_type& d ) { + chain.finalize_block( [&]( const digest_type& d ) { auto debug_logger = maybe_make_debug_time_logger(); return signature_provider_itr->second(d); } ); chain.commit_block(); - auto hbt = chain.head_block_time(); - //idump((fc::time_point::now() - hbt)); block_state_ptr new_bs = chain.head_block_state(); _producer_watermarks[new_bs->header.producer] = chain.head_block_num(); diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 5f97b0280a4..12d43d82139 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -21,39 +22,29 @@ namespace eosio { * +---------+----------------+-----------+------------------+-----+---------+----------------+ * * *.index: - * +-----------+-------------+-----+-----------+ - * | Summary i | Summary i+1 | ... 
| Summary z | - * +-----------+-------------+-----+-----------+ + * +----------------+------------------+-----+----------------+ + * | Pos of Entry i | Pos of Entry i+1 | ... | Pos of Entry z | + * +----------------+------------------+-----+----------------+ * * each entry: - * uint32_t block_num - * block_id_type block_id - * uint64_t size of payload - * uint8_t version - * payload - * - * each summary: - * uint64_t position of entry in *.log - * - * state payload: - * uint32_t size of deltas - * char[] deltas + * state_history_log_header + * payload */ -// todo: look into switching this to serialization instead of memcpy -// todo: consider reworking versioning -// todo: consider dropping block_num since it's included in block_id -// todo: currently only checks version on the first record. Need in recover_blocks +inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } +inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } +inline uint32_t get_ship_version(uint64_t magic) { return magic; } +inline bool is_ship_supported_version(uint64_t magic) { return get_ship_version(magic) == 0; } +static const uint32_t ship_current_version = 0; + struct state_history_log_header { - uint32_t block_num = 0; - chain::block_id_type block_id; + uint64_t magic = ship_magic(ship_current_version); + chain::block_id_type block_id = {}; uint64_t payload_size = 0; - uint8_t version = 0; -}; - -struct state_history_summary { - uint64_t pos = 0; }; +static const int state_history_log_header_serial_size = sizeof(state_history_log_header::magic) + + sizeof(state_history_log_header::block_id) + + sizeof(state_history_log_header::payload_size); class state_history_log { private: @@ -78,40 +69,59 @@ class state_history_log { uint32_t begin_block() const { return _begin_block; } uint32_t end_block() const { return _end_block; } + void read_header(state_history_log_header& header, bool assert_version = true) { + char bytes[state_history_log_header_serial_size]; + log.read(bytes, sizeof(bytes)); + fc::datastream ds(bytes, sizeof(bytes)); + fc::raw::unpack(ds, header); + EOS_ASSERT(!ds.remaining(), chain::plugin_exception, "state_history_log_header_serial_size mismatch"); + if (assert_version) + EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic), chain::plugin_exception, + "corrupt ${name}.log (0)", ("name", name)); + } + + void write_header(const state_history_log_header& header) { + char bytes[state_history_log_header_serial_size]; + fc::datastream ds(bytes, sizeof(bytes)); + fc::raw::pack(ds, header); + EOS_ASSERT(!ds.remaining(), chain::plugin_exception, "state_history_log_header_serial_size mismatch"); + log.write(bytes, sizeof(bytes)); + } + template void write_entry(const state_history_log_header& header, const chain::block_id_type& prev_id, F write_payload) { - EOS_ASSERT(_begin_block == _end_block || header.block_num <= _end_block, chain::plugin_exception, + auto block_num = chain::block_header::num_from_id(header.block_id); + EOS_ASSERT(_begin_block == _end_block || block_num <= _end_block, chain::plugin_exception, "missed a block in ${name}.log", ("name", name)); - if (_begin_block != _end_block && header.block_num > _begin_block) { - if (header.block_num == _end_block) { + if (_begin_block != _end_block && block_num > _begin_block) { + if (block_num == _end_block) { EOS_ASSERT(prev_id == last_block_id, chain::plugin_exception, "missed a fork change in ${name}.log", ("name", name)); } else { state_history_log_header prev; - 
get_entry(header.block_num - 1, prev); + get_entry(block_num - 1, prev); EOS_ASSERT(prev_id == prev.block_id, chain::plugin_exception, "missed a fork change in ${name}.log", ("name", name)); } } - if (header.block_num < _end_block) - truncate(header.block_num); + if (block_num < _end_block) + truncate(block_num); log.seekg(0, std::ios_base::end); uint64_t pos = log.tellg(); - log.write((char*)&header, sizeof(header)); + write_header(header); write_payload(log); uint64_t end = log.tellg(); - EOS_ASSERT(end == pos + sizeof(header) + header.payload_size, chain::plugin_exception, + EOS_ASSERT(end == pos + state_history_log_header_serial_size + header.payload_size, chain::plugin_exception, "wrote payload with incorrect size to ${name}.log", ("name", name)); log.write((char*)&pos, sizeof(pos)); index.seekg(0, std::ios_base::end); - state_history_summary summary{.pos = pos}; - index.write((char*)&summary, sizeof(summary)); + index.write((char*)&pos, sizeof(pos)); if (_begin_block == _end_block) - _begin_block = header.block_num; - _end_block = header.block_num + 1; + _begin_block = block_num; + _end_block = block_num + 1; last_block_id = header.block_id; } @@ -120,7 +130,7 @@ class state_history_log { EOS_ASSERT(block_num >= _begin_block && block_num < _end_block, chain::plugin_exception, "read non-existing block in ${name}.log", ("name", name)); log.seekg(get_pos(block_num)); - log.read((char*)&header, sizeof(header)); + read_header(header); return log; } @@ -136,17 +146,18 @@ class state_history_log { uint64_t suffix; log.seekg(size - sizeof(suffix)); log.read((char*)&suffix, sizeof(suffix)); - if (suffix > size || suffix + sizeof(header) > size) { + if (suffix > size || suffix + state_history_log_header_serial_size > size) { elog("corrupt ${name}.log (2)", ("name", name)); return false; } log.seekg(suffix); - log.read((char*)&header, sizeof(header)); - if (suffix + sizeof(header) + header.payload_size + sizeof(suffix) != size) { + read_header(header, false); + if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || + suffix + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) != size) { elog("corrupt ${name}.log (3)", ("name", name)); return false; } - _end_block = header.block_num + 1; + _end_block = chain::block_header::num_from_id(header.block_id) + 1; last_block_id = header.block_id; if (_begin_block >= _end_block) { elog("corrupt ${name}.log (4)", ("name", name)); @@ -161,18 +172,22 @@ class state_history_log { uint32_t num_found = 0; while (true) { state_history_log_header header; - if (pos + sizeof(header) > size) + if (pos + state_history_log_header_serial_size > size) break; log.seekg(pos); - log.read((char*)&header, sizeof(header)); + read_header(header, false); uint64_t suffix; - if (header.payload_size > size || pos + sizeof(header) + header.payload_size + sizeof(suffix) > size) + if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || header.payload_size > size || + pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) > size) { + EOS_ASSERT(!is_ship(header.magic) || is_ship_supported_version(header.magic), chain::plugin_exception, + "${name}.log has an unsupported version", ("name", name)); break; - log.seekg(pos + sizeof(header) + header.payload_size); + } + log.seekg(pos + state_history_log_header_serial_size + header.payload_size); log.read((char*)&suffix, sizeof(suffix)); if (suffix != pos) break; - pos = pos + sizeof(header) + header.payload_size + sizeof(suffix); + pos = pos + 
state_history_log_header_serial_size + header.payload_size + sizeof(suffix); if (!(++num_found % 10000)) { printf("%10u blocks found, log pos=%12llu\r", (unsigned)num_found, (unsigned long long)pos); fflush(stdout); @@ -188,13 +203,14 @@ class state_history_log { log.open(log_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); log.seekg(0, std::ios_base::end); uint64_t size = log.tellg(); - if (size >= sizeof(state_history_log_header)) { + if (size >= state_history_log_header_serial_size) { state_history_log_header header; log.seekg(0); - log.read((char*)&header, sizeof(header)); - EOS_ASSERT(header.version == 0 && sizeof(header) + header.payload_size + sizeof(uint64_t) <= size, + read_header(header, false); + EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic) && + state_history_log_header_serial_size + header.payload_size + sizeof(uint64_t) <= size, chain::plugin_exception, "corrupt ${name}.log (1)", ("name", name)); - _begin_block = header.block_num; + _begin_block = chain::block_header::num_from_id(header.block_id); last_block_id = header.block_id; if (!get_last_block(size)) recover_blocks(size); @@ -208,7 +224,7 @@ class state_history_log { void open_index() { index.open(index_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); index.seekg(0, std::ios_base::end); - if (index.tellg() == (static_cast(_end_block) - _begin_block) * sizeof(state_history_summary)) + if (index.tellg() == (static_cast(_end_block) - _begin_block) * sizeof(uint64_t)) return; ilog("Regenerate ${name}.index", ("name", name)); index.close(); @@ -220,21 +236,22 @@ class state_history_log { uint32_t num_found = 0; while (pos < size) { state_history_log_header header; - EOS_ASSERT(pos + sizeof(header) <= size, chain::plugin_exception, "corrupt ${name}.log (6)", ("name", name)); + EOS_ASSERT(pos + state_history_log_header_serial_size <= size, chain::plugin_exception, + "corrupt ${name}.log (6)", ("name", name)); log.seekg(pos); - log.read((char*)&header, sizeof(header)); - uint64_t suffix_pos = pos + sizeof(header) + header.payload_size; + read_header(header, false); + uint64_t suffix_pos = pos + state_history_log_header_serial_size + header.payload_size; uint64_t suffix; - EOS_ASSERT(suffix_pos + sizeof(suffix) <= size, chain::plugin_exception, "corrupt ${name}.log (7)", - ("name", name)); + EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic) && + suffix_pos + sizeof(suffix) <= size, + chain::plugin_exception, "corrupt ${name}.log (7)", ("name", name)); log.seekg(suffix_pos); log.read((char*)&suffix, sizeof(suffix)); // ilog("block ${b} at ${pos}-${end} suffix=${suffix} file_size=${fs}", // ("b", header.block_num)("pos", pos)("end", suffix_pos + sizeof(suffix))("suffix", suffix)("fs", size)); EOS_ASSERT(suffix == pos, chain::plugin_exception, "corrupt ${name}.log (8)", ("name", name)); - state_history_summary summary{.pos = pos}; - index.write((char*)&summary, sizeof(summary)); + index.write((char*)&pos, sizeof(pos)); pos = suffix_pos + sizeof(suffix); if (!(++num_found % 10000)) { printf("%10u blocks found, log pos=%12llu\r", (unsigned)num_found, (unsigned long long)pos); @@ -244,10 +261,10 @@ class state_history_log { } uint64_t get_pos(uint32_t block_num) { - state_history_summary summary; - index.seekg((block_num - _begin_block) * sizeof(summary)); - index.read((char*)&summary, sizeof(summary)); - return summary.pos; + uint64_t pos; + index.seekg((block_num - _begin_block) * 
sizeof(pos)); + index.read((char*)&pos, sizeof(pos)); + return pos; } void truncate(uint32_t block_num) { @@ -267,7 +284,7 @@ class state_history_log { log.seekg(0); index.seekg(0); boost::filesystem::resize_file(log_filename, pos); - boost::filesystem::resize_file(index_filename, (block_num - _begin_block) * sizeof(state_history_summary)); + boost::filesystem::resize_file(index_filename, (block_num - _begin_block) * sizeof(uint64_t)); _end_block = block_num; } log.sync(); @@ -277,3 +294,5 @@ class state_history_log { }; // state_history_log } // namespace eosio + +FC_REFLECT(eosio::state_history_log_header, (magic)(block_id)(payload_size)) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp index f3429e2d190..a682b205e00 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp @@ -22,6 +22,53 @@ using std::shared_ptr; typedef shared_ptr state_history_ptr; +struct partial_transaction { + chain::time_point_sec expiration = {}; + uint16_t ref_block_num = {}; + uint32_t ref_block_prefix = {}; + fc::unsigned_int max_net_usage_words = {}; + uint8_t max_cpu_usage_ms = {}; + fc::unsigned_int delay_sec = {}; + chain::extensions_type transaction_extensions = {}; + vector signatures = {}; + vector context_free_data = {}; + + partial_transaction(const chain::signed_transaction& t) + : expiration(t.expiration) + , ref_block_num(t.ref_block_num) + , ref_block_prefix(t.ref_block_prefix) + , max_net_usage_words(t.max_net_usage_words) + , max_cpu_usage_ms(t.max_cpu_usage_ms) + , delay_sec(t.delay_sec) + , transaction_extensions(t.transaction_extensions) + , signatures(t.signatures) + , context_free_data(t.context_free_data) {} +}; + +struct augmented_transaction_trace { + chain::transaction_trace_ptr trace; + std::shared_ptr partial; + + augmented_transaction_trace() = default; + augmented_transaction_trace(const augmented_transaction_trace&) = default; + augmented_transaction_trace(augmented_transaction_trace&&) = default; + + augmented_transaction_trace(const chain::transaction_trace_ptr& trace) + : trace{trace} {} + + augmented_transaction_trace(const chain::transaction_trace_ptr& trace, + const std::shared_ptr& partial) + : trace{trace} + , partial{partial} {} + + augmented_transaction_trace(const chain::transaction_trace_ptr& trace, const chain::signed_transaction& t) + : trace{trace} + , partial{std::make_shared(t)} {} + + augmented_transaction_trace& operator=(const augmented_transaction_trace&) = default; + augmented_transaction_trace& operator=(augmented_transaction_trace&&) = default; +}; + struct table_delta { fc::unsigned_int struct_version = 0; std::string name{}; diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 37c817dd9cc..6685eadadd5 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -11,12 +11,15 @@ #include #include #include +#include #include #include #include #include #include +#include + template struct history_serial_wrapper { const chainbase::database& db; @@ -24,7 +27,7 @@ struct 
history_serial_wrapper { }; template -history_serial_wrapper make_history_serial_wrapper(const chainbase::database& db, const T& obj) { +history_serial_wrapper> make_history_serial_wrapper(const chainbase::database& db, const T& obj) { return {db, obj}; } @@ -36,7 +39,8 @@ struct history_context_wrapper { }; template -history_context_wrapper make_history_context_wrapper(const chainbase::database& db, P& context, const T& obj) { +history_context_wrapper, std::decay_t> +make_history_context_wrapper(const chainbase::database& db, const P& context, const T& obj) { return {db, context, obj}; } @@ -66,6 +70,16 @@ datastream& history_serialize_container(datastream& ds, const chainbase: return ds; } +template +datastream& history_context_serialize_container(datastream& ds, const chainbase::database& db, const P& context, + const std::vector& v) { + fc::raw::pack(ds, unsigned_int(v.size())); + for (const auto& x : v) { + ds << make_history_context_wrapper(db, context, x); + } + return ds; +} + template datastream& operator<<(datastream& ds, const history_serial_big_vector_wrapper& obj) { FC_ASSERT(obj.obj.size() <= 1024 * 1024 * 1024); @@ -75,10 +89,19 @@ datastream& operator<<(datastream& ds, const history_serial_big_vector_w return ds; } +template +inline void history_pack_varuint64(datastream& ds, uint64_t val) { + do { + uint8_t b = uint8_t(val) & 0x7f; + val >>= 7; + b |= ((val > 0) << 7); + ds.write((char*)&b, 1); + } while (val); +} + template void history_pack_big_bytes(datastream& ds, const eosio::chain::bytes& v) { - FC_ASSERT(v.size() <= 1024 * 1024 * 1024); - fc::raw::pack(ds, unsigned_int((uint32_t)v.size())); + history_pack_varuint64(ds, v.size()); if (v.size()) ds.write(&v.front(), (uint32_t)v.size()); } @@ -95,6 +118,11 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper +datastream& operator<<(datastream& ds, const history_context_wrapper>& obj) { + return history_context_serialize_container(ds, obj.db, obj.context, obj.obj); +} + template datastream& operator<<(datastream& ds, const history_serial_wrapper>& obj) { fc::raw::pack(ds, obj.obj.first); @@ -106,14 +134,35 @@ template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.creation_date)); + fc::raw::pack(ds, as_type(obj.obj.abi)); + return ds; +} + +template +datastream& operator<<(datastream& ds, + const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.is_privileged())); + fc::raw::pack(ds, as_type(obj.obj.last_code_update)); + bool has_code = obj.obj.code_hash != eosio::chain::digest_type(); + fc::raw::pack(ds, has_code); + if (has_code) { + fc::raw::pack(ds, as_type(obj.obj.vm_type)); + fc::raw::pack(ds, as_type(obj.obj.vm_version)); + fc::raw::pack(ds, as_type(obj.obj.code_hash)); + } + return ds; +} + +template +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(obj.obj.vm_type)); fc::raw::pack(ds, as_type(obj.obj.vm_version)); - fc::raw::pack(ds, as_type(obj.obj.privileged)); - fc::raw::pack(ds, as_type(obj.obj.last_code_update)); - fc::raw::pack(ds, as_type(obj.obj.code_version)); - fc::raw::pack(ds, as_type(obj.obj.creation_date)); + fc::raw::pack(ds, as_type(obj.obj.code_hash)); fc::raw::pack(ds, as_type(obj.obj.code)); - fc::raw::pack(ds, 
as_type(obj.obj.abi)); return ds; } @@ -129,8 +178,8 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& -operator<<(datastream& ds, - const history_context_wrapper& obj) { +operator<<(datastream& ds, + const history_context_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(obj.context.code.value)); fc::raw::pack(ds, as_type(obj.context.scope.value)); @@ -186,36 +235,36 @@ datastream& serialize_secondary_index(datastream& ds, const eosio::chain template datastream& -operator<<(datastream& ds, - const history_context_wrapper& obj) { +operator<<(datastream& ds, + const history_context_wrapper& obj) { return serialize_secondary_index(ds, obj.context, obj.obj); } template datastream& -operator<<(datastream& ds, - const history_context_wrapper& obj) { +operator<<(datastream& ds, + const history_context_wrapper& obj) { return serialize_secondary_index(ds, obj.context, obj.obj); } template datastream& -operator<<(datastream& ds, - const history_context_wrapper& obj) { +operator<<(datastream& ds, + const history_context_wrapper& obj) { return serialize_secondary_index(ds, obj.context, obj.obj); } template datastream& -operator<<(datastream& ds, - const history_context_wrapper& obj) { +operator<<(datastream& ds, + const history_context_wrapper& obj) { return serialize_secondary_index(ds, obj.context, obj.obj); } template datastream& operator<<( - datastream& ds, - const history_context_wrapper& obj) { + datastream& ds, + const history_context_wrapper& obj) { return serialize_secondary_index(ds, obj.context, obj.obj); } @@ -282,6 +331,23 @@ datastream& operator<<(datastream& return ds; } +template +datastream& +operator<<(datastream& ds, + const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, as_type(obj.obj.feature_digest)); + fc::raw::pack(ds, as_type(obj.obj.activation_block_num)); + return ds; +} + +template +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + history_serialize_container(ds, obj.db, obj.obj.activated_protocol_features); + return ds; +} + template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, as_type(obj.obj.key)); @@ -477,68 +543,134 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper cap_error_code( const fc::optional& error_code ) { + fc::optional result; + + if (!error_code) return result; + + const uint64_t upper_limit = static_cast(eosio::chain::system_error_code::generic_system_error); + + if (*error_code >= upper_limit) { + result = upper_limit; + return result; + } + + result = error_code; + return result; +} + template -datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { +datastream& operator<<(datastream& ds, const history_context_wrapper& obj) { + bool debug_mode = obj.context; fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.receipt))); + fc::raw::pack(ds, as_type(obj.obj.action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); + fc::raw::pack(ds, bool(obj.obj.receipt)); + if (obj.obj.receipt) { + fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.receipt))); + } + fc::raw::pack(ds, as_type(obj.obj.receiver.value)); fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.act))); fc::raw::pack(ds, as_type(obj.obj.context_free)); - fc::raw::pack(ds, as_type(obj.obj.elapsed.count())); - 
fc::raw::pack(ds, as_type(obj.obj.console)); + fc::raw::pack(ds, as_type(debug_mode ? obj.obj.elapsed.count() : 0)); + if (debug_mode) + fc::raw::pack(ds, as_type(obj.obj.console)); + else + fc::raw::pack(ds, std::string{}); history_serialize_container(ds, obj.db, as_type>(obj.obj.account_ram_deltas)); fc::optional e; - if (obj.obj.except) - e = obj.obj.except->to_string(); + if (obj.obj.except) { + if (debug_mode) + e = obj.obj.except->to_string(); + else + e = "Y"; + } fc::raw::pack(ds, as_type>(e)); + fc::raw::pack(ds, as_type>(debug_mode ? obj.obj.error_code + : cap_error_code(obj.obj.error_code))); - history_serialize_container(ds, obj.db, as_type>(obj.obj.inline_traces)); return ds; } template -datastream& operator<<(datastream& ds, - const history_context_wrapper& obj) { +datastream& operator<<(datastream& ds, + const history_context_wrapper, + eosio::augmented_transaction_trace>& obj) { + auto& trace = *obj.obj.trace; + bool debug_mode = obj.context.second; fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.id)); - if (obj.obj.receipt) { - if (obj.obj.failed_dtrx_trace && - obj.obj.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail) + fc::raw::pack(ds, as_type(trace.id)); + if (trace.receipt) { + if (trace.failed_dtrx_trace && trace.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail) fc::raw::pack(ds, uint8_t(eosio::chain::transaction_receipt_header::executed)); else - fc::raw::pack(ds, as_type(obj.obj.receipt->status.value)); - fc::raw::pack(ds, as_type(obj.obj.receipt->cpu_usage_us)); - fc::raw::pack(ds, as_type(obj.obj.receipt->net_usage_words)); + fc::raw::pack(ds, as_type(trace.receipt->status.value)); + fc::raw::pack(ds, as_type(trace.receipt->cpu_usage_us)); + fc::raw::pack(ds, as_type(trace.receipt->net_usage_words)); } else { - fc::raw::pack(ds, uint8_t(obj.context)); + fc::raw::pack(ds, uint8_t(obj.context.first)); fc::raw::pack(ds, uint32_t(0)); fc::raw::pack(ds, fc::unsigned_int(0)); } - fc::raw::pack(ds, as_type(obj.obj.elapsed.count())); - fc::raw::pack(ds, as_type(obj.obj.net_usage)); - fc::raw::pack(ds, as_type(obj.obj.scheduled)); - history_serialize_container(ds, obj.db, as_type>(obj.obj.action_traces)); + fc::raw::pack(ds, as_type(debug_mode ? trace.elapsed.count() : 0)); + fc::raw::pack(ds, as_type(trace.net_usage)); + fc::raw::pack(ds, as_type(trace.scheduled)); + history_context_serialize_container(ds, obj.db, debug_mode, + as_type>(trace.action_traces)); + + fc::raw::pack(ds, bool(trace.account_ram_delta)); + if (trace.account_ram_delta) { + fc::raw::pack( + ds, make_history_serial_wrapper(obj.db, as_type(*trace.account_ram_delta))); + } fc::optional e; - if (obj.obj.except) - e = obj.obj.except->to_string(); + if (trace.except) { + if (debug_mode) + e = trace.except->to_string(); + else + e = "Y"; + } fc::raw::pack(ds, as_type>(e)); + fc::raw::pack(ds, as_type>(debug_mode ? 
trace.error_code + : cap_error_code(trace.error_code))); - fc::raw::pack(ds, bool(obj.obj.failed_dtrx_trace)); - if (obj.obj.failed_dtrx_trace) { + fc::raw::pack(ds, bool(trace.failed_dtrx_trace)); + if (trace.failed_dtrx_trace) { uint8_t stat = eosio::chain::transaction_receipt_header::hard_fail; - if (obj.obj.receipt && obj.obj.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail) + if (trace.receipt && trace.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail) stat = eosio::chain::transaction_receipt_header::soft_fail; - fc::raw::pack(ds, make_history_context_wrapper(obj.db, stat, *obj.obj.failed_dtrx_trace)); + std::pair context = std::make_pair(stat, debug_mode); + fc::raw::pack( // + ds, make_history_context_wrapper( + obj.db, context, eosio::augmented_transaction_trace{trace.failed_dtrx_trace, obj.obj.partial})); + } + + bool include_partial = obj.obj.partial && !trace.failed_dtrx_trace; + fc::raw::pack(ds, include_partial); + if (include_partial) { + auto& partial = *obj.obj.partial; + fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, as_type(partial.expiration)); + fc::raw::pack(ds, as_type(partial.ref_block_num)); + fc::raw::pack(ds, as_type(partial.ref_block_prefix)); + fc::raw::pack(ds, as_type(partial.max_net_usage_words)); + fc::raw::pack(ds, as_type(partial.max_cpu_usage_ms)); + fc::raw::pack(ds, as_type(partial.delay_sec)); + fc::raw::pack(ds, as_type(partial.transaction_extensions)); + fc::raw::pack(ds, as_type>(partial.signatures)); + fc::raw::pack(ds, as_type>(partial.context_free_data)); } return ds; } template -datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - uint8_t stat = eosio::chain::transaction_receipt_header::hard_fail; - ds << make_history_context_wrapper(obj.db, stat, obj.obj); +datastream& operator<<(datastream& ds, + const history_context_wrapper& obj) { + std::pair context = std::make_pair(eosio::chain::transaction_receipt_header::hard_fail, obj.context); + ds << make_history_context_wrapper(obj.db, context, obj.obj); return ds; } diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 7aead3d1052..27ace9718d3 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -53,18 +53,81 @@ static bytes zlib_compress_bytes(bytes in) { return out; } +static bytes zlib_decompress(const bytes& in) { + bytes out; + bio::filtering_ostream decomp; + decomp.push(bio::zlib_decompressor()); + decomp.push(bio::back_inserter(out)); + bio::write(decomp, in.data(), in.size()); + bio::close(decomp); + return out; +} + +template +bool include_delta(const T& old, const T& curr) { + return true; +} + +bool include_delta(const eosio::chain::table_id_object& old, const eosio::chain::table_id_object& curr) { + return old.payer != curr.payer; +} + +bool include_delta(const eosio::chain::resource_limits::resource_limits_object& old, + const eosio::chain::resource_limits::resource_limits_object& curr) { + return // + old.net_weight != curr.net_weight || // + old.cpu_weight != curr.cpu_weight || // + old.ram_bytes != curr.ram_bytes; +} + +bool include_delta(const eosio::chain::resource_limits::resource_limits_state_object& old, + const eosio::chain::resource_limits::resource_limits_state_object& curr) { + return // + old.average_block_net_usage.last_ordinal != curr.average_block_net_usage.last_ordinal || // + old.average_block_net_usage.value_ex != 
curr.average_block_net_usage.value_ex || // + old.average_block_net_usage.consumed != curr.average_block_net_usage.consumed || // + old.average_block_cpu_usage.last_ordinal != curr.average_block_cpu_usage.last_ordinal || // + old.average_block_cpu_usage.value_ex != curr.average_block_cpu_usage.value_ex || // + old.average_block_cpu_usage.consumed != curr.average_block_cpu_usage.consumed || // + old.total_net_weight != curr.total_net_weight || // + old.total_cpu_weight != curr.total_cpu_weight || // + old.total_ram_bytes != curr.total_ram_bytes || // + old.virtual_net_limit != curr.virtual_net_limit || // + old.virtual_cpu_limit != curr.virtual_cpu_limit; +} + +bool include_delta(const eosio::chain::account_metadata_object& old, + const eosio::chain::account_metadata_object& curr) { + return // + old.name.value != curr.name.value || // + old.is_privileged() != curr.is_privileged() || // + old.last_code_update != curr.last_code_update || // + old.vm_type != curr.vm_type || // + old.vm_version != curr.vm_version || // + old.code_hash != curr.code_hash; +} + +bool include_delta(const eosio::chain::code_object& old, const eosio::chain::code_object& curr) { // + return false; +} + +bool include_delta(const eosio::chain::protocol_state_object& old, const eosio::chain::protocol_state_object& curr) { + return old.activated_protocol_features != curr.activated_protocol_features; +} + struct state_history_plugin_impl : std::enable_shared_from_this { - chain_plugin* chain_plug = nullptr; - fc::optional trace_log; - fc::optional chain_state_log; - bool stopping = false; - fc::optional applied_transaction_connection; - fc::optional accepted_block_connection; - string endpoint_address = "0.0.0.0"; - uint16_t endpoint_port = 8080; - std::unique_ptr acceptor; - std::map cached_traces; - transaction_trace_ptr onblock_trace; + chain_plugin* chain_plug = nullptr; + fc::optional trace_log; + fc::optional chain_state_log; + bool trace_debug_mode = false; + bool stopping = false; + fc::optional applied_transaction_connection; + fc::optional accepted_block_connection; + string endpoint_address = "0.0.0.0"; + uint16_t endpoint_port = 8080; + std::unique_ptr acceptor; + std::map cached_traces; + fc::optional onblock_trace; void get_log_entry(state_history_log& log, uint32_t block_num, fc::optional& result) { if (block_num < log.begin_block() || block_num >= log.end_block()) @@ -73,10 +136,10 @@ struct state_history_plugin_impl : std::enable_shared_from_thisresize(s); + bytes compressed(s); if (s) - stream.read(result->data(), s); + stream.read(compressed.data(), s); + result = zlib_decompress(compressed); } void get_block(uint32_t block_num, fc::optional& result) { @@ -86,7 +149,8 @@ struct state_history_plugin_impl : std::enable_shared_from_this get_block_id(uint32_t block_num) { @@ -315,7 +379,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this(app().get_io_service()); - acceptor->async_accept(*socket, [self = shared_from_this(), socket, this](auto ec) { + acceptor->async_accept(*socket, [self = shared_from_this(), socket, this](const boost::system::error_code& ec) { if (stopping) return; if (ec) { @@ -344,14 +408,14 @@ struct state_history_plugin_impl : std::enable_shared_from_thisreceipt && trace_log) { if (is_onblock(p)) - onblock_trace = p; + onblock_trace.emplace(p, t); else if (p->failed_dtrx_trace) - cached_traces[p->failed_dtrx_trace->id] = p; + cached_traces[p->failed_dtrx_trace->id] = augmented_transaction_trace{p, t}; else - cached_traces[p->id] = p; + cached_traces[p->id] = 
augmented_transaction_trace{p, t}; } } @@ -371,9 +435,9 @@ struct state_history_plugin_impl : std::enable_shared_from_this traces; + std::vector traces; if (onblock_trace) - traces.push_back(onblock_trace); + traces.push_back(*onblock_trace); for (auto& r : block_state->block->transactions) { transaction_id_type id; if (r.trx.contains()) @@ -381,7 +445,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this().id(); auto it = cached_traces.find(id); - EOS_ASSERT(it != cached_traces.end() && it->second->receipt, plugin_exception, + EOS_ASSERT(it != cached_traces.end() && it->second.trace->receipt, plugin_exception, "missing trace for transaction ${id}", ("id", id)); traces.push_back(it->second); } @@ -389,10 +453,10 @@ struct state_history_plugin_impl : std::enable_shared_from_thischain().db(); - auto traces_bin = zlib_compress_bytes(fc::raw::pack(make_history_serial_wrapper(db, traces))); + auto traces_bin = zlib_compress_bytes(fc::raw::pack(make_history_context_wrapper(db, trace_debug_mode, traces))); EOS_ASSERT(traces_bin.size() == (uint32_t)traces_bin.size(), plugin_exception, "traces is too big"); - state_history_log_header header{.block_num = block_state->block->block_num(), + state_history_log_header header{.magic = ship_magic(ship_current_version), .block_id = block_state->block->id(), .payload_size = sizeof(uint32_t) + traces_bin.size()}; trace_log->write_entry(header, block_state->block->previous, [&](auto& stream) { @@ -453,7 +517,8 @@ struct state_history_plugin_impl : std::enable_shared_from_this(), pack_row); + process_table("account_metadata", db.get_index(), pack_row); + process_table("code", db.get_index(), pack_row); process_table("contract_table", db.get_index(), pack_row); process_table("contract_row", db.get_index(), pack_contract_row); @@ -476,6 +543,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this(), pack_row); process_table("generated_transaction", db.get_index(), pack_row); + process_table("protocol_state", db.get_index(), pack_row); process_table("permission", db.get_index(), pack_row); process_table("permission_link", db.get_index(), pack_row); @@ -487,7 +555,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->block_num(), + state_history_log_header header{.magic = ship_magic(ship_current_version), .block_id = block_state->block->id(), .payload_size = sizeof(uint32_t) + deltas_bin.size()}; chain_state_log->write_entry(header, block_state->block->previous, [&](auto& stream) { @@ -514,6 +582,8 @@ void state_history_plugin::set_program_options(options_description& cli, options options("state-history-endpoint", bpo::value()->default_value("127.0.0.1:8080"), "the endpoint upon which to listen for incoming connections. 
Caution: only expose this port to " "your internal network."); + options("trace-history-debug-mode", bpo::bool_switch()->default_value(false), + "enable debug mode for trace history"); } void state_history_plugin::plugin_initialize(const variables_map& options) { @@ -525,7 +595,9 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { EOS_ASSERT(my->chain_plug, chain::missing_chain_plugin_exception, ""); auto& chain = my->chain_plug->chain(); my->applied_transaction_connection.emplace( - chain.applied_transaction.connect([&](const transaction_trace_ptr& p) { my->on_applied_transaction(p); })); + chain.applied_transaction.connect([&](std::tuple t) { + my->on_applied_transaction(std::get<0>(t), std::get<1>(t)); + })); my->accepted_block_connection.emplace( chain.accepted_block.connect([&](const block_state_ptr& p) { my->on_accepted_block(p); })); @@ -549,6 +621,10 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { } boost::filesystem::create_directories(state_history_dir); + if (options.at("trace-history-debug-mode").as()) { + my->trace_debug_mode = true; + } + if (options.at("trace-history").as()) my->trace_log.emplace("trace_history", (state_history_dir / "trace_history.log").string(), (state_history_dir / "trace_history.index").string()); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index bdedcc81cd9..9d5324b0bbf 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -93,14 +93,30 @@ extern const char* const state_history_plugin_abi = R"({ }, { "name": "action_trace_v0", "fields": [ - { "name": "receipt", "type": "action_receipt" }, + { "name": "action_ordinal", "type": "varuint32" }, + { "name": "creator_action_ordinal", "type": "varuint32" }, + { "name": "receipt", "type": "action_receipt?" }, + { "name": "receiver", "type": "name" }, { "name": "act", "type": "action" }, { "name": "context_free", "type": "bool" }, { "name": "elapsed", "type": "int64" }, { "name": "console", "type": "string" }, { "name": "account_ram_deltas", "type": "account_delta[]" }, { "name": "except", "type": "string?" }, - { "name": "inline_traces", "type": "action_trace[]" } + { "name": "error_code", "type": "uint64?" } + ] + }, + { + "name": "partial_transaction_v0", "fields": [ + { "name": "expiration", "type": "time_point_sec" }, + { "name": "ref_block_num", "type": "uint16" }, + { "name": "ref_block_prefix", "type": "uint32" }, + { "name": "max_net_usage_words", "type": "varuint32" }, + { "name": "max_cpu_usage_ms", "type": "uint8" }, + { "name": "delay_sec", "type": "varuint32" }, + { "name": "transaction_extensions", "type": "extension[]" }, + { "name": "signatures", "type": "signature[]" }, + { "name": "context_free_data", "type": "bytes[]" } ] }, { @@ -113,8 +129,11 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "net_usage", "type": "uint64" }, { "name": "scheduled", "type": "bool" }, { "name": "action_traces", "type": "action_trace[]" }, + { "name": "account_ram_delta", "type": "account_delta?" }, { "name": "except", "type": "string?" }, - { "name": "failed_dtrx_trace", "type": "transaction_trace?" } + { "name": "error_code", "type": "uint64?" }, + { "name": "failed_dtrx_trace", "type": "transaction_trace?" }, + { "name": "partial", "type": "partial_transaction?" 
} ] }, { @@ -183,18 +202,35 @@ extern const char* const state_history_plugin_abi = R"({ ] }, { - "name": "account_v0", "fields": [ - { "type": "name", "name": "name" }, + "name": "code_id", "fields": [ { "type": "uint8", "name": "vm_type" }, { "type": "uint8", "name": "vm_version" }, - { "type": "bool", "name": "privileged" }, - { "type": "time_point", "name": "last_code_update" }, - { "type": "checksum256", "name": "code_version" }, + { "type": "checksum256", "name": "code_hash" } + ] + }, + { + "name": "account_v0", "fields": [ + { "type": "name", "name": "name" }, { "type": "block_timestamp_type", "name": "creation_date" }, - { "type": "bytes", "name": "code" }, { "type": "bytes", "name": "abi" } ] }, + { + "name": "account_metadata_v0", "fields": [ + { "type": "name", "name": "name" }, + { "type": "bool", "name": "privileged" }, + { "type": "time_point", "name": "last_code_update" }, + { "type": "code_id?", "name": "code" } + ] + }, + { + "name": "code_v0", "fields": [ + { "type": "uint8", "name": "vm_type" }, + { "type": "uint8", "name": "vm_version" }, + { "type": "checksum256", "name": "code_hash" }, + { "type": "bytes", "name": "code" } + ] + }, { "name": "contract_table_v0", "fields": [ { "type": "name", "name": "code" }, @@ -312,6 +348,17 @@ extern const char* const state_history_plugin_abi = R"({ { "type": "bytes", "name": "packed_trx" } ] }, + { + "name": "activated_protocol_feature_v0", "fields": [ + { "type": "checksum256", "name": "feature_digest" }, + { "type": "uint32", "name": "activation_block_num" } + ] + }, + { + "name": "protocol_state_v0", "fields": [ + { "type": "activated_protocol_feature[]", "name": "activated_protocol_features" } + ] + }, { "name": "key_weight", "fields": [ { "type": "public_key", "name": "key" }, @@ -429,11 +476,14 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "action_receipt", "types": ["action_receipt_v0"] }, { "name": "action_trace", "types": ["action_trace_v0"] }, + { "name": "partial_transaction", "types": ["partial_transaction_v0"] }, { "name": "transaction_trace", "types": ["transaction_trace_v0"] }, { "name": "transaction_variant", "types": ["transaction_id", "packed_transaction"] }, { "name": "table_delta", "types": ["table_delta_v0"] }, { "name": "account", "types": ["account_v0"] }, + { "name": "account_metadata", "types": ["account_metadata_v0"] }, + { "name": "code", "types": ["code_v0"] }, { "name": "contract_table", "types": ["contract_table_v0"] }, { "name": "contract_row", "types": ["contract_row_v0"] }, { "name": "contract_index64", "types": ["contract_index64_v0"] }, @@ -444,6 +494,8 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "chain_config", "types": ["chain_config_v0"] }, { "name": "global_property", "types": ["global_property_v0"] }, { "name": "generated_transaction", "types": ["generated_transaction_v0"] }, + { "name": "activated_protocol_feature", "types": ["activated_protocol_feature_v0"] }, + { "name": "protocol_state", "types": ["protocol_state_v0"] }, { "name": "permission", "types": ["permission_v0"] }, { "name": "permission_link", "types": ["permission_link_v0"] }, { "name": "resource_limits", "types": ["resource_limits_v0"] }, @@ -456,6 +508,8 @@ extern const char* const state_history_plugin_abi = R"({ ], "tables": [ { "name": "account", "type": "account", "key_names": ["name"] }, + { "name": "account_metadata", "type": "account_metadata", "key_names": ["name"] }, + { "name": "code", "type": "code", "key_names": ["vm_type", "vm_version", "code_hash"] }, { "name": 
"contract_table", "type": "contract_table", "key_names": ["code", "scope", "table"] }, { "name": "contract_row", "type": "contract_row", "key_names": ["code", "scope", "table", "primary_key"] }, { "name": "contract_index64", "type": "contract_index64", "key_names": ["code", "scope", "table", "primary_key"] }, @@ -465,6 +519,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "contract_index_long_double", "type": "contract_index_long_double", "key_names": ["code", "scope", "table", "primary_key"] }, { "name": "global_property", "type": "global_property", "key_names": [] }, { "name": "generated_transaction", "type": "generated_transaction", "key_names": ["sender", "sender_id"] }, + { "name": "protocol_state", "type": "protocol_state", "key_names": [] }, { "name": "permission", "type": "permission", "key_names": ["owner", "name"] }, { "name": "permission_link", "type": "permission_link", "key_names": ["account", "code", "message_type"] }, { "name": "resource_limits", "type": "resource_limits", "key_names": ["owner"] }, diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index 307cccc197e..a932a27cad9 100644 --- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -40,8 +40,8 @@ struct async_result_visitor : public fc::visitor { [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ + fc::variant result( api_handle.call_name(fc::json::from_string(body).as()) ); \ + cb(http_response_code, std::move(result)); \ } catch (...) 
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index b7593a1a511..dbde59853ef 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -23,8 +23,7 @@ class test_control_plugin_impl { private: void accepted_block(const chain::block_state_ptr& bsp); void applied_irreversible_block(const chain::block_state_ptr& bsp); - void retrieve_next_block_state(const chain::block_state_ptr& bsp); - void process_next_block_state(const chain::block_header_state& bhs); + void process_next_block_state(const chain::block_state_ptr& bsp); fc::optional _accepted_block_connection; fc::optional _irreversible_block_connection; @@ -55,26 +54,17 @@ void test_control_plugin_impl::disconnect() { void test_control_plugin_impl::applied_irreversible_block(const chain::block_state_ptr& bsp) { if (_track_lib) - retrieve_next_block_state(bsp); + process_next_block_state(bsp); } void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) { if (_track_head) - retrieve_next_block_state(bsp); + process_next_block_state(bsp); } -void test_control_plugin_impl::retrieve_next_block_state(const chain::block_state_ptr& bsp) { - const auto hbn = bsp->block_num; - auto new_block_header = bsp->header; - new_block_header.timestamp = new_block_header.timestamp.next(); - new_block_header.previous = bsp->id; - auto new_bs = bsp->generate_next(new_block_header.timestamp); - process_next_block_state(new_bs); -} - -void test_control_plugin_impl::process_next_block_state(const chain::block_header_state& bhs) { +void test_control_plugin_impl::process_next_block_state(const chain::block_state_ptr& bsp) { const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us); - const auto& producer_name = bhs.get_scheduled_producer(block_time).producer_name; + const auto producer_name = bsp->get_scheduled_producer(block_time).producer_name; if (_producer != account_name()) ilog("producer ${cprod}, looking for ${lprod}", ("cprod", producer_name.to_string())("lprod", _producer.to_string())); diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index f86253fbfac..670114ea85c 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -28,9 +28,13 @@ using namespace eosio::testing; namespace eosio { namespace detail { struct txn_test_gen_empty {}; + struct txn_test_gen_status { + string status; + }; }} FC_REFLECT(eosio::detail::txn_test_gen_empty, ); +FC_REFLECT(eosio::detail::txn_test_gen_status, (status)); namespace eosio { @@ -45,7 +49,7 @@ using io_work_t = boost::asio::executor_work_guard(); \ - api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ - eosio::detail::txn_test_gen_empty result; + auto status = api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ + eosio::detail::txn_test_gen_status result = { status }; #define INVOKE_V_R_R(api_handle, call_name, in_param0, in_param1) \ const auto& vs = fc::json::json::from_string(body).as(); \ @@ -80,7 +84,7 @@ using io_work_t = boost::asio::executor_work_guard thread_pool; std::shared_ptr timer; + name newaccountA; + name newaccountB; + name newaccountT; void push_next_transaction(const std::shared_ptr>& trxs, const std::function& next ) { chain_plugin& cp = 
app().get_plugin(); @@ -127,13 +134,11 @@ struct txn_test_gen_plugin_impl { } void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { + ilog("create_test_accounts"); std::vector trxs; trxs.reserve(2); try { - name newaccountA("txn.test.a"); - name newaccountB("txn.test.b"); - name newaccountC("txn.test.t"); name creator(init_name); abi_def currency_abi_def = fc::json::from_string(contracts::eosio_token_abi().data()).as(); @@ -170,73 +175,85 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); } - //create "txn.test.t" account + //create "T" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.sign(creator_priv_key, chainid); trxs.emplace_back(std::move(trx)); } - //set txn.test.t contract to eosio.token & initialize it + //set newaccountT contract to eosio.token & initialize it { signed_transaction trx; vector wasm = contracts::eosio_token_wasm(); setcode handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.code.assign(wasm.begin(), wasm.end()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); { setabi handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.abi = fc::raw::pack(json::from_string(contracts::eosio_token_abi().data()).as()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(create); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"txn.test.t\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("create", + fc::json::from_string(fc::format_string("{\"issuer\":\"${issuer}\",\"maximum_supply\":\"1000000000.0000 CUR\"}}", + fc::mutable_variant_object()("issuer",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(issue); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("issue", + fc::json::from_string(fc::format_string("{\"to\":\"${to}\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("to",newaccountT.to_string()))), + abi_serializer_max_time); 
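+            // Editor's note (illustrative, not part of the original patch): fc::format_string
+            // does plain ${name} substitution from the mutable_variant_object, so with the
+            // default "txn.test." account prefix the string above expands to
+            //    {"to":"txn.test.t","quantity":"60000.0000 CUR","memo":""}
+            // before variant_to_binary packs it against the eosio.token ABI.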
trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountA.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountB.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.max_net_usage_words = 5000; trx.sign(txn_test_receiver_C_priv_key, chainid); @@ -250,15 +267,17 @@ struct txn_test_gen_plugin_impl { push_transactions(std::move(trxs), next); } - void start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + string start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + ilog("Starting transaction test plugin"); if(running) - throw fc::exception(fc::invalid_operation_exception_code); + return "start_generation already running"; if(period < 1 || period > 2500) - throw fc::exception(fc::invalid_operation_exception_code); + return "period must be between 1 and 2500"; if(batch_size < 1 || batch_size > 250) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be between 1 and 250"; if(batch_size & 1) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be even"; + ilog("Starting transaction test plugin valid"); running = true; @@ -266,20 +285,20 @@ struct txn_test_gen_plugin_impl { auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); abi_serializer eosio_token_serializer{fc::json::from_string(contracts::eosio_token_abi().data()).as(), abi_serializer_max_time}; //create the actions here - act_a_to_b.account = N(txn.test.t); + act_a_to_b.account = newaccountT; act_a_to_b.name = N(transfer); - act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; - act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + 
-  void start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) {
+  string start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) {
+     ilog("Starting transaction test plugin");
      if(running)
-        throw fc::exception(fc::invalid_operation_exception_code);
+        return "start_generation already running";
      if(period < 1 || period > 2500)
-        throw fc::exception(fc::invalid_operation_exception_code);
+        return "period must be between 1 and 2500";
      if(batch_size < 1 || batch_size > 250)
-        throw fc::exception(fc::invalid_operation_exception_code);
+        return "batch_size must be between 1 and 250";
      if(batch_size & 1)
-        throw fc::exception(fc::invalid_operation_exception_code);
+        return "batch_size must be even";
+     ilog("Starting transaction test plugin valid");

      running = true;

@@ -266,20 +285,20 @@ struct txn_test_gen_plugin_impl {
      auto abi_serializer_max_time = app().get_plugin<chain_plugin>().get_abi_serializer_max_time();
      abi_serializer eosio_token_serializer{fc::json::from_string(contracts::eosio_token_abi().data()).as<abi_def>(), abi_serializer_max_time};
      //create the actions here
-     act_a_to_b.account = N(txn.test.t);
+     act_a_to_b.account = newaccountT;
      act_a_to_b.name = N(transfer);
-     act_a_to_b.authorization = vector<permission_level>{{name("txn.test.a"),config::active_name}};
-     act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer",
-                          fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}",
-                             fc::mutable_variant_object()("l", salt))),
-                          abi_serializer_max_time);
+     act_a_to_b.authorization = vector<permission_level>{{newaccountA,config::active_name}};
+     act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer",
+                          fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}",
+                             fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))),
+                          abi_serializer_max_time);

-     act_b_to_a.account = N(txn.test.t);
+     act_b_to_a.account = newaccountT;
      act_b_to_a.name = N(transfer);
-     act_b_to_a.authorization = vector<permission_level>{{name("txn.test.b"),config::active_name}};
-     act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer",
-                          fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}",
-                             fc::mutable_variant_object()("l", salt))),
-                          abi_serializer_max_time);
+     act_b_to_a.authorization = vector<permission_level>{{newaccountB,config::active_name}};
+     act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer",
+                          fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}",
+                             fc::mutable_variant_object()("from",newaccountB.to_string())("to",newaccountA.to_string())("l", salt))),
+                          abi_serializer_max_time);

      timer_timeout = period;
@@ -299,6 +318,7 @@ struct txn_test_gen_plugin_impl {
      boost::asio::post( *gen_ioc, [this]() {
         arm_timer(boost::asio::high_resolution_timer::clock_type::now());
      });
+     return "success";
   }

   void arm_timer(boost::asio::high_resolution_timer::time_point s) {
@@ -371,6 +391,7 @@ struct txn_test_gen_plugin_impl {
            next(e.dynamic_copy_exception());
      }

+     ilog("send ${c} transactions", ("c",trxs.size()));
      push_transactions(std::move(trxs), next);
   }

@@ -414,6 +435,7 @@ void txn_test_gen_plugin::set_program_options(options_description&, options_description& cfg) {
   cfg.add_options()
      ("txn-reference-block-lag", bpo::value<int32_t>()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)")
      ("txn-test-gen-threads", bpo::value<uint16_t>()->default_value(2), "Number of worker threads in txn_test_gen thread pool")
+     ("txn-test-gen-account-prefix", bpo::value<std::string>()->default_value("txn.test."), "Prefix to use for accounts generated and used by this plugin")
      ;
}

@@ -422,6 +444,10 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) {
      my.reset( new txn_test_gen_plugin_impl );
      my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as<int32_t>();
      my->thread_pool_size = options.at( "txn-test-gen-threads" ).as<uint16_t>();
+     const std::string thread_pool_account_prefix = options.at( "txn-test-gen-account-prefix" ).as<std::string>();
+     my->newaccountA = thread_pool_account_prefix + "a";
+     my->newaccountB = thread_pool_account_prefix + "b";
+     my->newaccountT = thread_pool_account_prefix + "t";
      EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception,
                  "txn-test-gen-threads ${num} must be greater than 0", ("num", my->thread_pool_size) );
   } FC_LOG_AND_RETHROW()
@@ -439,7 +465,7 @@ void txn_test_gen_plugin::plugin_shutdown() {
   try {
      my->stop_generation();
   }
-  catch(fc::exception e) {
+  catch(fc::exception& e) {
   }
}
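`start_generation` above posts `arm_timer` onto a dedicated io context, and each expiry re-arms the timer relative to the previous deadline so drift does not accumulate. A self-contained sketch of that recurring-timer pattern (the period and the per-tick work are placeholders, not the plugin's values):

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>

int main() {
   boost::asio::io_context ioc;
   boost::asio::high_resolution_timer timer(ioc);
   std::function<void(std::chrono::high_resolution_clock::time_point)> arm =
      [&](std::chrono::high_resolution_clock::time_point deadline) {
         timer.expires_at(deadline += std::chrono::milliseconds(20));
         timer.async_wait([&, deadline](const boost::system::error_code& ec) {
            if (ec) return;                 // cancelled or error: stop generating
            std::cout << "send a batch\n";  // stand-in for sending a transaction batch
            arm(deadline);                  // re-arm from the previous deadline, not "now"
         });
      };
   arm(std::chrono::high_resolution_clock::now());
   ioc.run_for(std::chrono::milliseconds(100)); // bounded run for the sketch
}
```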
diff --git a/plugins/wallet_api_plugin/wallet_api_plugin.cpp b/plugins/wallet_api_plugin/wallet_api_plugin.cpp
index 0d88ec2966d..1750150885c 100644
--- a/plugins/wallet_api_plugin/wallet_api_plugin.cpp
+++ b/plugins/wallet_api_plugin/wallet_api_plugin.cpp
@@ -30,7 +30,7 @@ using namespace eosio;
      try { \
         if (body.empty()) body = "{}"; \
         INVOKE \
-        cb(http_response_code, fc::json::to_string(result)); \
+        cb(http_response_code, fc::variant(result)); \
      } catch (...) { \
         http_plugin::handle_exception(#api_name, #call_name, body, cb); \
      } \
diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp
index e5c70f1a307..9e33b194a37 100644
--- a/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp
+++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp
@@ -32,10 +32,10 @@ class se_wallet final : public wallet_api {
   string create_key(string key_type) override;
   bool remove_key(string key) override;

-  optional<signature_type> try_sign_digest(const digest_type digest, const public_key_type public_key) override;
+  fc::optional<signature_type> try_sign_digest(const digest_type digest, const public_key_type public_key) override;

private:
   std::unique_ptr<detail::se_wallet_impl> my;
};

-}}
\ No newline at end of file
+}}
diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp
index 480e7a32a44..900577d082c 100644
--- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp
+++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp
@@ -181,7 +181,7 @@ class soft_wallet final : public wallet_api

   /* Attempts to sign a digest via the given public_key
    */
-  optional<signature_type> try_sign_digest( const digest_type digest, const public_key_type public_key ) override;
+  fc::optional<signature_type> try_sign_digest( const digest_type digest, const public_key_type public_key ) override;

   std::shared_ptr<detail::soft_wallet_impl> my;
   void encrypt_keys();
diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp
index 0627eceff33..61929b04733 100644
--- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp
+++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp
@@ -101,7 +101,7 @@ class wallet_api
   /** Returns a signature given the digest and public_key, if this wallet can sign via that public key
    */
-  virtual optional<signature_type> try_sign_digest( const digest_type digest, const public_key_type public_key ) = 0;
+  virtual fc::optional<signature_type> try_sign_digest( const digest_type digest, const public_key_type public_key ) = 0;
};

}}
diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp
index e1c0da99118..49caa9c184b 100644
--- a/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp
+++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp
@@ -32,10 +32,10 @@ class yubihsm_wallet final : public wallet_api {
   string create_key(string key_type) override;
   bool remove_key(string key) override;

-  optional<signature_type> try_sign_digest(const digest_type digest, const public_key_type public_key) override;
+  fc::optional<signature_type> try_sign_digest(const digest_type digest, const public_key_type public_key) override;

private:
   std::unique_ptr<detail::yubihsm_wallet_impl> my;
};

-}}
\ No newline at end of file
+}}
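The explicit `fc::` qualification added throughout these headers matters because the C++17 switch brings `std::optional` into play: wherever both the `std` and `fc` namespaces are open, an unqualified `optional` becomes ambiguous. A self-contained illustration (stand-in types, not EOSIO code):

```cpp
#include <optional>

namespace fc { template<typename T> class optional { /* stand-in for fc::optional */ }; }

using namespace std;
using namespace fc;

// optional<int> x;        // error under C++17: std::optional vs fc::optional is ambiguous
fc::optional<int> y;       // explicit qualification, as the diff applies throughout

int main() { (void)y; }
```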
diff --git a/plugins/wallet_plugin/se_wallet.cpp b/plugins/wallet_plugin/se_wallet.cpp
index 8b43d569881..2f9ccfffc06 100644
--- a/plugins/wallet_plugin/se_wallet.cpp
+++ b/plugins/wallet_plugin/se_wallet.cpp
@@ -186,10 +186,10 @@ struct se_wallet_impl {
      return pub;
   }

-  optional<signature_type> try_sign_digest(const digest_type d, const public_key_type public_key) {
+  fc::optional<signature_type> try_sign_digest(const digest_type d, const public_key_type public_key) {
      auto it = _keys.find(public_key);
      if(it == _keys.end())
-        return optional<signature_type>{};
+        return fc::optional<signature_type>{};

      fc::ecdsa_sig sig = ECDSA_SIG_new();
      CFErrorRef error = nullptr;
@@ -370,8 +370,8 @@ bool se_wallet::remove_key(string key) {
   return my->remove_key(key);
}

-optional<signature_type> se_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) {
+fc::optional<signature_type> se_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) {
   return my->try_sign_digest(digest, public_key);
}

-}}
\ No newline at end of file
+}}
diff --git a/plugins/wallet_plugin/wallet.cpp b/plugins/wallet_plugin/wallet.cpp
index 53d57697ccd..a40027cb0a9 100644
--- a/plugins/wallet_plugin/wallet.cpp
+++ b/plugins/wallet_plugin/wallet.cpp
@@ -120,18 +120,18 @@ class soft_wallet_impl
   string get_wallet_filename() const { return _wallet_filename; }

-  optional<private_key_type> try_get_private_key(const public_key_type& id)const
+  fc::optional<private_key_type> try_get_private_key(const public_key_type& id)const
   {
      auto it = _keys.find(id);
      if( it != _keys.end() )
         return it->second;
-     return optional<private_key_type>();
+     return fc::optional<private_key_type>();
   }

-  optional<signature_type> try_sign_digest( const digest_type digest, const public_key_type public_key ) {
+  fc::optional<signature_type> try_sign_digest( const digest_type digest, const public_key_type public_key ) {
      auto it = _keys.find(public_key);
      if( it == _keys.end() )
-        return optional<signature_type>{};
+        return fc::optional<signature_type>{};
      return it->second.sign(digest);
   }

@@ -401,7 +401,7 @@ private_key_type soft_wallet::get_private_key( public_key_type pubkey )const
   return my->get_private_key( pubkey );
}

-optional<signature_type> soft_wallet::try_sign_digest( const digest_type digest, const public_key_type public_key ) {
+fc::optional<signature_type> soft_wallet::try_sign_digest( const digest_type digest, const public_key_type public_key ) {
   return my->try_sign_digest(digest, public_key);
}
diff --git a/plugins/wallet_plugin/wallet_manager.cpp b/plugins/wallet_plugin/wallet_manager.cpp
index b5287173670..43fa37bc61a 100644
--- a/plugins/wallet_plugin/wallet_manager.cpp
+++ b/plugins/wallet_plugin/wallet_manager.cpp
@@ -237,7 +237,7 @@ wallet_manager::sign_transaction(const chain::signed_transaction& txn, const flat_set<public_key_type>& keys,
   bool found = false;
   for (const auto& i : wallets) {
      if (!i.second->is_locked()) {
-        optional<signature_type> sig = i.second->try_sign_digest(stxn.sig_digest(id, stxn.context_free_data), pk);
+        fc::optional<signature_type> sig = i.second->try_sign_digest(stxn.sig_digest(id, stxn.context_free_data), pk);
         if (sig) {
            stxn.signatures.push_back(*sig);
            found = true;
@@ -260,7 +260,7 @@ wallet_manager::sign_digest(const chain::digest_type& digest, const public_key_type& key) {
   try {
      for (const auto& i : wallets) {
         if (!i.second->is_locked()) {
-           optional<signature_type> sig = i.second->try_sign_digest(digest, key);
+           fc::optional<signature_type> sig = i.second->try_sign_digest(digest, key);
            if (sig)
               return *sig;
         }
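The wallet_manager loops above rely on a simple contract: each wallet returns an engaged optional only when it actually holds the requested key, so the manager tries wallets in turn until one answers. A generic sketch of that "first provider that can answer wins" pattern (types deliberately simplified; this is not the EOSIO API):

```cpp
#include <functional>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

// Simplified stand-ins: a "wallet" is anything that may or may not produce a signature.
using digest    = std::string;
using signature = std::string;
using wallet    = std::function<std::optional<signature>(const digest&)>;

signature sign_with_first_capable(const std::vector<wallet>& wallets, const digest& d) {
   for (const auto& w : wallets)
      if (auto sig = w(d))   // disengaged optional means "not my key" -- keep looking
         return *sig;
   throw std::runtime_error("no unlocked wallet can sign this digest");
}
```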
diff --git a/plugins/wallet_plugin/yubihsm_wallet.cpp b/plugins/wallet_plugin/yubihsm_wallet.cpp
index 5676089c0e1..0f367457fa0 100644
--- a/plugins/wallet_plugin/yubihsm_wallet.cpp
+++ b/plugins/wallet_plugin/yubihsm_wallet.cpp
@@ -125,7 +125,7 @@ struct yubihsm_wallet_impl {
   void prime_keepalive_timer() {
      keepalive_timer.expires_at(std::chrono::steady_clock::now() + std::chrono::seconds(20));
-     keepalive_timer.async_wait([this](auto ec){
+     keepalive_timer.async_wait([this](const boost::system::error_code& ec){
         if(ec || !session)
            return;
@@ -139,10 +139,10 @@ struct yubihsm_wallet_impl {
      });
   }

-  optional<signature_type> try_sign_digest(const digest_type d, const public_key_type public_key) {
+  fc::optional<signature_type> try_sign_digest(const digest_type d, const public_key_type public_key) {
      auto it = _keys.find(public_key);
      if(it == _keys.end())
-        return optional<signature_type>{};
+        return fc::optional<signature_type>{};

      size_t der_sig_sz = 128;
      uint8_t der_sig[der_sig_sz];
@@ -265,8 +265,8 @@ bool yubihsm_wallet::remove_key(string key) {
   return true;
}

-optional<signature_type> yubihsm_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) {
+fc::optional<signature_type> yubihsm_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) {
   return my->try_sign_digest(digest, public_key);
}

-}}
\ No newline at end of file
+}}
diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt
index 0d98fdcf63d..0787c5fe937 100644
--- a/programs/cleos/CMakeLists.txt
+++ b/programs/cleos/CMakeLists.txt
@@ -1,4 +1,5 @@
-add_executable( ${CLI_CLIENT_EXECUTABLE_NAME} main.cpp httpc.cpp help_text.cpp localize.hpp config.hpp CLI11.hpp)
+configure_file(help_text.cpp.in help_text.cpp @ONLY)
+add_executable( ${CLI_CLIENT_EXECUTABLE_NAME} main.cpp httpc.cpp ${CMAKE_CURRENT_BINARY_DIR}/help_text.cpp localize.hpp config.hpp CLI11.hpp)
 if( UNIX AND NOT APPLE )
    set(rt_library rt )
 endif()
@@ -32,7 +33,7 @@ set(LOCALEDIR ${CMAKE_INSTALL_PREFIX}/share/locale)
 set(LOCALEDOMAIN ${CLI_CLIENT_EXECUTABLE_NAME})
 configure_file(config.hpp.in config.hpp ESCAPE_QUOTES)

-target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
+target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})

 target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} )
diff --git a/programs/cleos/config.hpp.in b/programs/cleos/config.hpp.in
index 3fe2051d74c..d9d5f45b1de 100644
--- a/programs/cleos/config.hpp.in
+++ b/programs/cleos/config.hpp.in
@@ -3,10 +3,12 @@
  *
  * \warning This file is machine generated. DO NOT EDIT.  See config.hpp.in for changes.
  */
+#pragma once
 namespace eosio { namespace client { namespace config {
    constexpr char version_str[] = "${cleos_BUILD_VERSION}";
    constexpr char locale_path[] = "${LOCALEDIR}";
    constexpr char locale_domain[] = "${LOCALEDOMAIN}";
    constexpr char key_store_executable_name[] = "${KEY_STORE_EXECUTABLE_NAME}";
+   constexpr char node_executable_name[] = "${NODE_EXECUTABLE_NAME}";
 }}}
diff --git a/programs/cleos/help_text.cpp b/programs/cleos/help_text.cpp.in
similarity index 98%
rename from programs/cleos/help_text.cpp
rename to programs/cleos/help_text.cpp.in
index b1f9161a54d..a133e17cade 100644
--- a/programs/cleos/help_text.cpp
+++ b/programs/cleos/help_text.cpp.in
@@ -65,7 +65,7 @@ const char* duplicate_key_import_help_text = _("This key is already imported into the wallet.");
 const char* unknown_abi_table_help_text = _(R"text(The ABI for the code on account "${1}" does not specify table "${2}".
 Please check the account and table name, and verify that the account has the expected code using:
-  cleos get code ${1})text");
+  @CLI_CLIENT_EXECUTABLE_NAME@ get code ${1})text");

 const char* failed_to_find_transaction_text = _("Failed to fetch information for transaction: \033[1m${1}\033[0m from the history plugin\n\n"
                                                 "\033[32mIf you know the block number which included this transaction you providing it with the \033[2m--block-hint\033[22m option may help\033[0m");
@@ -145,7 +145,7 @@ const char* error_advice_authority_type_exception = R"=====(Ensure that your authority JSON follows the right authority structure!
 )=====";
 const char* error_advice_action_type_exception = R"=====(Ensure that your action JSON follows the contract's abi!)=====";
 const char* error_advice_transaction_type_exception = R"=====(Ensure that your transaction JSON follows the right transaction format!
-You can refer to contracts/eosiolib/transaction.hpp for reference)=====";
+You can refer to eosio.cdt/libraries/eosiolib/transaction.hpp for reference)=====";
 const char* error_advice_abi_type_exception = R"=====(Ensure that your abi JSON follows the following format!
 {
   "types" : [{ "new_type_name":"type_name", "type":"type_name" }],
@@ -201,7 +201,7 @@ const char* error_advice_invalid_ref_block_exception = "Ensure that the reference block exist in the blockchain!";
 const char* error_advice_tx_duplicate = "You can try embedding eosio nonce action inside your transaction to ensure uniqueness.";

 const char* error_advice_invalid_action_args_exception = R"=====(Ensure that your arguments follow the contract abi!
-You can check the contract's abi by using 'cleos get code' command.)=====";
+You can check the contract's abi by using '@CLI_CLIENT_EXECUTABLE_NAME@ get code' command.)=====";

 const char* error_advice_permission_query_exception = "Most likely, the given account/permission doesn't exist in the blockchain.";
 const char* error_advice_account_query_exception = "Most likely, the given account doesn't exist in the blockchain.";
@@ -211,7 +211,7 @@ const char* error_advice_contract_query_exception = "Most likely, the given contract doesn't exist in the blockchain.";
 const char* error_advice_tx_irrelevant_sig = "Please remove the unnecessary signature from your transaction!";
 const char* error_advice_unsatisfied_authorization = "Ensure that you have the related private keys inside your wallet and your wallet is unlocked.";
 const char* error_advice_missing_auth_exception = R"=====(Ensure that you have the related authority inside your transaction!;
-If you are currently using 'cleos push action' command, try to add the relevant authority using -p option.)=====";
+If you are currently using '@CLI_CLIENT_EXECUTABLE_NAME@ push action' command, try to add the relevant authority using -p option.)=====";
 const char* error_advice_irrelevant_auth_exception = "Please remove the unnecessary authority from your action!";
 const char* error_advice_missing_chain_api_plugin_exception = "Ensure that you have \033[2meosio::chain_api_plugin\033[0m\033[32m added to your node's configuration!";
diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp
index 5503c8fe8ec..7d9326b9ed7 100644
--- a/programs/cleos/httpc.cpp
+++ b/programs/cleos/httpc.cpp
@@ -89,17 +89,21 @@ namespace eosio { namespace client { namespace http {
          if(std::regex_search(header, match, clregex))
             response_content_length = std::stoi(match[1]);
      }
-     EOS_ASSERT(response_content_length >= 0, invalid_http_response, "Invalid content-length response");

-     std::stringstream re;
-     // Write whatever content we already have to output.
-     response_content_length -= response.size();
-     if (response.size() > 0)
-        re << &response;
+     // Attempt to read the response body using the length indicated by the
+     // Content-length header. If the header was not present just read all available bytes.
+     if( response_content_length != -1 ) {
+        response_content_length -= response.size();
+        if( response_content_length > 0 )
+           boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length));
+     } else {
+        boost::system::error_code ec;
+        boost::asio::read(socket, response, boost::asio::transfer_all(), ec);
+        EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message()));
+     }

-     boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length));
+     std::stringstream re;
      re << &response;
      return re.str();
   }
diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp
index 8d8ba5d67bc..f90e58046c4 100644
--- a/programs/cleos/httpc.hpp
+++ b/programs/cleos/httpc.hpp
@@ -4,6 +4,8 @@
  */
 #pragma once

+#include "config.hpp"
+
 namespace eosio { namespace client { namespace http {
    namespace detail {
@@ -128,7 +130,7 @@ namespace eosio { namespace client { namespace http {
    const string wallet_remove_key = wallet_func_base + "/remove_key";
    const string wallet_create_key = wallet_func_base + "/create_key";
    const string wallet_sign_trx = wallet_func_base + "/sign_transaction";
-   const string keosd_stop = "/v1/keosd/stop";
+   const string keosd_stop = "/v1/" + string(client::config::key_store_executable_name) + "/stop";

    FC_DECLARE_EXCEPTION( connection_exception, 1100000, "Connection Exception" );
}}}
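The httpc change above distinguishes responses that advertise a Content-Length from those that simply close the stream. A condensed sketch of the two read strategies with boost::asio (socket/streambuf setup is factored into parameters, and the error handling is simplified to plain-TCP EOF rather than the SSL stream_truncated case the diff also accepts):

```cpp
#include <boost/asio.hpp>
#include <sstream>
#include <string>

// Reads the remainder of an HTTP body from `socket` into `response`.
// content_length is -1 when no Content-Length header was seen.
template<typename SyncReadStream>
std::string read_body(SyncReadStream& socket, boost::asio::streambuf& response, long content_length) {
   if (content_length != -1) {
      long remaining = content_length - static_cast<long>(response.size());
      if (remaining > 0)   // read exactly what the header promised, no more
         boost::asio::read(socket, response, boost::asio::transfer_exactly(remaining));
   } else {
      boost::system::error_code ec;   // read until EOF; eof is the normal terminator here
      boost::asio::read(socket, response, boost::asio::transfer_all(), ec);
      if (ec && ec != boost::asio::error::eof)
         throw boost::system::system_error(ec);
   }
   std::stringstream re;
   re << &response;
   return re.str();
}
```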
diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index fa45ba36952..59fc67679e6 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -252,9 +252,9 @@ fc::variant call( const std::string& url,
    }
    catch(boost::system::system_error& e) {
       if(url == ::url)
-         std::cerr << localized("Failed to connect to nodeos at ${u}; is nodeos running?", ("u", url)) << std::endl;
+         std::cerr << localized("Failed to connect to ${n} at ${u}; is ${n} running?", ("n", node_executable_name)("u", url)) << std::endl;
       else if(url == ::wallet_url)
-         std::cerr << localized("Failed to connect to keosd at ${u}; is keosd running?", ("u", url)) << std::endl;
+         std::cerr << localized("Failed to connect to ${k} at ${u}; is ${k} running?", ("k", key_store_executable_name)("u", url)) << std::endl;
       throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, e.what())});
    }
}
@@ -305,7 +305,7 @@ void sign_transaction(signed_transaction& trx, fc::variant& required_keys, const chain_id_type& chain_id) {
    trx = signed_trx.as<signed_transaction>();
}

-fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000, packed_transaction::compression_type compression = packed_transaction::none ) {
+fc::variant push_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) {
    auto info = get_info();

    if (trx.signatures.size() == 0) { // #5445 can't change txn content if already signed
@@ -347,11 +347,11 @@ fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000
    }
}

-fc::variant push_actions(std::vector<chain::action>&& actions, int32_t extra_kcpu, packed_transaction::compression_type compression = packed_transaction::none ) {
+fc::variant push_actions(std::vector<chain::action>&& actions, packed_transaction::compression_type compression = packed_transaction::none ) {
    signed_transaction trx;
    trx.actions = std::forward<decltype(actions)>(actions);

-   return push_transaction(trx, extra_kcpu, compression);
+   return push_transaction(trx, compression);
}

 void print_action( const fc::variant& at ) {
@@ -382,14 +382,14 @@ void print_action( const fc::variant& at ) {
 }

 //resolver for ABI serializer to decode actions in proposed transaction in multisig contract
-auto abi_serializer_resolver = [](const name& account) -> optional<abi_serializer> {
-   static unordered_map<account_name, optional<abi_serializer> > abi_cache;
+auto abi_serializer_resolver = [](const name& account) -> fc::optional<abi_serializer> {
+   static unordered_map<account_name, fc::optional<abi_serializer> > abi_cache;
    auto it = abi_cache.find( account );
    if ( it == abi_cache.end() ) {
       auto result = call(get_abi_func, fc::mutable_variant_object("account_name", account));
       auto abi_results = result.as<eosio::chain_apis::read_only::get_abi_results>();

-      optional<abi_serializer> abis;
+      fc::optional<abi_serializer> abis;
       if( abi_results.abi.valid() ) {
          abis.emplace( *abi_results.abi, abi_serializer_max_time );
       } else {
@@ -483,7 +483,7 @@ void print_result( const fc::variant& result ) { try {
          cerr << " us\n";

          if( status == "failed" ) {
-            auto soft_except = processed["except"].as<optional<fc::exception>>();
+            auto soft_except = processed["except"].as<fc::optional<fc::exception>>();
             if( soft_except ) {
                edump((soft_except->to_detail_string()));
             }
@@ -500,8 +500,8 @@ void print_result( const fc::variant& result ) { try {
 } FC_CAPTURE_AND_RETHROW( (result) ) }

 using std::cout;
-void send_actions(std::vector<chain::action>&& actions, int32_t extra_kcpu = 1000, packed_transaction::compression_type compression = packed_transaction::none ) {
-   auto result = push_actions( move(actions), extra_kcpu, compression);
+void send_actions(std::vector<chain::action>&& actions, packed_transaction::compression_type compression = packed_transaction::none ) {
+   auto result = push_actions( move(actions), compression);

    if( tx_print_json ) {
       cout << fc::json::to_pretty_string( result ) << endl;
@@ -510,8 +510,8 @@ void send_actions(std::vector<chain::action>&& actions,
    }
 }

-void send_transaction( signed_transaction& trx, int32_t extra_kcpu, packed_transaction::compression_type compression = packed_transaction::none  ) {
-   auto result = push_transaction(trx, extra_kcpu, compression);
+void send_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none  ) {
+   auto result = push_transaction(trx, compression);

    if( tx_print_json ) {
       cout << fc::json::to_pretty_string( result ) << endl;
@@ -879,8 +879,8 @@ void try_local_port(uint32_t duration) {
    auto start_time = duration_cast<milliseconds>( system_clock::now().time_since_epoch() ).count();
    while ( !local_port_used()) {
       if (duration_cast<milliseconds>( system_clock::now().time_since_epoch()).count() - start_time > duration ) {
-         std::cerr << "Unable to connect to keosd, if keosd is running please kill the process and try again.\n";
-         throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, "Unable to connect to keosd")});
+         std::cerr << "Unable to connect to " << key_store_executable_name << ", if " << key_store_executable_name << " is running please kill the process and try again.\n";
+         throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, "Unable to connect to ${k}", ("k", key_store_executable_name))});
       }
    }
}
@@ -938,7 +938,7 @@ void ensure_keosd_running(CLI::App* app) {
       }
    } else {
       std::cerr << "No wallet service listening on "
-                << ". Cannot automatically start keosd because keosd was not found." << std::endl;
+                << ". Cannot automatically start " << key_store_executable_name << " because " << key_store_executable_name << " was not found." << std::endl;
    }
}

@@ -1312,9 +1312,50 @@ struct get_transaction_id_subcommand {

       get_transaction_id->set_callback([&] {
          try {
-            auto trx_var = json_from_file_or_string(trx_to_check);
-            auto trx = trx_var.as<transaction>();
-            std::cout << string(trx.id()) << std::endl;
+            fc::variant trx_var = json_from_file_or_string(trx_to_check);
+            if( trx_var.is_object() ) {
+               fc::variant_object& vo = trx_var.get_object();
+               // if actions.data & actions.hex_data provided, use the hex_data since only currently support unexploded data
+               if( vo.contains("actions") ) {
+                  if( vo["actions"].is_array() ) {
+                     fc::mutable_variant_object mvo = vo;
+                     fc::variants& action_variants = mvo["actions"].get_array();
+                     for( auto& action_v : action_variants ) {
+                        if( !action_v.is_object() ) {
+                           std::cerr << "Empty 'action' in transaction" << endl;
+                           return;
+                        }
+                        fc::variant_object& action_vo = action_v.get_object();
+                        if( action_vo.contains( "data" ) && action_vo.contains( "hex_data" ) ) {
+                           fc::mutable_variant_object maction_vo = action_vo;
+                           maction_vo["data"] = maction_vo["hex_data"];
+                           action_vo = maction_vo;
+                           vo = mvo;
+                        } else if( action_vo.contains( "data" ) ) {
+                           if( !action_vo["data"].is_string() ) {
+                              std::cerr << "get transaction_id only supports un-exploded 'data' (hex form)" << std::endl;
+                              return;
+                           }
+                        }
+                     }
+                  } else {
+                     std::cerr << "transaction json 'actions' is not an array" << std::endl;
+                     return;
+                  }
+               } else {
+                  std::cerr << "transaction json does not include 'actions'" << std::endl;
+                  return;
+               }
+               auto trx = trx_var.as<transaction>();
+               transaction_id_type id = trx.id();
+               if( id == transaction().id() ) {
+                  std::cerr << "file/string does not represent a transaction" << std::endl;
+               } else {
+                  std::cout << string( id ) << std::endl;
+               }
+            } else {
+               std::cerr << "file/string does not represent a transaction" << std::endl;
+            }
          } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trx_to_check))
       });
    }
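The rewrite above prefers an action's packed `hex_data` over its decoded `data`, because only the packed form hashes to the on-chain transaction id. A minimal sketch of that variant rewrite with fc (illustrative values, and the hex string is truncated for the sketch):

```cpp
#include <fc/io/json.hpp>
#include <fc/variant_object.hpp>
#include <iostream>

int main() {
   // An "exploded" action carrying both decoded data and its packed hex form:
   fc::mutable_variant_object action;
   action("account", "eosio.token")
         ("name", "transfer")
         ("data", fc::mutable_variant_object()("from", "alice")("to", "bob"))
         ("hex_data", "000000008090b1ca");     // illustrative, truncated
   // Same rewrite `cleos get transaction_id` now performs before hashing:
   action["data"] = action["hex_data"];
   std::cout << fc::json::to_string( fc::variant(action) ) << "\n";
}
```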
@@ -2293,17 +2334,17 @@ int main( int argc, char** argv ) {
    CLI::App app{"Command Line Interface to EOSIO Client"};
    app.require_subcommand();
-   app.add_option( "-H,--host", obsoleted_option_host_port, localized("the host where nodeos is running") )->group("hidden");
-   app.add_option( "-p,--port", obsoleted_option_host_port, localized("the port where nodeos is running") )->group("hidden");
-   app.add_option( "--wallet-host", obsoleted_option_host_port, localized("the host where keosd is running") )->group("hidden");
-   app.add_option( "--wallet-port", obsoleted_option_host_port, localized("the port where keosd is running") )->group("hidden");
+   app.add_option( "-H,--host", obsoleted_option_host_port, localized("the host where ${n} is running", ("n", node_executable_name)) )->group("hidden");
+   app.add_option( "-p,--port", obsoleted_option_host_port, localized("the port where ${n} is running", ("n", node_executable_name)) )->group("hidden");
+   app.add_option( "--wallet-host", obsoleted_option_host_port, localized("the host where ${k} is running", ("k", key_store_executable_name)) )->group("hidden");
+   app.add_option( "--wallet-port", obsoleted_option_host_port, localized("the port where ${k} is running", ("k", key_store_executable_name)) )->group("hidden");

-   app.add_option( "-u,--url", url, localized("the http/https URL where nodeos is running"), true );
-   app.add_option( "--wallet-url", wallet_url, localized("the http/https URL where keosd is running"), true );
+   app.add_option( "-u,--url", url, localized("the http/https URL where ${n} is running", ("n", node_executable_name)), true );
+   app.add_option( "--wallet-url", wallet_url, localized("the http/https URL where ${k} is running", ("k", key_store_executable_name)), true );

    app.add_option( "-r,--header", header_opt_callback, localized("pass specific HTTP header; repeat this option to pass multiple headers"));
    app.add_flag( "-n,--no-verify", no_verify, localized("don't verify peer certificate when using HTTPS"));
-   app.add_flag( "--no-auto-keosd", no_auto_keosd, localized("don't automatically launch a keosd if one is not currently running"));
+   app.add_flag( "--no-auto-" + string(key_store_executable_name), no_auto_keosd, localized("don't automatically launch a ${k} if one is not currently running", ("k", key_store_executable_name)));
    app.set_callback([&app]{ ensure_keosd_running(&app);});

    app.add_flag( "-v,--verbose", verbose, localized("output verbose errors and action console output"));
@@ -2360,7 +2401,7 @@ int main( int argc, char** argv ) {
    bool pack_action_data_flag = false;
    auto pack_transaction = convert->add_subcommand("pack_transaction", localized("From plain signed json to packed form"));
    pack_transaction->add_option("transaction", plain_signed_transaction_json, localized("The plain signed json (string)"))->required();
-   pack_transaction->add_flag("--pack-action-data", pack_action_data_flag, localized("Pack all action data within transaction, needs interaction with nodeos"));
+   pack_transaction->add_flag("--pack-action-data", pack_action_data_flag, localized("Pack all action data within transaction, needs interaction with ${n}", ("n", node_executable_name)));
    pack_transaction->set_callback([&] {
       fc::variant trx_var;
       try {
@@ -2383,7 +2424,7 @@ int main( int argc, char** argv ) {
    bool unpack_action_data_flag = false;
    auto unpack_transaction = convert->add_subcommand("unpack_transaction", localized("From packed to plain signed json form"));
    unpack_transaction->add_option("transaction", packed_transaction_json, localized("The packed transaction json (string containing packed_trx and optionally compression fields)"))->required();
-   unpack_transaction->add_flag("--unpack-action-data", unpack_action_data_flag, localized("Unpack all action data within transaction, needs interaction with nodeos"));
+   unpack_transaction->add_flag("--unpack-action-data", unpack_action_data_flag, localized("Unpack all action data within transaction, needs interaction with ${n}", ("n", node_executable_name)));
    unpack_transaction->set_callback([&] {
       fc::variant packed_trx_var;
       packed_transaction packed_trx;
@@ -2503,7 +2544,7 @@ int main( int argc, char** argv ) {
             code_hash = old_result["code_hash"].as_string();
             if(code_as_wasm) {
                wasm = old_result["wasm"].as_string();
-               std::cout << localized("Warning: communicating to older nodeos which returns malformed binary wasm") << std::endl;
+               std::cout << localized("Warning: communicating to older ${n} which returns malformed binary wasm", ("n", node_executable_name)) << std::endl;
             } else
                wast = old_result["wast"].as_string();
@@ -2933,7 +2974,7 @@ int main( int argc, char** argv ) {
          actions.emplace_back( create_setcode(account, code_bytes ) );
          if ( shouldSend ) {
             std::cerr << localized("Setting Code...") << std::endl;
-            send_actions(std::move(actions), 10000, packed_transaction::zlib);
+            send_actions(std::move(actions), packed_transaction::zlib);
          }
       } else {
          std::cerr << localized("Skipping set code because the new code is the same as the existing code") << std::endl;
@@ -2981,7 +3022,7 @@ int main( int argc, char** argv ) {
       } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON")
JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } } else { std::cerr << localized("Skipping set abi because the new abi is the same as the existing abi") << std::endl; @@ -2998,7 +3039,7 @@ int main( int argc, char** argv ) { set_abi_callback(); if (actions.size()) { std::cerr << localized("Publishing contract...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } else { std::cout << "no transaction is sent" << std::endl; } @@ -3232,7 +3273,7 @@ int main( int argc, char** argv ) { std::cout << fc::json::to_pretty_string(v) << std::endl; }); - auto stopKeosd = wallet->add_subcommand("stop", localized("Stop keosd."), false); + auto stopKeosd = wallet->add_subcommand("stop", localized("Stop ${k}.", ("k", key_store_executable_name)), false); stopKeosd->set_callback([] { const auto& v = call(wallet_url, keosd_stop); if ( !v.is_object() || v.get_object().size() != 0 ) { //on success keosd responds with empty object @@ -3261,7 +3302,7 @@ int main( int argc, char** argv ) { fc::optional chain_id; if( str_chain_id.size() == 0 ) { - ilog( "grabbing chain_id from nodeos" ); + ilog( "grabbing chain_id from ${n}", ("n", node_executable_name) ); auto info = get_info(); chain_id = info.chain_id; } else { diff --git a/programs/eosio-launcher/config.hpp.in b/programs/eosio-launcher/config.hpp.in index f60e6ab19e5..f733308dc1b 100644 --- a/programs/eosio-launcher/config.hpp.in +++ b/programs/eosio-launcher/config.hpp.in @@ -11,6 +11,7 @@ namespace eosio { namespace launcher { namespace config { constexpr char version_str[] = "${launcher_BUILD_VERSION}"; + constexpr char node_executable_name[] = "${NODE_EXECUTABLE_NAME}"; }}} #endif // CONFIG_HPP_IN diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 66b40819b9d..99febd23a1c 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -48,6 +48,7 @@ using bpo::options_description; using bpo::variables_map; using public_key_type = fc::crypto::public_key; using private_key_type = fc::crypto::private_key; +using namespace eosio::launcher::config; const string block_dir = "blocks"; const string shared_mem_dir = "state"; @@ -247,6 +248,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; + bool dont_start = false; }; void @@ -326,12 +328,6 @@ struct last_run_def { vector running_nodes; }; - -enum class p2p_plugin { - NET, - BNET -}; - enum launch_modes { LM_NONE, LM_LOCAL, @@ -390,11 +386,11 @@ string producer_names::producer_name(unsigned int producer_number) { struct launcher_def { bool force_overwrite; size_t total_nodes; + size_t unstarted_nodes; size_t prod_nodes; size_t producers; size_t next_node; string shape; - p2p_plugin p2p; allowed_connection allowed_connections = PC_NONE; bfs::path genesis; bfs::path output; @@ -408,6 +404,7 @@ struct launcher_def { bool skip_transaction_signatures = false; string eosd_extra_args; std::map specific_nodeos_args; + std::map specific_nodeos_installation_paths; testnet_def network; string gelf_endpoint; vector aliases; @@ -480,22 +477,24 @@ launcher_def::set_options (bpo::options_description &cfg) { cfg.add_options() ("force,f", bpo::bool_switch(&force_overwrite)->default_value(false), "Force overwrite of existing configuration files and erase blockchain") 
("nodes,n",bpo::value(&total_nodes)->default_value(1),"total number of nodes to configure and launch") + ("unstarted-nodes",bpo::value(&unstarted_nodes)->default_value(0),"total number of nodes to configure, but not launch") ("pnodes,p",bpo::value(&prod_nodes)->default_value(1),"number of nodes that contain one or more producers") ("producers",bpo::value(&producers)->default_value(21),"total number of non-bios producer instances in this network") ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"") ("shape,s",bpo::value(&shape)->default_value("star"),"network topology, use \"star\" \"mesh\" or give a filename for custom") - ("p2p-plugin", bpo::value()->default_value("net"),"select a p2p plugin to use (either net or bnet). Defaults to net.") ("genesis,g",bpo::value()->default_value("./genesis.json"),"set the path to genesis.json") - ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") - ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") - ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag") - ("specific-nodeos", bpo::value>()->composing(), "forward nodeos command line argument(s) to its paired specific instance of nodeos(using \"--specific-num\"), enclose arg(s) in quotes") + ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), (string(node_executable_name) + " does not require transaction signatures.").c_str()) + (node_executable_name, bpo::value(&eosd_extra_args), ("forward " + string(node_executable_name) + " command line argument(s) to each instance of " + string(node_executable_name) + ", enclose arg(s) in quotes").c_str()) + ("specific-num", bpo::value>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) (using \"--specific-" + string(node_executable_name) + "\" flag) to this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--specific-" + string(node_executable_name) +"\" flag each time it is used").c_str()) + (("specific-" + string(node_executable_name)).c_str(), bpo::value>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) to its paired specific instance of " + string(node_executable_name) + "(using \"--specific-num\"), enclose arg(s) in quotes").c_str()) + ("spcfc-inst-num", bpo::value>()->composing(), ("Specify a specific version installation path (using \"--spcfc-inst-"+ string(node_executable_name) + "\" flag) for launching this specific instance of " + string(node_executable_name) + ". 
+   (("spcfc-inst-" + string(node_executable_name)).c_str(), bpo::value<vector<string>>()->composing(), ("Provide a specific version installation path to its paired specific instance of " + string(node_executable_name) + "(using \"--spcfc-inst-num\")").c_str())
    ("delay,d",bpo::value<int>(&start_delay)->default_value(0),"seconds delay before starting each node after the first")
    ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.")
    ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files")
    ("host-map",bpo::value<bfs::path>(),"a file containing mapping specific nodes to hosts. Used to enhance the custom shape argument")
    ("servers",bpo::value<bfs::path>(),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ")
-   ("per-host",bpo::value<int>(&per_host)->default_value(0),"specifies how many nodeos instances will run on a single host. Use 0 to indicate all on one.")
+   ("per-host",bpo::value<int>(&per_host)->default_value(0),("specifies how many " + string(node_executable_name) + " instances will run on a single host. Use 0 to indicate all on one.").c_str())
    ("network-name",bpo::value<string>(&network.name)->default_value("testnet_"),"network name prefix used in GELF logging source")
    ("enable-gelf-logging",bpo::value<bool>(&gelf_enabled)->default_value(true),"enable gelf logging appender in logging configuration file")
    ("gelf-endpoint",bpo::value<string>(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint")
@@ -513,6 +512,28 @@ inline enum_type& operator|=(enum_type&lhs, const enum_type& rhs)
   return lhs = static_cast<enum_type>(static_cast<T>(lhs) | static_cast<T>(rhs));
}

+template<typename T>
+void retrieve_paired_array_parameters (const variables_map &vmap, const std::string& num_selector, const std::string& paired_selector, std::map<uint, T>& selector_map) {
+   if (vmap.count(num_selector)) {
+      const auto specific_nums = vmap[num_selector].as<vector<uint>>();
+      const auto specific_args = vmap[paired_selector].as<vector<T>>();
+      if (specific_nums.size() != specific_args.size()) {
+         cerr << "ERROR: every " << num_selector << " argument must be paired with a " << paired_selector << " argument" << endl;
+         exit (-1);
+      }
+      const auto total_nodes = vmap["nodes"].as<size_t>();
+      for(uint i = 0; i < specific_nums.size(); ++i)
+      {
+         const auto& num = specific_nums[i];
+         if (num >= total_nodes) {
+            cerr << "\"--" << num_selector << "\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl;
+            exit (-1);
+         }
+         selector_map[num] = specific_args[i];
+      }
+   }
+}
+
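A standalone illustration of how such paired, repeatable options behave with boost::program_options `composing()` — each `--num` must line up positionally with an `--arg` (the option names here are generic placeholders, not the launcher's):

```cpp
#include <boost/program_options.hpp>
#include <iostream>
#include <map>
#include <string>
#include <vector>

namespace bpo = boost::program_options;

// e.g.: ./a.out --num 1 --arg "--plugin x" --num 3 --arg "--plugin y"
int main(int argc, char** argv) {
   bpo::options_description desc;
   desc.add_options()
      ("num", bpo::value<std::vector<unsigned>>()->composing(), "index of the paired --arg")
      ("arg", bpo::value<std::vector<std::string>>()->composing(), "argument for that index");
   bpo::variables_map vm;
   bpo::store(bpo::parse_command_line(argc, argv, desc), vm);

   std::map<unsigned, std::string> by_index;
   if (vm.count("num") && vm.count("arg")) {
      auto nums = vm["num"].as<std::vector<unsigned>>();
      auto args = vm["arg"].as<std::vector<std::string>>();
      if (nums.size() != args.size()) { std::cerr << "every --num needs an --arg\n"; return 1; }
      for (size_t i = 0; i < nums.size(); ++i) by_index[nums[i]] = args[i];
   }
   for (auto& [k, v] : by_index) std::cout << k << " -> " << v << "\n";
}
```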
value=" << total_nodes << endl; - exit (-1); - } - specific_nodeos_args[num] = specific_args[i]; - } - } + retrieve_paired_array_parameters(vmap, "specific-num", "specific-" + string(node_executable_name), specific_nodeos_args); + retrieve_paired_array_parameters(vmap, "spcfc-inst-num", "spcfc-inst-" + string(node_executable_name), specific_nodeos_installation_paths); using namespace std::chrono; system_clock::time_point now = system_clock::now(); @@ -585,20 +590,6 @@ launcher_def::initialize (const variables_map &vmap) { host_map_file = src.stem().string() + "_hosts.json"; } - string nc = vmap["p2p-plugin"].as(); - if ( !nc.empty() ) { - if (boost::iequals(nc,"net")) - p2p = p2p_plugin::NET; - else if (boost::iequals(nc,"bnet")) - p2p = p2p_plugin::BNET; - else { - p2p = p2p_plugin::NET; - } - } - else { - p2p = p2p_plugin::NET; - } - if( !host_map_file.empty() ) { try { fc::json::from_file(host_map_file).as>(bindings); @@ -625,7 +616,31 @@ launcher_def::initialize (const variables_map &vmap) { if (prod_nodes > (producers + 1)) prod_nodes = producers; if (prod_nodes > total_nodes) - total_nodes = prod_nodes; + total_nodes = prod_nodes + unstarted_nodes; + else if (total_nodes < prod_nodes + unstarted_nodes) { + cerr << "ERROR: if provided, \"--nodes\" must be equal or greater than the number of nodes indicated by \"--pnodes\" and \"--unstarted-nodes\"." << endl; + exit (-1); + } + + if (vmap.count("specific-num")) { + const auto specific_nums = vmap["specific-num"].as>(); + const auto specific_args = vmap["specific-" + string(node_executable_name)].as>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every specific-num argument must be paired with a specific-" << node_executable_name << " argument" << endl; + exit (-1); + } + // don't include bios + const auto allowed_nums = total_nodes - 1; + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= allowed_nums) { + cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + specific_nodeos_args[num] = specific_args[i]; + } + } char* erd_env_var = getenv ("EOSIO_HOME"); if (erd_env_var == nullptr || std::string(erd_env_var).empty()) { @@ -724,7 +739,7 @@ launcher_def::generate () { write_dot_file (); if (!output.empty()) { - bfs::path savefile = output; + bfs::path savefile = output; { bfs::ofstream sf (savefile); sf << fc::json::to_pretty_string (network) << endl; @@ -745,6 +760,7 @@ launcher_def::generate () { } return false; } + return true; } @@ -855,6 +871,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; @@ -885,6 +902,7 @@ launcher_def::bind_nodes () { ++producer_number; } } + node.dont_start = i >= to_not_start_node; } node.gelf_endpoint = gelf_endpoint; network.nodes[node.name] = move(node); @@ -1068,14 +1086,9 @@ launcher_def::write_config_file (tn_node_def &node) { cfg << "blocks-dir = " << block_dir << "\n"; cfg << "http-server-address = " << host->host_name << ":" << instance.http_port << "\n"; cfg << "http-validate-host = false\n"; - if (p2p == p2p_plugin::NET) { - cfg << "p2p-listen-endpoint = " << host->listen_addr << ":" << instance.p2p_port << "\n"; - cfg << "p2p-server-address = " << host->public_name << ":" << instance.p2p_port << 
"\n"; - } else { - cfg << "bnet-endpoint = " << host->listen_addr << ":" << instance.p2p_port << "\n"; - // Include the net_plugin endpoint, because the plugin is always loaded (even if not used). - cfg << "p2p-listen-endpoint = " << host->listen_addr << ":" << instance.p2p_port + 1000 << "\n"; - } + cfg << "p2p-listen-endpoint = " << host->listen_addr << ":" << instance.p2p_port << "\n"; + cfg << "p2p-server-address = " << host->public_name << ":" << instance.p2p_port << "\n"; + if (is_bios) { cfg << "enable-stale-production = true\n"; @@ -1101,18 +1114,10 @@ launcher_def::write_config_file (tn_node_def &node) { if(!is_bios) { auto &bios_node = network.nodes["bios"]; - if (p2p == p2p_plugin::NET) { - cfg << "p2p-peer-address = " << bios_node.instance->p2p_endpoint<< "\n"; - } else { - cfg << "bnet-connect = " << bios_node.instance->p2p_endpoint<< "\n"; - } + cfg << "p2p-peer-address = " << bios_node.instance->p2p_endpoint<< "\n"; } for (const auto &p : node.peers) { - if (p2p == p2p_plugin::NET) { - cfg << "p2p-peer-address = " << network.nodes.find(p)->second.instance->p2p_endpoint << "\n"; - } else { - cfg << "bnet-connect = " << network.nodes.find(p)->second.instance->p2p_endpoint << "\n"; - } + cfg << "p2p-peer-address = " << network.nodes.find(p)->second.instance->p2p_endpoint << "\n"; } if (instance.has_db || node.producers.size()) { for (const auto &kp : node.keys ) { @@ -1127,11 +1132,7 @@ launcher_def::write_config_file (tn_node_def &node) { if( instance.has_db ) { cfg << "plugin = eosio::mongo_db_plugin\n"; } - if ( p2p == p2p_plugin::NET ) { - cfg << "plugin = eosio::net_plugin\n"; - } else { - cfg << "plugin = eosio::bnet_plugin\n"; - } + cfg << "plugin = eosio::net_plugin\n"; cfg << "plugin = eosio::chain_api_plugin\n" << "plugin = eosio::history_api_plugin\n"; cfg.close(); @@ -1499,7 +1500,7 @@ launcher_def::launch (eosd_def &instance, string >s) { bfs::path reerr_sl = dd / "stderr.txt"; bfs::path reerr_base = bfs::path("stderr." + launch_time + ".txt"); bfs::path reerr = dd / reerr_base; - bfs::path pidf = dd / "nodeos.pid"; + bfs::path pidf = dd / bfs::path(string(node_executable_name) + ".pid"); host_def* host; try { host = deploy_config_files (*instance.node); @@ -1511,7 +1512,14 @@ launcher_def::launch (eosd_def &instance, string >s) { node_rt_info info; info.remote = !host->is_local(); - string eosdcmd = "programs/nodeos/nodeos "; + string install_path; + if (instance.name != "bios" && !specific_nodeos_installation_paths.empty()) { + const auto node_num = boost::lexical_cast(instance.get_node_num()); + if (specific_nodeos_installation_paths.count(node_num)) { + install_path = specific_nodeos_installation_paths[node_num] + "/"; + } + } + string eosdcmd = install_path + "programs/nodeos/" + string(node_executable_name) + " "; if (skip_transaction_signatures) { eosdcmd += "--skip-transaction-signatures "; } @@ -1548,6 +1556,10 @@ launcher_def::launch (eosd_def &instance, string >s) { } if (!host->is_local()) { + if (instance.node->dont_start) { + cerr << "Unable to use \"unstarted-nodes\" with a remote hose" << endl; + exit (-1); + } string cmdl ("cd "); cmdl += host->eosio_home + "; nohup " + eosdcmd + " > " + reout.string() + " 2> " + reerr.string() + "& echo $! 
> " + pidf.string() @@ -1562,7 +1574,7 @@ launcher_def::launch (eosd_def &instance, string >s) { string cmd = "cd " + host->eosio_home + "; kill -15 $(cat " + pidf.string() + ")"; format_ssh (cmd, host->host_name, info.kill_cmd); } - else { + else if (!instance.node->dont_start) { cerr << "spawning child, " << eosdcmd << endl; bp::child c(eosdcmd, bp::std_out > reout, bp::std_err > reerr ); @@ -1584,6 +1596,16 @@ launcher_def::launch (eosd_def &instance, string >s) { } c.detach(); } + else { + cerr << "not spawning child, " << eosdcmd << endl; + + const bfs::path dd = instance.data_dir_name; + const bfs::path start_file = dd / "start.cmd"; + bfs::ofstream sf (start_file); + + sf << eosdcmd << endl; + sf.close(); + } last_run.running_nodes.emplace_back (move(info)); } @@ -1613,20 +1635,35 @@ launcher_def::kill (launch_modes mode, string sig_opt) { case LM_LOCAL: case LM_REMOTE : { bfs::path source = "last_run.json"; - fc::json::from_file(source).as(last_run); - for (auto &info : last_run.running_nodes) { - if (mode == LM_ALL || (info.remote && mode == LM_REMOTE) || - (!info.remote && mode == LM_LOCAL)) { - if (info.pid_file.length()) { - string pid; - fc::json::from_file(info.pid_file).as(pid); - string kill_cmd = "kill " + sig_opt + " " + pid; - boost::process::system (kill_cmd); - } - else { - boost::process::system (info.kill_cmd); - } - } + try { + fc::json::from_file( source ).as( last_run ); + for( auto& info : last_run.running_nodes ) { + if( mode == LM_ALL || (info.remote && mode == LM_REMOTE) || + (!info.remote && mode == LM_LOCAL) ) { + try { + if( info.pid_file.length() ) { + string pid; + fc::json::from_file( info.pid_file ).as( pid ); + string kill_cmd = "kill " + sig_opt + " " + pid; + boost::process::system( kill_cmd ); + } else { + boost::process::system( info.kill_cmd ); + } + } catch( fc::exception& fce ) { + cerr << "unable to kill fc::exception=" << fce.to_detail_string() << endl; + } catch( std::exception& stde ) { + cerr << "unable to kill std::exception=" << stde.what() << endl; + } catch( ... ) { + cerr << "Unable to kill" << endl; + } + } + } + } catch( fc::exception& fce ) { + cerr << "unable to open " << source << " fc::exception=" << fce.to_detail_string() << endl; + } catch( std::exception& stde ) { + cerr << "unable to open " << source << " std::exception=" << stde.what() << endl; + } catch( ... 
+    } catch( ... ) {
+      cerr << "Unable to open " << source << endl;
+    }
   }
   }
@@ -2030,7 +2067,7 @@ FC_REFLECT( eosd_def,
             (p2p_endpoint) )

 // @ignore instance, gelf_endpoint
-FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers) )
+FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers)(dont_start) )

 FC_REFLECT( testnet_def, (name)(ssh_helper)(nodes) )
diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp
index 57e2a0c8b40..626ef6bd0f8 100644
--- a/programs/keosd/main.cpp
+++ b/programs/keosd/main.cpp
@@ -48,7 +48,7 @@ int main(int argc, char** argv)
       if(!app().initialize(argc, argv))
          return -1;
       auto& http = app().get_plugin<http_plugin>();
-      http.add_handler("/v1/keosd/stop", [](string, string, url_response_callback cb) { cb(200, "{}"); std::raise(SIGTERM); } );
+      http.add_handler("/v1/" + keosd::config::key_store_executable_name + "/stop", [](string, string, url_response_callback cb) { cb(200, fc::variant(fc::variant_object())); std::raise(SIGTERM); } );
       app().startup();
       app().exec();
    } catch (const fc::exception& e) {
diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt
index d9fb90ee45d..d5fe8273eb5 100644
--- a/programs/nodeos/CMakeLists.txt
+++ b/programs/nodeos/CMakeLists.txt
@@ -52,7 +52,6 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME}
        PRIVATE -Wl,${whole_archive_flag} login_plugin          -Wl,${no_whole_archive_flag}
        PRIVATE -Wl,${whole_archive_flag} history_plugin        -Wl,${no_whole_archive_flag}
        PRIVATE -Wl,${whole_archive_flag} state_history_plugin  -Wl,${no_whole_archive_flag}
-       PRIVATE -Wl,${whole_archive_flag} bnet_plugin           -Wl,${no_whole_archive_flag}
        PRIVATE -Wl,${whole_archive_flag} history_api_plugin    -Wl,${no_whole_archive_flag}
        PRIVATE -Wl,${whole_archive_flag} chain_api_plugin      -Wl,${no_whole_archive_flag}
        PRIVATE -Wl,${whole_archive_flag} net_plugin            -Wl,${no_whole_archive_flag}
diff --git a/programs/nodeos/config.hpp.in b/programs/nodeos/config.hpp.in
index dbeba72a3d1..821477e3270 100644
--- a/programs/nodeos/config.hpp.in
+++ b/programs/nodeos/config.hpp.in
@@ -11,6 +11,7 @@
 namespace eosio { namespace nodeos { namespace config {
    constexpr uint64_t version = 0x${nodeos_BUILD_VERSION};
+   const string node_executable_name = "${NODE_EXECUTABLE_NAME}";
 }}}

 #endif // CONFIG_HPP_IN
diff --git a/programs/nodeos/logging.json b/programs/nodeos/logging.json
new file mode 100644
index 00000000000..07771457d72
--- /dev/null
+++ b/programs/nodeos/logging.json
@@ -0,0 +1,78 @@
+{
+  "includes": [],
+  "appenders": [{
+      "name": "stderr",
+      "type": "console",
+      "args": {
+        "stream": "std_error",
+        "level_colors": [{
+            "level": "debug",
+            "color": "green"
+          },{
+            "level": "warn",
+            "color": "brown"
+          },{
+            "level": "error",
+            "color": "red"
+          }
+        ]
+      },
+      "enabled": true
+    },{
+      "name": "stdout",
+      "type": "console",
+      "args": {
+        "stream": "std_out",
+        "level_colors": [{
+            "level": "debug",
+            "color": "green"
+          },{
+            "level": "warn",
+            "color": "brown"
+          },{
+            "level": "error",
+            "color": "red"
+          }
+        ]
+      },
+      "enabled": true
+    },{
+      "name": "net",
+      "type": "gelf",
+      "args": {
+        "endpoint": "10.10.10.10:12201",
+        "host": "host_name"
+      },
+      "enabled": true
+    }
+  ],
+  "loggers": [{
+      "name": "default",
+      "level": "debug",
+      "enabled": true,
+      "additivity": false,
+      "appenders": [
+        "stderr",
+        "net"
+      ]
+    },{
+      "name": "net_plugin_impl",
+      "level": "debug",
+      "enabled": true,
+      "additivity": false,
+      "appenders": [
+        "stderr",
+        "net"
+      ]
+    },{
+      "name": "producer_plugin",
+      "level": "debug",
+      "enabled": true,
+      "additivity": false,
+      "appenders": [
+        "stderr",
+        "net"
+      ]
+    }
+  ]
+}
diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp
index 7034a03858a..e4c77d91169 100644
--- a/programs/nodeos/main.cpp
+++ b/programs/nodeos/main.cpp
@@ -21,10 +21,6 @@
 using namespace appbase;
 using namespace eosio;

-namespace fc {
-   std::unordered_map<std::string,appender::ptr>& get_appender_map();
-}
-
 namespace detail {

 void configure_logging(const bfs::path& config_path)
@@ -51,12 +47,11 @@ void configure_logging(const bfs::path& config_path)

 void logging_conf_handler()
 {
-   ilog("Received HUP.  Reloading logging configuration.");
    auto config_path = app().get_logging_conf();
+   ilog("Received HUP.  Reloading logging configuration from ${p}.", ("p", config_path.string()));
    if(fc::exists(config_path))
       ::detail::configure_logging(config_path);
-   for(auto iter : fc::get_appender_map())
-      iter.second->initialize(app().get_io_service());
+   fc::log_config::initialize_appenders( app().get_io_service() );
 }

 void initialize_logging()
@@ -64,8 +59,7 @@ void initialize_logging()
    auto config_path = app().get_logging_conf();
    if(fc::exists(config_path))
       fc::configure_logging(config_path); // intentionally allowing exceptions to escape
-   for(auto iter : fc::get_appender_map())
-      iter.second->initialize(app().get_io_service());
+   fc::log_config::initialize_appenders( app().get_io_service() );

    app().set_sighup_callback(logging_conf_handler);
 }
@@ -87,8 +81,8 @@ int main(int argc, char** argv)
       app().set_version(eosio::nodeos::config::version);

       auto root = fc::app_path();
-      app().set_default_data_dir(root / "eosio/nodeos/data" );
-      app().set_default_config_dir(root / "eosio/nodeos/config" );
+      app().set_default_data_dir(root / "eosio" / nodeos::config::node_executable_name / "data" );
+      app().set_default_config_dir(root / "eosio" / nodeos::config::node_executable_name / "config" );
       http_plugin::set_defaults({
          .default_unix_socket_path = "",
          .default_http_port = 8888
@@ -96,10 +90,9 @@ int main(int argc, char** argv)
       if(!app().initialize(argc, argv))
          return INITIALIZE_FAIL;
       initialize_logging();
-      ilog("nodeos version ${ver}", ("ver", app().version_string()));
-      ilog("eosio root is ${root}", ("root", root.string()));
-      ilog("nodeos using configuration file ${c}", ("c", app().full_config_file_path().string()));
-      ilog("nodeos data directory is ${d}", ("d", app().data_dir().string()));
+      ilog("${name} version ${ver}", ("name", nodeos::config::node_executable_name)("ver", app().version_string()));
+      ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app().full_config_file_path().string()));
+      ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app().data_dir().string()));
       app().startup();
       app().exec();
    } catch( const extract_genesis_state_exception& e ) {
@@ -145,6 +138,6 @@ int main(int argc, char** argv)
       return OTHER_FAIL;
    }

-   ilog("nodeos successfully exiting");
+   ilog("${name} successfully exiting", ("name", nodeos::config::node_executable_name));
    return SUCCESS;
 }
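The main.cpp change above wires logging reconfiguration to SIGHUP through appbase's `set_sighup_callback`. A hypothetical standalone illustration of the underlying flag-and-reload pattern in plain C++ (not nodeos code; the reload body is a placeholder for re-reading logging.json):

```cpp
#include <atomic>
#include <csignal>
#include <iostream>

std::atomic<bool> reload_requested{false};

extern "C" void on_hup(int) { reload_requested = true; }  // async-signal-safe: only set a flag

int main() {
   std::signal(SIGHUP, on_hup);
   for (int tick = 0; tick < 3; ++tick) {       // stand-in for the application event loop
      if (reload_requested.exchange(false))
         std::cout << "reloading logging configuration\n"; // re-parse the config file here
   }
}
```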
CMAKE_VERSION=${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH} + +# MONGO +export MONGODB_VERSION=3.6.3 +export MONGODB_ROOT=${OPT_DIR}/mongodb-${MONGODB_VERSION} +export MONGODB_BIN=${BIN_DIR}/mongod +export MONGODB_CONF=${ETC_DIR}/mongod.conf +export MONGODB_LOG_DIR=${VAR_DIR}/log/mongodb +export MONGODB_LINK_DIR=${OPT_DIR}/mongodb +export MONGODB_DATA_DIR=${DATA_DIR}/mongodb +export MONGO_C_DRIVER_VERSION=1.13.0 +export MONGO_C_DRIVER_ROOT=${SRC_DIR}/mongo-c-driver-${MONGO_C_DRIVER_VERSION} +export MONGO_CXX_DRIVER_VERSION=3.4.0 +export MONGO_CXX_DRIVER_ROOT=${SRC_DIR}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION} +export ENABLE_MONGO=${ENABLE_MONGO:-false} +export INSTALL_MONGO=${INSTALL_MONGO:-false} + +# BOOST +export BOOST_VERSION_MAJOR=1 +export BOOST_VERSION_MINOR=70 +export BOOST_VERSION_PATCH=0 +export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH} +export BOOST_ROOT=${BOOST_LOCATION:-${SRC_DIR}/boost_${BOOST_VERSION}} +export BOOST_LINK_LOCATION=${OPT_DIR}/boost + +# LLVM +export LLVM_VERSION=release_40 +export LLVM_ROOT=${OPT_DIR}/llvm +export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm + +# DOXYGEN +export DOXYGEN_VERSION=1_8_14 +export DOXYGEN_ROOT=${SRC_DIR}/doxygen-${DOXYGEN_VERSION} +export ENABLE_DOXYGEN=${ENABLE_DOXYGEN:-false} + +# CLANG +export CLANG_ROOT=${OPT_DIR}/clang8 +export PINNED_COMPILER_BRANCH=release_80 +export PINNED_COMPILER_LLVM_COMMIT=18e41dc +export PINNED_COMPILER_CLANG_COMMIT=a03da8b +export PINNED_COMPILER_LLD_COMMIT=d60a035 +export PINNED_COMPILER_POLLY_COMMIT=1bc06e5 +export PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT=6b34834 +export PINNED_COMPILER_LIBCXX_COMMIT=1853712 +export PINNED_COMPILER_LIBCXXABI_COMMIT=d7338a4 +export PINNED_COMPILER_LIBUNWIND_COMMIT=57f6739 +export PINNED_COMPILER_COMPILER_RT_COMMIT=5bc7979 +export NO_CPP17=${NO_CPP17:-false} +export PIN_COMPILER=${PIN_COMPILER:-false} +export BUILD_CLANG=${BUILD_CLANG:-false} + +export CORE_SYMBOL_NAME=${CORE_SYMBOL_NAME:-SYS} + +export CPU_CORES=$(grep -c ^processor /proc/cpuinfo 2>/dev/null || sysctl -n hw.ncpu) +export CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} + +export NONINTERACTIVE=${NONINTERACTIVE:-false} +export PROCEED=${PROCEED:-false} + +export CURRENT_USER=${CURRENT_USER:-$(whoami)} + +export ENABLE_COVERAGE_TESTING=${ENABLE_COVERAGE_TESTING:-false} +export HOMEBREW_NO_AUTO_UPDATE=1 +export TINI_VERSION=0.18.0 +export DISK_MIN=5 +export COUNT=0 +export DRYRUN=${DRYRUN:-false} +export VERBOSE=${VERBOSE:-false} +export SUDO_LOCATION=$( command -v sudo ) diff --git a/scripts/.environment b/scripts/.environment new file mode 100644 index 00000000000..d5542098253 --- /dev/null +++ b/scripts/.environment @@ -0,0 +1,19 @@ +export SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export REPO_ROOT="${SCRIPT_DIR}/.." 
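These two dot-files replace the per-script export blocks that every old build script duplicated (their removal appears further below). A minimal sketch of how a helper might consume them; this is a hypothetical consumer, and it must run from the repo root because .environment (continued just below) sources .build_vars by a relative path:

    #!/usr/bin/env bash
    # Hypothetical consumer of the new environment files; run from the repo root,
    # since scripts/.environment ends with '. ./scripts/.build_vars'.
    . ./scripts/.environment
    echo "EOSIO ${EOSIO_VERSION_FULL} installs under ${EOSIO_INSTALL_DIR}"
    echo "Boost ${BOOST_VERSION} is expected at ${BOOST_ROOT}"
    echo "Builds may use up to ${CPU_CORES} parallel jobs"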
+export BUILD_DIR="${REPO_ROOT}/build" + +export EOSIO_VERSION_MAJOR=$(cat $REPO_ROOT/CMakeLists.txt | grep -E "^[[:blank:]]*set[[:blank:]]*\([[:blank:]]*VERSION_MAJOR" | tail -1 | sed 's/.*VERSION_MAJOR //g' | sed 's/ //g' | sed 's/"//g' | cut -d\) -f1) +export EOSIO_VERSION_MINOR=$(cat $REPO_ROOT/CMakeLists.txt | grep -E "^[[:blank:]]*set[[:blank:]]*\([[:blank:]]*VERSION_MINOR" | tail -1 | sed 's/.*VERSION_MINOR //g' | sed 's/ //g' | sed 's/"//g' | cut -d\) -f1) +export EOSIO_VERSION_PATCH=$(cat $REPO_ROOT/CMakeLists.txt | grep -E "^[[:blank:]]*set[[:blank:]]*\([[:blank:]]*VERSION_PATCH" | tail -1 | sed 's/.*VERSION_PATCH //g' | sed 's/ //g' | sed 's/"//g' | cut -d\) -f1) +export EOSIO_VERSION_SUFFIX=$(cat $REPO_ROOT/CMakeLists.txt | grep -E "^[[:blank:]]*set[[:blank:]]*\([[:blank:]]*VERSION_SUFFIX" | tail -1 | sed 's/.*VERSION_SUFFIX //g' | sed 's/ //g' | sed 's/"//g' | cut -d\) -f1) +export EOSIO_VERSION="${EOSIO_VERSION_MAJOR}.${EOSIO_VERSION_MINOR}" +if [[ -z $EOSIO_VERSION_SUFFIX ]]; then + export EOSIO_VERSION_FULL="${EOSIO_VERSION_MAJOR}.${EOSIO_VERSION_MINOR}.${EOSIO_VERSION_PATCH}" +else + export EOSIO_VERSION_FULL="${EOSIO_VERSION_MAJOR}.${EOSIO_VERSION_MINOR}.${EOSIO_VERSION_PATCH}-${EOSIO_VERSION_SUFFIX}" +fi + +export EOSIO_INSTALL_DIR="${EOSIO_INSTALL_DIR:-${HOME}/eosio/${EOSIO_VERSION}}" +export TEMP_DIR="${TEMP_DIR:-${HOME}/tmp}" + +. ./scripts/.build_vars diff --git a/scripts/clang-devtoolset8-support.patch b/scripts/clang-devtoolset8-support.patch new file mode 100644 index 00000000000..64a946e26cd --- /dev/null +++ b/scripts/clang-devtoolset8-support.patch @@ -0,0 +1,55 @@ +From 32b65345c5760295d04c95e0abb3653fe20ffd16 Mon Sep 17 00:00:00 2001 +From: Tom Stellard +Date: Tue, 9 Apr 2019 13:26:10 +0000 +Subject: [PATCH] Add support for detection of devtoolset-8 + +Summary: +The current llvm/clang et al. project can be built with the latest developer toolset (devtoolset-8) on RHEL, which provides GCC 8.2.1. +However, the result compiler will not identify this toolset itself when compiling programs, which is of course not desirable. 
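This patch is bundled in scripts/ so the pinned-compiler build can apply it to its clang checkout before compiling. Conceptually, applying and checking it looks like the following sketch; the checkout path is an assumption, since the real invocation lives in the scripts' helper functions:

    # Sketch only: $SRC_DIR/llvm is a hypothetical checkout location.
    patch -d "$SRC_DIR/llvm" -p1 < "$REPO_ROOT/scripts/clang-devtoolset8-support.patch"
    # A clang built from the patched tree should then pick devtoolset-8 on RHEL/CentOS
    # (clang -v prints to stderr, hence the redirect):
    "$CLANG_ROOT/bin/clang" -v 2>&1 | grep 'Selected GCC installation'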
+ +After the patch - which simply adds the name of the developer toolset to the existing list - it gets identified and selected, as shown below: + +[bamboo@bamboo llvm-project]$ clang -v +clang version 9.0.0 (https://github.com/llvm/llvm-project.git e5ac385fb1ffa4bd3875ea6a4d24efdbd7814572) +Target: x86_64-unknown-linux-gnu +Thread model: posix +InstalledDir: /home/bamboo/llvm/bin +Found candidate GCC installation: /opt/rh/devtoolset-4/root/usr/lib/gcc/x86_64-redhat-linux/5.2.1 +Found candidate GCC installation: /opt/rh/devtoolset-7/root/usr/lib/gcc/x86_64-redhat-linux/7 +Found candidate GCC installation: /opt/rh/devtoolset-8/root/usr/lib/gcc/x86_64-redhat-linux/8 +Found candidate GCC installation: /usr/lib/gcc/x86_64-redhat-linux/4.8.2 +Found candidate GCC installation: /usr/lib/gcc/x86_64-redhat-linux/4.8.5 +Selected GCC installation: /opt/rh/devtoolset-8/root/usr/lib/gcc/x86_64-redhat-linux/8 +Candidate multilib: .;@m64 +Candidate multilib: 32;@m32 +Selected multilib: .;@m64 + +Patch By: Radu-Adrian Popescu + +Reviewers: tstellar, fedor.sergeev + +Reviewed By: tstellar + +Subscribers: jdoerfert, cfe-commits + +Tags: #clang + +Differential Revision: https://reviews.llvm.org/D59987 + +llvm-svn: 358002 +--- + clang/lib/Driver/ToolChains/Gnu.cpp | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp +index 2a58f0f7142..8915e3f948f 100644 +--- a/clang/lib/Driver/ToolChains/Gnu.cpp ++++ b/clang/lib/Driver/ToolChains/Gnu.cpp +@@ -1875,6 +1875,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( + // Non-Solaris is much simpler - most systems just go with "/usr". + if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) { + // Yet, still look for RHEL devtoolsets. ++ Prefixes.push_back("/opt/rh/devtoolset-8/root/usr"); + Prefixes.push_back("/opt/rh/devtoolset-7/root/usr"); + Prefixes.push_back("/opt/rh/devtoolset-6/root/usr"); + Prefixes.push_back("/opt/rh/devtoolset-4/root/usr"); diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index d3128903097..2cf27cd0464 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -1,4 +1,6 @@ -#!/bin/bash +#!/usr/bin/env bash +set -eo pipefail +SCRIPT_VERSION=3.0 # Build script version (change this to re-build the CICD image) ########################################################################## # This is the EOSIO automated install script for Linux and Mac OS. 
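The rewritten script now opens with 'set -eo pipefail', so any failing command, including one buried inside a pipeline, aborts the whole build instead of being masked by a later command's exit status. A tiny standalone illustration (not part of the script) of what that prologue changes:

    #!/usr/bin/env bash
    set -eo pipefail
    # Under 'set -e' alone this pipeline would pass, because tee (the last command)
    # exits 0; pipefail makes the pipeline report false's status, so -e stops here.
    false | tee /dev/null
    echo "never reached"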
# This file was downloaded from https://github.com/EOSIO/eos @@ -30,288 +32,211 @@ # https://github.com/EOSIO/eos/blob/master/LICENSE ########################################################################## -VERSION=2.1 # Build script version -CMAKE_BUILD_TYPE=Release -export DISK_MIN=20 -DOXYGEN=false -ENABLE_COVERAGE_TESTING=false -CORE_SYMBOL_NAME="SYS" -START_MAKE=true - -TIME_BEGIN=$( date -u +%s ) -txtbld=$(tput bold) -bldred=${txtbld}$(tput setaf 1) -txtrst=$(tput sgr0) - -export SRC_LOCATION=${HOME}/src -export OPT_LOCATION=${HOME}/opt -export VAR_LOCATION=${HOME}/var -export ETC_LOCATION=${HOME}/etc -export BIN_LOCATION=${HOME}/bin -export DATA_LOCATION=${HOME}/data -export CMAKE_VERSION_MAJOR=3 -export CMAKE_VERSION_MINOR=13 -export CMAKE_VERSION_PATCH=2 -export CMAKE_VERSION=${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH} -export MONGODB_VERSION=3.6.3 -export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION} -export MONGODB_CONF=${ETC_LOCATION}/mongod.conf -export MONGODB_LOG_LOCATION=${VAR_LOCATION}/log/mongodb -export MONGODB_LINK_LOCATION=${OPT_LOCATION}/mongodb -export MONGODB_DATA_LOCATION=${DATA_LOCATION}/mongodb -export MONGO_C_DRIVER_VERSION=1.13.0 -export MONGO_C_DRIVER_ROOT=${SRC_LOCATION}/mongo-c-driver-${MONGO_C_DRIVER_VERSION} -export MONGO_CXX_DRIVER_VERSION=3.4.0 -export MONGO_CXX_DRIVER_ROOT=${SRC_LOCATION}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION} -export BOOST_VERSION_MAJOR=1 -export BOOST_VERSION_MINOR=67 -export BOOST_VERSION_PATCH=0 -export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH} -export BOOST_ROOT=${SRC_LOCATION}/boost_${BOOST_VERSION} -export BOOST_LINK_LOCATION=${OPT_LOCATION}/boost -export LLVM_VERSION=release_40 -export LLVM_ROOT=${OPT_LOCATION}/llvm -export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm -export DOXYGEN_VERSION=1_8_14 -export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} -export TINI_VERSION=0.18.0 - -# Setup directories -mkdir -p $SRC_LOCATION -mkdir -p $OPT_LOCATION -mkdir -p $VAR_LOCATION -mkdir -p $BIN_LOCATION -mkdir -p $VAR_LOCATION/log -mkdir -p $ETC_LOCATION -mkdir -p $MONGODB_LOG_LOCATION -mkdir -p $MONGODB_DATA_LOCATION - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -REPO_ROOT="${SCRIPT_DIR}/.." -BUILD_DIR="${REPO_ROOT}/build" - -# Use current directory's tmp directory if noexec is enabled for /tmp -if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $REPO_ROOT/tmp - TEMP_DIR="${REPO_ROOT}/tmp" - rm -rf $REPO_ROOT/tmp/* -else # noexec wasn't found - TEMP_DIR="/tmp" -fi - -function usage() -{ - printf "Usage: %s \\n[Build Option -o <Debug|Release|RelWithDebInfo|MinSizeRel>] \\n[CodeCoverage -c] \\n[Doxygen -d] \\n[CoreSymbolName -s <1-7 characters>] \\n[Avoid Compiling -a]\\n[Noninteractive -y]\\n\\n" "$0" 1>&2 +function usage() { + printf "Usage: $0 OPTION...
+ -P Build with pinned clang and libcxx + -o TYPE Build <Debug|Release|RelWithDebInfo|MinSizeRel> (default: Release) + -s NAME Core Symbol Name <1-7 characters> (default: SYS) + -b DIR Use pre-built boost in DIR + -i DIR Directory to use for installing dependencies & EOSIO (default: $HOME) + -y Noninteractive mode (answers yes to every prompt) + -c Enable Code Coverage + -d Generate Doxygen + -m Build MongoDB dependencies + \\n" "$0" 1>&2 exit 1 } -NONINTERACTIVE=0 - +TIME_BEGIN=$( date -u +%s ) if [ $# -ne 0 ]; then - while getopts ":cdo:s:ahy" opt; do + while getopts "o:s:b:i:ycdhmPf" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) if [[ "${options[*]}" =~ "${OPTARG}" ]]; then - CMAKE_BUILD_TYPE="${OPTARG}" + CMAKE_BUILD_TYPE=$OPTARG else - printf "\\nInvalid argument: %s\\n" "${OPTARG}" 1>&2 + echo "Invalid argument: ${OPTARG}" 1>&2 + usage + fi + ;; + s ) + if [ "${#OPTARG}" -gt 7 ] || [ -z "${#OPTARG}" ]; then + echo "Invalid argument: ${OPTARG}" 1>&2 usage - exit 1 + else + CORE_SYMBOL_NAME=$OPTARG fi ;; + b ) + BOOST_LOCATION=$OPTARG + ;; + i ) + INSTALL_LOCATION=$OPTARG + ;; + y ) + NONINTERACTIVE=true + PROCEED=true + ;; + f ) + echo "DEPRECATION NOTICE: -f will be removed in the next release..." + ;; # Needs to be removed in 1.9 c ) ENABLE_COVERAGE_TESTING=true ;; d ) - DOXYGEN=true + ENABLE_DOXYGEN=true ;; - s) - if [ "${#OPTARG}" -gt 7 ] || [ -z "${#OPTARG}" ]; then - printf "\\nInvalid argument: %s\\n" "${OPTARG}" 1>&2 - usage - exit 1 - else - CORE_SYMBOL_NAME="${OPTARG}" - fi + m ) + ENABLE_MONGO=true ;; - a) - START_MAKE=false + P ) + PIN_COMPILER=true ;; - h) + h ) usage - exit 1 - ;; - y) - NONINTERACTIVE=1 ;; - \? ) - printf "\\nInvalid Option: %s\\n" "-${OPTARG}" 1>&2 + ? ) + echo "Invalid Option!" 1>&2 usage - exit 1 ;; : ) - printf "\\nInvalid Option: %s requires an argument.\\n" "-${OPTARG}" 1>&2 + echo "Invalid Option: -${OPTARG} requires an argument." 1>&2 usage - exit 1 ;; * ) usage - exit 1 ;; esac done fi -if [ ! -d "${REPO_ROOT}/.git" ]; then - printf "\\nThis build script only works with sources cloned from git\\n" - printf "Please clone a new eos directory with 'git clone https://github.com/EOSIO/eos --recursive'\\n" - printf "See the wiki for instructions: https://github.com/EOSIO/eos/wiki\\n" - exit 1 -fi - -cd $REPO_ROOT - -STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") )) -if [ $STALE_SUBMODS -gt 0 ]; then - printf "\\ngit submodules are not up to date.\\n" - printf "Please run the command 'git submodule update --init --recursive'.\\n" - exit 1 -fi - -printf "\\nBeginning build version: %s\\n" "${VERSION}" -printf "%s\\n" "$( date -u )" -printf "User: %s\\n" "$( whoami )" -# printf "git head id: %s\\n" "$( cat .git/refs/heads/master )" -printf "Current branch: %s\\n" "$( git rev-parse --abbrev-ref HEAD )" - -ARCH=$( uname ) -printf "\\nARCHITECTURE: %s\\n" "${ARCH}" - -# Find and use existing CMAKE -export CMAKE=$(command -v cmake 2>/dev/null) - -if [ "$ARCH" == "Linux" ]; then - # Check if cmake is already installed or not and use source install location - if [ -z $CMAKE ]; then export CMAKE=$HOME/bin/cmake; fi - export OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' ) +# Ensure we're in the repo root and not inside of scripts +cd $( dirname "${BASH_SOURCE[0]}" )/.. + +# Load eosio specific helper functions +.
./scripts/helpers/eosio.sh + +$VERBOSE && echo "Build Script Version: ${SCRIPT_VERSION}" +echo "EOSIO Version: ${EOSIO_VERSION_FULL}" +echo "$( date -u )" +echo "User: ${CURRENT_USER}" +# echo "git head id: %s" "$( cat .git/refs/heads/master )" +echo "Current branch: $( execute git rev-parse --abbrev-ref HEAD 2>/dev/null )" + +( [[ ! $NAME == "Ubuntu" ]] && [[ ! $ARCH == "Darwin" ]] ) && set -i # Ubuntu doesn't support interactive mode since it uses dash + Some folks are having this issue on Darwin; colors aren't supported yet anyway + +# Ensure sudo is available (only if not using the root user) +ensure-sudo +# Test that which is on the system before proceeding +ensure-which +# Prevent a non-git clone from running +ensure-git-clone +# Prompt user for installation path. +install-directory-prompt +# If the same version has already been installed... +previous-install-prompt +# Prompt user and asks if we should install mongo or not +prompt-mongo-install +# Setup directories and envs we need (must come last) +setup + +execute cd $REPO_ROOT + +# Submodules need to be up to date +ensure-submodules-up-to-date + +# Check if cmake already exists +( [[ -z "${CMAKE}" ]] && [[ ! -z $(command -v cmake 2>/dev/null) ]] ) && export CMAKE=$(command -v cmake 2>/dev/null) + +# Use existing cmake on system (either global or specific to eosio) +# Setup based on architecture +if [[ $ARCH == "Linux" ]]; then + export CMAKE=${CMAKE:-${EOSIO_INSTALL_DIR}/bin/cmake} OPENSSL_ROOT_DIR=/usr/include/openssl - if [ ! -e /etc/os-release ]; then - printf "\\nEOSIO currently supports Amazon, Centos, Fedora, Mint & Ubuntu Linux only.\\n" - printf "Please install on the latest version of one of these Linux distributions.\\n" - printf "https://aws.amazon.com/amazon-linux-ami/\\n" - printf "https://www.centos.org/\\n" - printf "https://start.fedoraproject.org/\\n" - printf "https://linuxmint.com/\\n" - printf "https://www.ubuntu.com/\\n" - printf "Exiting now.\\n" - exit 1 - fi - case "$OS_NAME" in - "Amazon Linux AMI"|"Amazon Linux") - FILE="${REPO_ROOT}/scripts/eosio_build_amazon.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc + [[ ! -e /etc/os-release ]] && print_supported_linux_distros_and_exit + case $NAME in + "Amazon Linux AMI" | "Amazon Linux") + echo "${COLOR_CYAN}[Ensuring YUM installation]${COLOR_NC}" + FILE="${REPO_ROOT}/scripts/eosio_build_amazonlinux.sh" ;; "CentOS Linux") FILE="${REPO_ROOT}/scripts/eosio_build_centos.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc - ;; - "elementary OS") - FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - ;; - "Fedora") - export CPATH=/usr/include/llvm4.0:$CPATH # llvm4.0 for fedora package path inclusion - FILE="${REPO_ROOT}/scripts/eosio_build_fedora.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc - ;; - "Linux Mint") - FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 ;; "Ubuntu") FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - ;; - "Debian GNU/Linux") - FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 ;; - *) - printf "\\nUnsupported Linux Distribution. 
Exiting now.\\n\\n" - exit 1 + *) print_supported_linux_distros_and_exit;; esac + CMAKE_PREFIX_PATHS="${EOSIO_INSTALL_DIR}" fi if [ "$ARCH" == "Darwin" ]; then - # Check if cmake is already installed or not and use source install location - if [ -z $CMAKE ]; then export CMAKE=/usr/local/bin/cmake; fi - export OS_NAME=MacOSX # opt/gettext: cleos requires Intl, which requires gettext; it's keg only though and we don't want to force linking: https://github.com/EOSIO/eos/issues/2240#issuecomment-396309884 - # HOME/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found - LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME/lib/cmake ${LOCAL_CMAKE_FLAGS}" - FILE="${REPO_ROOT}/scripts/eosio_build_darwin.sh" - CXX_COMPILER=clang++ - C_COMPILER=clang + # EOSIO_INSTALL_DIR/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found + CMAKE_PREFIX_PATHS="/usr/local/opt/gettext;${EOSIO_INSTALL_DIR}" + FILE="${SCRIPT_DIR}/eosio_build_darwin.sh" OPENSSL_ROOT_DIR=/usr/local/opt/openssl + export CMAKE=${CMAKE} fi -# Cleanup old installation -. ./scripts/full_uninstaller.sh $NONINTERACTIVE -if [ $? -ne 0 ]; then exit -1; fi # Stop if exit from script is not 0 - -pushd $SRC_LOCATION &> /dev/null -. "$FILE" $NONINTERACTIVE # Execute OS specific build file -popd &> /dev/null - -printf "\\n========================================================================\\n" -printf "======================= Starting EOSIO Build =======================\\n" -printf "## CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}" -printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" - -mkdir -p $BUILD_DIR -cd $BUILD_DIR - -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" \ - -DCMAKE_C_COMPILER="${C_COMPILER}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" -if [ $? -ne 0 ]; then exit -1; fi -make -j"${JOBS}" -if [ $? -ne 0 ]; then exit -1; fi +# Find and replace OPT_DIR in pinned_toolchain.cmake, then move it into build dir +execute bash -c "sed -e 's~@~$OPT_DIR~g' $SCRIPT_DIR/pinned_toolchain.cmake &> $BUILD_DIR/pinned_toolchain.cmake" + +echo "${COLOR_CYAN}=====================================================================================" +echo "======================= ${COLOR_WHITE}Starting EOSIO Dependency Install${COLOR_CYAN} ===========================${COLOR_NC}" +execute cd $SRC_DIR +set_system_vars # JOBS, Memory, disk space available, etc +echo "Architecture: ${ARCH}" +. 
$FILE # Execute OS specific build file +execute cd $REPO_ROOT + +echo "" +echo "${COLOR_CYAN}========================================================================" +echo "======================= ${COLOR_WHITE}Starting EOSIO Build${COLOR_CYAN} ===========================${COLOR_NC}" +if $VERBOSE; then + echo "CXX: $CXX" + echo "CC: $CC" +fi +execute cd $BUILD_DIR +# LOCAL_CMAKE_FLAGS +$ENABLE_MONGO && LOCAL_CMAKE_FLAGS="-DBUILD_MONGO_DB_PLUGIN=true ${LOCAL_CMAKE_FLAGS}" # Enable Mongo DB Plugin if user has enabled -m +if $PIN_COMPILER; then + CMAKE_PREFIX_PATHS="${CMAKE_PREFIX_PATHS};${LLVM_ROOT}" + LOCAL_CMAKE_FLAGS="${PINNED_TOOLCHAIN} -DCMAKE_PREFIX_PATH='${CMAKE_PREFIX_PATHS}' ${LOCAL_CMAKE_FLAGS}" +else + LOCAL_CMAKE_FLAGS="-DCMAKE_CXX_COMPILER='${CXX}' -DCMAKE_C_COMPILER='${CC}' -DCMAKE_PREFIX_PATH='${CMAKE_PREFIX_PATHS}' ${LOCAL_CMAKE_FLAGS}" +fi +$ENABLE_DOXYGEN && LOCAL_CMAKE_FLAGS="-DBUILD_DOXYGEN='${DOXYGEN}' ${LOCAL_CMAKE_FLAGS}" +$ENABLE_COVERAGE_TESTING && LOCAL_CMAKE_FLAGS="-DENABLE_COVERAGE_TESTING='${ENABLE_COVERAGE_TESTING}' ${LOCAL_CMAKE_FLAGS}" -cd $REPO_ROOT +execute bash -c "$CMAKE -DCMAKE_BUILD_TYPE='${CMAKE_BUILD_TYPE}' -DCORE_SYMBOL_NAME='${CORE_SYMBOL_NAME}' -DOPENSSL_ROOT_DIR='${OPENSSL_ROOT_DIR}' -DCMAKE_INSTALL_PREFIX='${EOSIO_INSTALL_DIR}' ${LOCAL_CMAKE_FLAGS} '${REPO_ROOT}'" +execute make -j$JOBS +execute cd $REPO_ROOT 1>/dev/null TIME_END=$(( $(date -u +%s) - $TIME_BEGIN )) -printf "${bldred}\n\n _______ _______ _______ _________ _______\n" -printf '( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' -printf "| ( \/| ( ) || ( \/ ) ( | ( ) |\n" -printf "| (__ | | | || (_____ | | | | | |\n" -printf "| __) | | | |(_____ ) | | | | | |\n" -printf "| ( | | | | ) | | | | | | |\n" -printf "| (____/\| (___) |/\____) |___) (___| (___) |\n" -printf "(_______/(_______)\_______)\_______/(_______)\n\n${txtrst}" - -printf "\\nEOSIO has been successfully built. %02d:%02d:%02d\\n" $(($TIME_END/3600)) $(($TIME_END%3600/60)) $(($TIME_END%60)) -printf "==============================================================================================\\n${bldred}" -printf "(Optional) Testing Instructions:\\n" -print_instructions -printf "${BIN_LOCATION}/mongod --dbpath ${MONGODB_DATA_LOCATION} -f ${MONGODB_CONF} --logpath ${MONGODB_LOG_LOCATION}/mongod.log &\\n" -printf "cd ./build && PATH=\$PATH:$HOME/opt/mongodb/bin make test\\n" # PATH is set as currently 'mongo' binary is required for the mongodb test -printf "${txtrst}==============================================================================================\\n" -printf "For more information:\\n" -printf "EOSIO website: https://eos.io\\n" -printf "EOSIO Telegram channel @ https://t.me/EOSProject\\n" -printf "EOSIO resources: https://eos.io/resources/\\n" -printf "EOSIO Stack Exchange: https://eosio.stackexchange.com\\n" -printf "EOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" +echo " _______ _______ _______ _________ _______" +echo "( ____ \( ___ )( ____ __ __ ( ___ )" +echo "| ( \/| ( ) || ( \/ ) ( | ( ) |" +echo "| (__ | | | || (_____ | | | | | |" +echo "| __) | | | |(_____ ) | | | | | |" +echo "| ( | | | | ) | | | | | | |" +echo "| (____/\| (___) |/\____) |___) (___| (___) |" +echo "(_______/(_______)\_______)\_______/(_______)" +echo "=============================================${COLOR_NC}" + +echo "${COLOR_GREEN}EOSIO has been successfully built. 
$(($TIME_END/3600)):$(($TIME_END%3600/60)):$(($TIME_END%60))" +echo "${COLOR_GREEN}You can now install using: ./scripts/eosio_install.sh${COLOR_NC}" +echo "${COLOR_YELLOW}Uninstall with: ./scripts/eosio_uninstall.sh${COLOR_NC}" + +echo "" +echo "${COLOR_CYAN}If you wish to perform tests to ensure functional code:${COLOR_NC}" +if $ENABLE_MONGO; then + echo "${BIN_DIR}/mongod --dbpath ${MONGODB_DATA_DIR} -f ${MONGODB_CONF} --logpath ${MONGODB_LOG_DIR}/mongod.log &" + PATH_TO_USE=" PATH=\$PATH:$OPT_DIR/mongodb/bin" +fi +echo "cd ./build &&${PATH_TO_USE} make test" # PATH is set as currently 'mongo' binary is required for the mongodb test +echo "" +resources diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh deleted file mode 100755 index 7a16e4486e9..00000000000 --- a/scripts/eosio_build_amazon.sh +++ /dev/null @@ -1,253 +0,0 @@ -if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - -OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' | cut -d'.' -f1 ) - -DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 ) -DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) -DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) -DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) -DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) - -if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then - DEP_ARRAY=( - sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ - bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python34 python34-devel \ - libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel - ) -else - DEP_ARRAY=( - git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ - bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ - python3 python3-devel python-devel libedit-devel doxygen graphviz - ) -fi - -COUNT=1 -DISPLAY="" -DEP="" - -if [[ "${OS_NAME}" == "Amazon Linux AMI" && "${OS_VER}" -lt 2017 ]]; then - printf "You must be running Amazon Linux 2017.09 or higher to install EOSIO.\\n" - printf "exiting now.\\n" - exit 1 -fi - -if [ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]; then - printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "exiting now.\\n" - exit 1 -fi - -printf "\\nChecking Yum installation.\\n" -if ! YUM=$( command -v yum 2>/dev/null ) -then - printf "\\nYum must be installed to compile EOS.IO.\\n" - printf "\\nExiting now.\\n" - exit 1 -fi -printf "Yum installation found at ${YUM}.\\n" - -if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi -case $ANSWER in - 1 | [Yy]* ) - if ! sudo $YUM -y update; then - printf " - YUM update failed.\\n" - exit 1; - else - printf " - YUM update complete.\\n" - fi - ;; - [Nn]* ) echo " - Proceeding without update!";; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; -esac - -printf "Checking RPM for installed dependencies...\\n" -for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do - pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name ) - if [[ -z $pkg ]]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. 
${DEP_ARRAY[$i]}\\n" - printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi -done -if [ "${COUNT}" -gt 1 ]; then - printf "\\nThe following dependencies are required to install EOSIO:\\n" - printf "${DISPLAY}\\n\\n" - if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - if ! sudo $YUM -y install ${DEP}; then - printf " - YUM dependency installation failed!\\n" - exit 1; - else - printf " - YUM dependencies installed successfully.\\n" - fi - ;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf " - No required YUM dependencies to install.\\n" -fi - -# util-linux includes lscpu -# procps includes free -m -MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) -CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 ) -CPU_CORE=$( nproc ) -MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) -export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - -printf "\\nOS name: %s\\n" "${OS_NAME}" -printf "OS Version: %s\\n" "${OS_VER}" -printf "CPU speed: %sMhz\\n" "${CPU_SPEED}" -printf "CPU cores: %s\\n" "${CPU_CORE}" -printf "Physical Memory: %sMgb\\n" "${MEM_MEG}" -printf "Disk space total: %sGb\\n" "${DISK_TOTAL}" -printf "Disk space available: %sG\\n" "${DISK_AVAIL}" - -if [ "${MEM_MEG}" -lt 7000 ]; then - printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "exiting now.\\n" - exit 1 -fi - -printf "\\n" - - -printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then - printf "Installing CMAKE...\\n" - curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ - && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ - && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ - && make -j"${JOBS}" \ - && make install \ - && cd .. \ - && rm -f cmake-$CMAKE_VERSION.tar.gz \ - || exit 1 - printf " - CMAKE successfully installed @ ${CMAKE} \\n" -else - printf " - CMAKE found @ ${CMAKE}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! 
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - printf "Installing LLVM 4...\\n" - cd ../opt \ - && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ - && mkdir build \ - && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - || exit 1 - printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? 
-ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -function print_instructions() { - return 0 -} diff --git a/scripts/eosio_build_amazonlinux.sh b/scripts/eosio_build_amazonlinux.sh new file mode 100755 index 00000000000..8e748c3cd22 --- /dev/null +++ b/scripts/eosio_build_amazonlinux.sh @@ -0,0 +1,87 @@ +echo "OS name: ${NAME}" +echo "OS Version: ${VERSION_ID}" +echo "CPU cores: ${CPU_CORES}" +echo "Physical Memory: ${MEM_GIG}G" +echo "Disk space total: ${DISK_TOTAL}G" +echo "Disk space available: ${DISK_AVAIL}G" + +if [[ "${NAME}" == "Amazon Linux" ]] && [[ $VERSION == 2 ]]; then + DEPS_FILE="${REPO_ROOT}/scripts/eosio_build_amazonlinux2_deps" +else + echo " - You must be running Amazon Linux 2017.09 or higher to install EOSIO." && exit 1 +fi + +[[ $MEM_GIG -lt 7 ]] && echo "Your system must have 7 or more Gigabytes of physical memory installed." && exit 1 +[[ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]] && echo " - You must have at least ${DISK_MIN}GB of available storage to install EOSIO." && exit 1 + +# Ensure packages exist +($PIN_COMPILER && $BUILD_CLANG) && EXTRA_DEPS=(gcc-c++,rpm\ -qa) +ensure-yum-packages $DEPS_FILE $(echo ${EXTRA_DEPS[@]}) +# Handle clang/compiler +ensure-compiler +# CMAKE Installation +ensure-cmake +# CLANG Installation +build-clang +# LLVM Installation +ensure-llvm +# BOOST Installation +ensure-boost + +if $INSTALL_MONGO; then + + echo "${COLOR_CYAN}[Ensuring MongoDB installation]${COLOR_NC}" + if [[ ! -d $MONGODB_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_DIR/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_DIR/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_DIR \ + && rm -rf $MONGODB_LINK_DIR \ + && rm -rf $BIN_DIR/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_DIR \ + && ln -s $MONGODB_LINK_DIR/bin/mongod $BIN_DIR/mongod" + echo " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_DIR})." + else + echo " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_DIR})." + fi + echo "${COLOR_CYAN}[Ensuring MongoDB C driver installation]${COLOR_NC}" + if [[ ! -d $MONGO_C_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF $PINNED_TOOLCHAIN .. \ + && make -j${JOBS} \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz" + echo " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}." + else + echo " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}." + fi + echo "${COLOR_CYAN}[Ensuring MongoDB C++ driver installation]${COLOR_NC}" + if [[ ! 
-d $MONGO_CXX_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX='$EOSIO_INSTALL_DIR' -DCMAKE_PREFIX_PATH='$EOSIO_INSTALL_DIR' $PINNED_TOOLCHAIN .. \ + && make -j${JOBS} VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz" + echo " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}." + else + echo " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}." + fi +fi +echo "" \ No newline at end of file diff --git a/scripts/eosio_build_amazonlinux2_deps b/scripts/eosio_build_amazonlinux2_deps new file mode 100755 index 00000000000..4017586125e --- /dev/null +++ b/scripts/eosio_build_amazonlinux2_deps @@ -0,0 +1,23 @@ +git,rpm -qa +sudo,rpm -qa +procps-ng,rpm -qa +util-linux,rpm -qa +autoconf,rpm -qa +automake,rpm -qa +libtool,rpm -qa +make,rpm -qa +bzip2,rpm -qa +bzip2-devel,rpm -qa +openssl-devel,rpm -qa +gmp-devel,rpm -qa +libstdc++,rpm -qa +libcurl-devel,rpm -qa +libusbx-devel,rpm -qa +python3,rpm -qa +python3-devel,rpm -qa +python-devel,rpm -qa +libedit-devel,rpm -qa +doxygen,rpm -qa +graphviz,rpm -qa +clang,rpm -qa +patch,rpm -qa \ No newline at end of file diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 3d0056f0b36..9e20e86df55 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -1,305 +1,99 @@ -if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - -OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' \ -| cut -d'.' -f1 ) - -MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) -CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 ) -CPU_CORE=$( nproc ) -MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) -export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - -DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 ) -DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) -DISK_AVAIL_KB=$( df .
| tail -1 | awk '{print $4}' ) -DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) -DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) - -printf "\\nOS name: ${OS_NAME}\\n" -printf "OS Version: ${OS_VER}\\n" -printf "CPU speed: ${CPU_SPEED}Mhz\\n" -printf "CPU cores: ${CPU_CORE}\\n" -printf "Physical Memory: ${MEM_MEG}Mgb\\n" -printf "Disk install: ${DISK_INSTALL}\\n" -printf "Disk space total: ${DISK_TOTAL%.*}G\\n" -printf "Disk space available: ${DISK_AVAIL%.*}G\\n" -printf "Concurrent Jobs (make -j): ${JOBS}\\n" - -if [ "${MEM_MEG}" -lt 7000 ]; then - printf "\\nYour system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "Exiting now.\\n\\n" - exit 1; -fi - -if [ "${OS_VER}" -lt 7 ]; then - printf "\\nYou must be running Centos 7 or higher to install EOSIO.\\n" - printf "Exiting now.\\n\\n" - exit 1; -fi - -if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "\\nYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "Exiting now.\\n\\n" - exit 1; -fi - -printf "\\n" - -printf "Checking Yum installation...\\n" -if ! YUM=$( command -v yum 2>/dev/null ); then - printf "!! Yum must be installed to compile EOS.IO !!\\n" - printf "Exiting now.\\n" - exit 1; +echo "OS name: ${NAME}" +echo "OS Version: ${VERSION_ID}" +echo "CPU cores: ${CPU_CORES}" +echo "Physical Memory: ${MEM_GIG}G" +echo "Disk space total: ${DISK_TOTAL}G" +echo "Disk space available: ${DISK_AVAIL}G" + +( [[ $NAME == "CentOS Linux" ]] && [[ "$(echo ${VERSION} | sed 's/ .*//g')" < 7 ]] ) && echo " - You must be running Centos 7 or higher to install EOSIO." && exit 1 + +[[ $MEM_GIG -lt 7 ]] && echo "Your system must have 7 or more Gigabytes of physical memory installed." && exit 1 +[[ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]] && echo " - You must have at least ${DISK_MIN}GB of available storage to install EOSIO." && exit 1 + +echo "" + +# Repo necessary for rh-python3 and devtoolset-8 +ensure-scl +# GCC8 for Centos / Needed for CMAKE install even if we're pinning +ensure-devtoolset +if [[ -d /opt/rh/devtoolset-8 ]]; then + echo "${COLOR_CYAN}[Enabling Centos devtoolset-8 (so we can use GCC 8)]${COLOR_NC}" + execute-always source /opt/rh/devtoolset-8/enable + echo " - ${COLOR_GREEN}Centos devtoolset-8 successfully enabled!${COLOR_NC}" fi -printf " - Yum installation found at %s.\\n" "${YUM}" - -if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi -case $ANSWER in - 1 | [Yy]* ) - if ! "${YUM}" -y update; then - printf " - YUM update failed.\\n" - exit 1; - else - printf " - YUM update complete.\\n" - fi - ;; - [Nn]* ) echo " - Proceeding without update!";; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; -esac - -printf "Checking installation of Centos Software Collections Repository...\\n" -SCL=$( rpm -qa | grep -E 'centos-release-scl-[0-9].*' ) -if [ -z "${SCL}" ]; then - if [ $ANSWER != 1 ]; then read -p "Do you wish to install and enable this repository? (y/n)? " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - printf "Installing SCL...\\n" - if ! "${YUM}" -y --enablerepo=extras install centos-release-scl 2>/dev/null; then - printf "!! 
Centos Software Collections Repository installation failed !!\\n" - printf "Exiting now.\\n\\n" - exit 1; - else - printf "Centos Software Collections Repository installed successfully.\\n" - fi - ;; - [Nn]* ) echo "User aborting installation of required Centos Software Collections Repository, Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf " - ${SCL} found.\\n" -fi - -printf "Checking installation of devtoolset-7...\\n" -DEVTOOLSET=$( rpm -qa | grep -E 'devtoolset-7-[0-9].*' ) -if [ -z "${DEVTOOLSET}" ]; then - if [ $ANSWER != 1 ]; then read -p "Do you wish to install devtoolset-7? (y/n)? " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - printf "Installing devtoolset-7...\\n" - if ! "${YUM}" install -y devtoolset-7; then - printf "!! Centos devtoolset-7 installation failed !!\\n" - printf "Exiting now.\\n" - exit 1; - else - printf " - Centos devtoolset installed successfully!\\n" - fi - ;; - [Nn]* ) echo "User aborting installation of devtoolset-7. Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf " - ${DEVTOOLSET} found.\\n" -fi -if [ -d /opt/rh/devtoolset-7 ]; then - printf "Enabling Centos devtoolset-7 so we can use GCC 7...\\n" - source /opt/rh/devtoolset-7/enable || exit 1 - printf " - Centos devtoolset-7 successfully enabled!\\n" +# Ensure packages exist +ensure-yum-packages "${REPO_ROOT}/scripts/eosio_build_centos7_deps" +export PYTHON3PATH="/opt/rh/rh-python36" +if $DRYRUN || [ -d $PYTHON3PATH ]; then + echo "${COLOR_CYAN}[Enabling python36]${COLOR_NC}" + execute source $PYTHON3PATH/enable + echo " ${COLOR_GREEN}- Python36 successfully enabled!${COLOR_NC}" + echo "" fi - -printf "\\n" - -DEP_ARRAY=( - git autoconf automake libtool make bzip2 doxygen graphviz \ - bzip2-devel openssl-devel gmp-devel \ - ocaml libicu-devel python python-devel python33 \ - gettext-devel file sudo libusbx-devel libcurl-devel - ) -COUNT=1 -DISPLAY="" -DEP="" -printf "Checking RPM for installed dependencies...\\n" -for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do - pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name ) - if [[ -z $pkg ]]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" - printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) +# Handle clang/compiler +ensure-compiler +# CMAKE Installation +ensure-cmake +# CLANG Installation +build-clang +# LLVM Installation +ensure-llvm +# BOOST Installation +ensure-boost + +if $INSTALL_MONGO; then + + echo "${COLOR_CYAN}[Ensuring MongoDB installation]${COLOR_NC}" + if [[ ! -d $MONGODB_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_DIR/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_DIR/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_DIR \ + && rm -rf $MONGODB_LINK_DIR \ + && rm -rf $BIN_DIR/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_DIR \ + && ln -s $MONGODB_LINK_DIR/bin/mongod $BIN_DIR/mongod" + echo " - MongoDB successfully installed @ ${MONGODB_ROOT}." else - printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" - continue + echo " - MongoDB found with correct version @ ${MONGODB_ROOT}." 
+ fi + echo "${COLOR_CYAN}[Ensuring MongoDB C driver installation]${COLOR_NC}" + if [[ ! -d $MONGO_C_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF $PINNED_TOOLCHAIN .. \ + && make -j${JOBS} \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz" + echo " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}." + else + echo " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}." + fi + echo "${COLOR_CYAN}[Ensuring MongoDB CXX driver installation]${COLOR_NC}" + if [[ ! -d $MONGO_CXX_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DCMAKE_PREFIX_PATH=$EOSIO_INSTALL_DIR $PINNED_TOOLCHAIN .. \ + && make -j${JOBS} VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz" + echo " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}." + else + echo " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}." fi
-ne 0 ]; then exit -1; fi - - -printf "\\n" - - -export CPATH="$CPATH:/opt/rh/python33/root/usr/include/python3.3m" # m on the end causes problems with boost finding python3 -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! 
-d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - printf "Installing LLVM 4...\\n" - cd ../opt \ - && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ - && mkdir build \ - && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - || exit 1 - printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -function print_instructions() { - printf "source /opt/rh/python33/enable\\n" - printf "source /opt/rh/devtoolset-7/enable\\n" - return 0 -} diff --git a/scripts/eosio_build_centos7_deps b/scripts/eosio_build_centos7_deps new file mode 100644 index 00000000000..60452c66252 --- /dev/null +++ b/scripts/eosio_build_centos7_deps @@ -0,0 +1,21 @@ +git,rpm -qa +autoconf,rpm -qa +automake,rpm -qa +libtool,rpm -qa +make,rpm -qa +bzip2,rpm -qa +doxygen,rpm -qa +graphviz,rpm -qa +bzip2-devel,rpm -qa +openssl-devel,rpm -qa +gmp-devel,rpm -qa +ocaml,rpm -qa +libicu-devel,rpm -qa +python,rpm -qa +python-devel,rpm -qa +rh-python36,rpm -qa +gettext-devel,rpm -qa +file,rpm -qa +libusbx-devel,rpm -qa +libcurl-devel,rpm -qa +patch,rpm -qa \ No newline at end of file diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 11bbbc37d99..d06afd8bb42 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -1,270 +1,95 @@ -if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi +echo "OS name: ${NAME}" +echo "OS Version: ${OS_VER}" +echo "CPU cores: ${CPU_CORES}" +echo "Physical Memory: ${MEM_GIG}G" +echo "Disk install: ${DISK_INSTALL}" +echo "Disk space total: ${DISK_TOTAL}G" +echo "Disk space available: ${DISK_AVAIL}G" -OS_VER=$(sw_vers -productVersion) -OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) -OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) -OS_PATCH=$(echo "${OS_VER}" | cut -d'.' -f3) -MEM_GIG=$(bc <<< "($(sysctl -in hw.memsize) / 1024000000)") -CPU_SPEED=$(bc <<< "scale=2; ($(sysctl -in hw.cpufrequency) / 10^8) / 10") -CPU_CORE=$( sysctl -in machdep.cpu.core_count ) -export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) +[[ "${OS_MIN}" -lt 12 ]] && echo "You must be running Mac OS 10.12.x or higher to install EOSIO." && exit 1 -DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) -blksize=$(df . | head -1 | awk '{print $2}' | cut -d- -f1) -gbfactor=$(( 1073741824 / blksize )) -total_blks=$(df . 
| tail -1 | awk '{print $2}') -avail_blks=$(df . | tail -1 | awk '{print $4}') -DISK_TOTAL=$((total_blks / gbfactor )) -DISK_AVAIL=$((avail_blks / gbfactor )) +[[ $MEM_GIG -lt 7 ]] && echo "Your system must have 7 or more Gigabytes of physical memory installed." && exit 1 +[[ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]] && echo " - You must have at least ${DISK_MIN}GB of available storage to install EOSIO." && exit 1 -export HOMEBREW_NO_AUTO_UPDATE=1 +echo "" -COUNT=1 -DISPLAY="" -DEPS="" +echo "${COLOR_CYAN}[Ensuring xcode-select installation]${COLOR_NC}" +if ! XCODESELECT=$( command -v xcode-select ); then echo " - xcode-select must be installed in order to proceed!" && exit 1; +else echo " - xcode-select installation found @ ${XCODESELECT}"; fi -printf "\\nOS name: ${OS_NAME}\\n" -printf "OS Version: ${OS_VER}\\n" -printf "CPU speed: ${CPU_SPEED}Mhz\\n" -printf "CPU cores: %s\\n" "${CPU_CORE}" -printf "Physical Memory: ${MEM_GIG} Gbytes\\n" -printf "Disk install: ${DISK_INSTALL}\\n" -printf "Disk space total: ${DISK_TOTAL}G\\n" -printf "Disk space available: ${DISK_AVAIL}G\\n" +echo "${COLOR_CYAN}[Ensuring Ruby installation]${COLOR_NC}" +if ! RUBY=$( command -v ruby ); then echo " - Ruby must be installed in order to proceed!" && exit 1; +else echo " - Ruby installation found @ ${RUBY}"; fi -if [ "${MEM_GIG}" -lt 7 ]; then - echo "Your system must have 7 or more Gigabytes of physical memory installed." - echo "Exiting now." - exit 1 -fi - -if [ "${OS_MIN}" -lt 12 ]; then - echo "You must be running Mac OS 10.12.x or higher to install EOSIO." - echo "Exiting now." - exit 1 -fi - -if [ "${DISK_AVAIL}" -lt "$DISK_MIN" ]; then - echo "You must have at least ${DISK_MIN}GB of available storage to install EOSIO." - echo "Exiting now." - exit 1 -fi - -printf "\\n" - -printf "Checking xcode-select installation...\\n" -if ! XCODESELECT=$( command -v xcode-select) -then - printf " - XCode must be installed in order to proceed!\\n\\n" - exit 1 -fi -printf " - XCode installation found @ ${XCODESELECT}\\n" - -printf "Checking Ruby installation...\\n" -if ! RUBY=$( command -v ruby) -then - printf " - Ruby must be installed in order to proceed!\\n" - exit 1 -fi -printf " - Ruby installation found @ ${RUBY}\\n" - -printf "Checking Home Brew installation...\\n" -if ! BREW=$( command -v brew ) -then - printf "Homebrew must be installed to compile EOS.IO!\\n" - if [ $ANSWER != 1 ]; then read -p "Do you wish to install HomeBrew? (y/n)? " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - "${XCODESELECT}" --install 2>/dev/null; - if ! "${RUBY}" -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"; then - echo " - Unable to install homebrew at this time." - exit 1; - else - BREW=$( command -v brew ) - fi - ;; - [Nn]* ) echo "User aborted homebrew installation. Exiting now."; exit 1;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac - -fi -printf " - Home Brew installation found @ ${BREW}\\n" - -printf "\\nChecking dependencies...\\n" -var_ifs="${IFS}" -IFS="," -while read -r name tester testee brewname uri; do - if [ "${tester}" "${testee}" ]; then - printf " - %s found\\n" "${name}" - continue - fi - # resolve conflict with homebrew glibtool and apple/gnu installs of libtool - if [ "${testee}" == "/usr/local/bin/glibtool" ]; then - if [ "${tester}" "/usr/local/bin/libtool" ]; then - printf " - %s found\\n" "${name}" - continue - fi - fi - DEPS=$DEPS"${brewname}," - DISPLAY="${DISPLAY}${COUNT}. 
${name}\\n" - printf " - %s ${bldred}NOT${txtrst} found.\\n" "${name}" - (( COUNT++ )) -done < "${REPO_ROOT}/scripts/eosio_build_darwin_deps" -IFS="${var_ifs}" +ensure-homebrew if [ ! -d /usr/local/Frameworks ]; then - printf "\\n${bldred}/usr/local/Frameworks is necessary to brew install python@3. Run the following commands as sudo and try again:${txtrst}\\n" - printf "sudo mkdir /usr/local/Frameworks && sudo chown $(whoami):admin /usr/local/Frameworks\\n\\n" + echo "${COLOR_YELLOW}/usr/local/Frameworks is necessary to brew install python@3. Run the following commands as sudo and try again:${COLOR_NC}" + echo "sudo mkdir /usr/local/Frameworks && sudo chown $(whoami):admin /usr/local/Frameworks" exit 1; fi -if [ $COUNT -gt 1 ]; then - printf "\\nThe following dependencies are required to install EOSIO:\\n" - printf "${DISPLAY}\\n\\n" - if [ $ANSWER != 1 ]; then read -p "Do you wish to install these packages? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - "${XCODESELECT}" --install 2>/dev/null; - if [ $1 == 0 ]; then read -p "Do you wish to update homebrew packages first? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - if ! brew update; then - printf " - Brew update failed.\\n" - exit 1; - else - printf " - Brew update complete.\\n" - fi - ;; - [Nn]* ) echo "Proceeding without update!";; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac - brew tap eosio/eosio # Required to install mongo-cxx-driver with static library - printf "\\nInstalling Dependencies...\\n" - # Ignore cmake so we don't install a newer version. - # Build from source to use local cmake; see homebrew-eosio repo for examples - # DON'T INSTALL llvm@4 WITH --force! - OIFS="$IFS" - IFS=$',' - for DEP in $DEPS; do - # Eval to support string/arguments with $DEP - if ! eval $BREW install $DEP; then - printf " - Homebrew exited with the above errors!\\n" - exit 1; - fi - done - IFS="$OIFS" - ;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf "\\n - No required Home Brew dependencies to install.\\n" -fi - - -printf "\\n" - - -export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/$BOOST_VERSION_MAJOR.$BOOST_VERSION_MINOR.$BOOST_VERSION_PATCH/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) --with-iostreams --with-date_time --with-filesystem \ - --with-system --with-program_options --with-chrono --with-test install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT}.\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! 
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - -printf "\\n" - - -# We install llvm into /usr/local/opt using brew install llvm@4 -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - ln -s /usr/local/opt/llvm@4 $LLVM_ROOT \ - || exit 1 - printf " - LLVM successfully linked from /usr/local/opt/llvm@4 to ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi - - -cd .. 
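A note on the error-handling idiom in the blocks deleted above: each installer chains its steps with && and terminates the chain with || exit 1, so the trailing if [ $? -ne 0 ]; then exit -1; fi guards are dead code; by the time they run, any failure has already exited the script. The replacement scripts drop both patterns in favor of set -eo pipefail plus an execute wrapper. A minimal sketch of the two styles, using a hypothetical fetch_and_build step and a placeholder URL:

#!/usr/bin/env bash
fetch_and_build() { curl -LO "$1" && tar -xzf "$(basename "$1")"; }

# old style: the chain aborts itself, so a later $? test can never fire
fetch_and_build "https://example.com/pkg.tar.gz" || exit 1

# new style: let the shell abort on the first failing command
set -eo pipefail
fetch_and_build "https://example.com/pkg.tar.gz"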
-printf "\\n" - -function print_instructions() { - return 0 -} +# Handle clang/compiler +ensure-compiler +# Ensure packages exist +ensure-brew-packages "${REPO_ROOT}/scripts/eosio_build_darwin_deps" +[[ -z "${CMAKE}" ]] && export CMAKE="/usr/local/bin/cmake" +# CLANG Installation +build-clang +# LLVM Installation +ensure-llvm +# BOOST Installation +ensure-boost +# MONGO Installation +if $INSTALL_MONGO; then + echo "${COLOR_CYAN}[Ensuring MongoDB installation]${COLOR_NC}" + if [[ ! -d $MONGODB_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && mv $SRC_DIR/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_DIR/mongod.log \ + && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_DIR \ + && rm -rf $MONGODB_LINK_DIR \ + && rm -rf $BIN_DIR/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_DIR \ + && ln -s $MONGODB_LINK_DIR/bin/mongod $BIN_DIR/mongod" + echo " - MongoDB successfully installed @ ${MONGODB_ROOT}" + else + echo " - MongoDB found with correct version @ ${MONGODB_ROOT}." + fi + echo "${COLOR_CYAN}[Ensuring MongoDB C driver installation]${COLOR_NC}" + if [[ ! -d $MONGO_C_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SASL=OFF -DENABLE_SNAPPY=OFF .. \ + && make -j${JOBS} \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz" + echo " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}." + else + echo " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}." + fi + echo "${COLOR_CYAN}[Ensuring MongoDB C++ driver installation]${COLOR_NC}" + if [[ "$(grep "Version:" $EOSIO_INSTALL_DIR/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}' || true)" != $MONGO_CXX_DRIVER_VERSION ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r${MONGO_CXX_DRIVER_VERSION}.tar.gz -o mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DCMAKE_PREFIX_PATH=$EOSIO_INSTALL_DIR .. \ + && make -j${JOBS} VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz" + echo " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}." + else + echo " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}." 
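Each installer block above runs as a single execute bash -c "..." invocation, so the whole chain can be traced or skipped as one unit. The execute helper itself lives in scripts/helpers/general.sh and is not part of this hunk; a hedged sketch of what such a wrapper can look like, assuming VERBOSE and DRYRUN are the booleans printed by setup() later in this diff:

VERBOSE=${VERBOSE:-true}; DRYRUN=${DRYRUN:-false}   # illustrative defaults
execute() {
    if $VERBOSE; then echo "Executing: $*"; fi   # trace the command being run
    if ! $DRYRUN; then "$@"; fi                  # perform it unless dry-running
}
execute mkdir -p "$HOME/example-dir"   # illustrative call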
+ fi +fi \ No newline at end of file diff --git a/scripts/eosio_build_darwin_deps b/scripts/eosio_build_darwin_deps index 44192f04309..b2cf9a8c6f5 100755 --- a/scripts/eosio_build_darwin_deps +++ b/scripts/eosio_build_darwin_deps @@ -1,13 +1,13 @@ -cmake,-f,/usr/local/bin/cmake,cmake,https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4.tar.gz -automake,-x,/usr/local/bin/automake,automake,http://ftp.gnu.org/gnu/automake/automake-1.15.tar.gz -Libtool,-x,/usr/local/bin/glibtool,libtool,http://gnu.askapache.com/libtool/libtool-2.4.6.tar.gz -OpenSSL,-f,/usr/local/opt/openssl/lib/libssl.a,openssl,https://www.openssl.org/source/openssl-1.0.2n.tar.gz -wget,-x,/usr/local/bin/wget,wget,https://ftp.gnu.org/gnu/wget/wget-1.19.2.tar.gz -GMP,-f,/usr/local/opt/gmp/include/gmpxx.h,gmp,https://ftp.gnu.org/gnu/gmp/gmp-6.1.2.tar.bz2 -llvm,-x,/usr/local/opt/llvm@4/bin/clang-4.0,llvm@4,http://releases.llvm.org/4.0.1/llvm-4.0.1.src.tar.xz -python,-d,/usr/local/Cellar/python/3.7.2_1,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz -python@2,-d,/usr/local/Cellar/python@2/2.7.15_2,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz -doxygen,-f,/usr/local/bin/doxygen,doxygen,http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.14.src.tar.gz -graphviz,-d,/usr/local/opt/graphviz,graphviz,https://fossies.org/linux/misc/graphviz-2.40.1.tar.gz -libusb,-f,/usr/local/lib/libusb-1.0.0.dylib,libusb,https://github.com/libusb/libusb/releases/download/v1.0.22/libusb-1.0.22.tar.bz2 -pkgconfig,-x,/usr/local/bin/pkg-config,pkgconfig,https://pkg-config.freedesktop.org/releases/pkg-config-0.29.2.tar.gz +cmake,/usr/local/bin/cmake +graphviz,/usr/local/opt/graphviz +libtool,/usr/local/bin/glibtool +automake,/usr/local/bin/automake +wget,/usr/local/bin/wget +gmp,/usr/local/opt/gmp/include/gmpxx.h +llvm@4,/usr/local/opt/llvm@4 +pkgconfig,/usr/local/bin/pkg-config +python,/usr/local/opt/python3 +python@2,/usr/local/opt/python2 +doxygen,/usr/local/bin/doxygen +libusb,/usr/local/lib/libusb-1.0.0.dylib +openssl,/usr/local/opt/openssl/lib/libssl.a \ No newline at end of file diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh deleted file mode 100755 index c27f47658d3..00000000000 --- a/scripts/eosio_build_fedora.sh +++ /dev/null @@ -1,236 +0,0 @@ -if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - -CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 ) -CPU_CORE=$( nproc ) - -OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' ) -if [ "${OS_VER}" -lt 25 ]; then - printf "You must be running Fedora 25 or higher to install EOSIO.\\n" - printf "Exiting now.\\n" - exit 1; -fi - -# procps-ng includes free command -if [[ -z "$( rpm -qi "procps-ng" 2>/dev/null | grep Name )" ]]; then yum install -y procps-ng; fi -MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) -if [ "${MEM_MEG}" -lt 7000 ]; then - printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "Exiting now.\\n" - exit 1; -fi -MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) -export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - -DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\\ -f1 ) -DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) -DISK_AVAIL_KB=$( df . 
| tail -1 | awk '{print $4}' ) -DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) -DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "Exiting now.\\n" - exit 1; -fi - -printf "\\nOS name: ${OS_NAME}\\n" -printf "OS Version: ${OS_VER}\\n" -printf "CPU speed: ${CPU_SPEED}Mhz\\n" -printf "CPU cores: ${CPU_CORE}\\n" -printf "Physical Memory: ${MEM_MEG} Mgb\\n" -printf "Disk space total: ${DISK_TOTAL%.*}G\\n" -printf "Disk space available: ${DISK_AVAIL%.*}G\\n" - -# llvm is symlinked from /usr/lib64/llvm4.0 into user's home -DEP_ARRAY=( - git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \ - bzip2-devel wget bzip2 compat-openssl10 graphviz doxygen \ - openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \ - libedit ncurses-devel swig llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static libcurl-devel libusb-devel -) -COUNT=1 -DISPLAY="" -DEP="" - -printf "\\nChecking Yum installation...\\n" -if ! YUM=$( command -v yum 2>/dev/null ); then - printf "!! Yum must be installed to compile EOS.IO !!\\n" - printf "Exiting now.\\n" - exit 1; -fi -printf " - Yum installation found at %s.\\n" "${YUM}" - - -if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi -case $ANSWER in - 1 | [Yy]* ) - if ! sudo $YUM -y update; then - printf " - YUM update failed.\\n" - exit 1; - else - printf " - YUM update complete.\\n" - fi - ;; - [Nn]* ) echo " - Proceeding without update!";; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; -esac - -printf "Checking RPM for installed dependencies...\\n" -for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do - pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name ) - if [[ -z $pkg ]]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" - printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi -done -if [ "${COUNT}" -gt 1 ]; then - printf "\\nThe following dependencies are required to install EOSIO:\\n" - printf "${DISPLAY}\\n\\n" - if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - if ! sudo $YUM -y install ${DEP}; then - printf " - YUM dependency installation failed!\\n" - exit 1; - else - printf " - YUM dependencies installed successfully.\\n" - fi - ;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf " - No required YUM dependencies to install.\\n" -fi - -printf "\\n" - - -printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then - printf "Installing CMAKE...\\n" - curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ - && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ - && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ - && make -j"${JOBS}" \ - && make install \ - && cd .. \ - && rm -f cmake-$CMAKE_VERSION.tar.gz \ - || exit 1 - printf " - CMAKE successfully installed @ ${CMAKE} \\n" -else - printf " - CMAKE found @ ${CMAKE}.\\n" -fi -if [ $? 
-ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! 
-d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - ln -s /usr/lib64/llvm4.0 $LLVM_ROOT \ - || exit 1 - printf " - LLVM successfully linked from /usr/lib64/llvm4.0 to ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -function print_instructions() { - return 0 -} \ No newline at end of file diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 65fbfeeec07..6912919cc1e 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -1,259 +1,94 @@ -if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - -OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' ) -OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) -OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) - -MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 || cut -d' ' -f2 ) -CPU_SPEED=$( lscpu | grep -m1 "MHz" | tr -s ' ' | cut -d\ -f3 || cut -d' ' -f3 | cut -d'.' -f1 ) -CPU_CORE=$( nproc ) -MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) -export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - -DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) -DISK_TOTAL_KB=$(df . | tail -1 | awk '{print $2}') -DISK_AVAIL_KB=$(df . | tail -1 | awk '{print $4}') -DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) -DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) - -printf "\\nOS name: ${OS_NAME}\\n" -printf "OS Version: ${OS_VER}\\n" -printf "CPU speed: ${CPU_SPEED}Mhz\\n" -printf "CPU cores: %s\\n" "${CPU_CORE}" -printf "Physical Memory: ${MEM_MEG} Mgb\\n" -printf "Disk install: ${DISK_INSTALL}\\n" -printf "Disk space total: ${DISK_TOTAL%.*}G\\n" -printf "Disk space available: ${DISK_AVAIL%.*}G\\n" - -if [ "${MEM_MEG}" -lt 7000 ]; then - printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "Exiting now.\\n" - exit 1 -fi - -case "${OS_NAME}" in - "Linux Mint") - if [ "${OS_MAJ}" -lt 18 ]; then - printf "You must be running Linux Mint 18.x or higher to install EOSIO.\\n" - printf "Exiting now.\\n" - exit 1 - fi - ;; - "Ubuntu") - if [ "${OS_MAJ}" -lt 16 ]; then - printf "You must be running Ubuntu 16.04.x or higher to install EOSIO.\\n" - printf "Exiting now.\\n" - exit 1 - fi +echo "OS name: ${NAME}" +echo "OS Version: ${VERSION_ID}" +echo "CPU cores: ${CPU_CORES}" +echo "Physical Memory: ${MEM_GIG}G" +echo "Disk space total: ${DISK_TOTAL}G" +echo "Disk space available: ${DISK_AVAIL}G" + +( [[ $NAME == "Ubuntu" ]] && ( [[ "$(echo ${VERSION_ID})" == "16.04" ]] || [[ "$(echo ${VERSION_ID})" == "18.04" ]] ) ) || ( echo " - You must be running 16.04.x or 18.04.x to install EOSIO." 
&& exit 1 ) + +[[ $MEM_GIG -lt 7 ]] && echo "Your system must have 7 or more Gigabytes of physical memory installed." && exit 1 +[[ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]] && echo " - You must have at least ${DISK_MIN}GB of available storage to install EOSIO." && exit 1 + +# system clang and build essential for Ubuntu 18 (16 too old) +( [[ $PIN_COMPILER == false ]] && [[ $VERSION_ID == "18.04" ]] ) && EXTRA_DEPS=(clang,dpkg\ -s) +# We install clang8 for Ubuntu 16, but we still need something to compile cmake, boost, etc + pinned 18 still needs something to build source +( [[ $VERSION_ID == "16.04" ]] || ( $PIN_COMPILER && [[ $VERSION_ID == "18.04" ]] ) ) && ensure-build-essential +# Ensure packages exist +([[ $PIN_COMPILER == false ]] && [[ $BUILD_CLANG == false ]]) && EXTRA_DEPS+=(llvm-4.0,dpkg\ -s) +$ENABLE_COVERAGE_TESTING && EXTRA_DEPS+=(lcov,dpkg\ -s) +ensure-apt-packages "${REPO_ROOT}/scripts/eosio_build_ubuntu_deps" $(echo ${EXTRA_DEPS[@]}) +echo "" +# Handle clang/compiler +ensure-compiler +# CMAKE Installation +ensure-cmake +# CLANG Installation +build-clang +# LLVM Installation +ensure-llvm +# BOOST Installation +ensure-boost +VERSION_MAJ=$(echo "${VERSION_ID}" | cut -d'.' -f1) +VERSION_MIN=$(echo "${VERSION_ID}" | cut -d'.' -f2) +if $INSTALL_MONGO; then + if [[ $VERSION_MAJ == 18 ]]; then # UBUNTU 18 doesn't have MONGODB 3.6.3 - if [ $OS_MAJ -gt 16 ]; then - export MONGODB_VERSION=4.1.1 - fi + MONGODB_VERSION=4.1.1 # We have to re-set this with the new version - export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION} - ;; - "Debian") - if [ $OS_MAJ -lt 10 ]; then - printf "You must be running Debian 10 to install EOSIO, and resolve missing dependencies from unstable (sid).\n" - printf "Exiting now.\n" - exit 1 + MONGODB_ROOT=${OPT_DIR}/mongodb-${MONGODB_VERSION} fi - ;; -esac - -if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "Exiting now.\\n" - exit 1 -fi - -# llvm-4.0 is installed into /usr/lib/llvm-4.0 -# clang is necessary for building on ubuntu -DEP_ARRAY=( - git llvm-4.0 clang-4.0 libclang-4.0-dev make automake libbz2-dev libssl-dev doxygen graphviz \ - libgmp3-dev autotools-dev build-essential libicu-dev python2.7 python2.7-dev python3 python3-dev \ - autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config -) -COUNT=1 -DISPLAY="" -DEP="" - -if [[ "${ENABLE_CODE_COVERAGE}" == true ]]; then - DEP_ARRAY+=(lcov) -fi - -if [ $ANSWER != 1 ]; then read -p "Do you wish to update repositories with apt-get update? (y/n) " ANSWER; fi -case $ANSWER in - 1 | [Yy]* ) - if ! sudo apt-get update; then - printf " - APT update failed.\\n" - exit 1; - else - printf " - APT update complete.\\n" - fi - ;; - [Nn]* ) echo "Proceeding without update!";; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; -esac - -printf "\\nChecking for installed dependencies...\\n" -for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do - pkg=$( dpkg -s "${DEP_ARRAY[$i]}" 2>/dev/null | grep Status | tr -s ' ' | cut -d\ -f4 ) - if [ -z "$pkg" ]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" - printf " - Package %s${bldred} NOT${txtrst} found!\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) + echo "${COLOR_CYAN}[Ensuring MongoDB installation]${COLOR_NC}" + if [[ ! 
-d $MONGODB_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu${VERSION_MAJ}${VERSION_MIN}-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-ubuntu${VERSION_MAJ}${VERSION_MIN}-${MONGODB_VERSION}.tgz \ + && mv $SRC_DIR/mongodb-linux-x86_64-ubuntu${VERSION_MAJ}${VERSION_MIN}-${MONGODB_VERSION} $MONGODB_ROOT \ + && touch $MONGODB_LOG_DIR/mongod.log \ + && rm -f mongodb-linux-x86_64-ubuntu${VERSION_MAJ}${VERSION_MIN}-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_DIR \ + && rm -rf $MONGODB_LINK_DIR \ + && rm -rf $BIN_DIR/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_DIR \ + && ln -s $MONGODB_LINK_DIR/bin/mongod $BIN_DIR/mongod" + echo " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_DIR})." else - printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" - continue + echo " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_DIR})." fi -done -if [ "${COUNT}" -gt 1 ]; then - printf "\\nThe following dependencies are required to install EOSIO:\\n" - printf "${DISPLAY}\\n\\n" - if [ $ANSWER != 1 ]; then read -p "Do you wish to install these packages? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - if ! sudo apt-get -y install ${DEP}; then - printf " - APT dependency failed.\\n" - exit 1 - else - printf " - APT dependencies installed successfully.\\n" - fi - ;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf " - No required APT dependencies to install." -fi - - -printf "\\n" - - -printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then - printf "Installing CMAKE...\\n" - curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ - && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ - && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ - && make -j"${JOBS}" \ - && make install \ - && cd .. \ - && rm -f cmake-$CMAKE_VERSION.tar.gz \ - || exit 1 - printf " - CMAKE successfully installed @ ${CMAKE} \\n" -else - printf " - CMAKE found @ ${CMAKE}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! 
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - ln -s /usr/lib/llvm-4.0 $LLVM_ROOT \ - || exit 1 - printf " - LLVM successfully linked from /usr/lib/llvm-4.0 to ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -function print_instructions() { - return 0 -} + echo "${COLOR_CYAN}[Ensuring MongoDB C driver installation]${COLOR_NC}" + if [[ ! 
-d $MONGO_C_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF $PINNED_TOOLCHAIN .. \ + && make -j${JOBS} \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz" + echo " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}." + else + echo " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}." + fi + echo "${COLOR_CYAN}[Ensuring MongoDB C++ driver installation]${COLOR_NC}" + if [[ ! -d $MONGO_CXX_DRIVER_ROOT ]]; then + execute bash -c "cd $SRC_DIR && \ + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_DIR -DCMAKE_PREFIX_PATH=$EOSIO_INSTALL_DIR $PINNED_TOOLCHAIN .. \ + && make -j${JOBS} VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz" + echo " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}." + else + echo " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}." + fi +fi \ No newline at end of file diff --git a/scripts/eosio_build_ubuntu_deps b/scripts/eosio_build_ubuntu_deps new file mode 100644 index 00000000000..e27bee25952 --- /dev/null +++ b/scripts/eosio_build_ubuntu_deps @@ -0,0 +1,25 @@ +git,dpkg -s +make,dpkg -s +bzip2,dpkg -s +automake,dpkg -s +libbz2-dev,dpkg -s +libssl-dev,dpkg -s +doxygen,dpkg -s +graphviz,dpkg -s +libgmp3-dev,dpkg -s +autotools-dev,dpkg -s +libicu-dev,dpkg -s +python2.7,dpkg -s +python2.7-dev,dpkg -s +python3,dpkg -s +python3-dev,dpkg -s +autoconf,dpkg -s +libtool,dpkg -s +curl,dpkg -s +zlib1g-dev,dpkg -s +sudo,dpkg -s +ruby,dpkg -s +libusb-1.0-0-dev,dpkg -s +libcurl4-gnutls-dev,dpkg -s +pkg-config,dpkg -s +patch,dpkg -s \ No newline at end of file diff --git a/scripts/eosio_install.sh b/scripts/eosio_install.sh index ac5a731f2fd..f8909fcac39 100755 --- a/scripts/eosio_install.sh +++ b/scripts/eosio_install.sh @@ -1,4 +1,6 @@ -#!/bin/bash +#!/usr/bin/env bash +set -eo pipefail +VERSION=2.0 ########################################################################## # This is the EOSIO automated install script for Linux and Mac OS. # This file was downloaded from https://github.com/EOSIO/eos # # Copyright (c) 2017, Respective Authors all rights reserved. 
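The eosio_build_*_deps files introduced in this diff are plain CSV lists: each row pairs a package name with the probe command used to detect it (dpkg -s on Ubuntu, rpm -qa on CentOS; the Darwin file instead pairs each brew package with the path it should provide). A minimal sketch of consuming such a file, under the assumption that the probe command exits non-zero when the package is absent:

# the `|| [[ -n $pkg ]]` guard handles the missing trailing newline noted above
while IFS=',' read -r pkg probe || [[ -n $pkg ]]; do
    # word-splitting of $probe is intentional: "dpkg -s" becomes command + flag
    if $probe "$pkg" &>/dev/null; then
        echo " - $pkg found"
    else
        echo " - $pkg NOT found"
    fi
done < scripts/eosio_build_ubuntu_deps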
# # After June 1, 2018 this software is available under the following terms: -# +# # The MIT License -# +# # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -30,41 +32,19 @@ # https://github.com/EOSIO/eos/blob/master/LICENSE.txt ########################################################################## -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -REPO_ROOT="${SCRIPT_DIR}/.." -BUILD_DIR="${REPO_ROOT}/build" - -OPT_LOCATION=$HOME/opt -BIN_LOCATION=$HOME/bin -LIB_LOCATION=$HOME/lib -mkdir -p $LIB_LOCATION +# Load eosio specific helper functions +. ./scripts/helpers/eosio.sh -CMAKE_BUILD_TYPE=Release -TIME_BEGIN=$( date -u +%s ) -INSTALL_PREFIX=$OPT_LOCATION/eosio -VERSION=1.2 +[[ ! $NAME == "Ubuntu" ]] && set -i # Ubuntu doesn't support interactive mode since it uses dash -txtbld=$(tput bold) -bldred=${txtbld}$(tput setaf 1) -txtrst=$(tput sgr0) +[[ ! -d $BUILD_DIR ]] && printf "${COLOR_RED}Please run ./eosio_build.sh first!${COLOR_NC}" && exit 1 +echo "${COLOR_CYAN}=====================================================================================" +echo "========================== ${COLOR_WHITE}Starting EOSIO Installation${COLOR_CYAN} ==============================${COLOR_NC}" +execute cd $BUILD_DIR +execute make install +execute cd .. -if [ ! -d $BUILD_DIR ]; then - printf "\\nError, eosio_build.sh has not ran. Please run ./eosio_build.sh first!\\n\\n" - exit -1 -fi - -if ! pushd "${BUILD_DIR}" &> /dev/null;then - printf "Unable to enter build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}" - exit 1; -fi - -if ! 
make install; then - printf "\\nMAKE installing EOSIO has exited with the above error.\\n\\n" - exit -1 -fi -popd &> /dev/null - -printf "\n${bldred} ___ ___ ___ ___\n" +printf "\n${COLOR_RED} ___ ___ ___ ___\n" printf " / /\\ / /\\ / /\\ ___ / /\\ \n" printf " / /:/_ / /::\\ / /:/_ / /\\ / /::\\ \n" printf " / /:/ /\\ / /:/\\:\\ / /:/ /\\ / /:/ / /:/\\:\\ \n" @@ -74,13 +54,10 @@ printf " \\ \\:\\/:/ /:/ \\ \\:\\ / /:/ \\ \\:\\/:/~/:/ \\ \\:\\/\\ \\ printf " \\ \\::/ /:/ \\ \\:\\ /:/ \\ \\::/ /:/ \\__\\::/ \\ \\:\\ /:/ \n" printf " \\ \\:\\/:/ \\ \\:\\/:/ \\__\\/ /:/ /__/:/ \\ \\:\\/:/ \n" printf " \\ \\::/ \\ \\::/ /__/:/ \\__\\/ \\ \\::/ \n" -printf " \\__\\/ \\__\\/ \\__\\/ \\__\\/ \n\n${txtrst}" +printf " \\__\\/ \\__\\/ \\__\\/ \\__\\/ \n\n${COLOR_NC}" printf "==============================================================================================\\n" -printf "EOSIO has been installed into ${OPT_LOCATION}/eosio/bin!\\n" -printf "If you need to, you can fully uninstall using eosio_uninstall.sh && scripts/clean_old_install.sh.\\n" +printf "${COLOR_GREEN}EOSIO has been installed into ${EOSIO_INSTALL_DIR}/bin${COLOR_NC}" +printf "\\n${COLOR_YELLOW}Uninstall with: ./scripts/eosio_uninstall.sh${COLOR_NC}\\n" printf "==============================================================================================\\n\\n" - -printf "EOSIO website: https://eos.io\\n" -printf "EOSIO resources: https://eos.io/resources/\\n" -printf "EOSIO Stack Exchange: https://eosio.stackexchange.com\\n" +resources diff --git a/scripts/eosio_uninstall.sh b/scripts/eosio_uninstall.sh index facb5f935f4..fb595779ac8 100755 --- a/scripts/eosio_uninstall.sh +++ b/scripts/eosio_uninstall.sh @@ -1,87 +1,89 @@ -#! /bin/bash +#!/usr/bin/env bash +set -eo pipefail -OPT_LOCATION=$HOME/opt +# Load bash script helper functions +. ./scripts/helpers/eosio.sh -binaries=( - cleos - eosio-abigen - eosio-launcher - eosio-s2wasm - eosio-wast2wasm - eosiocpp - keosd - nodeos - eosio-applesdemo -) +usage() { + printf "Usage --- \\n $ %s [ --full ] [ --force ]\\n + --full: Removal of data directory (be absolutely sure you want to delete it before using this!)\\n + --force: Unattended uninstall which works regardless of the eosio directory existence.\\n This helps cleanup dependencies and start fresh if you need to. + \\n" "$0" +} -if [ -d $OPT_LOCATION/eosio ]; then - printf "Do you wish to remove this install? (requires sudo)\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - if [ "$(id -u)" -ne 0 ]; then - printf "\nThis requires sudo, please run ./eosio_uninstall.sh with sudo\n\n" - exit -1 - fi - - pushd $HOME &> /dev/null - pushd opt &> /dev/null - rm -rf eosio - # Handle cleanup of directories created from installation - if [ "$1" == "--full" ]; then - if [ -d ~/Library/Application\ Support/eosio ]; then rm -rf ~/Library/Application\ Support/eosio; fi # Mac OS - if [ -d ~/.local/share/eosio ]; then rm -rf ~/.local/share/eosio; fi # Linux - fi - popd &> /dev/null - pushd bin &> /dev/null - for binary in ${binaries[@]}; do - rm ${binary} - done - popd &> /dev/null - pushd lib/cmake &> /dev/null - rm -rf eosio - popd &> /dev/null +INSTALL_PATHS=() +# User input handling +PROCEED=false +DEP_PROCEED=false +FORCED=false +FULL=false +if [[ $@ =~ [[:space:]]?--force[[:space:]]? ]]; then + echo "[Forcing Unattended Removal: Enabled]" + FORCED=true + PROCEED=true + DEP_PROCEED=true +fi +if [[ $@ =~ [[:space:]]?--full[[:space:]]? 
]]; then + echo "[Full removal (nodeos generated state, etc): Enabled]" + if $FORCED; then + FULL=true + elif [[ $FORCED == false ]]; then + while true; do + read -p "Removal of the eosio data directory will require a resync of data which can take days. Do you wish to proceed? (y/n) " PROCEED + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) + FULL=true break;; - [Nn]* ) - printf "Aborting uninstall\n\n" - exit -1;; - esac - done + 1 | false | [Nn]* ) break;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + fi fi +if [[ ! -z $@ ]] && [[ ! $@ =~ [[:space:]]?--force[[:space:]]? ]] && [[ ! $@ =~ [[:space:]]?--full[[:space:]]? ]]; then usage && exit; fi -if [ -d "/usr/local/eosio" ]; then - printf "Do you wish to remove this install? (requires sudo)\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - if [ "$(id -u)" -ne 0 ]; then - printf "\nThis requires sudo, please run ./eosio_uninstall.sh with sudo\n\n" - exit -1 - fi +# As of 1.8.0, we're using a versioned directories under home: https://github.com/EOSIO/eos/issues/6940 +[[ -d "${EOSIO_INSTALL_DIR}" ]] && echo "[EOSIO Installation Found: ${EOSIO_INSTALL_DIR}]" && INSTALL_PATHS+=("${EOSIO_INSTALL_DIR}") # EOSIO_INSTALL_DIR set in .environment - pushd /usr/local &> /dev/null - pushd opt &> /dev/null - rm -rf eosio - # Handle cleanup of directories created from installation - if [ "$1" == "--full" ]; then - if [ -d ~/Library/Application\ Support/eosio ]; then rm -rf ~/Library/Application\ Support/eosio; fi # Mac OS - if [ -d ~/.local/share/eosio ]; then rm -rf ~/.local/share/eosio; fi # Linux - fi - popd &> /dev/null - pushd bin &> /dev/null - for binary in ${binaries[@]}; do - rm ${binary} +# Removal +while true; do + [[ $FORCED == false ]] && read -p "Do you wish to remove the installation? (y/n) " PROCEED + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) + echo "[Removing EOSIO and Dependencies]" + if [[ $ARCH == "Darwin" ]]; then + for package in $(cat scripts/eosio_build_darwin_deps | cut -d, -f1 2>/dev/null); do + while true; do + [[ $FORCED == false ]] && read -p "Do you wish to uninstall and unlink all brew installed ${package} versions? 
(y/n) " DEP_PROCEED + case $DEP_PROCEED in + "") echo "What would you like to do?";; + 0 | true | [Yy]* ) + execute brew uninstall $package --force || true + execute brew cleanup -s $package || true + break;; + 1 | false | [Nn]* ) break;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done done - popd &> /dev/null - pushd lib/cmake &> /dev/null - rm -rf eosio - popd &> /dev/null - - break;; - [Nn]* ) - printf "Aborting uninstall\n\n" - exit -1;; - esac - done -fi + fi + # Handle cleanup of data directory + if $FULL; then + ## Add both just to be safe + [[ $ARCH == "Darwin" ]] && INSTALL_PATHS+=("${HOME}/Library/Application\ Support/eosio") + [[ $ARCH != "Darwin" ]] && INSTALL_PATHS+=("${HOME}/.local/share/eosio") + fi + # Version < 1.8.0; Before we started using ~/eosio/1.8.x + # Arrays should return with newlines (IFS=\n;helpers.sh) as Application\ Support will split into two + for INSTALL_PATH in ${INSTALL_PATHS[@]}; do + execute rm -rf $INSTALL_PATH + done + echo "[EOSIO Removal Complete]" + break;; + 1 | false | [Nn]* ) echo " - Cancelled EOSIO Removal!"; exit 1;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac +done \ No newline at end of file diff --git a/scripts/full_uninstaller.sh b/scripts/full_uninstaller.sh deleted file mode 100755 index 94401c4a12e..00000000000 --- a/scripts/full_uninstaller.sh +++ /dev/null @@ -1,134 +0,0 @@ -#! /bin/bash -ANSWER=0 -if [[ $1 =~ force-* ]]; then FORCED=1; else FORCED=0; fi -if [ -d "/usr/local/include/eosio" ] || [ -d "$HOME/opt/eosio" ] || [ $FORCED == 1 ]; then # use force for running the script directly - printf "\nEOSIO installation (AND DEPENDENCIES) already found...\n" - if [ $1 == 0 ]; then - read -p "Do you wish to remove them? (this includes dependencies)? (y/n) " ANSWER - elif [ $1 == 1 ] || [ $FORCED == 1 ]; then - ANSWER=1 - fi - echo "Uninstalling..." - case $ANSWER in - 1 | [Yy]* ) - if [ -d "$HOME/opt/eosio" ] || [[ $1 == "force-new" ]]; then - if [ $( uname ) == "Darwin" ]; then - # gettext and other brew packages are not modified as they can be dependencies for things other than eosio - if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed llvm@4 versions? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - brew uninstall llvm@4 --force - brew cleanup -s llvm@4 - ;; - [Nn]* ) ;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac - if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed doxygen versions? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - brew uninstall doxygen --force - brew cleanup -s doxygen - ;; - [Nn]* ) ;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac - if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed graphviz versions? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - brew uninstall graphviz --force - brew cleanup -s graphviz - ;; - [Nn]* ) ;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac - if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed libusb versions? 
(y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - brew uninstall libusb --force - brew cleanup -s libusb - ;; - [Nn]* ) ;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac - fi - rm -rf $HOME/opt/eosio - rm -f $HOME/bin/eosio-launcher - rm -rf $HOME/lib/cmake/eosios - rm -rf $HOME/opt/llvm - rm -f $HOME/opt/boost - rm -rf $HOME/src/boost_* - rm -rf $HOME/src/cmake-* - rm -rf $HOME/share/cmake-* - rm -rf $HOME/share/aclocal/cmake* - rm -rf $HOME/doc/cmake* - rm -f $HOME/bin/nodeos $HOME/bin/keosd $HOME/bin/cleos $HOME/bin/ctest $HOME/bin/*cmake* $HOME/bin/cpack - rm -rf $HOME/src/mongo* - fi - - if [ -d "/usr/local/include/eosio" ] || [[ $1 == "force-old" ]]; then - if [ "$(id -u)" -ne 0 ]; then - printf "\nCleanup requires sudo... Please manually run ./scripts/clean_old_install.sh with sudo.\n" - exit -1 - fi - pushd /usr/local &> /dev/null - rm -rf wasm - pushd include &> /dev/null - rm -rf libbson-1.0 libmongoc-1.0 mongocxx bsoncxx appbase chainbase eosio.system eosiolib fc libc++ musl secp256k* 2>/dev/null - rm -rf eosio 2>/dev/null - popd &> /dev/null - pushd bin &> /dev/null - rm cleos eosio-abigen eosio-applesedemo eosio-launcher eosio-s2wasm eosio-wast2wasm eosiocpp keosd nodeos 2>/dev/null - popd &> /dev/null - libraries=( - libeosio_testing - libeosio_chain - libfc - libbinaryen - libWAST - libWASM - libRuntime - libPlatform - libIR - libLogging - libsoftfloat - libchainbase - libappbase - libbuiltins - libbson-1.0 - libbson-static-1.0.a - libbsoncxx-static - libmongoc-1.0 - libmongoc-static-1.0.a - libmongocxx-static - libsecp256k* - ) - pushd lib &> /dev/null - for lib in ${libraries[@]}; do - rm ${lib}.a ${lib}.dylib ${lib}.so 2>/dev/null - rm pkgconfig/${lib}.pc 2>/dev/null - rm cmake/${lib} 2>/dev/null - done - popd &> /dev/null - pushd etc &> /dev/null - rm eosio 2>/dev/null - popd &> /dev/null - pushd share &> /dev/null - rm eosio 2>/dev/null - popd &> /dev/null - pushd usr/share &> /dev/null - rm eosio 2>/dev/null - popd &> /dev/null - pushd var/lib &> /dev/null - rm eosio 2>/dev/null - popd &> /dev/null - pushd var/log &> /dev/null - rm eosio 2>/dev/null - popd &> /dev/null - fi - ;; - [Nn]* ) - printf "Skipping\n\n" - exit 0 - ;; - esac -fi diff --git a/scripts/generate_bottle.sh b/scripts/generate_bottle.sh index 075fb9a7bb7..034ea7bc935 100644 --- a/scripts/generate_bottle.sh +++ b/scripts/generate_bottle.sh @@ -1,4 +1,5 @@ -#! /bin/bash +#!/usr/bin/env bash +set -eo pipefail VERS=`sw_vers -productVersion | awk '/10\.13\..*/{print $0}'` if [[ -z "$VERS" ]]; diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh index 9686c904036..3c7de977a42 100755 --- a/scripts/generate_deb.sh +++ b/scripts/generate_deb.sh @@ -1,4 +1,5 @@ -#! /bin/bash +#!/usr/bin/env bash +set -eo pipefail PREFIX="usr" SPREFIX=${PREFIX} diff --git a/scripts/generate_package.sh.in b/scripts/generate_package.sh.in index 4874a1b4246..003f268021a 100644 --- a/scripts/generate_package.sh.in +++ b/scripts/generate_package.sh.in @@ -1,4 +1,5 @@ -#! /bin/bash +#!/usr/bin/env bash +set -eo pipefail VARIANT=$1 diff --git a/scripts/generate_rpm.sh b/scripts/generate_rpm.sh index 625eff29d7b..71bc2f619da 100644 --- a/scripts/generate_rpm.sh +++ b/scripts/generate_rpm.sh @@ -1,4 +1,5 @@ -#! /bin/bash +#!/usr/bin/env bash +set -eo pipefail PREFIX="usr" SPREFIX=${PREFIX} diff --git a/scripts/generate_tarball.sh b/scripts/generate_tarball.sh index 02f5e009ce5..203ad1a29b9 100644 --- a/scripts/generate_tarball.sh +++ b/scripts/generate_tarball.sh @@ -1,4 +1,5 @@ -#! 
/bin/bash +#!/usr/bin/env bash +set -eo pipefail NAME=$1 EOS_PREFIX=${PREFIX}/${SUBPREFIX} diff --git a/scripts/helpers/eosio.sh b/scripts/helpers/eosio.sh new file mode 100755 index 00000000000..34fbacac445 --- /dev/null +++ b/scripts/helpers/eosio.sh @@ -0,0 +1,345 @@ +# Checks for Arch and OS + Support for tests setting them manually +## Necessary for linux exclusion while running bats tests/bash-bats/*.sh +[[ -z "${ARCH}" ]] && export ARCH=$( uname ) +if [[ -z "${NAME}" ]]; then + if [[ $ARCH == "Linux" ]]; then + [[ ! -e /etc/os-release ]] && echo "${COLOR_RED} - /etc/os-release not found! It seems you're attempting to use an unsupported Linux distribution.${COLOR_NC}" && exit 1 + # Obtain OS NAME and VERSION + . /etc/os-release + elif [[ $ARCH == "Darwin" ]]; then export NAME=$(sw_vers -productName) + else echo " ${COLOR_RED}- EOSIO is not supported for your architecture!${COLOR_NC}" && exit 1 + fi +fi + +# Setup yum and apt variables +if [[ $NAME =~ "Amazon Linux" ]] || [[ $NAME == "CentOS Linux" ]]; then + if ! YUM=$( command -v yum 2>/dev/null ); then echo "${COLOR_RED}YUM must be installed to compile EOSIO${COLOR_NC}" && exit 1; fi +elif [[ $NAME == "Ubuntu" ]]; then + if ! APTGET=$( command -v apt-get 2>/dev/null ); then echo "${COLOR_RED}APT-GET must be installed to compile EOSIO${COLOR_NC}" && exit 1; fi +fi + +# Obtain dependency versions; must come first in the script +. ./scripts/.environment +# Load general helpers +. ./scripts/helpers/general.sh + +function setup() { + if $VERBOSE; then + echo "VERBOSE: ${VERBOSE}" + echo "DRYRUN: ${DRYRUN}" + echo "TEMP_DIR: ${TEMP_DIR}" + echo "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}" + echo "CORE_SYMBOL_NAME: ${CORE_SYMBOL_NAME}" + echo "BOOST_LOCATION: ${BOOST_LOCATION}" + echo "INSTALL_LOCATION: ${INSTALL_LOCATION}" + echo "BUILD_DIR: ${BUILD_DIR}" + echo "EOSIO_INSTALL_DIR: ${EOSIO_INSTALL_DIR}" + echo "NONINTERACTIVE: ${NONINTERACTIVE}" + echo "PROCEED: ${PROCEED}" + echo "ENABLE_COVERAGE_TESTING: ${ENABLE_COVERAGE_TESTING}" + echo "ENABLE_DOXYGEN: ${ENABLE_DOXYGEN}" + echo "ENABLE_MONGO: ${ENABLE_MONGO}" + echo "INSTALL_MONGO: ${INSTALL_MONGO}" + echo "SUDO_LOCATION: ${SUDO_LOCATION}" + echo "PIN_COMPILER: ${PIN_COMPILER}" + fi + ( [[ -d $BUILD_DIR ]] && [[ -z $BUILD_DIR_CLEANUP_SKIP ]] ) && execute rm -rf $BUILD_DIR # clean up old build directory; support disabling it (Zach requested) + execute-always mkdir -p $TEMP_DIR + execute mkdir -p $BUILD_DIR + execute mkdir -p $SRC_DIR + execute mkdir -p $OPT_DIR + execute mkdir -p $VAR_DIR + execute mkdir -p $BIN_DIR + execute mkdir -p $VAR_DIR/log + execute mkdir -p $ETC_DIR + execute mkdir -p $LIB_DIR + execute mkdir -p $MONGODB_LOG_DIR + execute mkdir -p $MONGODB_DATA_DIR +} + +function ensure-which() { + if ! which ls &>/dev/null; then + while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}EOSIO compiler checks require the 'which' package: Would you like us to install it? (y/n)${COLOR_NC}" && read -p " " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) install-package which WETRUN; break;; + 1 | false | [Nn]* ) echo "${COLOR_RED}Please install the 'which' command before proceeding!${COLOR_NC}"; exit 1;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + fi +} + +# Prompt user for installation directory. +function install-directory-prompt() { + if [[ -z $INSTALL_LOCATION ]]; then + echo "No installation location was specified. Please provide the location where EOSIO should be installed." 
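The while/read/case loop that follows, like its many siblings in this helper file, implements one shared prompt convention: an empty answer re-asks the question, and 0/true/y as well as 1/false/n are all accepted, so the bats tests and non-interactive callers can pre-seed PROCEED. A stand-alone sketch of that convention as a reusable function (prompt-yn is hypothetical and not part of this diff):

function prompt-yn() {
    local answer
    while true; do
        read -p "$1 (y/n) " answer
        case $answer in
            "" ) echo "What would you like to do?";;
            0 | true | [Yy]* ) return 0;;
            1 | false | [Nn]* ) return 1;;
            * ) echo "Please type 'y' for yes or 'n' for no.";;
        esac
    done
}

prompt-yn "Do you wish to continue?" && echo "continuing" || echo "aborted"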
+ while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to use the default location, ${EOSIO_INSTALL_DIR}? (y/n)${COLOR_NC}" && read -p " " PROCEED + echo "" + case $PROCEED in + "" ) + echo "What would you like to do?";; + 0 | true | [Yy]* ) + break;; + 1 | false | [Nn]* ) + printf "Enter the desired installation location." && read -p " " EOSIO_INSTALL_DIR; + export EOSIO_INSTALL_DIR; + break;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + else + export EOSIO_INSTALL_DIR=${INSTALL_LOCATION} + fi + . ./scripts/.build_vars + echo "EOSIO will be installed to: ${EOSIO_INSTALL_DIR}" +} + +function previous-install-prompt() { + if [[ -d $EOSIO_INSTALL_DIR ]]; then + echo "EOSIO has already been installed into ${EOSIO_INSTALL_DIR}... It's suggested that you run eosio_uninstall.sh before re-running this script." + while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to proceed anyway? (y/n)${COLOR_NC}" && read -p " " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) break;; + 1 | false | [Nn]* ) exit;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + fi +} + +function resources() { + echo "${COLOR_CYAN}EOSIO website:${COLOR_NC} https://eos.io" + echo "${COLOR_CYAN}EOSIO Telegram channel:${COLOR_NC} https://t.me/EOSProject" + echo "${COLOR_CYAN}EOSIO resources:${COLOR_NC} https://eos.io/resources/" + echo "${COLOR_CYAN}EOSIO Stack Exchange:${COLOR_NC} https://eosio.stackexchange.com" +} + +function print_supported_linux_distros_and_exit() { + echo "On Linux the EOSIO build script only supports Amazon Linux, CentOS, and Ubuntu." + echo "Please install on a supported version of one of these Linux distributions." + echo "https://aws.amazon.com/amazon-linux-ami/" + echo "https://www.centos.org/" + echo "https://www.ubuntu.com/" + echo "Exiting now." + exit 1 +} + +function prompt-mongo-install() { + if $ENABLE_MONGO; then + while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}You have chosen to include MongoDB support. Do you want us to install MongoDB as well? (y/n)${COLOR_NC}" && read -p " " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) export INSTALL_MONGO=true; break;; + 1 | false | [Nn]* ) echo "${COLOR_RED} - Existing MongoDB will be used.${COLOR_NC}"; break;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + fi +} + +function ensure-compiler() { + # Support build-essential on Ubuntu + if [[ $NAME == "CentOS Linux" ]] || [[ $VERSION_ID == "16.04" ]] || ( $PIN_COMPILER && [[ $VERSION_ID == "18.04" ]] ); then + export CXX=${CXX:-'g++'} + export CC=${CC:-'gcc'} + fi + export CXX=${CXX:-'clang++'} + export CC=${CC:-'clang'} + if $PIN_COMPILER || [[ -f $CLANG_ROOT/bin/clang++ ]]; then + export PIN_COMPILER=true + export BUILD_CLANG=true + export CPP_COMP=$CLANG_ROOT/bin/clang++ + export CC_COMP=$CLANG_ROOT/bin/clang + export PATH=$CLANG_ROOT/bin:$PATH + elif [[ $PIN_COMPILER == false ]]; then + which $CXX &>/dev/null || ( echo "${COLOR_RED}Unable to find $CXX compiler: Pass in the -P option if you wish us to install it, or install a C++17 compiler and set \$CXX and \$CC to the proper binary locations. 
${COLOR_NC}"; exit 1 )
+        # readlink on mac differs from linux readlink (mac doesn't have -f)
+        [[ $ARCH == "Linux" ]] && READLINK_COMMAND="readlink -f" || READLINK_COMMAND="readlink"
+        COMPILER_TYPE=$( eval $READLINK_COMMAND $(which $CXX) || true )
+        if [[ $CXX =~ "clang" ]] || [[ $COMPILER_TYPE =~ "clang" ]]; then
+            if [[ $ARCH == "Darwin" ]]; then
+                ### Check for apple clang version 10 or higher
+                [[ $( $(which $CXX) --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1 ) -lt 10 ]] && export NO_CPP17=true
+            else
+                if [[ $( $(which $CXX) --version | cut -d ' ' -f 3 | head -n 1 | cut -d '.' -f1) =~ ^[0-9]+$ ]]; then # Check if the version message cut returns an integer
+                    [[ $( $(which $CXX) --version | cut -d ' ' -f 3 | head -n 1 | cut -d '.' -f1) -lt 6 ]] && export NO_CPP17=true
+                elif [[ $(clang --version | cut -d ' ' -f 4 | head -n 1 | cut -d '.' -f1) =~ ^[0-9]+$ ]]; then # Check if the version message cut returns an integer
+                    [[ $( $(which $CXX) --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1 ) -lt 6 ]] && export NO_CPP17=true
+                fi
+            fi
+        else
+            ## Check for GCC version 7 or higher
+            [[ $( $(which $CXX) -dumpversion | cut -d '.' -f 1 ) -lt 7 ]] && export NO_CPP17=true
+            if [[ $NO_CPP17 == false ]]; then # https://github.com/EOSIO/eos/issues/7402
+                while true; do
+                    echo "${COLOR_YELLOW}WARNING: Your GCC compiler ($CXX) is less performant than clang (https://github.com/EOSIO/eos/issues/7402). We suggest running the build script with -P, or installing your own clang and trying again.${COLOR_NC}"
+                    [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to proceed anyway? (y/n)?${COLOR_NC}" && read -p " " PROCEED
+                    case $PROCEED in
+                        "" ) echo "What would you like to do?";;
+                        0 | true | [Yy]* ) break;;
+                        1 | false | [Nn]* ) exit 1;;
+                        * ) echo "Please type 'y' for yes or 'n' for no.";;
+                    esac
+                done
+            fi
+        fi
+    fi
+    if $NO_CPP17; then
+        while true; do
+            echo "${COLOR_YELLOW}Unable to find C++17 support in ${CXX}!${COLOR_NC}"
+            echo "If you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing."
+            [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to download and build a C++17 compiler? (y/n)?${COLOR_NC}" && read -p " " PROCEED
+            case $PROCEED in
+                "" ) echo "What would you like to do?";;
+                0 | true | [Yy]* )
+                    export PIN_COMPILER=true
+                    export BUILD_CLANG=true
+                    export CPP_COMP=$CLANG_ROOT/bin/clang++
+                    export CC_COMP=$CLANG_ROOT/bin/clang
+                    export PATH=$CLANG_ROOT/bin:$PATH
+                    break;;
+                1 | false | [Nn]* ) echo "${COLOR_RED} - User aborted C++17 compiler installation!${COLOR_NC}"; exit 1;;
+                * ) echo "Please type 'y' for yes or 'n' for no.";;
+            esac
+        done
+    fi
+    $BUILD_CLANG && export PINNED_TOOLCHAIN="-DCMAKE_TOOLCHAIN_FILE='${BUILD_DIR}/pinned_toolchain.cmake'"
+    echo ""
+}
+
+function ensure-cmake() {
+    echo "${COLOR_CYAN}[Ensuring CMAKE installation]${COLOR_NC}"
+    if [[ ! -e "${CMAKE}" ]]; then
+        execute bash -c "cd $SRC_DIR && \
+        curl -LO https://cmake.org/files/v${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}/cmake-${CMAKE_VERSION}.tar.gz \
+        && tar -xzf cmake-${CMAKE_VERSION}.tar.gz \
+        && cd cmake-${CMAKE_VERSION} \
+        && ./bootstrap --prefix=${EOSIO_INSTALL_DIR} \
+        && make -j${JOBS} \
+        && make install \
+        && cd .. \
+        && rm -f cmake-${CMAKE_VERSION}.tar.gz"
+        [[ -z "${CMAKE}" ]] && export CMAKE="${BIN_DIR}/cmake"
+        echo " - CMAKE successfully installed @ ${CMAKE}"
+        echo ""
+    else
+        echo " - CMAKE found @ ${CMAKE}."
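+        # Descriptive note (inferred from the check above): an existing binary at
+        # ${CMAKE} is reused as-is; only its presence is verified, not its version.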
+ echo "" + fi +} + +function ensure-boost() { + [[ $ARCH == "Darwin" ]] && export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h + echo "${COLOR_CYAN}[Ensuring Boost $( echo $BOOST_VERSION | sed 's/_/./g' ) library installation]${COLOR_NC}" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 || true ) + if [[ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]]; then + B2_FLAGS="-q -j${JOBS} --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test install" + BOOTSTRAP_FLAGS="" + if [[ $ARCH == "Linux" ]] && $PIN_COMPILER; then + B2_FLAGS="toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG_ROOT}/include/c++/v1' linkflags='-stdlib=libc++' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j${JOBS} install" + BOOTSTRAP_FLAGS="--with-toolset=clang" + fi + execute bash -c "cd $SRC_DIR && \ + curl -LO https://dl.bintray.com/boostorg/release/$BOOST_VERSION_MAJOR.$BOOST_VERSION_MINOR.$BOOST_VERSION_PATCH/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh ${BOOTSTRAP_FLAGS} --prefix=$BOOST_ROOT \ + && ./b2 ${B2_FLAGS} \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION" + echo " - Boost library successfully installed @ ${BOOST_ROOT}" + echo "" + else + echo " - Boost library found with correct version @ ${BOOST_ROOT}" + echo "" + fi +} + +function ensure-llvm() { + echo "${COLOR_CYAN}[Ensuring LLVM 4 support]${COLOR_NC}" + if [[ ! -d $LLVM_ROOT ]]; then + if [[ $ARCH == "Darwin" ]]; then # Handle brew installed llvm@4 + execute ln -s /usr/local/opt/llvm@4 $LLVM_ROOT + echo " - LLVM successfully linked from /usr/local/opt/llvm@4 to ${LLVM_ROOT}" + else + if $PIN_COMPILER || $BUILD_CLANG; then + CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX='${LLVM_ROOT}' -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE='${BUILD_DIR}/pinned_toolchain.cmake' .." + else + if [[ $NAME == "Ubuntu" ]]; then + execute ln -s /usr/lib/llvm-4.0 $LLVM_ROOT + echo " - LLVM successfully linked from /usr/lib/llvm-4.0 to ${LLVM_ROOT}" + return 0 + fi + CMAKE_FLAGS="-G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=${LLVM_ROOT} -DLLVM_TARGETS_TO_BUILD='host' -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release .." + fi + execute bash -c "cd ${OPT_DIR} \ + && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ + && mkdir build \ + && cd build \ + && ${CMAKE} ${CMAKE_FLAGS} \ + && make -j${JOBS} \ + && make install" + echo " - LLVM successfully installed @ ${LLVM_ROOT}" + echo "" + fi + else + echo " - LLVM found @ ${LLVM_ROOT}." + echo "" + fi +} + + +function build-clang() { + if $BUILD_CLANG; then + echo "${COLOR_CYAN}[Ensuring Clang support]${COLOR_NC}" + if [[ ! 
-d $CLANG_ROOT ]]; then + execute bash -c "cd ${TEMP_DIR} \ + && rm -rf clang8 \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \ + && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT \ + && cd tools \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang \ + && git checkout $PINNED_COMPILER_CLANG_COMMIT \ + && patch -p2 < \"$REPO_ROOT/scripts/clang-devtoolset8-support.patch\" \ + && cd tools && mkdir extra && cd extra \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ + && cd ../../../../projects \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ + && cd ${TEMP_DIR}/clang8 \ + && mkdir build && cd build \ + && ${CMAKE} -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX='${CLANG_ROOT}' -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. 
\ + && make -j${JOBS} \ + && make install \ + && rm -rf ${TEMP_DIR}/clang8" + echo " - Clang 8 successfully installed @ ${CLANG_ROOT}" + echo "" + else + echo " - Clang 8 found @ ${CLANG_ROOT}" + echo "" + fi + export CXX=$CPP_COMP + export CC=$CC_COMP + fi +} diff --git a/scripts/helpers/general.sh b/scripts/helpers/general.sh new file mode 100755 index 00000000000..4fc4761fda1 --- /dev/null +++ b/scripts/helpers/general.sh @@ -0,0 +1,381 @@ +# Arrays should return with newlines so we can do something like "${output##*$'\n'}" to get the last line +IFS=$'\n' + +if [[ $- == *i* ]]; then # Disable if the shell isn't interactive (avoids: tput: No value for $TERM and no -T specified) + export COLOR_NC=$(tput sgr0) # No Color + export COLOR_RED=$(tput setaf 1) + export COLOR_GREEN=$(tput setaf 2) + export COLOR_YELLOW=$(tput setaf 3) + export COLOR_BLUE=$(tput setaf 4) + export COLOR_MAGENTA=$(tput setaf 5) + export COLOR_CYAN=$(tput setaf 6) + export COLOR_WHITE=$(tput setaf 7) +fi + +# Execution helpers; necessary for BATS testing and log output in buildkite + +function execute() { + $VERBOSE && echo "--- Executing: $@" + $DRYRUN || "$@" +} + +function execute-quiet() { + $VERBOSE && echo "--- Executing: $@ &>/dev/null" + $DRYRUN || "$@" &>/dev/null +} + +function execute-always() { + ORIGINAL_DRYRUN=$DRYRUN + DRYRUN=false + execute "$@" + DRYRUN=$ORIGINAL_DRYRUN +} + +function execute-without-verbose() { + ORIGINAL_VERBOSE=$VERBOSE + VERBOSE=false + execute "$@" + VERBOSE=$ORIGINAL_VERBOSE +} + +function ensure-git-clone() { + if [[ ! -e "${REPO_ROOT}/.git" ]]; then + echo "This build script only works with sources cloned from git" + exit 1 + fi +} + +function ensure-submodules-up-to-date() { + if [[ $DRYRUN == false ]] && [[ ! -z $(execute-without-verbose git submodule status --recursive | grep "^[+\-]") ]]; then + echo "${COLOR_RED}git submodules are not up to date: Please run the command 'git submodule update --init --recursive'.${COLOR_NC}" + exit 1 + fi +} + +function ensure-sudo() { + ( [[ $CURRENT_USER != "root" ]] && [[ -z $SUDO_LOCATION ]] ) && echo "${COLOR_RED}Please install the 'sudo' command before proceeding!${COLOR_NC}" && exit 1 + true 1>/dev/null # Needed +} + +function set_system_vars() { + if [[ $ARCH == "Darwin" ]]; then + export OS_VER=$(sw_vers -productVersion) + export OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) + export OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) + export OS_PATCH=$(echo "${OS_VER}" | cut -d'.' -f3) + export MEM_GIG=$(bc <<< "($(sysctl -in hw.memsize) / 1024000000)") + export DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) + export blksize=$(df . | head -1 | awk '{print $2}' | cut -d- -f1) + export gbfactor=$(( 1073741824 / blksize )) + export total_blks=$(df . | tail -1 | awk '{print $2}') + export avail_blks=$(df . | tail -1 | awk '{print $4}') + export DISK_TOTAL=$((total_blks / gbfactor )) + export DISK_AVAIL=$((avail_blks / gbfactor )) + else + export DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 ) + export DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) + export DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) + export MEM_GIG=$(( ( ( $(cat /proc/meminfo | grep MemTotal | awk '{print $2}') / 1000 ) / 1000 ) )) + export DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) + export DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) + fi + export JOBS=$(( MEM_GIG > CPU_CORES ? 
CPU_CORES : MEM_GIG )) +} + +function install-package() { + if [[ $ARCH == "Linux" ]]; then + EXECUTION_FUNCTION="execute" + [[ $2 == "WETRUN" ]] && EXECUTION_FUNCTION="execute-always" + ( [[ $2 =~ "--" ]] || [[ $3 =~ "--" ]] ) && OPTIONS="${2}${3}" + [[ $CURRENT_USER != "root" ]] && [[ ! -z $SUDO_LOCATION ]] && SUDO_COMMAND="$SUDO_LOCATION -E" + ( [[ $NAME =~ "Amazon Linux" ]] || [[ $NAME == "CentOS Linux" ]] ) && eval $EXECUTION_FUNCTION $SUDO_COMMAND $YUM $OPTIONS install -y $1 + ( [[ $NAME =~ "Ubuntu" ]] ) && eval $EXECUTION_FUNCTION $SUDO_COMMAND $APTGET $OPTIONS install -y $1 + fi + true # Required; Weird behavior without it +} + +function uninstall-package() { + if [[ $ARCH == "Linux" ]]; then + EXECUTION_FUNCTION="execute" + REMOVE="remove" + [[ $2 == "WETRUN" ]] && EXECUTION_FUNCTION="execute-always" + ( [[ $2 == "autoremove" ]] || [[ $3 == "autoremove" ]] ) && REMOVE="autoremove" + ( [[ $2 =~ "--" ]] || [[ $3 =~ "--" ]] ) && OPTIONS="${2}${3}" + [[ $CURRENT_USER != "root" ]] && [[ ! -z $SUDO_LOCATION ]] && SUDO_COMMAND="$SUDO_LOCATION -E" + # Check if the packages exist before uninstalling them. This speeds things up for tests. + ( ( [[ $NAME =~ "Amazon Linux" ]] || [[ $NAME == "CentOS Linux" ]] ) && [[ ! -z $(rpm -qa $1) ]] ) && eval $EXECUTION_FUNCTION $SUDO_COMMAND $YUM $OPTIONS $REMOVE -y $1 + ( [[ $NAME =~ "Ubuntu" ]] && $(dpkg -s $1 &>/dev/null) ) && eval $EXECUTION_FUNCTION $SUDO_COMMAND $APTGET $OPTIONS $REMOVE -y $1 + fi + true +} + +function ensure-homebrew() { + echo "${COLOR_CYAN}[Ensuring HomeBrew installation]${COLOR_NC}" + if ! BREW=$( command -v brew ); then + while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to install HomeBrew? (y/n)?${COLOR_NC}" && read -p " " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) + execute "${XCODESELECT}" --install 2>/dev/null || true + if ! execute "${RUBY}" -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"; then + echo "${COLOR_RED}Unable to install HomeBrew!${COLOR_NC}" && exit 1; + else BREW=$( command -v brew ); fi + break;; + 1 | false | [Nn]* ) echo "${COLOR_RED} - User aborted required HomeBrew installation.${COLOR_NC}"; exit 1;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + else + echo " - HomeBrew installation found @ ${BREW}" + fi +} + +function ensure-scl() { + echo "${COLOR_CYAN}[Ensuring installation of Centos Software Collections Repository]${COLOR_NC}" # Needed for rh-python36 + SCL=$( rpm -qa | grep -E 'centos-release-scl-[0-9].*' || true ) + if [[ -z "${SCL}" ]]; then + while true; do + [[ $NONINTERACTIVE == false ]] && read -p "${COLOR_YELLOW}Do you wish to install and enable the Centos Software Collections Repository? (y/n)?${COLOR_NC} " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) install-package centos-release-scl "--enablerepo=extras"; break;; + 1 | false | [Nn]* ) echo " - User aborted installation of required Centos Software Collections Repository."; exit 1;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + else + echo " - ${SCL} found." + fi +} + +function ensure-devtoolset() { + echo "${COLOR_CYAN}[Ensuring installation of devtoolset-8]${COLOR_NC}" + DEVTOOLSET=$( rpm -qa | grep -E 'devtoolset-8-[0-9].*' || true ) + if [[ -z "${DEVTOOLSET}" ]]; then + while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Not Found: Do you wish to install it? 
(y/n)?${COLOR_NC}" && read -p " " PROCEED
+            echo ""
+            case $PROCEED in
+                "" ) echo "What would you like to do?";;
+                0 | true | [Yy]* ) install-package devtoolset-8; break;;
+                1 | false | [Nn]* ) echo " - User aborted installation of devtoolset-8."; break;;
+                * ) echo "Please type 'y' for yes or 'n' for no.";;
+            esac
+        done
+    else
+        echo " - ${DEVTOOLSET} found."
+    fi
+}
+
+function ensure-build-essential() {
+    if [[ ! $(dpkg -s clang 2>/dev/null) ]]; then # build-essential is only needed when clang is not already installed
+        echo "${COLOR_CYAN}[Ensuring installation of build-essential]${COLOR_NC}"
+        BUILD_ESSENTIAL=$( dpkg -s build-essential | grep 'Package: build-essential' || true )
+        if [[ -z $BUILD_ESSENTIAL ]]; then
+            while true; do
+                [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to install it? (y/n)?${COLOR_NC}" && read -p " " PROCEED
+                echo ""
+                case $PROCEED in
+                    "" ) echo "What would you like to do?";;
+                    0 | true | [Yy]* )
+                        install-package build-essential
+                        echo " - ${COLOR_GREEN}Installed build-essential${COLOR_NC}"
+                        break;;
+                    1 | false | [Nn]* ) echo " - User aborted installation of build-essential."; break;;
+                    * ) echo "Please type 'y' for yes or 'n' for no.";;
+                esac
+            done
+        else
+            echo " - ${BUILD_ESSENTIAL} found."
+        fi
+    fi
+}
+
+function ensure-yum-packages() {
+    ( [[ -z "${1}" ]] || [[ ! -f "${1}" ]] ) && echo "\$1 must be the location of your dependency file!" && exit 1
+    DEPS_FILE="${TEMP_DIR}/$(basename ${1})"
+    # Create temp file so we can add to it
+    cat $1 > $DEPS_FILE
+    if [[ ! -z "${2}" ]]; then # Handle EXTRA_DEPS passed in and add them to temp DEPS_FILE
+        printf "\n" >> $DEPS_FILE # Avoid needing a new line at the end of deps files
+        OLDIFS="$IFS"; IFS=$''
+        _2=("$(echo $2 | sed 's/-qa /-qa\n/g')")
+        for ((i = 0; i < ${#_2[@]}; i++)); do echo "${_2[$i]}\n" | sed 's/-qa\\n/-qa/g' >> $DEPS_FILE; done
+    fi
+    while true; do
+        [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to update YUM repositories? (y/n)?${COLOR_NC}" && read -p " " PROCEED
+        echo ""
+        case $PROCEED in
+            "" ) echo "What would you like to do?";;
+            0 | true | [Yy]* ) execute eval $( [[ $CURRENT_USER == "root" ]] || echo $SUDO_LOCATION -E ) $YUM -y update; break;;
+            1 | false | [Nn]* ) echo " - Proceeding without update!"; break;;
+            * ) echo "Please type 'y' for yes or 'n' for no.";;
+        esac
+    done
+    echo "${COLOR_CYAN}[Ensuring package dependencies]${COLOR_NC}"
+    OLDIFS="$IFS"; IFS=$','
+    # || [[ -n "$testee" ]]; needed to see last line of deps file (https://stackoverflow.com/questions/12916352/shell-script-read-missing-last-line)
+    while read -r testee tester || [[ -n "$testee" ]]; do
+        if [[ ! -z $(eval $tester $testee) ]]; then
+            echo " - ${testee} ${COLOR_GREEN}found!${COLOR_NC}"
+        else
+            DEPS=$DEPS"${testee} "
+            echo " - ${testee} ${COLOR_RED}NOT${COLOR_NC} found."
+            (( COUNT+=1 ))
+        fi
+    done < $DEPS_FILE
+    IFS=$OLDIFS
+    OLDIFS="$IFS"; IFS=$' '
+    echo ""
+    if [[ $COUNT > 0 ]]; then
+        while true; do
+            [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to install missing dependencies?
(y/n)?${COLOR_NC}" && read -p " " PROCEED
+            echo ""
+            case $PROCEED in
+                "" ) echo "What would you like to do?";;
+                0 | true | [Yy]* )
+                    for DEP in $DEPS; do
+                        install-package $DEP
+                    done
+                    break;;
+                1 | false | [Nn]* ) echo " ${COLOR_RED}- User aborted installation of required dependencies.${COLOR_NC}"; exit;;
+                * ) echo "Please type 'y' for yes or 'n' for no.";;
+            esac
+        done
+        echo ""
+    else
+        echo "${COLOR_GREEN} - No required package dependencies to install.${COLOR_NC}"
+        echo ""
+    fi
+    IFS=$OLDIFS
+}
+
+function ensure-brew-packages() {
+    ( [[ -z "${1}" ]] || [[ ! -f "${1}" ]] ) && echo "\$1 must be the location of your dependency file!" && exit 1
+    DEPS_FILE="${TEMP_DIR}/$(basename ${1})"
+    # Create temp file so we can add to it
+    cat $1 > $DEPS_FILE
+    if [[ ! -z "${2}" ]]; then # Handle EXTRA_DEPS passed in and add them to temp DEPS_FILE
+        printf "\n" >> $DEPS_FILE # Avoid needing a new line at the end of deps files
+        OLDIFS="$IFS"; IFS=$''
+        _2=("$(echo $2 | sed 's/-s /-s\n/g')")
+        for ((i = 0; i < ${#_2[@]}; i++)); do echo "${_2[$i]}\n" | sed 's/-s\\n/-s/g' >> $DEPS_FILE; done
+    fi
+    echo "${COLOR_CYAN}[Ensuring HomeBrew dependencies]${COLOR_NC}"
+    OLDIFS="$IFS"; IFS=$','
+    # || [[ -n "$name" ]]; needed to see last line of deps file (https://stackoverflow.com/questions/12916352/shell-script-read-missing-last-line)
+    while read -r name path || [[ -n "$name" ]]; do
+        if [[ -f $path ]] || [[ -d $path ]]; then
+            echo " - ${name} ${COLOR_GREEN}found!${COLOR_NC}"
+            continue
+        fi
+        # resolve conflict with homebrew glibtool and apple/gnu installs of libtool
+        if [[ "${path}" == "/usr/local/bin/glibtool" ]]; then
+            if [[ -f "/usr/local/bin/libtool" ]]; then
+                echo " - ${name} ${COLOR_GREEN}found!${COLOR_NC}"
+                continue
+            fi
+        fi
+        DEPS=$DEPS"${name} "
+        echo " - ${name} ${COLOR_RED}NOT${COLOR_NC} found."
+        (( COUNT+=1 ))
+    done < $DEPS_FILE
+    if [[ $COUNT > 0 ]]; then
+        echo ""
+        while true; do
+            [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to install missing dependencies? (y/n)${COLOR_NC}" && read -p " " PROCEED
+            echo ""
+            case $PROCEED in
+                "" ) echo "What would you like to do?";;
+                0 | true | [Yy]* )
+                    execute "${XCODESELECT}" --install 2>/dev/null || true
+                    while true; do
+                        [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to update HomeBrew packages first? (y/n)${COLOR_NC}" && read -p " " PROCEED
+                        case $PROCEED in
+                            "" ) echo "What would you like to do?";;
+                            0 | true | [Yy]* ) echo "${COLOR_CYAN}[Updating HomeBrew]${COLOR_NC}" && execute brew update; break;;
+                            1 | false | [Nn]* ) echo " - Proceeding without update!"; break;;
+                            * ) echo "Please type 'y' for yes or 'n' for no.";;
+                        esac
+                    done
+                    execute brew tap eosio/eosio
+                    echo "${COLOR_CYAN}[Installing HomeBrew Dependencies]${COLOR_NC}"
+                    execute eval $BREW install $DEPS
+                    IFS="$OLDIFS"
+                    break;;
+                1 | false | [Nn]* ) echo " ${COLOR_RED}- User aborted installation of required dependencies.${COLOR_NC}"; exit;;
+                * ) echo "Please type 'y' for yes or 'n' for no.";;
+            esac
+        done
+    else
+        echo "${COLOR_GREEN} - No required package dependencies to install.${COLOR_NC}"
+        echo ""
+    fi
+}
+
+function apt-update-prompt() {
+    if [[ $NAME == "Ubuntu" ]]; then
+        while true; do # APT
+            [[ $NONINTERACTIVE == false ]] && read -p "${COLOR_YELLOW}Do you wish to update APT-GET repositories before proceeding?
(y/n)?${COLOR_NC} " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) execute-always $APTGET update; break;; + 1 | false | [Nn]* ) echo " - Proceeding without update!"; break;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + fi + true +} + +function ensure-apt-packages() { + ( [[ -z "${1}" ]] || [[ ! -f "${1}" ]] ) && echo "\$1 must be the location of your dependency file!" && exit 1 + DEPS_FILE="${TEMP_DIR}/$(basename ${1})" + # Create temp file so we can add to it + cat $1 > $DEPS_FILE + if [[ ! -z "${2}" ]]; then # Handle EXTRA_DEPS passed in and add them to temp DEPS_FILE + printf "\n" >> $DEPS_FILE # Avoid needing a new line at the end of deps files + OLDIFS="$IFS"; IFS=$'' + _2=("$(echo $2 | sed 's/-s /-s\n/g')") + for ((i = 0; i < ${#_2[@]}; i++)); do echo "${_2[$i]}" >> $DEPS_FILE; done + fi + echo "${COLOR_CYAN}[Ensuring package dependencies]${COLOR_NC}" + OLDIFS="$IFS"; IFS=$',' + # || [[ -n "$testee" ]]; needed to see last line of deps file (https://stackoverflow.com/questions/12916352/shell-script-read-missing-last-line) + while read -r testee tester || [[ -n "$testee" ]]; do + if [[ ! -z $(eval $tester $testee 2>/dev/null) ]]; then + echo " - ${testee} ${COLOR_GREEN}found!${COLOR_NC}" + else + DEPS=$DEPS"${testee} " + echo " - ${testee} ${COLOR_RED}NOT${COLOR_NC} found." + (( COUNT+=1 )) + fi + done < $DEPS_FILE + IFS=$OLDIFS + OLDIFS="$IFS"; IFS=$' ' + if [[ $COUNT > 0 ]]; then + echo "" + while true; do + [[ $NONINTERACTIVE == false ]] && printf "${COLOR_YELLOW}Do you wish to install missing dependencies? (y/n)?${COLOR_NC}" && read -p " " PROCEED + echo "" + case $PROCEED in + "" ) echo "What would you like to do?";; + 0 | true | [Yy]* ) + for DEP in $DEPS; do + install-package $DEP + done + break;; + 1 | false | [Nn]* ) echo " ${COLOR_RED}- User aborted installation of required dependencies.${COLOR_NC}"; exit;; + * ) echo "Please type 'y' for yes or 'n' for no.";; + esac + done + else + echo "${COLOR_GREEN} - No required package dependencies to install.${COLOR_NC}" + echo "" + fi + IFS=$OLDIFS +} \ No newline at end of file diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake new file mode 100644 index 00000000000..cdb517c5e1f --- /dev/null +++ b/scripts/pinned_toolchain.cmake @@ -0,0 +1,15 @@ +set(OPT_PATH @) +set(CMAKE_C_COMPILER_WORKS 1) +set(CMAKE_CXX_COMPILER_WORKS 1) +set(CMAKE_C_COMPILER ${OPT_PATH}/clang8/bin/clang) +set(CMAKE_CXX_COMPILER ${OPT_PATH}/clang8/bin/clang++) + +set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/clang8/include/c++/v1 /usr/local/include /usr/include) + +set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") + +set(CMAKE_EXE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") +set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") +set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") + +set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") diff --git a/testnet.md b/testnet.md deleted file mode 100644 index b3cc58d8471..00000000000 --- a/testnet.md +++ /dev/null @@ -1,201 +0,0 @@ -# EOS Testnet -To date, all work done to experiment with the EOS blockchain has been performed using a single instance of eosd hosting all 21 block producers. While this is a perfectly valid solution for validating features of the blockchain, developing new contracts, or whatever, it does not scale. Nor does it expose the sort of issues raised when contract and block data must be shared across multiple instances. 
Providing the ability to scale involves deploying multiple eosd nodes across many hosts and lining then into a peer-to-peer (p2p) network. Composing this network involves tailoring and distributing configuration files, coordinating starts and stops and other tasks. - -Doing this manually is a tedious task and easily error prone. Fortunately a solution is provided, in the form of the Launcher application, described below. - -## Testnet nodes, networks, and topology -Before getting into the details of the EOS testnet, lets clarify some terms. In this document I use the terms "host" and "machine" fairly interchangeably. A host generally boils down to a single IP address, although in practice it could have more. - -The next term is "node." A node is an instance of the eosd executable configured to serve as 0 or more producers. There is not a one-to-one mapping between nodes and hosts, a host may serve more than one node, but one node cannot span more than one host. - -I use "local network" to refer to any group of nodes, whether on a single host or several, are all close in that access does not have to leave a secure network environment. - -Finally there is the idea of distributed networks that involve remote hosts. These may be hosts on which you may not have direct access for starting and stopping eosd instances, but with whom you may wish to collaborate for setting up a decentralized testnet. - -### Localhost networks -Running a testnet on a single machine is the quickest way to get started. As you will see below, this is the default mode for the Launcher application. You can set up a localhost network immediately by simply telling the launcher how many producing or non-producing nodes to activate, and perhaps what type of network topology to use. - -The downside is that you need a lot of hardware when running many nodes on a single host. Also the multiple nodes will contend with each other in terms of CPU cycles, limiting true concurrency, and also localhost network performance is much different from inter-host performance, even with very high speed lans. - -### Distributed networks -The most representative model of the live net is to spread the eosd nodes across many hosts. The Launcher app is able to start distributed nodes by the use of bash scripts pushed through ssh. In this case additional configuration is required to replace configured references to "localhost" or "127.0.0.1" with the actual host name or ip addresses of the various peer machines. - -Launching a distributed testnet requires the operator to have ssh access to all the remote machines configured to authenticate without the need for a user entered password. This configuration is described in detail below. - -In cases where a testnet spans multiple remote networks, a common launcher defined configuration file may be shared externally between distributed operators, each being responsible for launching his or her own local network. - -Note that the Launcher will not push instances of eosd to the remote hosts, you must prepare the various test network hosts separately. - -### Network Topology -Network topology or "shape" describes how the nodes are connected in order to share transaction and block data, and requests for the same. The idea for varying network topology is that there is a trade off between the number of times a node must send a message reporting a new transaction or block, vs the number of times that message must be repeated to ensure all nodes know of it. 
- -The Launcher has definitions of two basic different network "shapes" based on inter-nodal connections, which can be selected by a command line option. If you wish to create your own custom network topology, you can do so by supplying a json formatted file. This file is typically the edited version of the template created by the launcher in "output" mode. - -#### Star network -![](https://github.com/EOSIO/eos/raw/master/star.png) -A "star" is intended to support a larger number of nodes in the testnet. In this case the number of peers connected to a node and the distribution of those nodes varies based on the number of nodes in the network. - -#### Mesh network -![](https://github.com/EOSIO/eos/raw/master/mesh.png) -In a "mesh" network, each node is connected to as many peer nodes as possible. - -#### Custom network shape -![](custom.png) -This is an example of a custom deployment where clusters of nodes are isolated except through a single crosslink. - -# The Launcher Application -To address the complexity implied by distributing multiple eosd nodes across a LAN or a wider network, the launcher application was created. - -Based on a handful of command line arguments the Launcher is able to compose per-node configuration files, distribute these files securely amongst the peer hosts, then start up the multiple instances of eosd. - -Eosd instances started this way have their output logged in individual text files. Finally the launcher application is also able to shut down some or all of the test network. - -## Running the Launcher application - -The launcher program is used to configure and deploy producing and non-producing eosd nodes that talk to each other using configured routes. The configuration for each node is stored in separate directories, permitting multiple nodes to be active on the same host, assuming the machine has sufficient memory and disk space for multiple eosd instances. The launcher makes use of multiple configuration sources in order to deploy a testnet. A handful of command line arguments can be used to set up simple local networks. - -To support deploying distributed networks, the launcher will read more detailed configuration from a JSON file. You can use the launcher to create a default JSON file based on the command line options you supply. Edit that file to substitute actual hostnames and other details -as needed, then rerun the launcher supplying this file. - -For the moment the launcher only activates platform-native nodes, dockerized nodes will be added later. It should be straight forward to use the generated configuration files with dockerized nodes. - -## Launcher command line arguments -Here is the current list of command line arguments recognized by the launcher. - -``` -launcher command line arguments: - -n [ --nodes ] arg (=1) total number of nodes to configure and - launch - -p [ --pnodes ] arg (=1) number of nodes that are producers - -d [ --delay ] arg (=0) number of seconds to wait before starting the next node. Used to simulate a person keying in a series of individual eosd startup command lines. - -s [ --shape ] arg (=star) network topology, use "star" - "mesh" or give a filename for custom - -g [ --genesis ] arg (="./genesis.json") - set the path to genesis.json - -o [ --output ] arg save a copy of the generated topology - in this file - --skip-signature EOSD does not require transaction - signatures. - -i [ --timestamp ] arg set the timestamp for the first block. 
- Use "now" to indicate the current time - -l [ --launch ] arg select a subset of nodes to launch. - Currently may be "all", "none", or - "local". If not set, the default is to - launch all unless an output file is - named, in which case it starts none. - -k [ --kill ] arg The launcher retrieves the previously - started process ids and signals each with the specified signum. Use 15 for a sigterm and 9 for sigkill. - -h [ --help ] print this list -``` -Note that if a testnet.json file is supplied as the `--shape` argument, then the `--nodes`, `--pnodes`, and `--genesis` arguments are all ignored. - -## The Generated Multihost Testnet Configuration File -This is the file generated by running the following command: - - `launcher --output [other options]` - -In this mode, the launcher does not activate any eosd instances, it produces a file of the given filename. This file is a JSON formatted template that provides an easy means of - -The object described in this file is composed of a helper for using ssl, and a collection of testnet node descriptors. The node descriptors are listed as name, value pairs. Note that the names serve a dual purpose acting as both the key in a map of node descriptors and as an alias for the node in the peer lists. For example: - -``` -{ - "ssh_helper": { - "ssh_cmd": "/usr/bin/ssh", - "scp_cmd": "/usr/bin/scp", - "ssh_identity": "phil", - "ssh_args": "-i ~phil/.ssh/id-sample" - }, -``` -The ssh helper fields are paths to ssh and scp, an identity if necessary, and any optional arguments. - -``` - "nodes": [[ - "testnet_0",{ - "genesis": "./genesis.json", - "remote": true, - "ssh_identity": "", - "ssh_args": "", - "eos_root_dir": "/home/phil/blockchain/eos", - "data_dir": "tn_data_0", - "hostname": "remoteserv", - "public_name": "remoteserv", - "p2p_port": 9876, - "http_port": 8888, - "filesize": 8192, - "keys": [{ - "public_key": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "wif_private_key": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" - } - ], - "peers": [ - "testnet_1", - "testnet_2", - "testnet_3", - "testnet_4", - "testnet_5" - ], - "producers": [ - "inita", - "initg", - "initm", - "inits" - ] - } - ],[ - "testnet_1",{ - -``` - -The rest of the testnet.json file is the collection of node descriptors. The fragment shown above was created with the command line `programs/launcher/launcher -p6 -s mesh -o testnet.json` and then edited to refer to a remote host named "remoteserv." - -### Elements Of The JSON File -This table describes all of the key/value pairs used in the testnet.json file. - -|Value | Description -| :------------ | :----------- -| ssh_helper | a set of values used to facilitate the use of SSH and SCP -| nodes | a collection of descriptors defining the eosd instances used to assemble this testnet. The names used as keys in this collection are also aliases used within as placeholders for peer nodes. - -|ssh_helper elements | Description -| :---------- | :------------ -| ssh_cmd | path to the local ssh command -| scp_cmd | path to the local scp command -| ssh_args | any additional command line arguments needed to successfully connect to remote peers -| ssh_identity | The user name to use when accessing the remote hosts - -|node elements | Description -| :-------- | :---------- -| genesis | path to the genesis.json file. This should be the same file for all members of the testnet. -| remote | specifies whether this node is in the local network or not. 
This flag ties in with the launch mode command line option (-l) to determine if the local launcher instance will attempt to start this node. -| ssh_identity | a per-node override of the general ssh_identity defined above. -| ssh_args | a per-node override of the general ssh_args -| eos_root_dir | specifies the directory into which all eosd artifacts are based. This is required for any hosts that are not the local host. -| data_dir | the root for the remaining node-specific settings below. -| hostname | the domain name for the server, or its IP address. -| public_name | possibly different from the hostname, this name will get substituted for the aliases when creating the per-node config.ini file's peer list. -| p2p_port | combined with the public name to identify the endpoint listed on for peer connections. When multiple nodes share a host, the p2p_port is automatically incremented for each node. -| http_port | defines the listen endpoint for the client API services -| filesize | sets the capacity in megabytes for the size of the blockchain backing store file. -| keys | specify the authentication tokens for this node. -| peers | this list indicates the other nodes in the network to which this one actively connects. Since this file may be edited to alter the hostname, public name, or p2p port values, the peers list here holds aliases for the actual endpoints eventually written to the individual config.ini files. -| producers | this list identifies which of the producers from the genesis.json file are held by this node. Note that the launcher uses a round-robin algorithm to spread the producer instances across the producing nodes. - -### Provisioning Distributed Servers -The ssh_helper section of the testnet.json file contains the ssh elements necessary to connect and issue commands to other servers. In addition to the ssh_helper section which provides access to global configuration settings, the per-node configuration may provide overriding identity and connection arguments. - -It is also necessary to provision the server by at least copying the eosd executable, and the genesis.json files to their appropriate locations relative to some named EOS root directory. For example, I defined the EOS root to be `/home/phil/blockchain/eos`. When run, the launcher will run through a variety of shell commands using ssh and finally using scp to copy a config.ini file to the appropriate data directory on the remote. - -## Runtime Artifacts -The launcher app creates a separate date and configuration directory for each node instance. This directory is named `tn_data_` with n ranging from 0 to the number of nodes being launched. - -| Per-Node File | Description -| :------------ | :---------- -| config.ini | The eosd configuration file. -| eosd.pid | The process ID of the running eosd instance. -| blockchain/* | The blockchain backing store -| blocks/* | The blockchain log store -| stderr.txt | The cerr output from eosd. -| stdout.txt | The cout output from eosd. - -A file called "last_run.json" contains hints for a later instance of the launcher to be able to kill local and remote nodes when run with -k 15. 
diff --git a/testnet.template b/testnet.template index f9e793a5c89..574d7ec9795 100644 --- a/testnet.template +++ b/testnet.template @@ -12,6 +12,11 @@ if [ -z "$biosport" ]; then biosport=9776 fi +bioscontractpath=$BIOS_CONTRACT_PATH +if [ -z "$bioscontractpath" ]; then + bioscontractpath="unittests/contracts/eosio.bios" +fi + wddir=eosio-ignition-wd wdaddr=localhost:8899 wdurl=http://$wdaddr @@ -35,6 +40,8 @@ mkdir $wddir step=1 echo Initializing ignition sequence at $(date) | tee $logfile +echo "FEATURE_DIGESTS: $FEATURE_DIGESTS" >> $logfile + echo "http-server-address = $wdaddr" > $wddir/config.ini programs/keosd/keosd --config-dir $wddir --data-dir $wddir 2> $wddir/wdlog.txt & @@ -75,7 +82,13 @@ wcmd create --to-console -n ignition # ------ DO NOT ALTER THE NEXT LINE ------- ###INSERT prodkeys -ecmd set contract eosio unittests/contracts/eosio.bios eosio.bios.wasm eosio.bios.abi +ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi + +# Preactivate all digests +for digest in $FEATURE_DIGESTS; +do +ecmd push action eosio activate "{\"feature_digest\":\"$digest\"}" -p eosio +done # Create required system accounts ecmd create key --to-console diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 58ef93a8b2b..5ccb452bd89 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -9,8 +9,6 @@ find_package(LLVM 4.0 REQUIRED CONFIG) link_directories(${LLVM_LIBRARY_DIR}) -set( CMAKE_CXX_STANDARD 14 ) - include_directories("${CMAKE_SOURCE_DIR}/plugins/wallet_plugin/include") file(GLOB UNIT_TESTS "*.cpp") @@ -22,7 +20,7 @@ target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include ${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include ${CMAKE_BINARY_DIR}/unittests/include/ ) - + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/core_symbol.py.in ${CMAKE_CURRENT_BINARY_DIR}/core_symbol.py) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/testUtils.py ${CMAKE_CURRENT_BINARY_DIR}/testUtils.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/WalletMgr.py ${CMAKE_CURRENT_BINARY_DIR}/WalletMgr.py COPYONLY) @@ -35,14 +33,20 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_irreversible_mode_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_irreversible_mode_test.py COPYONLY) 
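+# Descriptive note (inferred from the surrounding rules): the test scripts below are
+# copied into the build tree (COPYONLY) so ctest can invoke them from ${CMAKE_BINARY_DIR}.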
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_protocol_feature_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_multiple_version_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_multiple_version_protocol_feature_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/db_modes_test.sh ${CMAKE_CURRENT_BINARY_DIR}/db_modes_test.sh COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/prod_preactivation_test.py ${CMAKE_CURRENT_BINARY_DIR}/prod_preactivation_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version-label.sh ${CMAKE_CURRENT_BINARY_DIR}/version-label.sh COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose @@ -50,12 +54,8 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -64,10 +64,13 @@ if(BUILD_MONGO_DB_PLUGIN) set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) endif() +add_test(NAME producer-preactivate-feature-test COMMAND tests/prod_preactivation_test.py --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST producer-preactivate-feature-test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME nodeos_protocol_feature_test COMMAND tests/nodeos_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_protocol_feature_test PROPERTY LABELS nonparallelizable_tests) + add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) -set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -79,13 +82,12 @@ add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --cle set_property(TEST validate_dirty_db_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME version-label-test COMMAND tests/version-label.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_remote_lr_test COMMAND tests/nodeos_run_remote_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -100,12 +102,21 @@ set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) - add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) + +add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(nodeos_startup_catchup_lr_test PROPERTIES TIMEOUT 3000) +set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) + +set(ALTERNATE_VERSION_LABELS_FILE 
"${CMAKE_BINARY_DIR}/tests/multiversion_paths.conf") + +add_test(NAME nodeos_multiple_version_protocol_feature_mv_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py + -v --clean-run --dump-error-detail --alternate-version-labels-file ${ALTERNATE_VERSION_LABELS_FILE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_multiple_version_protocol_feature_mv_test PROPERTY LABELS mixed_version_tests) if(ENABLE_COVERAGE_TESTING) @@ -117,14 +128,14 @@ if(ENABLE_COVERAGE_TESTING) if(NOT LLVMCOV_PATH) message(FATAL_ERROR "llvm-cov not found! Aborting...") - endif() # NOT LCOV_PATH + endif() if(NOT GENHTML_PATH) message(FATAL_ERROR "genhtml not found! Aborting...") endif() # NOT GENHTML_PATH # no spaces allowed within tests list - set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|bnet_nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') + set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') set(ctest_exclude_tests 'nodeos_run_remote_test|nodeos_run_test-mongodb|distributed-transactions-remote-test|restart-scenarios-test_replay') # Setup target diff --git a/tests/Cluster.py b/tests/Cluster.py index 3bc0f215566..3ed13404e27 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -19,6 +19,21 @@ from Node import Node from WalletMgr import WalletMgr +# Protocol Feature Setup Policy +class PFSetupPolicy: + NONE = 0 + PREACTIVATE_FEATURE_ONLY = 1 + FULL = 2 # This will only happen if the cluster is bootstrapped (i.e. dontBootstrap == False) + @staticmethod + def hasPreactivateFeature(policy): + return policy == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ + policy == PFSetupPolicy.FULL + @staticmethod + def isValid(policy): + return policy == PFSetupPolicy.NONE or \ + policy == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ + policy == PFSetupPolicy.FULL + # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-public-methods class Cluster(object): @@ -30,8 +45,6 @@ class Cluster(object): __BiosPort=8788 __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" - __configDir="etc/eosio/" - __dataDir="var/lib/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. 
If not load the wallet plugin @@ -52,6 +65,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 """ self.accounts={} self.nodes={} + self.unstartedNodes=[] self.localCluster=localCluster self.wallet=None self.walletd=walletd @@ -83,6 +97,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.useBiosBootFile=False self.filesToCleanup=[] + self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): @@ -93,30 +108,76 @@ def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): def setWalletMgr(self, walletMgr): self.walletMgr=walletMgr + @staticmethod + def __defaultAlternateVersionLabels(): + """Return a labels dictionary with just the "current" label to path set.""" + labels={} + labels["current"]="./" + return labels + + def setAlternateVersionLabels(self, file): + """From the provided file return a dictionary of labels to paths.""" + Utils.Print("alternate file=%s" % (file)) + self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() + if file is None: + # only have "current" + return + if not os.path.exists(file): + Utils.errorExit("Alternate Version Labels file \"%s\" does not exist" % (file)) + with open(file, 'r') as f: + content=f.read() + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$', re.MULTILINE) + all=p.findall(content) + for match in all: + label=match[0] + path=match[1] + if label=="current": + Utils.Print("ERROR: cannot overwrite default label %s with path=%s" % (label, path)) + continue + self.alternateVersionLabels[label]=path + if Utils.Debug: Utils.Print("Version label \"%s\" maps to \"%s\"" % (label, path)) + # launch local nodes and set self.nodes # pylint: disable=too-many-locals # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, - totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", delay=1, onlyBios=False, dontBootstrap=False, + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, onlySetProds=False, + pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None, loadSystemContract=True): """Launch cluster. pnodes: producer nodes count - totalNodes: producer + non-producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. + totalNodes: producer + non-producer nodes + unstarted non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) delay: delay between individual nodes launch (as defined by launcher) delay 0 exposes a bootstrap bug where producer handover may have a large gap confusing nodes and bringing system to a halt. onlyBios: When true, only loads the bios contract (and not more full bootstrapping). - dontBootstrap: When true, don't do any bootstrapping at all. + dontBootstrap: When true, don't do any bootstrapping at all. 
(even bios is not uploaded) extraNodeosArgs: string of arguments to pass through to each nodoes instance (via --nodeos flag on launcher) useBiosBootFile: determines which of two bootstrap methods is used (when both dontBootstrap and onlyBios are false). The default value of true uses the bios_boot.sh file generated by the launcher. A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } + onlySetProds: Stop the bootstrap process after setting the producers (only if useBiosBootFile is false) + pfSetupPolicy: determine the protocol feature setup policy (none, preactivate_feature_only, or full) + alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. + associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. + loadSystemContract: indicate whether the eosio.system contract should be loaded (setting this to False causes useBiosBootFile to be treated as False) """ assert(isinstance(topo, str)) + assert PFSetupPolicy.isValid(pfSetupPolicy) + if alternateVersionLabelsFile is not None: + assert(isinstance(alternateVersionLabelsFile, str)) + elif associatedNodeLabels is not None: + associatedNodeLabels=None # need to supply alternateVersionLabelsFile to use labels + + if associatedNodeLabels is not None: + assert(isinstance(associatedNodeLabels, dict)) + Utils.Print("associatedNodeLabels size=%s" % (len(associatedNodeLabels))) + Utils.Print("alternateVersionLabelsFile=%s" % (alternateVersionLabelsFile)) if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." % (Utils.EosServerName)) @@ -127,6 +188,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if pnodes > totalNodes: raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d)." % (totalNodes, pnodes)) + if pnodes + unstartedNodes > totalNodes: + raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d) + unstartedNodes(%d)." % (totalNodes, pnodes, unstartedNodes)) if self.walletMgr is None: self.walletMgr=WalletMgr(True) @@ -136,6 +199,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne assert(isinstance(totalProducers, (str,int))) producerFlag="--producers %s" % (totalProducers) + self.setAlternateVersionLabels(alternateVersionLabelsFile) + tries = 30 while not Utils.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): Utils.Print("ERROR: Another process is listening on nodeos default port. 
wait...") @@ -144,14 +209,14 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne tries = tries - 1 time.sleep(2) - cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % ( + cmd="%s -p %s -n %s -d %s -i %s -f %s --unstarted-nodes %s" % ( Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], - p2pPlugin, producerFlag) + producerFlag, unstartedNodes) cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on \"*\" --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -161,6 +226,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne nodeosArgs += extraNodeosArgs if Utils.Debug: nodeosArgs += " --contracts-console" + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): + nodeosArgs += " --plugin eosio::producer_api_plugin" if nodeosArgs: cmdArr.append("--nodeos") @@ -181,6 +248,18 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--max-transaction-cpu-usage") cmdArr.append(str(150000000)) + if associatedNodeLabels is not None: + for nodeNum,label in associatedNodeLabels.items(): + assert(isinstance(nodeNum, (str,int))) + assert(isinstance(label, str)) + path=self.alternateVersionLabels.get(label) + if path is None: + Utils.errorExit("associatedNodeLabels passed in indicates label %s for node num %s, but it was not identified in %s" % (label, nodeNum, alternateVersionLabelsFile)) + cmdArr.append("--spcfc-inst-num") + cmdArr.append(str(nodeNum)) + cmdArr.append("--spcfc-inst-nodeos") + cmdArr.append(path) + # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": @@ -205,7 +284,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne # of two entries - [ , ] with first being the name and second being the node definition shapeFileNodes = shapeFileObject["nodes"] - numProducers=totalProducers if totalProducers is not None else totalNodes + numProducers=totalProducers if totalProducers is not None else (totalNodes - unstartedNodes) maxProducers=ord('z')-ord('a')+1 assert numProducers 0: + self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) + + biosNode=self.discoverBiosNode(timeout=Utils.systemWaitTimeout) + if not biosNode or not Utils.waitForBool(biosNode.checkPulse, Utils.systemWaitTimeout): + Utils.Print("ERROR: Bios node doesn't appear to be running...") + return False + if onlyBios: self.nodes=[biosNode] # ensure cluster node are inter-connected by ensuring everyone has block 1 @@ -334,24 +417,33 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("ERROR: Cluster doesn't seem to be in sync. 
Some nodes missing block 1") return False + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): + Utils.Print("Activate Preactivate Feature.") + biosNode.activatePreactivateFeature() + if dontBootstrap: Utils.Print("Skipping bootstrap.") + self.biosNode=biosNode return True Utils.Print("Bootstrap cluster.") + if not loadSystemContract: + useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) + self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False else: self.useBiosBootFile=True - self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr) + self.biosNode=self.bios_bootstrap(biosNode, startedNodes, pfSetupPolicy) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False - self.discoverBiosNodePid() + if self.biosNode is None: + Utils.Print("ERROR: Bootstrap failed.") + return False # validate iniX accounts can be retrieved @@ -440,7 +532,7 @@ def waitOnClusterSync(self, timeout=None, blockType=BlockType.head, blockAdvanci assert(len(self.nodes) > 0) node=self.nodes[0] targetBlockNum=node.getBlockNum(blockType) #retrieve node 0's head or irrevercible block number - targetBlockNum+=blockAdvancing + targetBlockNum+=blockAdvancing if Utils.Debug: Utils.Print("%s block number on root node: %d" % (blockType.type, targetBlockNum)) if targetBlockNum == -1: @@ -452,18 +544,28 @@ def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None, blockType=Bloc """Wait for all nodes to have targetBlockNum finalized.""" assert(self.nodes) - def doNodesHaveBlockNum(nodes, targetBlockNum, blockType): + def doNodesHaveBlockNum(nodes, targetBlockNum, blockType, printCount): + ret=True for node in nodes: try: if (not node.killed) and (not node.isBlockPresent(targetBlockNum, blockType=blockType)): - return False + ret=False + break except (TypeError) as _: # This can happen if client connects before server is listening - return False + ret=False + break - return True + printCount+=1 + if Utils.Debug and not ret and printCount%5==0: + blockNums=[] + for i in range(0, len(nodes)): + blockNums.append(nodes[i].getBlockNum()) + Utils.Print("Cluster still not in sync, head blocks for nodes: [ %s ]" % (", ".join(blockNums))) + return ret - lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType) + printCount=0 + lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType, printCount) ret=Utils.waitForBool(lam, timeout) return ret @@ -584,6 +686,16 @@ def getNode(self, nodeId=0, exitOnError=True): def getNodes(self): return self.nodes + def launchUnstarted(self, numToLaunch=1, cachePopen=False): + assert(isinstance(numToLaunch, int)) + assert(numToLaunch>0) + launchList=self.unstartedNodes[:numToLaunch] + del self.unstartedNodes[:numToLaunch] + for node in launchList: + # the node number is indexed off of the started nodes list + node.launchUnstarted(len(self.nodes), cachePopen=cachePopen) + self.nodes.append(node) + # Spread funds across accounts with transactions spread through cluster nodes. 
# Validate transactions are synchronized on root node
    def spreadFunds(self, source, accounts, amount=1):
@@ -750,15 +862,6 @@ def nodeNameToId(name):
         m=re.search(r"node_([\d]+)", name)
         return int(m.group(1))
-    @staticmethod
-    def nodeExtensionToName(ext):
-        r"""Convert node extension (bios, 0, 1, etc) to node name. """
-        prefix="node_"
-        if ext == "bios":
-            return prefix + ext
-
-        return "node_%02d" % (ext)
-
     @staticmethod
     def parseProducerKeys(configFile, nodeName):
         """Parse node config file for producer keys. Returns dictionary. (Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string) and public key(string)))."""
@@ -796,7 +899,7 @@ def parseProducerKeys(configFile, nodeName):
     def parseProducers(nodeNum):
         """Parse node config file for producers."""
-        configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini"
+        configFile=Utils.getNodeConfigDir(nodeNum, "config.ini")
         if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
         configStr=None
         with open(configFile, 'r') as f:
@@ -814,19 +917,19 @@ def parseProducers(nodeNum):
     def parseClusterKeys(totalNodes):
         """Parse cluster config file. Updates producer keys data members."""
-        nodeName=Cluster.nodeExtensionToName("bios")
-        configFile=Cluster.__configDir + nodeName + "/config.ini"
+        configFile=Utils.getNodeConfigDir("bios", "config.ini")
         if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
+        nodeName=Utils.nodeExtensionToName("bios")
         producerKeys=Cluster.parseProducerKeys(configFile, nodeName)
         if producerKeys is None:
             Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.")
             return None
         for i in range(0, totalNodes):
-            nodeName=Cluster.nodeExtensionToName(i)
-            configFile=Cluster.__configDir + nodeName + "/config.ini"
+            configFile=Utils.getNodeConfigDir(i, "config.ini")
             if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
+            nodeName=Utils.nodeExtensionToName(i)
             keys=Cluster.parseProducerKeys(configFile, nodeName)
             if keys is not None:
                 producerKeys.update(keys)
@@ -834,19 +937,27 @@ def parseClusterKeys(totalNodes):
         return producerKeys
-    @staticmethod
-    def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False):
+    def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False):
         """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher."""
         Utils.Print("Starting cluster bootstrap.")
-        biosNode=Node(biosHost, biosPort, walletMgr=walletMgr)
-        if not biosNode.checkPulse():
-            Utils.Print("ERROR: Bios node doesn't appear to be running...")
-            return None
+        assert PFSetupPolicy.isValid(pfSetupPolicy)
         cmd="bash bios_boot.sh"
         if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
+        env = {
+            "BIOS_CONTRACT_PATH": "unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios",
+            "FEATURE_DIGESTS": ""
+        }
+        if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy):
+            env["BIOS_CONTRACT_PATH"] = "unittests/contracts/eosio.bios"
+
+        if pfSetupPolicy == PFSetupPolicy.FULL:
+            allBuiltinProtocolFeatureDigests = biosNode.getAllBuiltinFeatureDigestsToPreactivate()
+            env["FEATURE_DIGESTS"] = " ".join(allBuiltinProtocolFeatureDigests)
+            Utils.Print("Set FEATURE_DIGESTS to: %s" % env["FEATURE_DIGESTS"])
+
+        if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env=env):
             if not silent: Utils.Print("ERROR: bios_boot.sh failed.")
             return None
@@ -864,14
+975,14 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): Utils.Print("ERROR: Failed to parse private keys from cluster config files.") return None - walletMgr.killall() - walletMgr.cleanup() + self.walletMgr.killall() + self.walletMgr.cleanup() - if not walletMgr.launch(): + if not self.walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - ignWallet=walletMgr.create("ignition") + ignWallet=self.walletMgr.create("ignition") if ignWallet is None: Utils.Print("ERROR: Failed to create ignition wallet.") return None @@ -885,7 +996,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): eosioAccount.activePublicKey=eosioKeys["public"] producerKeys.pop(eosioName) - if not walletMgr.importKey(eosioAccount, ignWallet): + if not self.walletMgr.importKey(eosioAccount, ignWallet): Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) return None @@ -915,20 +1026,15 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): return biosNode - @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): + def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False, loadSystemContract=True): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" Utils.Print("Starting cluster bootstrap.") + assert PFSetupPolicy.isValid(pfSetupPolicy) if totalProducers is None: totalProducers=totalNodes - biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) - if not biosNode.checkPulse(): - Utils.Print("ERROR: Bios node doesn't appear to be running...") - return None - producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node if producerKeys is None: @@ -938,14 +1044,14 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalProducers+1,len(producerKeys))) return None - walletMgr.killall() - walletMgr.cleanup() + self.walletMgr.killall() + self.walletMgr.cleanup() - if not walletMgr.launch(): + if not self.walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - ignWallet=walletMgr.create("ignition") + ignWallet=self.walletMgr.create("ignition") eosioName="eosio" eosioKeys=producerKeys[eosioName] @@ -955,12 +1061,16 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM eosioAccount.activePrivateKey=eosioKeys["private"] eosioAccount.activePublicKey=eosioKeys["public"] - if not walletMgr.importKey(eosioAccount, ignWallet): + if not self.walletMgr.importKey(eosioAccount, ignWallet): Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." 
% (eosioName)) return None contract="eosio.bios" contractDir="unittests/contracts/%s" % (contract) + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): + contractDir="unittests/contracts/%s" % (contract) + else: + contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) Utils.Print("Publish %s contract" % (contract)) @@ -969,6 +1079,9 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM Utils.Print("ERROR: Failed to publish contract %s." % (contract)) return None + if pfSetupPolicy == PFSetupPolicy.FULL: + biosNode.preactivateAllBuiltinProtocolFeature() + Node.validateTransaction(trans) Utils.Print("Creating accounts: %s " % ", ".join(producerKeys.keys())) @@ -1049,6 +1162,8 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM Utils.Print("ERROR: Block production handover failed.") return None + if onlySetProds: return biosNode + eosioTokenAccount=copy.deepcopy(eosioAccount) eosioTokenAccount.name="eosio.token" trans=biosNode.createAccount(eosioTokenAccount, eosioAccount, 0) @@ -1097,7 +1212,7 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM contract=eosioTokenAccount.name Utils.Print("push create action to %s contract" % (contract)) action="create" - data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\",\"can_freeze\":\"0\",\"can_recall\":\"0\",\"can_whitelist\":\"0\"}" % (eosioTokenAccount.name, CORE_SYMBOL) + data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\"}" % (eosioAccount.name, CORE_SYMBOL) opts="--permission %s@active" % (contract) trans=biosNode.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: @@ -1114,7 +1229,7 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM Utils.Print("push issue action to %s contract" % (contract)) action="issue" data="{\"to\":\"%s\",\"quantity\":\"1000000000.0000 %s\",\"memo\":\"initial issue\"}" % (eosioAccount.name, CORE_SYMBOL) - opts="--permission %s@active" % (contract) + opts="--permission %s@active" % (eosioAccount.name) trans=biosNode.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: Utils.Print("ERROR: Failed to push issue action to eosio contract.") @@ -1138,17 +1253,18 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM (expectedAmount, actualAmount)) return None - contract="eosio.system" - contractDir="unittests/contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None + if loadSystemContract: + contract="eosio.system" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - Node.validateTransaction(trans) + Node.validateTransaction(trans) initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) Utils.Print("Transfer initial fund %s to individual accounts." 
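A side note on the two corrected pushMessage calls above: rendered out (assuming CORE_SYMBOL is "SYS"), the payloads and authorizations look like this, with create authorized by the token contract account and issue by the issuer:

```python
# What the create/issue calls above expand to; CORE_SYMBOL == "SYS" is assumed.
create_data = '{"issuer":"eosio","maximum_supply":"1000000000.0000 SYS"}'
create_opts = "--permission eosio.token@active"  # the contract account authorizes create
issue_data  = '{"to":"eosio","quantity":"1000000000.0000 SYS","memo":"initial issue"}'
issue_opts  = "--permission eosio@active"        # the issuer authorizes issue
```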
% (initialFunds)) @@ -1198,7 +1314,7 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) + dataLocation=Utils.getNodeDataDir(nodeInstance) return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances @@ -1216,27 +1332,40 @@ def discoverLocalNodes(self, totalNodes, timeout=None): psOutDisplay=psOut[:6660]+"..." if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=Cluster.pgrepEosServerPattern(i) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + instance=self.discoverLocalNode(i, psOut, timeout) + if instance is None: break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes - def discoverBiosNodePid(self, timeout=None): + # Populate a node matched to actual running instance + def discoverLocalNode(self, nodeNum, psOut=None, timeout=None): + if psOut is None: + psOut=Cluster.pgrepEosServers(timeout) + if psOut is None: + Utils.Print("ERROR: No nodes discovered.") + return None + pattern=Cluster.pgrepEosServerPattern(nodeNum) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + return None + instance=Node(self.host, self.port + nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Node>", instance) + return instance + + def discoverBiosNode(self, timeout=None): psOut=Cluster.pgrepEosServers(timeout=timeout) pattern=Cluster.pgrepEosServerPattern("bios") Utils.Print("pattern={\n%s\n}, psOut=\n%s\n" % (pattern,psOut)) m=re.search(pattern, psOut, re.MULTILINE) if m is None: Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) + return None else: - self.biosNode.pid=int(m.group(1)) + return Node(Cluster.__BiosHost, Cluster.__BiosPort, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr) # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): @@ -1292,20 +1421,20 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Cluster.nodeExtensionToName("bios"), "config.ini") + fileName=Utils.getNodeConfigDir("bios", "config.ini") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + configLocation=Utils.getNodeConfigDir(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + path=Utils.getNodeDataDir(i) fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) @@ -1379,9 +1508,9 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob(Cluster.__dataDir + "node_*"): + for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob(Cluster.__configDir + "node_*"): + for f in glob.glob(Utils.ConfigDir + "node_*"): shutil.rmtree(f) for f in self.filesToCleanup: @@ -1420,6 +1549,23 @@ def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000): return True + def discoverUnstartedLocalNodes(self, unstartedNodes, totalNodes): + unstarted=[] + firstUnstartedNode=totalNodes-unstartedNodes + for nodeId in range(firstUnstartedNode, totalNodes): + unstarted.append(self.discoverUnstartedLocalNode(nodeId)) + return unstarted + + def discoverUnstartedLocalNode(self, nodeId): + startFile=Node.unstartedFile(nodeId) + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("unstarted local node cmd: %s" % (cmd)) + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$') + instance=Node(self.host, port=self.port+nodeId, pid=None, cmd=cmd, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Unstarted Node>", instance) + return instance + def getInfos(self, silentErrors=False, exitOnError=False): infos=[] for node in self.nodes: @@ -1430,7 +1576,7 @@ def getInfos(self, silentErrors=False, exitOnError=False): def reportStatus(self): if hasattr(self, "biosNode") and self.biosNode is not None: self.biosNode.reportStatus() - if hasattr(self, "nodes"): + if hasattr(self, "nodes"): for node in self.nodes: try: node.reportStatus() @@ -1454,7 +1600,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + blockLogDir=Utils.getNodeDataDir(nodeExtension, "blocks") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): @@ -1523,10 +1669,10 @@ def identifyCommon(blockLogs, blockNameExtensions, first, last): commonBlockLogs=[] 
commonBlockNameExtensions=[] for i in range(numNodes): - if (len(blockLogs[i]) >= last): + if (len(blockLogs[i]) >= last): commonBlockLogs.append(blockLogs[i][first:last]) commonBlockNameExtensions.append(blockNameExtensions[i]) - return (commonBlockLogs,commonBlockNameExtensions) + return (commonBlockLogs,commonBlockNameExtensions) # compare the contents of the blockLogs for the given common block number span def compareCommon(blockLogs, blockNameExtensions, first, last): @@ -1544,8 +1690,8 @@ def compareCommon(blockLogs, blockNameExtensions, first, last): if Utils.Debug: Utils.Print("context=%s" % (context)) ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) if ret is not None: - blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" - blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" + blockLogDir1=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" + blockLogDir2=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" Utils.Print(Utils.FileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) Utils.Print(Utils.FileDivider) @@ -1566,4 +1712,3 @@ def stripValues(lowestMaxes,greaterThan): while len(lowestMaxes)>0 and compareCommon(blockLogs, blockNameExtensions, first, lowestMaxes[0]): first=lowestMaxes[0]+1 lowestMaxes=stripValues(lowestMaxes,lowestMaxes[0]) - diff --git a/tests/Node.py b/tests/Node.py index 1c01893ceca..343308be2c6 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -62,7 +62,7 @@ def eosClientArgs(self): def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) - return "Host: %s, Port:%d" % (self.host, self.port) + return "Host: %s, Port:%d, Pid:%s" % (self.host, self.port, self.pid) @staticmethod def validateTransaction(trans): @@ -558,7 +558,7 @@ def getEosAccount(self, name, exitOnError=False, returnType=ReturnType.json, avo msg="( getEosAccount(name=%s) )" % (name); return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg, returnType=returnType) else: - assert returnType == ReturnType.json, "MongoDB only supports a returnType of ReturnType.json" + assert returnType == ReturnType.json, "MongoDB only supports a returnType of ReturnType.json" return self.getEosAccountFromDb(name, exitOnError=exitOnError) def getEosAccountFromDb(self, name, exitOnError=False): @@ -674,9 +674,25 @@ def waitForNextBlock(self, timeout=None, blockType=BlockType.head): ret=Utils.waitForBool(lam, timeout) return ret - def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head): + def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head, reportInterval=None): lam = lambda: self.getBlockNum(blockType=blockType) > blockNum - ret=Utils.waitForBool(lam, timeout) + blockDesc = "head" if blockType == BlockType.head else "LIB" + count = 0 + + class WaitReporter: + def __init__(self, node, reportInterval): + self.count = 0 + self.node = node + self.reportInterval = reportInterval + + def __call__(self): + self.count += 1 + if self.count % self.reportInterval == 0: + info = self.node.getInfo() + Utils.Print("Waiting on %s block num %d, get info = {\n%s\n}" % (blockDesc, blockNum, info)) + + reporter = WaitReporter(self, reportInterval) if reportInterval is not None else None + ret=Utils.waitForBool(lam, timeout, reporter=reporter) return ret def 
waitForIrreversibleBlock(self, blockNum, timeout=None, blockType=BlockType.head): @@ -1075,8 +1091,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head assert(isinstance(blockType, BlockType)) assert(isinstance(returnType, ReturnType)) basedOnLib="true" if blockType==BlockType.lib else "false" - cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ - (self.endpointHttp, producer, whereInSequence, basedOnLib) + payload="{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }" % (producer, whereInSequence, basedOnLib) + return self.processCurlCmd("test_control", "kill_node_on_producer", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + cmd="curl %s/v1/%s/%s -d '%s' -X POST -H \"Content-Type: application/json\"" % \ + (self.endpointHttp, resource, command, payload) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) rtn=None start=time.perf_counter() @@ -1091,6 +1111,8 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head if Utils.Debug: end=time.perf_counter() Utils.Print("cmd Duration: %.3f sec" % (end-start)) + printReturn=json.dumps(rtn) if returnType==ReturnType.json else rtn + Utils.Print("cmd returned: %s" % (printReturn)) except subprocess.CalledProcessError as ex: if not silentErrors: end=time.perf_counter() @@ -1113,6 +1135,23 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head return rtn + def txnGenCreateTestAccounts(self, genAccount, genKey, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(genAccount, str)) + assert(isinstance(genKey, str)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", \"%s\" ]" % (genAccount, genKey) + return self.processCurlCmd("txn_test_gen", "create_test_accounts", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def txnGenStart(self, salt, period, batchSize, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(salt, str)) + assert(isinstance(period, int)) + assert(isinstance(batchSize, int)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", %d, %d ]" % (salt, period, batchSize) + return self.processCurlCmd("txn_test_gen", "start_generation", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False): if not waitForTransBlock: return trans @@ -1198,9 +1237,13 @@ def kill(self, killSignal): if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd)) assert(self.pid is not None) try: - os.kill(self.pid, killSignal) + if self.popenProc is not None: + self.popenProc.send_signal(killSignal) + self.popenProc.wait() + else: + os.kill(self.pid, killSignal) except OSError as ex: - Utils.Print("ERROR: Failed to kill node (%d)." % (self.cmd), ex) + Utils.Print("ERROR: Failed to kill node (%s)." 
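Since processCurlCmd generalizes the old curl handling, the txn_test_gen wrappers above can be driven directly from a test. A hedged sketch (the key is a placeholder, and the target node must run with --plugin eosio::txn_test_gen_plugin):

```python
# Hypothetical driver for the txn_test_gen wrappers above.
node = cluster.getNode(0)
creatorKey = "5K..."  # placeholder private key for the creator account

# create_test_accounts expects [creator_account_name, creator_private_key].
node.txnGenCreateTestAccounts("eosio", creatorKey, silentErrors=False)

# start_generation expects [salt, period_ms, batch_size]; values illustrative.
node.txnGenStart("somesalt", 20, 10, silentErrors=False)
```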
% (self.cmd), ex)
             return False
         # wait for kill validation
@@ -1220,16 +1263,20 @@ def myFunc():
         self.killed=True
         return True
-    def interruptAndVerifyExitStatus(self):
+    def interruptAndVerifyExitStatus(self, timeout=15):
         if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd))
         assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd)
         self.popenProc.send_signal(signal.SIGINT)
         try:
-            outs, _ = self.popenProc.communicate(timeout=15)
+            outs, _ = self.popenProc.communicate(timeout=timeout)
             assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode)
         except subprocess.TimeoutExpired:
             Utils.errorExit("Terminate call failed on node: %s" % (self.cmd))
+        # mark node as killed
+        self.pid=None
+        self.killed=True
+
     def verifyAlive(self, silent=False):
         if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd))
         if self.killed or self.pid is None:
@@ -1295,20 +1342,24 @@ def getNextCleanProductionCycle(self, trans):
     # TBD: make nodeId an internal property
     # pylint: disable=too-many-locals
-    def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False):
+    # If nodeosPath is equal to None, it will use the existing nodeos path
+    def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False, nodeosPath=None):
         assert(self.pid is None)
         assert(self.killed)
+        assert isinstance(nodeId, int) or (isinstance(nodeId, str) and nodeId == "bios"), "Invalid Node ID is passed"
-        if Utils.Debug: Utils.Print("Launching node process, Id: %d" % (nodeId))
+        if Utils.Debug: Utils.Print("Launching node process, Id: {}".format(nodeId))
         cmdArr=[]
-        myCmd=self.cmd
+        splittedCmd=self.cmd.split()
+        if nodeosPath: splittedCmd[0] = nodeosPath
+        myCmd=" ".join(splittedCmd)
         toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {}
         if not newChain:
             skip=False
             swapValue=None
-            for i in self.cmd.split():
+            for i in splittedCmd:
                 if Utils.Debug: Utils.Print("\"%s\"" % (i))
                 if skip:
                     skip=False
@@ -1329,23 +1380,10 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim
             for k,v in toAddOrSwap.items():
                 cmdArr.append(k)
                 cmdArr.append(v)
-            myCmd=" ".join(cmdArr)
-        dataDir="var/lib/node_%02d" % (nodeId)
-        dt = datetime.datetime.now()
-        dateStr="%d_%02d_%02d_%02d_%02d_%02d" % (
-            dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
-        stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
-        stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
-        with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
-            cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
-            Utils.Print("cmd: %s" % (cmd))
-            popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
-            if cachePopen:
-                self.popenProc=popen
-            self.pid=popen.pid
-            if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
+        cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
+        self.launchCmd(cmd, nodeId, cachePopen)
         def isNodeAlive():
             """wait for node to be responsive."""
@@ -1355,11 +1393,15 @@ def isNodeAlive():
                 pass
             return False
-        isAlive=Utils.waitForBool(isNodeAlive, timeout)
+        isAlive=Utils.waitForBool(isNodeAlive, timeout, sleepTime=1)
         if isAlive:
             Utils.Print("Node relaunch was successful.")
         else:
Utils.Print("ERROR: Node relaunch Failed.") + # Ensure the node process is really killed + if self.popenProc: + self.popenProc.send_signal(signal.SIGTERM) + self.popenProc.wait() self.pid=None return False @@ -1367,6 +1409,32 @@ def isNodeAlive(): self.killed=False return True + @staticmethod + def unstartedFile(nodeId): + assert(isinstance(nodeId, int)) + startFile=Utils.getNodeDataDir(nodeId, "start.cmd") + if not os.path.exists(startFile): + Utils.errorExit("Cannot find unstarted node since %s file does not exist" % startFile) + return startFile + + def launchUnstarted(self, nodeId, cachePopen=False): + Utils.Print("launchUnstarted cmd: %s" % (self.cmd)) + self.launchCmd(self.cmd, nodeId, cachePopen) + + def launchCmd(self, cmd, nodeId, cachePopen=False): + dataDir=Utils.getNodeDataDir(nodeId) + dt = datetime.datetime.now() + dateStr=Utils.getDateString(dt) + stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) + stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) + with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: + Utils.Print("cmd: %s" % (cmd)) + popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) + if cachePopen: + self.popenProc=popen + self.pid=popen.pid + if Utils.Debug: Utils.Print("start Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans)) @@ -1398,3 +1466,110 @@ def reportStatus(self): status="last getInfo returned None" if not self.infoValid else "at last call to getInfo" Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status)) Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status)) + + # Require producer_api_plugin + def scheduleProtocolFeatureActivations(self, featureDigests=[]): + param = { "protocol_features_to_activate": featureDigests } + self.processCurlCmd("producer", "schedule_protocol_feature_activations", json.dumps(param)) + + # Require producer_api_plugin + def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatable=False): + param = { + "exclude_disabled": excludeDisabled, + "exclude_unactivatable": excludeUnactivatable + } + res = self.processCurlCmd("producer", "get_supported_protocol_features", json.dumps(param)) + return res + + # This will return supported protocol features in a dict (feature codename as the key), i.e. 
+    # {
+    #     "PREACTIVATE_FEATURE": {...},
+    #     "ONLY_LINK_TO_EXISTING_PERMISSION": {...},
+    # }
+    # Requires producer_api_plugin
+    def getSupportedProtocolFeatureDict(self, excludeDisabled=False, excludeUnactivatable=False):
+        protocolFeatureDigestDict = {}
+        supportedProtocolFeatures = self.getSupportedProtocolFeatures(excludeDisabled, excludeUnactivatable)
+        for protocolFeature in supportedProtocolFeatures:
+            for spec in protocolFeature["specification"]:
+                if (spec["name"] == "builtin_feature_codename"):
+                    codename = spec["value"]
+                    protocolFeatureDigestDict[codename] = protocolFeature
+                    break
+        return protocolFeatureDigestDict
+
+    def waitForHeadToAdvance(self, timeout=6):
+        currentHead = self.getHeadBlockNum()
+        def isHeadAdvancing():
+            return self.getHeadBlockNum() > currentHead
+        return Utils.waitForBool(isHeadAdvancing, timeout)
+
+    def waitForLibToAdvance(self, timeout=30):
+        currentLib = self.getIrreversibleBlockNum()
+        def isLibAdvancing():
+            return self.getIrreversibleBlockNum() > currentLib
+        return Utils.waitForBool(isLibAdvancing, timeout)
+
+    # Requires producer_api_plugin
+    def activatePreactivateFeature(self):
+        protocolFeatureDigestDict = self.getSupportedProtocolFeatureDict()
+        preactivateFeatureDigest = protocolFeatureDigestDict["PREACTIVATE_FEATURE"]["feature_digest"]
+        assert preactivateFeatureDigest
+
+        self.scheduleProtocolFeatureActivations([preactivateFeatureDigest])
+
+        # Wait for the next block to be produced so the scheduled protocol feature is activated
+        self.waitForHeadToAdvance()
+
+    # Return an array of feature digests to be preactivated in the correct order, respecting dependencies
+    # Requires producer_api_plugin
+    def getAllBuiltinFeatureDigestsToPreactivate(self):
+        protocolFeatures = []
+        supportedProtocolFeatures = self.getSupportedProtocolFeatures()
+        for protocolFeature in supportedProtocolFeatures:
+            for spec in protocolFeature["specification"]:
+                if (spec["name"] == "builtin_feature_codename"):
+                    codename = spec["value"]
+                    # Filter out "PREACTIVATE_FEATURE"
+                    if codename != "PREACTIVATE_FEATURE":
+                        protocolFeatures.append(protocolFeature["feature_digest"])
+                    break
+        return protocolFeatures
+
+    # Requires PREACTIVATE_FEATURE to be activated and requires an eosio.bios with preactivate_feature support
+    def preactivateProtocolFeatures(self, featureDigests:list):
+        for digest in featureDigests:
+            Utils.Print("push activate action with digest {}".format(digest))
+            data="{{\"feature_digest\":{}}}".format(digest)
+            opts="--permission eosio@active"
+            trans=self.pushMessage("eosio", "activate", data, opts)
+            if trans is None or not trans[0]:
+                Utils.Print("ERROR: Failed to preactivate digest {}".format(digest))
+                return None
+        self.waitForHeadToAdvance()
+
+    # Requires PREACTIVATE_FEATURE to be activated and requires an eosio.bios with preactivate_feature support
+    def preactivateAllBuiltinProtocolFeature(self):
+        allBuiltinProtocolFeatureDigests = self.getAllBuiltinFeatureDigestsToPreactivate()
+        self.preactivateProtocolFeatures(allBuiltinProtocolFeatureDigests)
+
+    def getLatestBlockHeaderState(self):
+        headBlockNum = self.getHeadBlockNum()
+        cmdDesc = "get block {} --header-state".format(headBlockNum)
+        latestBlockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc)
+        return latestBlockHeaderState
+
+    def getActivatedProtocolFeatures(self):
+        latestBlockHeaderState = self.getLatestBlockHeaderState()
+        return latestBlockHeaderState["activated_protocol_features"]["protocol_features"]
+
+    def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRestriction={}):
+        jsonPath
= os.path.join(Utils.getNodeConfigDir(nodeId),
+                              "protocol_features",
+                              "BUILTIN-{}.json".format(featureCodename))
+        protocolFeatureJson = {}
+        with open(jsonPath) as f:
+            protocolFeatureJson = json.load(f)
+        protocolFeatureJson["subjective_restrictions"].update(subjectiveRestriction)
+        with open(jsonPath, "w") as f:
+            json.dump(protocolFeatureJson, f, indent=2)
diff --git a/tests/TestHelper.py b/tests/TestHelper.py
index 10b69fa334d..6b53c7a7782 100644
--- a/tests/TestHelper.py
+++ b/tests/TestHelper.py
@@ -22,6 +22,11 @@ def add(self, flag, type, help, default, choices=None):
         arg=self.AppArg(flag, type, help, default, choices)
         self.args.append(arg)
+
+    def add_bool(self, flag, help, action='store_true'):
+        arg=self.AppArg(flag=flag, help=help, action=action)
+        self.args.append(arg)
+
 # pylint: disable=too-many-instance-attributes
 class TestHelper(object):
     LOCAL_HOST="localhost"
@@ -60,8 +65,6 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()):
                                 default=Utils.SigKillTag)
         if "--kill-count" in includeArgs:
             parser.add_argument("--kill-count", type=int, help="nodeos instances to kill", default=-1)
-        if "--p2p-plugin" in includeArgs:
-            parser.add_argument("--p2p-plugin", choices=["net", "bnet"], help="select a p2p plugin to use. Defaults to net.", default="net")
         if "--seed" in includeArgs:
             parser.add_argument("--seed", type=int, help="random seed", default=1)
@@ -105,6 +108,8 @@
             parser.add_argument("--clean-run", help="Kill all nodeos and keosd instances", action='store_true')
         if "--sanity-test" in includeArgs:
             parser.add_argument("--sanity-test", help="Validates nodeos and keosd are in path and can be started up.", action='store_true')
+        if "--alternate-version-labels-file" in includeArgs:
+            parser.add_argument("--alternate-version-labels-file", type=str, help="Provide a file that defines the labels usable in the test and the path to the version installation associated with each.")
         for arg in applicationSpecificArgs.args:
             parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default)
diff --git a/tests/bash-bats/README.md b/tests/bash-bats/README.md
new file mode 100644
index 00000000000..c8bd18fdb5a
--- /dev/null
+++ b/tests/bash-bats/README.md
@@ -0,0 +1,63 @@
+# BATS Bash Testing
+
+For each bash script we have, there should be a separate .sh file within `REPO_ROOT/tests/bash-bats/`.
+
+### Usage notes:
+
+- BATS requires that you install greadlink (macOS: `brew install coreutils`)
+- DRYRUN=true is required for all tests and automatically enabled.
You can use this when you're manually running eosio_build.sh (`DRYRUN=true VERBOSE=true ./scripts/eosio_build.sh`)
+- execute-always bypasses DRYRUN and runs the command anyway (the installation of `which` uses this)
+- To run all tests:
+  ```
+  $ ./tests/bash-bats/bats-core/bin/bats tests/bash-bats/*.sh
+  ✓ [eosio_build_darwin] > Testing -y/NONINTERACTIVE/PROCEED
+  ✓ [eosio_build_darwin] > Testing prompts
+  ✓ [eosio_build_darwin] > Testing executions
+  ✓ [helpers] > execute > dryrun
+  ✓ [helpers] > execute > verbose
+  ✓ [uninstall] > Usage is visible with right interaction
+  ✓ [uninstall] > Testing user prompts
+  ✓ [uninstall] > Testing executions
+  ✓ [uninstall] > --force
+  ✓ [uninstall] > --force + --full
+
+  10 tests, 0 failures
+  ```
+- Verbose bats output ( `-t` ):
+  ```
+  ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh
+  ```
+
+---
+
+### Running all tests for all distros:
+```
+echo "[Darwin]"
+./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh
+echo "[Ubuntu 16]"
+docker run --rm -ti -v $HOME/BLOCKONE/eos.bats:/eos ubuntu:16.04 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+echo "[Ubuntu 18]"
+docker run --rm -ti -v $HOME/BLOCKONE/eos.bats:/eos ubuntu:18.04 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+echo "[AmazonLinux 2]"
+docker run --rm -ti -v $HOME/BLOCKONE/eos.bats:/eos amazonlinux:2 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+echo "[Centos 7]"
+docker run --rm -ti -v $HOME/BLOCKONE/eos.bats:/eos centos:7 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+```
+
+### **Faster testing:** Start docker first, then run (keeping installed packages + faster tests)
+```
+docker run --name ubuntu16 -d -t -v $HOME/BLOCKONE/eos.bats:/eos ubuntu:16.04 /bin/bash
+docker run --name ubuntu18 -d -t -v $HOME/BLOCKONE/eos.bats:/eos ubuntu:18.04 /bin/bash
+docker run --name amazonlinux2 -d -t -v $HOME/BLOCKONE/eos.bats:/eos amazonlinux:2 /bin/bash
+docker run --name centos7 -d -t -v $HOME/BLOCKONE/eos.bats:/eos centos:7 /bin/bash
+echo "[Ubuntu 16]"
+docker exec -it ubuntu16 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+echo "[Ubuntu 18]"
+docker exec -it ubuntu18 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+echo "[AmazonLinux 2]"
+docker exec -it amazonlinux2 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+echo "[Centos 7]"
+docker exec -it centos7 bash -c "cd /eos && ./tests/bash-bats/bats-core/bin/bats -t tests/bash-bats/*.sh"
+```
+
+- You'll need to modify the volume path ($HOME/BLOCKONE/eos.bats) to indicate where you've got eos cloned locally.
diff --git a/tests/bash-bats/bats-core/bin/bats b/tests/bash-bats/bats-core/bin/bats
new file mode 100755
index 00000000000..dea44d326b7
--- /dev/null
+++ b/tests/bash-bats/bats-core/bin/bats
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+set -e
+
+BATS_READLINK='true'
+if command -v 'greadlink' >/dev/null; then
+  BATS_READLINK='greadlink'
+elif command -v 'readlink' >/dev/null; then
+  BATS_READLINK='readlink'
+fi
+
+bats_resolve_absolute_root_dir() {
+  local cwd="$PWD"
+  local path="$1"
+  local result="$2"
+  local target_dir
+  local target_name
+  local original_shell_options="$-"
+
+  # Resolve the parent directory, e.g. /bin => /usr/bin on CentOS (#113).
+  set -P
+
+  while true; do
+    target_dir="${path%/*}"
+    target_name="${path##*/}"
+
+    if [[ "$target_dir" != "$path" ]]; then
+      cd "$target_dir"
+    fi
+
+    if [[ -L "$target_name" ]]; then
+      path="$("$BATS_READLINK" "$target_name")"
+    else
+      printf -v "$result" -- '%s' "${PWD%/*}"
+      set +P "-$original_shell_options"
+      cd "$cwd"
+      return
+    fi
+  done
+}
+
+export BATS_ROOT
+bats_resolve_absolute_root_dir "$0" 'BATS_ROOT'
+exec "$BATS_ROOT/libexec/bats-core/bats" "$@"
diff --git a/tests/bash-bats/bats-core/libexec/bats-core/bats b/tests/bash-bats/bats-core/libexec/bats-core/bats
new file mode 100755
index 00000000000..03c1316f08b
--- /dev/null
+++ b/tests/bash-bats/bats-core/libexec/bats-core/bats
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+set -e
+
+export BATS_VERSION='1.1.0'
+
+version() {
+  printf 'Bats %s\n' "$BATS_VERSION"
+}
+
+abort() {
+  printf 'Error: %s\n' "$1" >&2
+  usage >&2
+  exit 1
+}
+
+usage() {
+  local cmd="${0##*/}"
+  local line
+
+  while IFS= read -r line; do
+    printf '%s\n' "$line"
+  done <<END_OF_HELP_TEXT
+Usage: $cmd [-cr] [-f <regex>] [-j <jobs>] [-p | -t] <test>...
+       $cmd [-h | -v]
+
+  <test> is the path to a Bats test file, or the path to a directory
+  containing Bats test files (ending with ".bats").
+
+  -c, --count      Count the number of test cases without running any tests
+  -f, --filter     Filter test cases by names matching the regular expression
+  -h, --help       Display this help message
+  -j, --jobs       Number of parallel jobs to run (requires GNU parallel)
+  -p, --pretty     Show results in pretty format (default for terminals)
+  -r, --recursive  Include tests in subdirectories
+  -t, --tap        Show results in TAP format
+  -v, --version    Display the version number
+
+  For more information, see https://github.com/bats-core/bats-core
+
+END_OF_HELP_TEXT
+}
+
+expand_link() {
+  readlink="$(type -p greadlink readlink | head -1)"
+  "$readlink" -f "$1"
+}
+
+expand_path() {
+  local path="${1%/}"
+  local dirname="${path%/*}"
+  local result="$2"
+
+  if [[ "$dirname" == "$path" ]]; then
+    dirname="$PWD"
+  else
+    cd "$dirname"
+    dirname="$PWD"
+    cd "$OLDPWD"
+  fi
+  printf -v "$result" '%s/%s' "$dirname" "${path##*/}"
+}
+
+BATS_LIBEXEC="$(dirname "$(expand_link "$BASH_SOURCE")")"
+export BATS_CWD="$PWD"
+export BATS_TEST_PATTERN="^[[:blank:]]*@test[[:blank:]]+(.*[^[:blank:]])[[:blank:]]+\{(.*)\$"
+export BATS_TEST_FILTER=
+export PATH="$BATS_LIBEXEC:$PATH"
+
+arguments=()
+
+# Unpack single-character options bundled together, e.g. -cr, -pr.
+for arg in "$@"; do
+  if [[ "$arg" =~ ^-[^-].
]]; then
+    index=1
+    while option="${arg:$((index++)):1}"; do
+      if [[ -z "$option" ]]; then
+        break
+      fi
+      arguments+=("-$option")
+    done
+  else
+    arguments+=("$arg")
+  fi
+  shift
+done
+
+set -- "${arguments[@]}"
+arguments=()
+
+unset flags pretty recursive
+flags=()
+pretty=
+recursive=
+if [[ -z "${CI:-}" && -t 0 && -t 1 ]] && command -v tput >/dev/null; then
+  pretty=1
+fi
+
+while [[ "$#" -ne 0 ]]; do
+  case "$1" in
+  -h|--help)
+    version
+    usage
+    exit 0
+    ;;
+  -v|--version)
+    version
+    exit 0
+    ;;
+  -c|--count)
+    flags+=('-c')
+    ;;
+  -f|--filter)
+    shift
+    flags+=('-f' "$1")
+    ;;
+  -j|--jobs)
+    shift
+    flags+=('-j' "$1")
+    ;;
+  -r|--recursive)
+    recursive=1
+    ;;
+  -t|--tap)
+    pretty=
+    ;;
+  -p|--pretty)
+    pretty=1
+    ;;
+  -*)
+    abort "Bad command line option '$1'"
+    ;;
+  *)
+    arguments+=("$1")
+    ;;
+  esac
+  shift
+done
+
+if [[ "${#arguments[@]}" -eq 0 ]]; then
+  abort 'Must specify at least one <test>'
+fi
+
+filenames=()
+for filename in "${arguments[@]}"; do
+  expand_path "$filename" 'filename'
+
+  if [[ -d "$filename" ]]; then
+    shopt -s nullglob
+    if [[ "$recursive" -eq 1 ]]; then
+      while IFS= read -r -d $'\0' file; do
+        filenames+=("$file")
+      done < <(find "$filename" -type f -name '*.bats' -print0 | sort -z)
+    else
+      for suite_filename in "$filename"/*.bats; do
+        filenames+=("$suite_filename")
+      done
+    fi
+    shopt -u nullglob
+  else
+    filenames+=("$filename")
+  fi
+done
+
+formatter="cat"
+if [[ -n "$pretty" ]]; then
+  flags+=("-x")
+  formatter="bats-format-tap-stream"
+fi
+
+set -o pipefail execfail
+exec bats-exec-suite "${flags[@]}" "${filenames[@]}" | "$formatter"
diff --git a/tests/bash-bats/bats-core/libexec/bats-core/bats-exec-suite b/tests/bash-bats/bats-core/libexec/bats-core/bats-exec-suite
new file mode 100755
index 00000000000..eb30d74ba00
--- /dev/null
+++ b/tests/bash-bats/bats-core/libexec/bats-core/bats-exec-suite
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+set -e
+
+count_only_flag=''
+extended_syntax_flag=''
+filter=''
+num_jobs=1
+have_gnu_parallel=
+flags=()
+
+while [[ "$#" -ne 0 ]]; do
+  case "$1" in
+  -c)
+    count_only_flag=1
+    ;;
+  -f)
+    shift
+    filter="$1"
+    flags+=('-f' "$filter")
+    ;;
+  -j)
+    shift
+    num_jobs="$1"
+    ;;
+  -x)
+    extended_syntax_flag='-x'
+    flags+=('-x')
+    ;;
+  *)
+    break
+    ;;
+  esac
+  shift
+done
+
+if ( type -p parallel &>/dev/null ); then
+  have_gnu_parallel=1
+elif [[ "$num_jobs" != 1 ]]; then
+  printf 'bats: cannot execute "%s" jobs without GNU parallel\n' "$num_jobs" >&2
+  exit 1
+fi
+
+trap 'kill 0; exit 1' int
+
+all_tests=()
+for filename in "$@"; do
+  if [[ ! -f "$filename" ]]; then
+    printf 'bats: %s does not exist\n' "$filename" >&2
+    exit 1
+  fi
+
+  test_names=()
+  test_dupes=()
+  while read -r line; do
+    if [[ ! "$line" =~ ^bats_test_function\  ]]; then
+      continue
+    fi
+    line="${line%$'\r'}"
+    line="${line#* }"
+
+    all_tests+=( "$(printf "%s\t%s" "$filename" "$line")" )
+    if [[ " ${test_names[*]} " == *" $line "* ]]; then
+      test_dupes+=("$line")
+      continue
+    fi
+    test_names+=("$line")
+  done < <(BATS_TEST_FILTER="$filter" bats-preprocess "$filename")
+
+  if [[ "${#test_dupes[@]}" -ne 0 ]]; then
+    printf 'bats warning: duplicate test name(s) in %s: %s\n' "$filename" "${test_dupes[*]}" >&2
+  fi
+done
+
+if [[ -n "$count_only_flag" ]]; then
+  printf '%d\n' "${#all_tests[@]}"
+  exit
+fi
+
+status=0
+printf '1..%d\n' "${#all_tests[@]}"
+
+# No point in continuing if there are no tests.
+if [[ "${#all_tests[@]}" == 0 ]]; then + exit +fi + +if [[ "$num_jobs" != 1 ]]; then + # Only use GNU parallel when we want parallel execution -- there is a small + # amount of overhead using it over a simple loop in the serial case. + set -o pipefail + printf '%s\n' "${all_tests[@]}" | grep -v '^$' | \ + parallel -qk -j "$num_jobs" --colsep="\t" -- bats-exec-test "${flags[@]}" '{1}' '{2}' '{#}' || status=1 +else + # Just do it serially. + test_number=1 + while IFS=$'\t' read -r filename test_name; do + bats-exec-test "${flags[@]}" "$filename" "$test_name" "$test_number" || status=1 + ((++test_number)) + done < <(printf '%s\n' "${all_tests[@]}" | grep -v '^$') +fi +exit "$status" diff --git a/tests/bash-bats/bats-core/libexec/bats-core/bats-exec-test b/tests/bash-bats/bats-core/libexec/bats-core/bats-exec-test new file mode 100755 index 00000000000..9b8d09e38fa --- /dev/null +++ b/tests/bash-bats/bats-core/libexec/bats-core/bats-exec-test @@ -0,0 +1,373 @@ +#!/usr/bin/env bash +set -eET + +BATS_COUNT_ONLY='' +BATS_TEST_FILTER='' +BATS_EXTENDED_SYNTAX='' + +while [[ "$#" -ne 0 ]]; do + case "$1" in + -c) + BATS_COUNT_ONLY=1 + ;; + -f) + shift + BATS_TEST_FILTER="$1" + ;; + -x) + BATS_EXTENDED_SYNTAX='-x' + ;; + *) + break + ;; + esac + shift +done + +BATS_TEST_FILENAME="$1" +shift +if [[ -z "$BATS_TEST_FILENAME" ]]; then + printf 'usage: bats-exec-test \n' >&2 + exit 1 +elif [[ ! -f "$BATS_TEST_FILENAME" ]]; then + printf 'bats: %s does not exist\n' "$BATS_TEST_FILENAME" >&2 + exit 1 +fi + +BATS_TEST_DIRNAME="${BATS_TEST_FILENAME%/*}" +BATS_TEST_NAMES=() + +load() { + local name="$1" + local filename + + if [[ "${name:0:1}" == '/' ]]; then + filename="${name}" + else + filename="$BATS_TEST_DIRNAME/${name}.sh" + fi + + if [[ ! -f "$filename" ]]; then + printf 'bats: %s does not exist\n' "$filename" >&2 + exit 1 + fi + + source "${filename}" +} + +run() { + local origFlags="$-" + set +eET + local origIFS="$IFS" + output="$("$@" 2>&1)" + status="$?" + IFS=$'\n' lines=($output) + IFS="$origIFS" + set "-$origFlags" +} + +setup() { + return 0 +} + +teardown() { + return 0 +} + +skip() { + BATS_TEST_SKIPPED="${1:-1}" + BATS_TEST_COMPLETED=1 + exit 0 +} + +bats_test_begin() { + BATS_TEST_DESCRIPTION="$1" + if [[ -n "$BATS_EXTENDED_SYNTAX" ]]; then + printf 'begin %d %s\n' "$BATS_TEST_NUMBER" "$BATS_TEST_DESCRIPTION" >&3 + fi + setup +} + +bats_test_function() { + local test_name="$1" + BATS_TEST_NAMES+=("$test_name") +} + +bats_capture_stack_trace() { + local test_file + local funcname + local i + + BATS_STACK_TRACE=() + + for ((i=2; i != ${#FUNCNAME[@]}; ++i)); do + # Use BATS_TEST_SOURCE if necessary to work around Bash < 4.4 bug whereby + # calling an exported function erases the test file's BASH_SOURCE entry. 
+ test_file="${BASH_SOURCE[$i]:-$BATS_TEST_SOURCE}" + funcname="${FUNCNAME[$i]}" + BATS_STACK_TRACE+=("${BASH_LINENO[$((i-1))]} $funcname $test_file") + if [[ "$test_file" == "$BATS_TEST_SOURCE" ]]; then + case "$funcname" in + "$BATS_TEST_NAME"|setup|teardown) + break + ;; + esac + fi + done +} + +bats_print_stack_trace() { + local frame + local index=1 + local count="${#@}" + local filename + local lineno + + for frame in "$@"; do + bats_frame_filename "$frame" 'filename' + bats_trim_filename "$filename" 'filename' + bats_frame_lineno "$frame" 'lineno' + + if [[ $index -eq 1 ]]; then + printf '# (' + else + printf '# ' + fi + + local fn + bats_frame_function "$frame" 'fn' + if [[ "$fn" != "$BATS_TEST_NAME" ]]; then + printf "from function \`%s' " "$fn" + fi + + if [[ $index -eq $count ]]; then + printf 'in test file %s, line %d)\n' "$filename" "$lineno" + else + printf 'in file %s, line %d,\n' "$filename" "$lineno" + fi + + ((++index)) + done +} + +bats_print_failed_command() { + local frame="${BATS_STACK_TRACE[${#BATS_STACK_TRACE[@]}-1]}" + local filename + local lineno + local failed_line + local failed_command + + bats_frame_filename "$frame" 'filename' + bats_frame_lineno "$frame" 'lineno' + bats_extract_line "$filename" "$lineno" 'failed_line' + bats_strip_string "$failed_line" 'failed_command' + printf '%s' "# \`${failed_command}' " + + if [[ "$BATS_ERROR_STATUS" -eq 1 ]]; then + printf 'failed\n' + else + printf 'failed with status %d\n' "$BATS_ERROR_STATUS" + fi +} + +bats_frame_lineno() { + printf -v "$2" '%s' "${1%% *}" +} + +bats_frame_function() { + local __bff_function="${1#* }" + printf -v "$2" '%s' "${__bff_function%% *}" +} + +bats_frame_filename() { + local __bff_filename="${1#* }" + __bff_filename="${__bff_filename#* }" + + if [[ "$__bff_filename" == "$BATS_TEST_SOURCE" ]]; then + __bff_filename="$BATS_TEST_FILENAME" + fi + printf -v "$2" '%s' "$__bff_filename" +} + +bats_extract_line() { + local __bats_extract_line_line + local __bats_extract_line_index=0 + + while IFS= read -r __bats_extract_line_line; do + if [[ "$((++__bats_extract_line_index))" -eq "$2" ]]; then + printf -v "$3" '%s' "${__bats_extract_line_line%$'\r'}" + break + fi + done <"$1" +} + +bats_strip_string() { + [[ "$1" =~ ^[[:space:]]*(.*)[[:space:]]*$ ]] + printf -v "$2" '%s' "${BASH_REMATCH[1]}" +} + +bats_trim_filename() { + printf -v "$2" '%s' "${1#$BATS_CWD/}" +} + +bats_debug_trap() { + if [[ "$BASH_SOURCE" != "$1" ]]; then + bats_capture_stack_trace + BATS_LINENO="$BATS_CURRENT_LINENO" + BATS_CURRENT_LINENO="${BASH_LINENO[0]}" + fi +} + +# For some versions of Bash, the `ERR` trap may not always fire for every +# command failure, but the `EXIT` trap will. Also, some command failures may not +# set `$?` properly. See #72 and #81 for details. +# +# For this reason, we call `bats_error_trap` at the very beginning of +# `bats_teardown_trap` (the `DEBUG` trap for the call will move +# `BATS_CURRENT_LINENO` to `BATS_LINENO`) and check the value of +# `$BATS_TEST_COMPLETED` before taking other actions. We also adjust the exit +# status value if needed. +# +# See `bats_exit_trap` for an additional EXIT error handling case when `$?` +# isn't set properly during `teardown()` errors. +bats_error_trap() { + local status="$?" 
+ if [[ -z "$BATS_TEST_COMPLETED" ]]; then + BATS_ERROR_STATUS="${BATS_ERROR_STATUS:-$status}" + if [[ "$BATS_ERROR_STATUS" -eq 0 ]]; then + BATS_ERROR_STATUS=1 + fi + BATS_STACK_TRACE[0]="$BATS_LINENO ${BATS_STACK_TRACE[0]#* }" + trap - debug + fi +} + +bats_teardown_trap() { + bats_error_trap + local status=0 + teardown >>"$BATS_OUT" 2>&1 || status="$?" + + if [[ $status -eq 0 ]]; then + BATS_TEARDOWN_COMPLETED=1 + elif [[ -n "$BATS_TEST_COMPLETED" ]]; then + BATS_ERROR_STATUS="$status" + fi + + bats_exit_trap +} + +bats_exit_trap() { + local line + local status + local skipped='' + trap - err exit + + if [[ -n "$BATS_TEST_SKIPPED" ]]; then + skipped=' # skip' + if [[ "$BATS_TEST_SKIPPED" != '1' ]]; then + skipped+=" $BATS_TEST_SKIPPED" + fi + fi + + if [[ -z "$BATS_TEST_COMPLETED" || -z "$BATS_TEARDOWN_COMPLETED" ]]; then + if [[ "$BATS_ERROR_STATUS" -eq 0 ]]; then + # For some versions of bash, `$?` may not be set properly for some error + # conditions before triggering the EXIT trap directly (see #72 and #81). + # Thanks to the `BATS_TEARDOWN_COMPLETED` signal, this will pinpoint such + # errors if they happen during `teardown()` when `bats_perform_test` calls + # `bats_teardown_trap` directly after the test itself passes. + # + # If instead the test fails, and the `teardown()` error happens while + # `bats_teardown_trap` runs as the EXIT trap, the test will fail with no + # output, since there's no way to reach the `bats_exit_trap` call. + BATS_STACK_TRACE[0]="$BATS_LINENO ${BATS_STACK_TRACE[0]#* }" + BATS_ERROR_STATUS=1 + fi + printf 'not ok %d %s\n' "$BATS_TEST_NUMBER" "$BATS_TEST_DESCRIPTION" >&3 + bats_print_stack_trace "${BATS_STACK_TRACE[@]}" >&3 + bats_print_failed_command >&3 + + while IFS= read -r line; do + printf '# %s\n' "$line" + done <"$BATS_OUT" >&3 + if [[ -n "$line" ]]; then + printf '# %s\n' "$line" + fi + status=1 + else + printf 'ok %d %s%s\n' "$BATS_TEST_NUMBER" "$BATS_TEST_DESCRIPTION" \ + "$skipped" >&3 + status=0 + fi + + rm -f "$BATS_OUT" + exit "$status" +} + +bats_perform_test() { + BATS_TEST_NAME="$1" + BATS_TEST_NUMBER="$2" + + if ! declare -F "$BATS_TEST_NAME" &>/dev/null; then + printf "bats: unknown test name \`%s'\n" "$BATS_TEST_NAME" >&2 + exit 1 + fi + + # Some versions of Bash will reset BASH_LINENO to the first line of the + # function when the ERR trap fires. All versions of Bash appear to reset it + # on an unbound variable access error. bats_debug_trap will fire both before + # the offending line is executed, and when the error is triggered. + # Consequently, we use `BATS_LINENO` to point to the line number seen by the + # first call to bats_debug_trap, _before_ the ERR trap or unbound variable + # access fires. 
+ BATS_CURRENT_LINENO=0 + BATS_LINENO=0 + BATS_STACK_TRACE=() + + BATS_TEST_COMPLETED= + BATS_TEST_SKIPPED= + BATS_TEARDOWN_COMPLETED= + BATS_ERROR_STATUS= + trap "bats_debug_trap \"\$BASH_SOURCE\"" debug + trap 'bats_error_trap' err + trap 'bats_teardown_trap' exit + "$BATS_TEST_NAME" >>"$BATS_OUT" 2>&1 + BATS_TEST_COMPLETED=1 + trap 'bats_exit_trap' exit + bats_teardown_trap +} + +if [[ -z "$TMPDIR" ]]; then + BATS_TMPDIR='/tmp' +else + BATS_TMPDIR="${TMPDIR%/}" +fi + +BATS_TMPNAME="$BATS_TMPDIR/bats.$$" +BATS_PARENT_TMPNAME="$BATS_TMPDIR/bats.$PPID" +BATS_OUT="${BATS_TMPNAME}.out" + +bats_preprocess_source() { + BATS_TEST_SOURCE="${BATS_TMPNAME}.src" + bats-preprocess "$BATS_TEST_FILENAME" >"$BATS_TEST_SOURCE" + trap 'bats_cleanup_preprocessed_source' err exit + trap 'bats_cleanup_preprocessed_source; exit 1' int +} + +bats_cleanup_preprocessed_source() { + rm -f "$BATS_TEST_SOURCE" +} + +bats_evaluate_preprocessed_source() { + if [[ -z "$BATS_TEST_SOURCE" ]]; then + BATS_TEST_SOURCE="${BATS_PARENT_TMPNAME}.src" + fi + source "$BATS_TEST_SOURCE" +} + +exec 3<&1 + +# Run the given test. +bats_preprocess_source +bats_evaluate_preprocessed_source +bats_perform_test "$@" diff --git a/tests/bash-bats/bats-core/libexec/bats-core/bats-format-tap-stream b/tests/bash-bats/bats-core/libexec/bats-core/bats-format-tap-stream new file mode 100755 index 00000000000..8c3a9a69592 --- /dev/null +++ b/tests/bash-bats/bats-core/libexec/bats-core/bats-format-tap-stream @@ -0,0 +1,174 @@ +#!/usr/bin/env bash +set -e + +header_pattern='[0-9]+\.\.[0-9]+' +IFS= read -r header + +if [[ "$header" =~ $header_pattern ]]; then + count="${header:3}" + index=0 + failures=0 + skipped=0 + name= + count_column_width=$(( ${#count} * 2 + 2 )) +else + # If the first line isn't a TAP plan, print it and pass the rest through + printf '%s\n' "$header" + exec cat +fi + +update_screen_width() { + screen_width="$(tput cols)" + count_column_left=$(( $screen_width - $count_column_width )) +} + +trap update_screen_width WINCH +update_screen_width + +begin() { + go_to_column 0 + buffer_with_truncation $(( $count_column_left - 1 )) ' %s' "$name" + clear_to_end_of_line + go_to_column $count_column_left + buffer "%${#count}s/${count}" "$index" + go_to_column 1 +} + +pass() { + go_to_column 0 + buffer ' ✓ %s' "$name" + advance +} + +skip() { + local reason="$1" + if [[ -n "$reason" ]]; then + reason=": $reason" + fi + go_to_column 0 + buffer ' - %s (skipped%s)' "$name" "$reason" + advance +} + +fail() { + go_to_column 0 + set_color 1 bold + buffer ' ✗ %s' "$name" + advance +} + +log() { + set_color 1 + buffer ' %s\n' "$1" + clear_color +} + +summary() { + buffer '\n%d test' "$count" + if [[ "$count" -ne 1 ]]; then + buffer 's' + fi + + buffer ', %d failure' "$failures" + if [[ "$failures" -ne 1 ]]; then + buffer 's' + fi + + if [[ "$skipped" -gt 0 ]]; then + buffer ', %d skipped' "$skipped" + fi + + buffer '\n' +} + +buffer_with_truncation() { + local width="$1" + shift + local string + + printf -v 'string' -- "$@" + + if [[ "${#string}" -gt "$width" ]]; then + buffer '%s...' 
"${string:0:$(( $width - 4 ))}" + else + buffer '%s' "$string" + fi +} + +go_to_column() { + local column="$1" + buffer '\x1B[%dG' $(( $column + 1 )) +} + +clear_to_end_of_line() { + buffer '\x1B[K' +} + +advance() { + clear_to_end_of_line + buffer '\n' + clear_color +} + +set_color() { + local color="$1" + local weight=22 + + if [[ "$2" == 'bold' ]]; then + weight=1 + fi + buffer '\x1B[%d;%dm' "$(( 30 + $color ))" "$weight" +} + +clear_color() { + buffer '\x1B[0m' +} + +_buffer= + +buffer() { + local content + printf -v content -- "$@" + _buffer+="$content" +} + +flush() { + printf '%s' "$_buffer" + _buffer= +} + +finish() { + flush + printf '\n' +} + +trap finish EXIT + +while IFS= read -r line; do + case "$line" in + 'begin '* ) + ((++index)) + name="${line#* $index }" + begin + flush + ;; + 'ok '* ) + skip_expr="ok $index (.*) # skip ?(([[:print:]]*))?" + if [[ "$line" =~ $skip_expr ]]; then + ((++skipped)) + skip "${BASH_REMATCH[2]}" + else + pass + fi + ;; + 'not ok '* ) + ((++failures)) + fail + ;; + '# '* ) + log "${line:2}" + ;; + esac +done + +summary diff --git a/tests/bash-bats/bats-core/libexec/bats-core/bats-preprocess b/tests/bash-bats/bats-core/libexec/bats-core/bats-preprocess new file mode 100755 index 00000000000..8e31705cbc9 --- /dev/null +++ b/tests/bash-bats/bats-core/libexec/bats-core/bats-preprocess @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +set -e + +bats_encode_test_name() { + local name="$1" + local result='test_' + local hex_code + + if [[ ! "$name" =~ [^[:alnum:]\ _-] ]]; then + name="${name//_/-5f}" + name="${name//-/-2d}" + name="${name// /_}" + result+="$name" + else + local length="${#name}" + local char i + + for ((i=0; i Testing Arguments & Options" { + + if [[ $NAME =~ "Amazon Linux" ]] || [[ $NAME == "CentOS Linux" ]]; then + # which package isn't installed + uninstall-package which WETRUN &>/dev/null + run bash -c "printf \"y\ny\nn\nn\n\" | ./scripts/eosio_build.sh" + [[ ! -z $(echo "${output}" | grep "EOSIO compiler checks require the 'which'") ]] || exit + fi + + if [[ $ARCH == "Linux" ]]; then + if [[ $NAME != "CentOS Linux" ]]; then # Centos has the SCL prompt before checking for the compiler + # No c++! + run bash -c "printf \"y\ny\ny\nn\nn\n\" | ./${SCRIPT_LOCATION}" + [[ ! -z $(echo "${output}" | grep "Unable to find .* compiler") ]] || exit + fi + fi + + cd ./scripts # Also test that we can run the script from a directory other than the root + run bash -c "./eosio_build.sh -y -P" + [[ ! -z $(echo "${output}" | grep "PIN_COMPILER: true") ]] || exit + # Ensure build-essentials is installed so we can compile cmake, clang, boost, etc + if [[ $NAME == "Ubuntu" ]]; then + [[ ! -z $(echo "${output}" | grep "Installed build-essential") ]] || exit + fi + [[ "${output}" =~ -DCMAKE_TOOLCHAIN_FILE=\'.*/scripts/../build/pinned_toolchain.cmake\' ]] || exit + [[ "${output}" =~ "Clang 8 successfully installed" ]] || exit + # -P with prompts + cd .. + run bash -c "printf \"y\nn\nn\nn\n\" | ./$SCRIPT_LOCATION -P" + [[ "${output}" =~ .*User.aborted.* ]] || exit + # lack of -m + [[ ! -z $(echo "${output}" | grep "ENABLE_MONGO: false") ]] || exit + [[ ! -z $(echo "${output}" | grep "INSTALL_MONGO: false") ]] || exit + # lack of -i + [[ ! -z $(echo "${output}" | grep "EOSIO_INSTALL_DIR: ${HOME}/eosio/${EOSIO_VERSION}") ]] || exit + ## -o + run bash -c "printf \"y\ny\nn\nn\n\" | ./$SCRIPT_LOCATION -o Debug -P" + [[ ! -z $(echo "${output}" | grep "CMAKE_BUILD_TYPE: Debug") ]] || exit + ## -s + run bash -c "printf \"y\ny\nn\nn\n\" | ./$SCRIPT_LOCATION -s EOS2 -P" + [[ ! 
-z $(echo "${output}" | grep "CORE_SYMBOL_NAME: EOS2") ]] || exit + ## -b + run bash -c "printf \"y\ny\nn\nn\n\" | ./$SCRIPT_LOCATION -b /test -P" + [[ ! -z $(echo "${output}" | grep "BOOST_LOCATION: /test") ]] || exit + ## -i + run bash -c "printf \"y\ny\nn\nn\n\"| ./$SCRIPT_LOCATION -i /NEWPATH -P" + [[ ! -z $(echo "${output}" | grep "EOSIO_INSTALL_DIR: /NEWPATH") ]] || exit + [[ ! -z $(echo "${output}" | grep "TEMP_DIR: ${HOME}/tmp") ]] || exit + ## -c + run bash -c "printf \"y\ny\nn\nn\n\"| ./$SCRIPT_LOCATION -c -P" + [[ ! -z $(echo "${output}" | grep "ENABLE_COVERAGE_TESTING: true") ]] || exit + ## -d + run bash -c "printf \"y\ny\nn\nn\n\" | ./$SCRIPT_LOCATION -d -P" + [[ ! -z $(echo "${output}" | grep "ENABLE_DOXYGEN: true") ]] || exit + ## -m + run bash -c "printf \"y\ny\nn\nn\n\" | ./$SCRIPT_LOCATION -m -y -P" + [[ ! -z $(echo "${output}" | grep "ENABLE_MONGO: true") ]] || exit + [[ ! -z $(echo "${output}" | grep "INSTALL_MONGO: true") ]] || exit + ## -h / -anythingwedon'tsupport + run bash -c "./$SCRIPT_LOCATION -z" + [[ ! -z $(echo "${output}" | grep "Invalid Option!") ]] || exit + run bash -c "./$SCRIPT_LOCATION -h" + [[ ! -z $(echo "${output}" | grep "Usage:") ]] || exit +} \ No newline at end of file diff --git a/tests/bash-bats/eosio_build_amazonlinux.sh b/tests/bash-bats/eosio_build_amazonlinux.sh new file mode 100644 index 00000000000..3ff1d12a71c --- /dev/null +++ b/tests/bash-bats/eosio_build_amazonlinux.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bats +load helpers/general +export SCRIPT_LOCATION="scripts/eosio_build.sh" +export TEST_LABEL="[eosio_build_amazonlinux]" + +[[ $ARCH == "Linux" ]] || exit 0 # Skip if we're not on linux +( [[ $NAME == "Amazon Linux AMI" ]] || [[ $NAME == "Amazon Linux" ]] ) || exit 0 # Exit 0 is required for pipeline + +# A helper function is available to show output and status: `debug` + +# Testing Root user +./tests/bash-bats/modules/root-user.sh +# Testing Options +./tests/bash-bats/modules/dep_script_options.sh +# Testing CMAKE +./tests/bash-bats/modules/cmake.sh +# Testing Clang +./tests/bash-bats/modules/clang.sh +# Testing MongoDB +./tests/bash-bats/modules/mongodb.sh + +## Needed to load eosio_build_ files properly; it can be empty +@test "${TEST_LABEL} > General" { + set_system_vars # Obtain current machine's resources and set the necessary variables (like JOBS, etc) + + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -P -i /NEWPATH" + [[ ! -z $(echo "${output}" | grep "Executing: make -j${JOBS}") ]] || exit + ### Make sure deps are loaded properly + [[ ! -z $(echo "${output}" | grep "Executing: cd /NEWPATH/src") ]] || exit + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Dependency Install") ]] || exit + [[ ! -z $(echo "${output}" | grep "Executing: eval /usr/bin/yum -y update") ]] || exit + if [[ $NAME == "Amazon Linux" ]]; then + [[ ! -z $(echo "${output}" | grep "libstdc++.*found!") ]] || exit + elif [[ $NAME == "Amazon Linux AMI" ]]; then + [[ ! -z $(echo "${output}" | grep "make.*found!") ]] || exit + fi + [[ ! -z $(echo "${output}" | grep "sudo.*NOT.*found.") ]] || exit + [[ -z $(echo "${output}" | grep "- NOT found.") ]] || exit + [[ ! -z $(echo "${output}" | grep /NEWPATH*/src/boost) ]] || exit + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Build") ]] || exit + [[ ! 
-z $(echo "${output}" | grep "make -j${CPU_CORES}") ]] || exit + [[ -z $(echo "${output}" | grep "MongoDB C++ driver successfully installed") ]] || exit # Mongo is off + # Ensure PIN_COMPILER=false uses proper flags for the various installs + install-package gcc-c++ WETRUN + install-package clang WETRUN + run bash -c "./$SCRIPT_LOCATION -y" + [[ ! -z $(echo "${output}" | grep " -G 'Unix Makefiles'") ]] || exit # CMAKE + [[ ! -z $(echo "${output}" | grep " --with-iostreams --with-date_time") ]] || exit # BOOST + uninstall-package gcc-c++ WETRUN + uninstall-package clang WETRUN +} \ No newline at end of file diff --git a/tests/bash-bats/eosio_build_centos.sh b/tests/bash-bats/eosio_build_centos.sh new file mode 100644 index 00000000000..c7927b4a38d --- /dev/null +++ b/tests/bash-bats/eosio_build_centos.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bats +load helpers/general + +export SCRIPT_LOCATION="scripts/eosio_build.sh" +export TEST_LABEL="[eosio_build_centos]" + +[[ $ARCH == "Linux" ]] || exit 0 # Exit 0 is required for pipeline +[[ $NAME == "CentOS Linux" ]] || exit 0 # Exit 0 is required for pipeline + +# A helper function is available to show output and status: `debug` + +# Testing Root user +./tests/bash-bats/modules/root-user.sh +# Testing Options +./tests/bash-bats/modules/dep_script_options.sh +# Testing CMAKE +./tests/bash-bats/modules/cmake.sh +# Testing Clang +./tests/bash-bats/modules/clang.sh +# Testing MongoDB +./tests/bash-bats/modules/mongodb.sh + +## Needed to load eosio_build_ files properly; it can be empty +@test "${TEST_LABEL} > General" { + set_system_vars # Obtain current machine's resources and set the necessary variables (like JOBS, etc) + + execute-always yum -y --enablerepo=extras install centos-release-scl &>/dev/null + install-package devtoolset-8 WETRUN &>/dev/null + # Ensure SCL and devtoolset-8 for c++ binary installation + run bash -c "printf \"y\n%.0s\" {1..100}| ./${SCRIPT_LOCATION} -i /NEWPATH" + [[ ! -z $(echo "${output}" | grep "centos-release-scl-2-3.el7.centos.noarch found") ]] || exit + [[ ! -z $(echo "${output}" | grep "devtoolset-8.* found") ]] || exit + [[ ! -z $(echo "${output}" | grep "Executing: source /opt/rh/devtoolset-8/enable") ]] || exit + [[ ! -z $(echo "${output}" | grep "Executing: make -j${JOBS}") ]] || exit + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Dependency Install") ]] || exit + [[ ! -z $(echo "${output}" | grep "Executing: eval /usr/bin/yum -y update") ]] || exit + [[ ! -z $(echo "${output}" | grep "Python36 successfully enabled") ]] || exit + [[ -z $(echo "${output}" | grep "- NOT found.") ]] || exit + [[ ! -z $(echo "${output}" | grep "Ensuring CMAKE") ]] || exit + [[ ! -z $(echo "${output}" | grep /NEWPATH.*/src/boost) ]] || exit + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Build") ]] || exit + [[ ! -z $(echo "${output}" | grep "make -j${CPU_CORES}") ]] || exit + [[ ! 
-z $(echo "${output}" | grep "EOSIO has been successfully built") ]] || exit + uninstall-package devtoolset-8* WETRUN &>/dev/null + uninstall-package centos-release-scl WETRUN &>/dev/null +} \ No newline at end of file diff --git a/tests/bash-bats/eosio_build_darwin.sh b/tests/bash-bats/eosio_build_darwin.sh new file mode 100644 index 00000000000..da7f6f1b5bc --- /dev/null +++ b/tests/bash-bats/eosio_build_darwin.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bats +load helpers/general + +export SCRIPT_LOCATION="scripts/eosio_build.sh" +export TEST_LABEL="[eosio_build_darwin]" + +[[ $ARCH == "Darwin" ]] || exit 0 # Exit 0 is required for pipeline +[[ $NAME == "Mac OS X" ]] || exit 0 # Exit 0 is required for pipeline + +# A helper function is available to show output and status: `debug` + +# Testing Root user +./tests/bash-bats/modules/root-user.sh +# Testing Options +./tests/bash-bats/modules/dep_script_options.sh +# Testing CMAKE +./tests/bash-bats/modules/cmake.sh +# Testing Clang +./tests/bash-bats/modules/clang.sh +# Testing MongoDB +./tests/bash-bats/modules/mongodb.sh + +## Needed to load eosio_build_ files properly; it can be empty +@test "${TEST_LABEL} > General" { + set_system_vars # Obtain current machine's resources and set the necessary variables (like JOBS, etc) + + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -i /NEWPATH" + [[ ! -z $(echo "${output}" | grep "Executing: make -j${JOBS}") ]] || exit + ### Make sure deps are loaded properly + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Dependency Install") ]] || exit + [[ ! -z $(echo "${output}" | grep "Executing: /usr/bin/xcode-select --install") ]] || exit + [[ -z $(echo "${output}" | grep "- NOT found.") ]] || exit + rm -f $CMAKE + [[ ! -z $(echo "${output}" | grep "[Updating HomeBrew]") ]] || exit + [[ ! -z $(echo "${output}" | grep "brew tap eosio/eosio") ]] || exit + [[ ! -z $(echo "${output}" | grep "brew install.*llvm@4.*") ]] || exit + [[ ! -z $(echo "${output}" | grep "LLVM successfully linked from /usr/local/opt/llvm@4") ]] || exit + [[ ! -z $(echo "${output}" | grep /NEWPATH.*/src/boost) ]] || exit + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Build") ]] || exit + [[ ! -z $(echo "${output}" | grep " --with-iostreams --with-date_time") ]] || exit # BOOST + [[ ! 
-z $(echo "${output}" | grep "EOSIO has been successfully built") ]] || exit +} \ No newline at end of file diff --git a/tests/bash-bats/eosio_build_ubuntu.sh b/tests/bash-bats/eosio_build_ubuntu.sh new file mode 100644 index 00000000000..f5153719359 --- /dev/null +++ b/tests/bash-bats/eosio_build_ubuntu.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bats +load helpers/general + +export SCRIPT_LOCATION="scripts/eosio_build.sh" +export TEST_LABEL="[eosio_build_ubuntu]" + +[[ $ARCH == "Linux" ]] || exit 0 # Exit 0 is required for pipeline +[[ $NAME == "Ubuntu" ]] || exit 0 # Exit 0 is required for pipeline +( [[ $VERSION_ID == "18.04" ]] || [[ $VERSION_ID == "16.04" ]] ) || exit 0 # Exit 0 is required for pipeline + +# A helper function is available to show output and status: `debug` + +# Testing Root user +./tests/bash-bats/modules/root-user.sh +# Testing Options +./tests/bash-bats/modules/dep_script_options.sh +# Testing CMAKE +./tests/bash-bats/modules/cmake.sh +# Testing Clang +./tests/bash-bats/modules/clang.sh +# Testing MongoDB +./tests/bash-bats/modules/mongodb.sh + +## Needed to load eosio_build_ files properly; it can be empty +@test "${TEST_LABEL} > General" { + set_system_vars # Obtain current machine's resources and set the necessary variables (like JOBS, etc) + + [[ "$(echo ${VERSION_ID})" == "16.04" ]] && install-package build-essential WETRUN 1>/dev/null || install-package clang WETRUN 1>/dev/null + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -i /NEWPATH" + [[ ! -z $(echo "${output}" | grep "Executing: make -j${JOBS}") ]] || exit + [[ ! -z $(echo "${output}" | grep "Starting EOSIO Dependency Install") ]] || exit + [[ ! -z $(echo "${output}" | grep python.*found) ]] || exit + [[ ! -z $(echo "${output}" | grep make.*NOT.*found) ]] || exit + [[ ! -z $(echo "${output}" | grep /NEWPATH.*/src/boost) ]] || exit + [[ ! -z $(echo "${output}" | grep "make -j${CPU_CORES}") ]] || exit + [[ ! -z $(echo "${output}" | grep " --with-iostreams --with-date_time") ]] || exit # BOOST + if [[ "$(echo ${VERSION_ID})" == "18.04" ]]; then + [[ ! -z $(echo "${output}" | grep llvm-4.0.*found) ]] || exit + fi + [[ -z $(echo "${output}" | grep "- NOT found.") ]] || exit + [[ -z $(echo "${output}" | grep lcov.*found.) ]] || exit + [[ ! 
-z $(echo "${output}" | grep "EOSIO has been successfully built") ]] || exit + [[ "$(echo ${VERSION_ID})" == "16.04" ]] && apt autoremove build-essential -y || uninstall-package clang WETRUN +} diff --git a/tests/bash-bats/eosio_uninstall.sh b/tests/bash-bats/eosio_uninstall.sh new file mode 100644 index 00000000000..77e4779438a --- /dev/null +++ b/tests/bash-bats/eosio_uninstall.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bats +load helpers/general + +SCRIPT_LOCATION="scripts/eosio_uninstall.sh" +TEST_LABEL="[eosio_uninstall]" + +mkdir -p $SRC_DIR +mkdir -p $OPT_DIR +mkdir -p $VAR_DIR +mkdir -p $BIN_DIR +mkdir -p $VAR_DIR/log +mkdir -p $ETC_DIR +mkdir -p $LIB_DIR +mkdir -p $MONGODB_LOG_DIR +mkdir -p $MONGODB_DATA_DIR + +# A helper function is available to show output and status: `debug` + +@test "${TEST_LABEL} > Usage is visible with right interaction" { + run ./$SCRIPT_LOCATION -help + [[ $output =~ "Usage ---" ]] || exit + run ./$SCRIPT_LOCATION --help + [[ $output =~ "Usage ---" ]] || exit + run ./$SCRIPT_LOCATION help + [[ $output =~ "Usage ---" ]] || exit + run ./$SCRIPT_LOCATION blah + [[ $output =~ "Usage ---" ]] || exit +} + +@test "${TEST_LABEL} > Testing user prompts" { + ## No y or no warning and re-prompt + run bash -c "echo -e \"\nx\nx\nx\" | ./$SCRIPT_LOCATION" + ( [[ "${lines[${#lines[@]}-1]}" == "Please type 'y' for yes or 'n' for no." ]] && [[ "${lines[${#lines[@]}-2]}" == "Please type 'y' for yes or 'n' for no." ]] ) || exit + ## All yes pass + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION" + [[ "${output##*$'\n'}" == "[EOSIO Removal Complete]" ]] || exit + ## First no shows "Cancelled..." + run bash -c "echo \"n\" | ./$SCRIPT_LOCATION" + [[ "${output##*$'\n'}" =~ "Cancelled EOSIO Removal!" ]] || exit + ## What would you like to do?" + run bash -c "echo \"\" | ./$SCRIPT_LOCATION" + [[ "${output##*$'\n'}" =~ "What would you like to do?" ]] || exit +} + +@test "${TEST_LABEL} > Testing executions" { + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION" + [[ "${output[*]}" =~ "Executing: rm -rf" ]] || exit + if [[ $ARCH == "Darwin" ]]; then + [[ "${output}" =~ "Executing: brew uninstall cmake --force" ]] || exit + fi +} + +@test "${TEST_LABEL} > --force" { + run ./$SCRIPT_LOCATION --force + # Make sure we reach the end + [[ "${output##*$'\n'}" == "[EOSIO Removal Complete]" ]] || exit +} + +@test "${TEST_LABEL} > --force + --full" { + run ./$SCRIPT_LOCATION --force --full + ([[ ! "${output[*]}" =~ "Library/Application\ Support/eosio" ]] && [[ ! "${output[*]}" =~ ".local/share/eosio" ]]) && exit + [[ "${output##*$'\n'}" == "[EOSIO Removal Complete]" ]] || exit +} + +rm -rf $SRC_DIR +rm -rf $OPT_DIR +rm -rf $VAR_DIR +rm -rf $BIN_DIR +rm -rf $VAR_DIR/log +rm -rf $ETC_DIR +rm -rf $LIB_DIR +rm -rf $MONGODB_LOG_DIR +rm -rf $MONGODB_DATA_DIR \ No newline at end of file diff --git a/tests/bash-bats/helpers.sh b/tests/bash-bats/helpers.sh new file mode 100644 index 00000000000..98c756ac836 --- /dev/null +++ b/tests/bash-bats/helpers.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bats +load helpers/general + +# A helper function is available to show output and status: `debug` + +# Load helpers (BE CAREFUL) +. 
./scripts/helpers/general.sh
+
+TEST_LABEL="[helpers]"
+
+@test "${TEST_LABEL} > execute > dryrun" {
+ ## DRYRUN WORKS (true, false, and empty)
+ run execute exit 1
+ ( [[ $output =~ "Executing: exit 1" ]] && [[ $status -eq 0 ]] ) || exit
+ DRYRUN=false
+ run execute exit 1
+ ( [[ $output =~ "Executing: exit 1" ]] && [[ $status -eq 1 ]] ) || exit
+}
+
+@test "${TEST_LABEL} > execute > verbose" {
+ ## VERBOSE WORKS (true, false, and empty)
+ run execute echo "VERBOSE!"
+ ( [[ $output =~ "Executing: echo VERBOSE!" ]] && [[ $status -eq 0 ]] ) || exit
+ VERBOSE=false
+ run execute echo "VERBOSE!"
+ ( [[ ! $output =~ "Executing: echo VERBOSE!" ]] && [[ $status -eq 0 ]] ) || exit
+ VERBOSE=
+ run execute echo "VERBOSE!"
+ ( [[ ! $output =~ "Executing: echo VERBOSE!" ]] && [[ $status -eq 0 ]] ) || exit
+}
+
+@test "${TEST_LABEL} > install directory prompt" {
+ NONINTERACTIVE=true
+ PROCEED=true
+ run install-directory-prompt
+ # Use default location
+ [[ ! -z $(echo "${output}" | grep "home") ]] || exit
+ NONINTERACTIVE=false
+ PROCEED=false
+ INSTALL_LOCATION="/etc/eos"
+ run install-directory-prompt
+ # Function received the given input.
+ [[ ! -z $(echo "${output}") ]] || exit
+}
+
+@test "${TEST_LABEL} > previous install prompt" {
+ NONINTERACTIVE=true
+ PROCEED=true
+ # Doesn't exist, no output
+ run previous-install-prompt
+ [[ -z $(echo "${output}") ]] || exit
+ # Exists, prompt
+ mkdir -p $EOSIO_INSTALL_DIR
+ run previous-install-prompt
+ [[ ! -z $(echo "${output}" | grep "EOSIO has already been installed into ${EOSIO_INSTALL_DIR}") ]] || exit
+ rm -rf $EOSIO_INSTALL_DIR
+}
+
+@test "${TEST_LABEL} > TEMP_DIR" {
+ run setup
+ [[ -z $(echo "${output}" | grep "Executing: rm -rf ${REPO_ROOT}/../tmp/*") ]] || exit
+ [[ -z $(echo "${output}" | grep "Executing: mkdir -p ${REPO_ROOT}/../tmp") ]] || exit
+} \ No newline at end of file
diff --git a/tests/bash-bats/helpers/functions.sh b/tests/bash-bats/helpers/functions.sh
new file mode 100644
index 00000000000..f8edaea624a
--- /dev/null
+++ b/tests/bash-bats/helpers/functions.sh
@@ -0,0 +1,32 @@
+. ./scripts/helpers/eosio.sh # Obtain dependency versions and paths
+
+function debug() {
+ printf " ---------\\n STATUS: ${status}\\n${output}\\n ---------\\n\\n" >&3
+}
+
+function setup-bats-dirs () {
+ if [[ ! $HOME =~ "/$(whoami)" ]]; then
+ mkdir -p $HOME
+ fi
+ if [[ $TEMP_DIR =~ "${HOME}" ]]; then # Protection
+ mkdir -p $TEMP_DIR
+ rm -rf $TEMP_DIR/*
+ fi
+}
+setup-bats-dirs
+
+function teardown() { # teardown is run once after each test, even if it fails
+ [[ -d "$HOME" ]] && rm -rf "$HOME"
+ if [[ $ARCH == "Linux" ]]; then
+ uninstall-package which WETRUN
+ export SUDO_FORCE_REMOVE=yes
+ uninstall-package sudo WETRUN
+ uninstall-package devtoolset-8* WETRUN
+ uninstall-package centos-release-scl
+ uninstall-package gcc-c++ WETRUN
+ if [[ $NAME == 'Ubuntu' ]]; then
+ [[ ! $( dpkg -s build-essential 2>/dev/null ) ]] && apt autoremove build-essential -y &>/dev/null
+ fi
+ fi
+}
+trap teardown EXIT \ No newline at end of file
diff --git a/tests/bash-bats/helpers/general.sh b/tests/bash-bats/helpers/general.sh
new file mode 100644
index 00000000000..2e9dd206bfe
--- /dev/null
+++ b/tests/bash-bats/helpers/general.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bats
+
+# You can add `load helpers/general` to the .bats files you create to include anything in this file.
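+# For example, a minimal .bats file using this helper might look like (hypothetical, for illustration):
+#   #!/usr/bin/env bats
+#   load helpers/general
+#   @test "example" { run echo "hi"; [[ $status -eq 0 ]] || exit; }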
+# DO NOT REMOVE +export DRYRUN=true +export VERBOSE=true +export BATS_RUN=true +export CURRENT_USER=$(whoami) +export HOME="$BATS_TMPDIR/bats-eosio-user-home" # Ensure $HOME is available for all scripts +load helpers/functions + +if [[ $NAME == "Ubuntu" ]]; then # Ubuntu won't find any packages until this runs + ensure update only runs once + [[ -z $(find /tmp/apt-updated -mmin -60 2>/dev/null) ]] && apt-get update &>/dev/null + [[ ! -f /tmp/apt-updated ]] && touch /tmp/apt-updated +else + [[ $ARCH != "Darwin" ]] && yum -y update &>/dev/null +fi + +# Ensure we're in the root directory to execute +if [[ ! -d "tests" ]] && [[ ! -f "README.md" ]]; then + echo "You must navigate into the root directory to execute tests..." >&3 + exit 1 +fi \ No newline at end of file diff --git a/tests/bash-bats/modules/clang.sh b/tests/bash-bats/modules/clang.sh new file mode 100755 index 00000000000..4033dc4c66d --- /dev/null +++ b/tests/bash-bats/modules/clang.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bats +load ../helpers/functions + +@test "${TEST_LABEL} > Testing CLANG" { + + if [[ $NAME == "Darwin" ]]; then + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -i /NEWPATH" + ## CLANG already exists (c++/default) + [[ ! -z $(echo "${output}" | grep "PIN_COMPILER: true") ]] || exit + [[ ! -z $(echo "${output}" | grep "DCMAKE_CXX_COMPILER='c++'") ]] || exit + [[ ! -z $(echo "${output}" | grep "DCMAKE_C_COMPILER='cc'") ]] || exit + elif [[ $NAME == "Ubuntu" ]]; then + install-package build-essential WETRUN 1>/dev/null # ubuntu 18 build-essential will be high enough, 16 won't and has a version < 7 + run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -i /NEWPATH" + ## CLANG already exists (c++/default) (Ubuntu doesn't have clang>7, so we need to make sure it installs Clang 8) + [[ ! -z $(echo "${output}" | grep "PIN_COMPILER: false") ]] || exit + # if [[ $VERSION_ID == "16.04" ]]; then + # [[ ! -z $(echo "${output}" | grep "Unable to find compiler") ]] || exit + # [[ ! -z $(echo "${output}" | grep "Clang 8 successfully installed") ]] || exit + # [[ ! -z $(echo "${output}" | grep "$CLANG_ROOT") ]] || exit + # fi + ## CLANG + apt autoremove build-essential -y 1>/dev/null + run bash -c "./$SCRIPT_LOCATION -y -P" + [[ ! -z $(echo "${output}" | grep "PIN_COMPILER: true") ]] || exit + [[ ! -z $(echo "${output}" | grep "Clang 8 successfully installed") ]] || exit + [[ ! -z $(echo "${output}" | grep -E "Clang.*successfully installed @ ${CLANG_ROOT}") ]] || exit + fi + ## CXX doesn't exist + export CXX=c2234 + export CC=ewwqd + run bash -c "./$SCRIPT_LOCATION -y" + [[ ! -z $(echo "${output}" | grep "Unable to find .* compiler") ]] || exit + +} \ No newline at end of file diff --git a/tests/bash-bats/modules/cmake.sh b/tests/bash-bats/modules/cmake.sh new file mode 100755 index 00000000000..89b5b6dd883 --- /dev/null +++ b/tests/bash-bats/modules/cmake.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bats +load ../helpers/functions + +@test "${TEST_LABEL} > Testing CMAKE" { + # Testing for if CMAKE already exists + export CMAKE=${HOME}/cmake + touch $CMAKE + run bash -c " ./$SCRIPT_LOCATION -y -P" + [[ ! -z $(echo "${output}" | grep "Executing: bash -c ${HOME}/cmake") ]] || exit + # Testing for if cmake doesn't exist to be sure it's set properly + export CMAKE= + run bash -c "./$SCRIPT_LOCATION -y -P" + if [[ $ARCH == "Darwin" ]]; then + [[ ! -z $(echo "${output}" | grep "Executing: bash -c /usr/local/bin/cmake -DCMAKE_BUILD") ]] || exit + else + [[ ! 
-z $(echo "${output}" | grep "Executing: bash -c ${BIN_DIR}/cmake") ]] || exit
+ [[ ! -z $(echo "${output}" | grep "CMAKE successfully installed") ]] || exit
+ fi
+} \ No newline at end of file
diff --git a/tests/bash-bats/modules/dep_script_options.sh b/tests/bash-bats/modules/dep_script_options.sh
new file mode 100755
index 00000000000..e1519a2f890
--- /dev/null
+++ b/tests/bash-bats/modules/dep_script_options.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bats
+load ../helpers/functions
+
+@test "${TEST_LABEL} > Testing Options" {
+ mkdir -p $HOME/test/tmp
+ run bash -c "./$SCRIPT_LOCATION -y -P -i $HOME/test -b /boost_tmp -m"
+ # echo $output >&3
+ [[ ! -z $(echo "${output}" | grep "CMAKE_INSTALL_PREFIX='${HOME}/test") ]] || exit
+ [[ ! -z $(echo "${output}" | grep "@ /boost_tmp") ]] || exit
+ [[ ! -z $(echo "${output}" | grep "Ensuring MongoDB installation") ]] || exit
+ [[ ! -z $(echo "${output}" | grep "MongoDB C driver successfully installed") ]] || exit
+ if [[ $ARCH == "Linux" ]]; then
+ ( [[ ! -z $(echo "${output}" | grep "ENABLE_SNAPPY=OFF -DCMAKE_TOOLCHAIN_FILE='$BUILD_DIR/pinned_toolchain.cmake'") ]] ) || exit # MongoDB C driver install
+ fi
+ [[ ! -z $(echo "${output}" | grep "CMAKE_TOOLCHAIN_FILE='$BUILD_DIR/pinned_toolchain.cmake' -DCMAKE_PREFIX_PATH") ]] || exit # cmake build
+ [[ ! -z $(echo "${output}" | grep "EOSIO has been successfully built") ]] || exit
+} \ No newline at end of file
diff --git a/tests/bash-bats/modules/mongodb.sh b/tests/bash-bats/modules/mongodb.sh
new file mode 100755
index 00000000000..d04b058b06b
--- /dev/null
+++ b/tests/bash-bats/modules/mongodb.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bats
+load ../helpers/functions
+
+@test "${TEST_LABEL} > MongoDB" {
+ # Existing MongoDB
+ if [[ $NAME == "CentOS Linux" ]] || [[ $NAME == "Amazon Linux" ]]; then
+ run bash -c "printf \"y\ny\nn\ny\ny\ny\n\" | ./$SCRIPT_LOCATION -m -P" # which prompt requires first y
+ else
+ run bash -c "printf \"y\nn\nn\ny\ny\n\" | ./$SCRIPT_LOCATION -m -P"
+ fi
+ [[ ! -z $(echo "${output}" | grep "Existing MongoDB will be used") ]] || exit
+ [[ -z $(echo "${output}" | grep "Ensuring MongoDB installation") ]] || exit
+ # Installing ours
+ run bash -c "printf \"y\ny\ny\ny\ny\ny\n\" | ./$SCRIPT_LOCATION -m -P"
+ [[ -z $(echo "${output}" | grep "Existing MongoDB will be used") ]] || exit
+ [[ ! -z $(echo "${output}" | grep "Ensuring MongoDB installation") ]] || exit
+}
diff --git a/tests/bash-bats/modules/root-user.sh b/tests/bash-bats/modules/root-user.sh
new file mode 100755
index 00000000000..5f4311766a3
--- /dev/null
+++ b/tests/bash-bats/modules/root-user.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bats
+load ../helpers/functions
+
+@test "${TEST_LABEL} > Testing root user run" {
+ run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -P -i /NEWPATH"
+ [[ ! -z $(echo "${output}" | grep "User: $(whoami)") ]] || exit
+ if [[ $ARCH == "Linux" ]]; then
+ [[ -z $(echo "${output}" | grep "$SUDO_LOCATION -E") ]] || exit
+ fi
+ export CURRENT_USER=test
+ run bash -c "printf \"y\nn\n\" | ./$SCRIPT_LOCATION -P"
+ [[ ! -z $(echo "${output}" | grep "User: test") ]] || exit
+ if [[ $ARCH == "Linux" ]]; then
+ [[ ! -z $(echo "${output}" | grep "Please install the 'sudo' command before proceeding") ]] || exit
+ fi
+ install-package sudo WETRUN
+ export SUDO_LOCATION=$( command -v sudo )
+ run bash -c "printf \"y\n%.0s\" {1..100} | ./$SCRIPT_LOCATION -P -i /NEWPATH"
+ [[ ! -z $(echo "${output}" | grep "User: test") ]] || exit
+ if [[ $ARCH == "Linux" ]]; then
+ [[ !
-z $(echo "${output}" | grep "$SUDO_LOCATION -E .* install -y .*") ]] || exit
+ fi
+
+} \ No newline at end of file
diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py
index 971228854d9..6a3ac94d511 100755
--- a/tests/consensus-validation-malicious-producers.py
+++ b/tests/consensus-validation-malicious-producers.py
@@ -246,7 +246,7 @@ def myTest(transWillEnterBlock):
 topo="mesh"
 delay=0
 Print("Stand up cluster")
- if cluster.launch(pnodes, total_nodes, topo, delay) is False:
+ if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False:
 error("Failed to stand up eos cluster.")
 return False
diff --git a/tests/db_modes_test.sh b/tests/db_modes_test.sh
new file mode 100755
index 00000000000..f29b8ffcb51
--- /dev/null
+++ b/tests/db_modes_test.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash

+# This test is intended to verify that switching between DB modes "just works". Additionally
+# it tries to make sure the dirty bit behaves as expected even in heap mode.
+
+set -euo pipefail
+
+VERBOSE=0
+TEST_LOCKED_MODE=0
+
+while getopts ":lv" opt; do
+ case ${opt} in
+ l)
+ TEST_LOCKED_MODE=1
+ ;;
+ v)
+ VERBOSE=1
+ set -o xtrace
+ ;;
+ \?)
+ echo "Use -v for verbose; -l to enable test of locked mode"
+ exit 1;
+ ;;
+ :)
+ echo "Invalid option"
+ exit 1;
+ ;;
+ esac
+done
+
+EOSIO_STUFF_DIR=$(mktemp -d)
+trap "rm -rf $EOSIO_STUFF_DIR" EXIT
+NODEOS_LAUNCH_PARAMS="./programs/nodeos/nodeos -d $EOSIO_STUFF_DIR --config-dir $EOSIO_STUFF_DIR \
+--chain-state-db-size-mb 8 --chain-state-db-guard-size-mb 0 --reversible-blocks-db-size-mb 1 \
+--reversible-blocks-db-guard-size-mb 0 -e -peosio"
+
+run_nodeos() {
+ if (( $VERBOSE == 0 )); then
+ $NODEOS_LAUNCH_PARAMS --http-server-address '' --p2p-listen-endpoint '' "$@" 2>/dev/null &
+ else
+ $NODEOS_LAUNCH_PARAMS --http-server-address '' --p2p-listen-endpoint '' "$@" &
+ fi
+}
+
+run_expect_success() {
+ run_nodeos "$@"
+ local NODEOS_PID=$!
+ sleep 5
+ kill $NODEOS_PID
+ wait $NODEOS_PID
+}
+
+run_and_kill() {
+ run_nodeos "$@"
+ local NODEOS_PID=$!
+ sleep 5
+ kill -KILL $NODEOS_PID
+ ! wait $NODEOS_PID
+}
+
+run_expect_failure() {
+ run_nodeos "$@"
+ local NODEOS_PID=$!
+ MYPID=$$
+ (sleep 10; kill -ALRM $MYPID) & local TIMER_PID=$!
+ trap "kill $NODEOS_PID; wait $NODEOS_PID; exit 1" ALRM
+ sleep 5
+ if wait $NODEOS_PID; then exit 1; fi
+ kill $TIMER_PID
+ trap ALRM
+}
+
+#new chain with mapped mode
+run_expect_success --delete-all-blocks
+#use previous DB with heap mode
+run_expect_success --database-map-mode heap
+#test lock mode if enabled
+if (( $TEST_LOCKED_MODE == 1 )); then
+ run_expect_success --database-map-mode locked
+fi
+#locked mode should fail when it's not possible to lock anything
+ulimit -l 0
+run_expect_failure --database-map-mode locked
+#But shouldn't result in the dirty flag staying set; so next launch should run
+run_expect_success
+#Try killing with KILL
+run_and_kill
+#should be dirty now
+run_expect_failure
+#should also still be dirty in heap mode
+run_expect_failure --database-map-mode heap
+
+#start over again! 
but this time start with heap mode
+run_expect_success --delete-all-blocks --database-map-mode heap
+#Then switch back to mapped
+run_expect_success
+#try killing it while in heap mode
+run_and_kill --database-map-mode heap
+#should be dirty if we run in either mode
+run_expect_failure --database-map-mode heap
+run_expect_failure
diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py
index 5b302dcf141..9a02d5e6de4 100755
--- a/tests/distributed-transactions-test.py
+++ b/tests/distributed-transactions-test.py
@@ -10,7 +10,7 @@
 Print=Utils.Print
 errorExit=Utils.errorExit
-args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed","--p2p-plugin"
+args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed"
 ,"--dump-error-details","-v","--leave-running","--clean-run","--keep-logs"})
 pnodes=args.p
@@ -25,7 +25,6 @@
 dumpErrorDetails=args.dump_error_details
 killAll=args.clean_run
 keepLogs=args.keep_logs
-p2pPlugin=args.p2p_plugin
 killWallet=not dontKill
 killEosInstances=not dontKill
@@ -63,7 +62,7 @@
 (pnodes, total_nodes-pnodes, topo, delay))
 Print("Stand up cluster")
- if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False:
+ if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False:
 errorExit("Failed to stand up eos cluster.")
 Print ("Wait for Cluster stabilization")
diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp
index 6abeb325913..91fca59ef3b 100644
--- a/tests/get_table_tests.cpp
+++ b/tests/get_table_tests.cpp
@@ -38,6 +38,34 @@ using namespace fc;
 BOOST_AUTO_TEST_SUITE(get_table_tests)
+transaction_trace_ptr
+issue_tokens( TESTER& t, account_name issuer, account_name to, const asset& amount,
+ std::string memo = "", account_name token_contract = N(eosio.token) )
+{
+ signed_transaction trx;
+
+ trx.actions.emplace_back( t.get_action( token_contract, N(issue),
+ vector<permission_level>{{issuer, config::active_name}},
+ mutable_variant_object()
+ ("to", issuer.to_string())
+ ("quantity", amount)
+ ("memo", memo)
+ ) );
+
+ trx.actions.emplace_back( t.get_action( token_contract, N(transfer),
+ vector<permission_level>{{issuer, config::active_name}},
+ mutable_variant_object()
+ ("from", issuer.to_string())
+ ("to", to.to_string())
+ ("quantity", amount)
+ ("memo", memo)
+ ) );
+
+ t.set_transaction_headers(trx);
+ trx.sign( t.get_private_key( issuer, "active" ), t.control->get_chain_id() );
+ return t.push_transaction( trx );
+}
+
 BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try {
 produce_blocks(2);
@@ -60,11 +88,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try {
 // issue
 for (account_name a: accs) {
- push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object()
- ("to", name(a) )
- ("quantity", eosio::chain::asset::from_string("999.0000 SYS") )
- ("memo", "")
- );
+ issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("999.0000 SYS") );
 }
 produce_blocks(1);
@@ -136,11 +160,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try {
 // issue
 for (account_name a: accs) {
- push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object()
- ("to", name(a) )
- ("quantity", eosio::chain::asset::from_string("10000.0000 SYS") )
- ("memo", "")
- );
+ issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("10000.0000 SYS") );
 }
 produce_blocks(1);
@@ -151,11 +171,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try {
 push_action(N(eosio.token), N(create), N(eosio.token), act );
// issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("9999.0000 AAA") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("9999.0000 AAA") ); } produce_blocks(1); @@ -166,11 +182,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { push_action(N(eosio.token), N(create), N(eosio.token), act ); // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("7777.0000 CCC") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("7777.0000 CCC") ); } produce_blocks(1); @@ -181,11 +193,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { push_action(N(eosio.token), N(create), N(eosio.token), act ); // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("8888.0000 BBB") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("8888.0000 BBB") ); } produce_blocks(1); @@ -331,17 +339,13 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("10000.0000 SYS") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("10000.0000 SYS") ); } produce_blocks(1); - + set_code( config::system_account_name, contracts::eosio_system_wasm() ); set_abi( config::system_account_name, contracts::eosio_system_abi().data() ); - + base_tester::push_action(config::system_account_name, N(init), config::system_account_name, mutable_variant_object() ("version", 0) diff --git a/tests/launcher_test.py b/tests/launcher_test.py index b7d21b41179..bac5ed447a2 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -21,7 +21,7 @@ from core_symbol import CORE_SYMBOL args = TestHelper.parse_args({"--defproducera_prvt_key","--dump-error-details","--dont-launch","--keep-logs", - "-v","--leave-running","--clean-run","--p2p-plugin"}) + "-v","--leave-running","--clean-run"}) debug=args.v defproduceraPrvtKey=args.defproducera_prvt_key dumpErrorDetails=args.dump_error_details @@ -29,7 +29,6 @@ dontLaunch=args.dont_launch dontKill=args.leave_running killAll=args.clean_run -p2pPlugin=args.p2p_plugin Utils.Debug=debug cluster=Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey) @@ -53,7 +52,7 @@ cluster.cleanup() Print("Stand up cluster") pnodes=4 - if cluster.launch(pnodes=pnodes, totalNodes=pnodes, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=pnodes) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 4ef22ab082f..e4e3bc9469b 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -7,7 +7,6 @@ from WalletMgr import WalletMgr from Node import BlockType from Node import Node -from TestHelper import AppArgs from TestHelper import TestHelper import decimal @@ -115,7 +114,7 @@ def getMinHeadAndLib(prodNodes): args = 
TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
- "--p2p-plugin","--wallet-port"})
+ "--wallet-port"})
 Utils.Debug=args.v
 totalProducerNodes=2
 totalNonProducerNodes=1
@@ -128,7 +127,6 @@ def getMinHeadAndLib(prodNodes):
 dontKill=args.leave_running
 prodCount=args.prod_count
 killAll=args.clean_run
-p2pPlugin=args.p2p_plugin
 walletPort=args.wallet_port
 walletMgr=WalletMgr(True, port=walletPort)
@@ -156,8 +154,8 @@ def getMinHeadAndLib(prodNodes):
 # "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01)
 # and the only connection between those 2 groups is through the bridge node
- if cluster.launch(prodCount=prodCount, onlyBios=False, topo="bridge", pnodes=totalProducerNodes,
- totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin,
+ if cluster.launch(prodCount=prodCount, topo="bridge", pnodes=totalProducerNodes,
+ totalNodes=totalNodes, totalProducers=totalProducers,
 useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False:
 Utils.cmdError("launcher")
 Utils.errorExit("Failed to stand up eos cluster.")
diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py
new file mode 100755
index 00000000000..dd7c836e98f
--- /dev/null
+++ b/tests/nodeos_irreversible_mode_test.py
@@ -0,0 +1,407 @@
+#!/usr/bin/env python3
+
+from testUtils import Utils
+from Cluster import Cluster
+from WalletMgr import WalletMgr
+from Node import Node
+from Node import ReturnType
+from TestHelper import TestHelper
+from testUtils import Account
+
+import urllib.request
+import re
+import os
+import time
+import signal
+import subprocess
+import shutil
+
+
+###############################################################
+# nodeos_irreversible_mode_test
+# --dump-error-details
+# --keep-logs
+# -v --leave-running --clean-run
+###############################################################
+
+Print = Utils.Print
+errorExit = Utils.errorExit
+cmdError = Utils.cmdError
+relaunchTimeout = 10
+numOfProducers = 4
+totalNodes = 10
+
+# Parse command line arguments
+args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"})
+Utils.Debug = args.v
+killAll=args.clean_run
+dumpErrorDetails=args.dump_error_details
+dontKill=args.leave_running
+killEosInstances=not dontKill
+killWallet=not dontKill
+keepLogs=args.keep_logs
+
+# Setup cluster and its wallet manager
+walletMgr=WalletMgr(True)
+cluster=Cluster(walletd=True)
+cluster.setWalletMgr(walletMgr)
+
+def makeSnapshot(nodeId):
+ req = urllib.request.Request("http://127.0.0.1:{}/v1/producer/create_snapshot".format(8888 + int(nodeId)))
+ urllib.request.urlopen(req)
+
+def backupBlksDir(nodeId):
+ dataDir = Utils.getNodeDataDir(nodeId)
+ sourceDir = os.path.join(dataDir, "blocks")
+ destinationDir = os.path.join(os.path.dirname(dataDir), os.path.basename(dataDir) + "-backup", "blocks")
+ shutil.copytree(sourceDir, destinationDir)
+
+def recoverBackedupBlksDir(nodeId):
+ dataDir = Utils.getNodeDataDir(nodeId)
+ # Delete existing one and copy backed up one
+ existingBlocksDir = os.path.join(dataDir, "blocks")
+ backedupBlocksDir = os.path.join(os.path.dirname(dataDir), os.path.basename(dataDir) + "-backup", "blocks")
+ shutil.rmtree(existingBlocksDir, ignore_errors=True)
+ shutil.copytree(backedupBlocksDir, existingBlocksDir)
+
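+# Note: getLatestSnapshot below assumes snapshot filenames sort
+# lexicographically in creation order, so the newest snapshot ends up last.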
"snapshots") + snapshotDirContents = os.listdir(snapshotDir) + assert len(snapshotDirContents) > 0 + snapshotDirContents.sort() + return os.path.join(snapshotDir, snapshotDirContents[-1]) + + +def removeReversibleBlks(nodeId): + dataDir = Utils.getNodeDataDir(nodeId) + reversibleBlks = os.path.join(dataDir, "blocks", "reversible") + shutil.rmtree(reversibleBlks, ignore_errors=True) + +def removeState(nodeId): + dataDir = Utils.getNodeDataDir(nodeId) + state = os.path.join(dataDir, "state") + shutil.rmtree(state, ignore_errors=True) + +def getHeadLibAndForkDbHead(node: Node): + info = node.getInfo() + assert info is not None, "Fail to retrieve info from the node, the node is currently having a problem" + head = int(info["head_block_num"]) + lib = int(info["last_irreversible_block_num"]) + forkDbHead = int(info["fork_db_head_block_num"]) + return head, lib, forkDbHead + +# Wait for some time until LIB advance +def waitForBlksProducedAndLibAdvanced(): + requiredConfirmation = int(2 / 3 * numOfProducers) + 1 + maxNumOfBlksReqToConfirmLib = (12 * requiredConfirmation - 1) * 2 + # Give 6 seconds buffer time + bufferTime = 6 + timeToWait = maxNumOfBlksReqToConfirmLib / 2 + bufferTime + time.sleep(timeToWait) + +# Ensure that the relaunched node received blks from producers, in other words head and lib is advancing +def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): + head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) + waitForBlksProducedAndLibAdvanced() + headAfterWaiting, libAfterWaiting, forkDbHeadAfterWaiting = getHeadLibAndForkDbHead(nodeToTest) + assert headAfterWaiting > head and libAfterWaiting > lib and forkDbHeadAfterWaiting > forkDbHead, \ + "Either Head ({} -> {})/ Lib ({} -> {})/ Fork Db Head ({} -> {}) is not advancing".format(head, headAfterWaiting, lib, libAfterWaiting, forkDbHead, forkDbHeadAfterWaiting) + +# Confirm the head lib and fork db of irreversible mode +# Under any condition of irreversible mode: +# - forkDbHead >= head == lib +# headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check +# When comparing with the the state before node is switched: +# - head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode +def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None): + head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) + assert head == lib, "Head ({}) should be equal to lib ({})".format(head, lib) + assert forkDbHead >= head, "Fork db head ({}) should be larger or equal to the head ({})".format(forkDbHead, head) + + if headLibAndForkDbHeadBeforeSwitchMode: + headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode + assert head == libBeforeSwitchMode, "Head ({}) should be equal to lib before switch mode ({})".format(head, libBeforeSwitchMode) + assert lib == libBeforeSwitchMode, "Lib ({}) should be equal to lib before switch mode ({})".format(lib, libBeforeSwitchMode) + assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode, \ + "Fork db head ({}) should be equal to head before switch mode ({}) and fork db head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode, forkDbHeadBeforeSwitchMode) + +# Confirm the head lib and fork db of speculative mode +# Under any condition of speculative mode: +# - forkDbHead == head >= lib +# headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is 
+# Confirm the head lib and fork db of speculative mode
+# Under any condition of speculative mode:
+# - forkDbHead == head >= lib
+# headLibAndForkDbHeadBeforeSwitchMode should only be passed IF production is disabled; otherwise it provides an erroneous check
+# When comparing with the state before the node is switched:
+# - head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode
+def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None):
+ head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest)
+ assert head >= lib, "Head should be larger or equal to lib (head: {}, lib: {})".format(head, lib)
+ assert head == forkDbHead, "Head ({}) should be equal to fork db head ({})".format(head, forkDbHead)
+
+ if headLibAndForkDbHeadBeforeSwitchMode:
+ headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode
+ assert head == forkDbHeadBeforeSwitchMode, "Head ({}) should be equal to fork db head before switch mode ({})".format(head, forkDbHeadBeforeSwitchMode)
+ assert lib == headBeforeSwitchMode and lib == libBeforeSwitchMode, \
+ "Lib ({}) should be equal to head before switch mode ({}) and lib before switch mode ({})".format(lib, headBeforeSwitchMode, libBeforeSwitchMode)
+ assert forkDbHead == forkDbHeadBeforeSwitchMode, \
+ "Fork db head ({}) should be equal to fork db head before switch mode ({}) ".format(forkDbHead, forkDbHeadBeforeSwitchMode)
+
+def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"):
+ isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout, cachePopen=True)
+ time.sleep(1) # Give a second to replay or resync if needed
+ assert isRelaunchSuccess, relaunchAssertMessage
+ return isRelaunchSuccess
+
+# List to contain the test result messages
+testResultMsgs = []
+testSuccessful = False
+try:
+ # Kill any existing instances and launch cluster
+ TestHelper.printSystemInfo("BEGIN")
+ cluster.killall(allInstances=killAll)
+ cluster.cleanup()
+ cluster.launch(
+ prodCount=numOfProducers,
+ totalProducers=numOfProducers,
+ totalNodes=totalNodes,
+ pnodes=1,
+ useBiosBootFile=False,
+ topo="mesh",
+ specificExtraNodeosArgs={
+ 0:"--enable-stale-production",
+ 4:"--read-mode irreversible",
+ 6:"--read-mode irreversible",
+ 9:"--plugin eosio::producer_api_plugin"})
+
+ producingNodeId = 0
+ producingNode = cluster.getNode(producingNodeId)
+
+ def stopProdNode():
+ if not producingNode.killed:
+ producingNode.kill(signal.SIGTERM)
+
+ def startProdNode():
+ if producingNode.killed:
+ relaunchNode(producingNode, producingNodeId)
+
+ # Give some time for it to produce, so lib is advancing
+ waitForBlksProducedAndLibAdvanced()
+
+ # Kill all nodes, so we can test each node in an isolated environment
+ for clusterNode in cluster.nodes:
+ clusterNode.kill(signal.SIGTERM)
+ cluster.biosNode.kill(signal.SIGTERM)
+
+ # Wrapper function to execute test
+ # This wrapper function will resurrect the node to be tested, and shut it down by the end of the test
+ def executeTest(nodeIdOfNodeToTest, runTestScenario):
+ testResult = False
+ try:
+ # Relaunch killed node so it can be used for the test
+ nodeToTest = cluster.getNode(nodeIdOfNodeToTest)
+ relaunchNode(nodeToTest, nodeIdOfNodeToTest, relaunchAssertMessage="Fail to relaunch before running test scenario")
+
+ # Run test scenario
+ runTestScenario(nodeIdOfNodeToTest, nodeToTest)
+ testResultMsgs.append("!!!TEST CASE #{} ({}) IS SUCCESSFUL".format(nodeIdOfNodeToTest, runTestScenario.__name__))
+ testResult = True
+ except Exception as e:
+ testResultMsgs.append("!!!BUG IS CONFIRMED ON TEST CASE #{} ({}): 
{}".format(nodeIdOfNodeToTest, runTestScenario.__name__, e)) + finally: + # Kill node after use + if not nodeToTest.killed: nodeToTest.kill(signal.SIGTERM) + return testResult + + # 1st test case: Replay in irreversible mode with reversible blks + # Expectation: Node replays and launches successfully and forkdb head, head, and lib matches the irreversible mode expectation + def replayInIrrModeWithRevBlks(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + + # Kill node and replay in irreversible mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 2nd test case: Replay in irreversible mode without reversible blks + # Expectation: Node replays and launches successfully and forkdb head, head, and lib matches the irreversible mode expectation + def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + + # Shut down node, remove reversible blks and relaunch + nodeToTest.kill(signal.SIGTERM) + removeReversibleBlks(nodeIdOfNodeToTest) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + + # Ensure the node condition is as expected after relaunch + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 3rd test case: Switch mode speculative -> irreversible without replay + # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the irreversible mode expectation + def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + + # Kill and relaunch in irreversible mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") + + # Ensure the node condition is as expected after relaunch + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 4th test case: Switch mode irreversible -> speculative without replay + # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the speculative mode expectation + def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + + # Kill and relaunch in speculative mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) + + # Ensure the node condition is as expected after relaunch + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 5th test case: Switch mode speculative -> irreversible without replay and connected to producing node + # Expectation: Node switches mode successfully + # and the head and lib should be advancing after some blocks produced + # and forkdb head, head, and lib matches the irreversible mode expectation + def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): + try: + startProdNode() + + # Kill and relaunch in irreversible mode + nodeToTest.kill(signal.SIGTERM) + waitForBlksProducedAndLibAdvanced() 
# Wait for some blks to be produced and lib to advance
+ relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible")
+
+ # Ensure the node condition is as expected after relaunch
+ ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest)
+ confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest)
+ finally:
+ stopProdNode()
+
+ # 6th test case: Switch mode irreversible -> speculative without replay and connected to producing node
+ # Expectation: Node switches mode successfully
+ # and the head and lib should be advancing after some blocks produced
+ # and forkdb head, head, and lib match the speculative mode expectation
+ def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest):
+ try:
+ startProdNode()
+
+ # Kill and relaunch in speculative mode
+ nodeToTest.kill(signal.SIGTERM)
+ waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib to advance
+ relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"})
+
+ # Ensure the node condition is as expected after relaunch
+ ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest)
+ confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest)
+ finally:
+ stopProdNode()
+
+ # 7th test case: Replay in irreversible mode with reversible blks while connected to producing node
+ # Expectation: Node replays and launches successfully
+ # and the head and lib should be advancing after some blocks produced
+ # and forkdb head, head, and lib match the irreversible mode expectation
+ def replayInIrrModeWithRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest):
+ try:
+ startProdNode()
+ # Kill node and replay in irreversible mode
+ nodeToTest.kill(signal.SIGTERM)
+ waitForBlksProducedAndLibAdvanced() # Wait
+ relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay")
+
+ # Ensure the node condition is as expected after relaunch
+ ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest)
+ confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest)
+ finally:
+ stopProdNode()
+
+ # 8th test case: Replay in irreversible mode without reversible blks while connected to producing node
+ # Expectation: Node replays and launches successfully
+ # and the head and lib should be advancing after some blocks produced
+ # and forkdb head, head, and lib match the irreversible mode expectation
+ def replayInIrrModeWithoutRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest):
+ try:
+ startProdNode()
+
+ # Kill node, remove rev blks and then replay in irreversible mode
+ nodeToTest.kill(signal.SIGTERM)
+ removeReversibleBlks(nodeIdOfNodeToTest)
+ waitForBlksProducedAndLibAdvanced() # Wait
+ relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay")
+
+ # Ensure the node condition is as expected after relaunch
+ ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest)
+ confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest)
+ finally:
+ stopProdNode()
+
+ # 9th test case: Switch to speculative mode while using irreversible mode snapshots and using backed up speculative blocks
+ # Expectation: Node replays and launches successfully
+ # and the head and lib should be advancing after some blocks produced
+ # and forkdb head, head, and lib should stay the same after relaunch
+ def switchToSpecModeWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest):
+ try:
+ # Kill node and back up the blocks directory of speculative mode
+ headLibAndForkDbHeadBeforeShutdown = getHeadLibAndForkDbHead(nodeToTest)
+ nodeToTest.kill(signal.SIGTERM)
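+ # The blocks directory still holds the reversible blocks produced in
+ # speculative mode; back it up so it can be restored after the
+ # irreversible-mode snapshot is taken below.
+ 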
backupBlksDir(nodeIdOfNodeToTest) + + # Relaunch in irreversible mode and create the snapshot + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + makeSnapshot(nodeIdOfNodeToTest) + nodeToTest.kill(signal.SIGTERM) + + # Start from clean data dir, recover backed-up blocks, and then relaunch with irreversible snapshot + removeState(nodeIdOfNodeToTest) + recoverBackedupBlksDir(nodeIdOfNodeToTest) # this function will delete the existing blocks dir first + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --snapshot {}".format(getLatestSnapshot(nodeIdOfNodeToTest)), addOrSwapFlags={"--read-mode": "speculative"}) + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) + # Ensure it automatically replays "reversible blocks", i.e. head, lib and fork db head should be the same + headLibAndForkDbHeadAfterRelaunch = getHeadLibAndForkDbHead(nodeToTest) + assert headLibAndForkDbHeadBeforeShutdown == headLibAndForkDbHeadAfterRelaunch, \ + "Head, Lib, and Fork Db head after relaunch differ: {} vs {}".format(headLibAndForkDbHeadBeforeShutdown, headLibAndForkDbHeadAfterRelaunch) + + # Start production, wait until LIB advances, and ensure everything is alright + startProdNode() + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + + # Note the head, lib and fork db head + stopProdNode() + headLibAndForkDbHeadBeforeShutdown = getHeadLibAndForkDbHead(nodeToTest) + nodeToTest.kill(signal.SIGTERM) + + # Relaunch the node again (using the same snapshot) + # This time ensure it automatically replays both "irreversible blocks" and "reversible blocks", i.e. the end result should be the same as before shutdown + removeState(nodeIdOfNodeToTest) + relaunchNode(nodeToTest, nodeIdOfNodeToTest) + headLibAndForkDbHeadAfterRelaunch = getHeadLibAndForkDbHead(nodeToTest) + assert headLibAndForkDbHeadBeforeShutdown == headLibAndForkDbHeadAfterRelaunch, \ + "Head, Lib, and Fork Db head after relaunch differ: {} vs {}".format(headLibAndForkDbHeadBeforeShutdown, headLibAndForkDbHeadAfterRelaunch) + finally: + stopProdNode() + + # Start executing test cases here + testResults = [] + testResults.append( executeTest(1, replayInIrrModeWithRevBlks) ) + testResults.append( executeTest(2, replayInIrrModeWithoutRevBlks) ) + testResults.append( executeTest(3, switchSpecToIrrMode) ) + testResults.append( executeTest(4, switchIrrToSpecMode) ) + testResults.append( executeTest(5, switchSpecToIrrModeWithConnectedToProdNode) ) + testResults.append( executeTest(6, switchIrrToSpecModeWithConnectedToProdNode) ) + testResults.append( executeTest(7, replayInIrrModeWithRevBlksAndConnectedToProdNode) ) + testResults.append( executeTest(8, replayInIrrModeWithoutRevBlksAndConnectedToProdNode) ) + testResults.append( executeTest(9, switchToSpecModeWithIrrModeSnapshot) ) + + testSuccessful = all(testResults) +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + # Print test result + for msg in testResultMsgs: Print(msg) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py new file mode 100755 index 00000000000..be3324f969e --- /dev/null +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster, PFSetupPolicy +from TestHelper import TestHelper +from WalletMgr import 
WalletMgr +from Node import Node + +import signal +import json +import time +import os +from os.path import join, exists +from datetime import datetime + +# Parse command line arguments +args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running", + "--keep-logs", "--alternate-version-labels-file"}) +Utils.Debug=args.v +killAll=args.clean_run +dumpErrorDetails=args.dump_error_details +dontKill=args.leave_running +killEosInstances=not dontKill +killWallet=not dontKill +keepLogs=args.keep_logs +alternateVersionLabelsFile=args.alternate_version_labels_file + +walletMgr=WalletMgr(True) +cluster=Cluster(walletd=True) +cluster.setWalletMgr(walletMgr) + +def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None, nodeosPath=None): + if not node.killed: + node.kill(signal.SIGTERM) + isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags, + timeout=5, cachePopen=True, nodeosPath=nodeosPath) + assert isRelaunchSuccess, "Failed to relaunch" + +def shouldNodeContainPreactivateFeature(node): + preactivateFeatureDigest = node.getSupportedProtocolFeatureDict()["PREACTIVATE_FEATURE"]["feature_digest"] + assert preactivateFeatureDigest + blockHeaderState = node.getLatestBlockHeaderState() + activatedProtocolFeatures = blockHeaderState["activated_protocol_features"]["protocol_features"] + return preactivateFeatureDigest in activatedProtocolFeatures + +def waitUntilBeginningOfProdTurn(node, producerName, timeout=30, sleepTime=0.4): + def isDesiredProdTurn(): + headBlockNum = node.getHeadBlockNum() + res = node.getBlock(headBlockNum)["producer"] == producerName and \ + node.getBlock(headBlockNum-1)["producer"] != producerName + return res + Utils.waitForBool(isDesiredProdTurn, timeout, sleepTime) + +def waitForOneRound(): + time.sleep(24) # 4 producers x 12 blocks each at 0.5s per block = 24 seconds per round + +def setValidityOfActTimeSubjRestriction(node, nodeId, codename, valid): + invalidActTimeSubjRestriction = { + "earliest_allowed_activation_time": "2030-01-01T00:00:00.000", + } + validActTimeSubjRestriction = { + "earliest_allowed_activation_time": "1970-01-01T00:00:00.000", + } + actTimeSubjRestriction = validActTimeSubjRestriction if valid else invalidActTimeSubjRestriction + node.modifyBuiltinPFSubjRestrictions(nodeId, codename, actTimeSubjRestriction) + restartNode(node, nodeId) + +def waitUntilBlockBecomeIrr(node, blockNum, timeout=60): + def hasBlockBecomeIrr(): + return node.getIrreversibleBlockNum() >= blockNum + return Utils.waitForBool(hasBlockBecomeIrr, timeout) + +# Flag indicating whether the test succeeded +testSuccessful = False +try: + TestHelper.printSystemInfo("BEGIN") + cluster.killall(allInstances=killAll) + cluster.cleanup() + + # Create a cluster of 4 nodes, each node has 1 producer. The first 3 nodes use the latest version, + # while the 4th node uses the version that doesn't support protocol feature activation (i.e. 
1.7.0) + associatedNodeLabels = { + "3": "170" + } + Utils.Print("Alternate Version Labels File is {}".format(alternateVersionLabelsFile)) + assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist" + assert cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, + extraNodeosArgs=" --plugin eosio::producer_api_plugin ", + useBiosBootFile=False, + onlySetProds=True, + pfSetupPolicy=PFSetupPolicy.NONE, + alternateVersionLabelsFile=alternateVersionLabelsFile, + associatedNodeLabels=associatedNodeLabels), "Unable to launch cluster" + + newNodeIds = [0, 1, 2] + oldNodeId = 3 + newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds)) + oldNode = cluster.getNode(oldNodeId) + allNodes = [*newNodes, oldNode] + + def pauseBlockProductions(): + for node in allNodes: + if not node.killed: node.processCurlCmd("producer", "pause", "") + + def resumeBlockProductions(): + for node in allNodes: + if not node.killed: node.processCurlCmd("producer", "resume", "") + + def shouldNodesBeInSync(nodes:[Node]): + # Pause all block production to ensure the head is not moving + pauseBlockProductions() + time.sleep(1) # Wait for some time to ensure all blocks are propagated + headBlockIds = [] + for node in nodes: + headBlockId = node.getInfo()["head_block_id"] + headBlockIds.append(headBlockId) + resumeBlockProductions() + return len(set(headBlockIds)) == 1 + + # Before everything starts, all nodes (new version and old version) should be in sync + assert shouldNodesBeInSync(allNodes), "Nodes are not in sync before preactivation" + + # First, we are going to test the case where: + # - the 1st node has a valid earliest_allowed_activation_time + # - while the 2nd and 3rd nodes have an invalid earliest_allowed_activation_time + # The producer on the 1st node is going to activate PREACTIVATE_FEATURE during its turn + # Immediately, in the next block PREACTIVATE_FEATURE should be active on the 1st node, but not on the 2nd and 3rd + # Therefore, the 1st node will be out of sync with the 2nd, 3rd, and 4th nodes + # After a round has passed though, the 1st node will realize it is on the minority fork and then rejoin the other nodes + # Hence, the PREACTIVATE_FEATURE that was previously activated will be dropped and all of the nodes should be in sync + setValidityOfActTimeSubjRestriction(newNodes[1], newNodeIds[1], "PREACTIVATE_FEATURE", False) + setValidityOfActTimeSubjRestriction(newNodes[2], newNodeIds[2], "PREACTIVATE_FEATURE", False) + + waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + newNodes[0].activatePreactivateFeature() + assert shouldNodeContainPreactivateFeature(newNodes[0]), "1st node should contain PREACTIVATE FEATURE" + assert not (shouldNodeContainPreactivateFeature(newNodes[1]) or shouldNodeContainPreactivateFeature(newNodes[2])), \ + "2nd and 3rd node should not contain PREACTIVATE FEATURE" + assert shouldNodesBeInSync([newNodes[1], newNodes[2], oldNode]), "2nd, 3rd and 4th node should be in sync" + assert not shouldNodesBeInSync(allNodes), "1st node should be out of sync with the rest of the nodes" + + waitForOneRound() + + assert not shouldNodeContainPreactivateFeature(newNodes[0]), "PREACTIVATE_FEATURE should be dropped" + assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" + + # Then we set the earliest_allowed_activation_time of the 2nd and 3rd nodes to a valid value + # Once the 1st node activates PREACTIVATE_FEATURE, all of them should have PREACTIVATE_FEATURE activated in the next block + # They will be in sync and their LIB will advance since they control > 2/3 of the 
producers + # Also the LIB should be able to advance past the block that contains PREACTIVATE_FEATURE + # However, the 4th node will be out of sync with them, and its LIB will be stuck + setValidityOfActTimeSubjRestriction(newNodes[1], newNodeIds[1], "PREACTIVATE_FEATURE", True) + setValidityOfActTimeSubjRestriction(newNodes[2], newNodeIds[2], "PREACTIVATE_FEATURE", True) + + waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + libBeforePreactivation = newNodes[0].getIrreversibleBlockNum() + newNodes[0].activatePreactivateFeature() + + assert shouldNodesBeInSync(newNodes), "New nodes should be in sync" + assert not shouldNodesBeInSync(allNodes), "Nodes should not be in sync after preactivation" + for node in newNodes: assert shouldNodeContainPreactivateFeature(node), "New node should contain PREACTIVATE_FEATURE" + + activatedBlockNum = newNodes[0].getHeadBlockNum() # The PREACTIVATE_FEATURE should have been activated before or at this block num + assert waitUntilBlockBecomeIrr(newNodes[0], activatedBlockNum), \ + "1st node LIB should be able to advance past the block that contains PREACTIVATE_FEATURE" + assert newNodes[1].getIrreversibleBlockNum() >= activatedBlockNum and \ + newNodes[2].getIrreversibleBlockNum() >= activatedBlockNum, \ + "2nd and 3rd node LIB should also be able to advance past the block that contains PREACTIVATE_FEATURE" + assert oldNode.getIrreversibleBlockNum() <= libBeforePreactivation, \ + "4th node LIB should be stuck at the LIB before PREACTIVATE_FEATURE was activated" + + # Restart old node with newest version + # Before migrating to the new version, use --export-reversible-blocks with the old version + # and --import-reversible-blocks with the new version to ensure the compatibility of the reversible blocks + # Finally, when we restart the 4th node with the version of nodeos that supports protocol features, + # all nodes should be in sync, and the 4th node will also contain PREACTIVATE_FEATURE + portableRevBlkPath = os.path.join(Utils.getNodeDataDir(oldNodeId), "rev_blk_portable_format") + oldNode.kill(signal.SIGTERM) + # Note: the following relaunches will exit immediately (expected behavior of export/import), so the chainArg will not replace the old cmd + oldNode.relaunch(oldNodeId, chainArg="--export-reversible-blocks {}".format(portableRevBlkPath), timeout=1) + oldNode.relaunch(oldNodeId, chainArg="--import-reversible-blocks {}".format(portableRevBlkPath), timeout=1, nodeosPath="programs/nodeos/nodeos") + os.remove(portableRevBlkPath) + + restartNode(oldNode, oldNodeId, chainArg="--replay", nodeosPath="programs/nodeos/nodeos") + time.sleep(2) # Give some time to replay + + assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" + assert shouldNodeContainPreactivateFeature(oldNode), "4th node should contain PREACTIVATE_FEATURE" + + testSuccessful = True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/nodeos_protocol_feature_test.py b/tests/nodeos_protocol_feature_test.py new file mode 100755 index 00000000000..e42f934b2f4 --- /dev/null +++ b/tests/nodeos_protocol_feature_test.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster, PFSetupPolicy +from TestHelper import TestHelper +from WalletMgr import WalletMgr +from Node import Node + +import signal +import json +from os.path import join +from datetime import datetime + +# 
Parse command line arguments +args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"}) +Utils.Debug = args.v +killAll=args.clean_run +dumpErrorDetails=args.dump_error_details +dontKill=args.leave_running +killEosInstances=not dontKill +killWallet=not dontKill +keepLogs=args.keep_logs + +# The following test case will test the Protocol Feature JSON reader of the blockchain + +def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None): + if not node.killed: + node.kill(signal.SIGTERM) + isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags, timeout=5, cachePopen=True) + assert isRelaunchSuccess, "Failed to relaunch" + +walletMgr=WalletMgr(True) +cluster=Cluster(walletd=True) +cluster.setWalletMgr(walletMgr) + +# Flag indicating whether the test succeeded +testSuccessful = False +try: + TestHelper.printSystemInfo("BEGIN") + cluster.killall(allInstances=killAll) + cluster.cleanup() + cluster.launch(extraNodeosArgs=" --plugin eosio::producer_api_plugin ", + dontBootstrap=True, + pfSetupPolicy=PFSetupPolicy.NONE) + biosNode = cluster.biosNode + + # Modify the JSON file and then restart the node so it updates the internal state + newSubjectiveRestrictions = { + "earliest_allowed_activation_time": "2030-01-01T00:00:00.000", + "preactivation_required": True, + "enabled": False + } + biosNode.modifyBuiltinPFSubjRestrictions("bios", "PREACTIVATE_FEATURE", newSubjectiveRestrictions) + restartNode(biosNode, "bios") + + supportedProtocolFeatureDict = biosNode.getSupportedProtocolFeatureDict() + preactivateFeatureSubjectiveRestrictions = supportedProtocolFeatureDict["PREACTIVATE_FEATURE"]["subjective_restrictions"] + + # Ensure that the PREACTIVATE_FEATURE subjective restrictions match the value written in the JSON + assert preactivateFeatureSubjectiveRestrictions == newSubjectiveRestrictions,\ + "PREACTIVATE_FEATURE subjective restrictions are not updated according to the JSON" + + testSuccessful = True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 1db83f7f692..fbe7f8f6d05 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -23,7 +23,7 @@ args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" - ,"--sanity-test","--p2p-plugin","--wallet-port"}) + ,"--sanity-test","--wallet-port"}) server=args.host port=args.port debug=args.v @@ -38,7 +38,6 @@ onlyBios=args.only_bios killAll=args.clean_run sanityTest=args.sanity_test -p2pPlugin=args.p2p_plugin walletPort=args.wallet_port Utils.Debug=debug @@ -68,7 +67,7 @@ cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin) is False: + if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py new file mode 100755 index 00000000000..5934750d3a9 --- /dev/null +++ b/tests/nodeos_startup_catchup.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 + +from testUtils 
import Utils +import testUtils +import time +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import BlockType +from Node import Node +import signal +from TestHelper import AppArgs +from TestHelper import TestHelper + +import decimal +import math +import re + +############################################################### +# nodeos_startup_catchup +# Test configures a producing node and <--txn-gen-nodes count> non-producing nodes with the +# txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them +# to the producing node. +# 1) After 10 seconds a new node is started. +# 2) The node is allowed to catch up to the producing node +# 3) That node is killed +# 4) Restart the node +# 5) The node is allowed to catch up to the producing node +# 6) Repeat steps 2-5, <--catchup-count - 1> more times +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +from core_symbol import CORE_SYMBOL + +appArgs=AppArgs() +extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) +extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "-p","--wallet-port"}, applicationSpecificArgs=appArgs) +Utils.Debug=args.v +pnodes=args.p if args.p > 0 else 1 +startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2 +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +prodCount=args.prod_count if args.prod_count > 1 else 2 +killAll=args.clean_run +walletPort=args.wallet_port +catchupCount=args.catchup_count if args.catchup_count > 0 else 1 +totalNodes=startedNonProdNodes+pnodes+catchupCount + +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName=Utils.EosWalletName +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) + + cluster.killall(allInstances=killAll) + cluster.cleanup() + specificExtraNodeosArgs={} + txnGenNodeNum=pnodes # next node after producer nodes + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + specificExtraNodeosArgs[nodeNum]="--plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct" + Print("Stand up cluster") + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, + useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False: + Utils.errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + Print("Create txn generator nodes") + txnGenNodes=[] + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + txnGenNodes.append(cluster.getNode(nodeNum)) + + Print("Create accounts for generated txns") + txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) + + def lib(node): + return node.getBlockNum(BlockType.lib) + + def head(node): + return node.getBlockNum(BlockType.head) + + def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportInterval=20): + if not node.waitForBlock(blockNum, 
timeout=timeout, blockType=blockType, reportInterval=reportInterval): + info=node.getInfo() + headBlockNum=info["head_block_num"] + libBlockNum=info["last_irreversible_block_num"] + Utils.errorExit("Failed to get to %s block number %d. Last had head block number %d and lib %d" % (blockType, blockNum, headBlockNum, libBlockNum)) + + def waitForNodeStarted(node): + sleepTime=0 + while sleepTime < 10 and node.getInfo(silentErrors=True) is None: + time.sleep(1) + sleepTime+=1 + + node0=cluster.getNode(0) + + Print("Wait for account creation to be irreversible") + blockNum=head(node0) + waitForBlock(node0, blockNum, blockType=BlockType.lib) + + Print("Startup txn generation") + period=1500 + transPerPeriod=150 + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, period, transPerPeriod) + time.sleep(1) + + blockNum=head(node0) + timePerBlock=500 + blocksPerPeriod=period/timePerBlock + transactionsPerBlock=transPerPeriod/blocksPerPeriod + steadyStateWait=20 + startBlockNum=blockNum+steadyStateWait + numBlocks=20 + endBlockNum=startBlockNum+numBlocks + waitForBlock(node0, endBlockNum) + transactions=0 + avg=0 + for blockNum in range(startBlockNum, endBlockNum): + block=node0.getBlock(blockNum) + transactions+=len(block["transactions"]) + + avg=transactions / (blockNum - startBlockNum + 1) + + Print("Validate transactions are generating") + minRequiredTransactions=transactionsPerBlock + assert avg>minRequiredTransactions, "Expected to receive at least %s transactions per block, but only got %s" % (minRequiredTransactions, avg) + + Print("Cycle through catchup scenarios") + twoRounds=21*2*12 # blocks in two full rounds, assuming a 21-producer schedule at 12 blocks each + for catchup_num in range(0, catchupCount): + Print("Start catchup node") + cluster.launchUnstarted(cachePopen=True) + lastLibNum=lib(node0) + # verify producer lib is still advancing + waitForBlock(node0, lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + catchupNode=cluster.getNodes()[-1] + catchupNodeNum=cluster.getNodes().index(catchupNode) + waitForNodeStarted(catchupNode) + lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node %s's LIB is advancing" % (catchupNodeNum)) + # verify lib is advancing (before we wait for it to have to catchup with producer) + waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify catchup node is advancing to producer") + numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds + waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + + Print("Shutdown catchup node and validate exit code") + catchupNode.interruptAndVerifyExitStatus(60) + + Print("Restart catchup node") + catchupNode.relaunch(catchupNodeNum, cachePopen=True) + waitForNodeStarted(catchupNode) + lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node is advancing") + # verify catchup node is advancing to producer + waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify producer is still advancing LIB") + lastLibNum=lib(node0) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify catchup node is advancing to producer") + # verify catchup node is advancing to producer + waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + catchupNode.interruptAndVerifyExitStatus(60) + catchupNode.popenProc=None + + testSuccessful=True + +finally: + 
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + +exit(0) diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index ee728962b9f..fad398a860a 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -144,7 +144,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): from core_symbol import CORE_SYMBOL args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", - "--p2p-plugin","--wallet-port"}) + "--wallet-port"}) Utils.Debug=args.v totalNodes=4 cluster=Cluster(walletd=True) @@ -153,7 +153,6 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): dontKill=args.leave_running prodCount=args.prod_count killAll=args.clean_run -p2pPlugin=args.p2p_plugin walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) @@ -171,7 +170,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, useBiosBootFile=False) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py new file mode 100755 index 00000000000..200477c7356 --- /dev/null +++ b/tests/prod_preactivation_test.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster, PFSetupPolicy +from WalletMgr import WalletMgr +from Node import Node +from Node import ReturnType +from TestHelper import TestHelper + +import decimal +import re +import time + +############################################################### +# prod_preactivation_test +# --dump-error-details +# --keep-logs +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit +cmdError=Utils.cmdError +from core_symbol import CORE_SYMBOL + +args = TestHelper.parse_args({"--host","--port","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" + ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" + ,"--sanity-test","--wallet-port"}) +server=args.host +port=args.port +debug=args.v +enableMongo=args.mongodb +defproduceraPrvtKey=args.defproducera_prvt_key +defproducerbPrvtKey=args.defproducerb_prvt_key +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontLaunch=args.dont_launch +dontKill=args.leave_running +prodCount=2 +onlyBios=args.only_bios +killAll=args.clean_run +sanityTest=args.sanity_test +walletPort=args.wallet_port + +Utils.Debug=debug +localTest=True +cluster=Cluster(host=server, port=port, walletd=True, enableMongo=enableMongo, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill +dontBootstrap=sanityTest + +WalletdName=Utils.EosWalletName +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN prod_preactivation_test.py") + cluster.setWalletMgr(walletMgr) + 
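# Overview: publishing a new bios contract must fail until PREACTIVATE_FEATURE has been preactivated via the producer API and included in a produced block + 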
Print("SERVER: %s" % (server)) + Print("PORT: %d" % (port)) + + if enableMongo and not cluster.isMongodDbRunning(): + errorExit("MongoDb doesn't seem to be running.") + + if localTest and not dontLaunch: + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=1, onlyBios=onlyBios, + dontBootstrap=dontBootstrap, useBiosBootFile=False, + pfSetupPolicy=PFSetupPolicy.NONE, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: + cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + node = cluster.getNode(0) + cmd = "curl %s/v1/producer/get_supported_protocol_features" % (node.endpointHttp) + Print("try to get supported feature list from Node 0 with cmd: %s" % (cmd)) + feature0=Utils.runCmdReturnJson(cmd) + + node = cluster.getNode(1) + cmd = "curl %s/v1/producer/get_supported_protocol_features" % (node.endpointHttp) + Print("try to get supported feature list from Node 1 with cmd: %s" % (cmd)) + feature1=Utils.runCmdReturnJson(cmd) + + if feature0 != feature1: + errorExit("feature list mismatch between node 0 and node 1") + else: + Print("feature list from node 0 matches with that from node 1") + + if len(feature0) == 0: + errorExit("No supported feature list") + + digest = "" + for i in range(0, len(feature0)): + feature = feature0[i] + if feature["specification"][0]["value"] != "PREACTIVATE_FEATURE": + continue + else: + digest = feature["feature_digest"] + + if len(digest) == 0: + errorExit("code name PREACTIVATE_FEATURE not found") + + Print("found digest ", digest, " of PREACTIVATE_FEATURE") + + node0 = cluster.getNode(0) + contract="eosio.bios" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + + Print("publish a new bios contract %s should fails because env.is_feature_activated unresolveable" % (contractDir)) + retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) + + if retMap["output"].decode("utf-8").find("unresolveable") < 0: + errorExit("bios contract not result in expected unresolveable error") + + secwait = 30 + Print("Wait for node 1 to produce...") + node = cluster.getNode(1) + while secwait > 0: + info = node.getInfo() + if info["head_block_producer"] >= "defproducerl" and info["head_block_producer"] <= "defproduceru": + break + time.sleep(1) + secwait = secwait - 1 + + secwait = 30 + Print("Waiting until node 0 start to produce...") + node = cluster.getNode(1) + while secwait > 0: + info = node.getInfo() + if info["head_block_producer"] >= "defproducera" and info["head_block_producer"] <= "defproducerk": + break + time.sleep(1) + secwait = secwait - 1 + + if secwait <= 0: + errorExit("No producer of node 0") + + cmd = "curl --data-binary '{\"protocol_features_to_activate\":[\"%s\"]}' %s/v1/producer/schedule_protocol_feature_activations" % (digest, node.endpointHttp) + + Print("try to preactivate feature on node 1, cmd: %s" % (cmd)) + result = Utils.runCmdReturnJson(cmd) + + if result["result"] != "ok": + errorExit("failed to preactivate feature from producer plugin on node 1") + else: + Print("feature PREACTIVATE_FEATURE (%s) preactivation success" % (digest)) + + time.sleep(0.6) + Print("publish a new bios contract %s should fails because node1 is not producing block yet" % (contractDir)) + retMap = node0.publishContract("eosio", contractDir, wasmFile, 
abiFile, True, shouldFail=True) + if retMap["output"].decode("utf-8").find("unresolveable") < 0: + errorExit("bios contract did not result in expected unresolveable error") + + Print("now wait for node 1 to produce a block...") + secwait = 30 # wait for node 1 to produce a block + while secwait > 0: + info = node.getInfo() + if info["head_block_producer"] >= "defproducerl" and info["head_block_producer"] <= "defproduceru": + break + time.sleep(1) + secwait = secwait - 1 + + if secwait <= 0: + errorExit("No blocks produced by node 1") + + time.sleep(0.6) + retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True) + Print("successfully set new contract with new intrinsic!!!") + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 6b3c217d75d..8d0f8721c10 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -26,7 +26,7 @@ Print=Utils.Print errorExit=Utils.errorExit -args=TestHelper.parse_args({"-p","-d","-s","-c","--kill-sig","--kill-count","--keep-logs","--p2p-plugin" +args=TestHelper.parse_args({"-p","-d","-s","-c","--kill-sig","--kill-count","--keep-logs" ,"--dump-error-details","-v","--leave-running","--clean-run"}) pnodes=args.p topo=args.s @@ -40,7 +40,6 @@ dumpErrorDetails=args.dump_error_details keepLogs=args.keep_logs killAll=args.clean_run -p2pPlugin=args.p2p_plugin seed=1 Utils.Debug=debug @@ -66,7 +65,7 @@ pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/testUtils.py b/tests/testUtils.py index 5964fac068c..9c19a8e5f9e 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -36,6 +36,8 @@ class Utils: EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" FileDivider="=================================================================" + DataDir="var/lib/" + ConfigDir="etc/eosio/" @staticmethod def Print(*args, **kwargs): @@ -65,6 +67,38 @@ def setIrreversibleTimeout(timeout): def setSystemWaitTimeout(timeout): Utils.systemWaitTimeout=timeout + @staticmethod + def getDateString(dt): + return "%d_%02d_%02d_%02d_%02d_%02d" % ( + dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + + @staticmethod + def nodeExtensionToName(ext): + r"""Convert node extension (bios, 0, 1, etc) to node name. 
""" + prefix="node_" + if ext == "bios": + return prefix + ext + + return "node_%02d" % (ext) + + @staticmethod + def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + path=os.path.join(path, "") + return path + + @staticmethod + def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.ConfigDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + path=os.path.join(path, "") + return path + @staticmethod def getChainStrategies(): chainSyncStrategies={} @@ -108,7 +142,7 @@ def cmdError(name, cmdCode=0): Utils.Print(msg) @staticmethod - def waitForObj(lam, timeout=None): + def waitForObj(lam, timeout=None, sleepTime=3, reporter=None): if timeout is None: timeout=60 @@ -119,7 +153,6 @@ def waitForObj(lam, timeout=None): ret=lam() if ret is not None: return ret - sleepTime=3 if Utils.Debug: Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" % (sleepTime, endTime - time.time())) @@ -127,6 +160,8 @@ def waitForObj(lam, timeout=None): stdout.write('.') stdout.flush() needsNewLine=True + if reporter is not None: + reporter() time.sleep(sleepTime) finally: if needsNewLine: @@ -135,9 +170,9 @@ def waitForObj(lam, timeout=None): return None @staticmethod - def waitForBool(lam, timeout=None): + def waitForBool(lam, timeout=None, sleepTime=3, reporter=None): myLam = lambda: True if lam() else None - ret=Utils.waitForObj(myLam, timeout) + ret=Utils.waitForObj(myLam, timeout, sleepTime, reporter=reporter) return False if ret is None else ret @staticmethod @@ -180,7 +215,8 @@ def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): @staticmethod def runCmdReturnStr(cmd, trace=False): - retStr=Utils.checkOutput(cmd.split()) + cmdArr=shlex.split(cmd) + retStr=Utils.checkOutput(cmdArr) if trace: Utils.Print ("RAW > %s" % (retStr)) return retStr @@ -230,7 +266,7 @@ def pgrepCmd(serverName): # If no -a, AttributeError: 'NoneType' object has no attribute 'group' pgrepOpts="-fl" - return "pgrep %s %s" % (pgrepOpts, serverName)\ + return "pgrep %s %s" % (pgrepOpts, serverName) @staticmethod def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py index ac7520bc353..afcf2767b73 100755 --- a/tests/validate-dirty-db.py +++ b/tests/validate-dirty-db.py @@ -74,7 +74,7 @@ def runNodeosAndGetOutput(myTimeout=3): pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: errorExit("Failed to stand up eos cluster.") node=cluster.getNode(0) diff --git a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py index 358d3b963b8..6be44a2d79e 100755 --- a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py +++ b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py @@ -26,6 +26,7 @@ 'eosio.stake', 'eosio.token', 'eosio.vpay', + 'eosio.rex', ] def jsonArg(a): diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 3b288f2d2a3..82e723e5563 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -33,8 +33,6 @@ find_package(LLVM 4.0 REQUIRED CONFIG) 
link_directories(${LLVM_LIBRARY_DIR}) -set( CMAKE_CXX_STANDARD 14 ) - add_subdirectory(contracts) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/include/contracts.hpp ESCAPE_QUOTES) @@ -42,7 +40,9 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA ### BUILD UNIT TEST EXECUTABLE ### file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc ${PLATFORM_SPECIFIC_LIBS} ) + +target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) + target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) target_include_directories( unit_test PUBLIC ${CMAKE_SOURCE_DIR}/libraries/testing/include diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 1d4fedc0e7c..8e2b9576506 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -218,7 +219,7 @@ transaction_trace_ptr CallAction(TESTER& test, T ac, const vector& } template -transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, const vector& scope = {N(testapi)}) { +transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, const vector& scope = {N(testapi)}, bool no_throw = false) { { signed_transaction trx; @@ -238,8 +239,10 @@ transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, flat_set keys; trx.get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - auto res = test.push_transaction(trx); - BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); + auto res = test.push_transaction(trx, fc::time_point::maximum(), TESTER::DEFAULT_BILLED_CPU_TIME_US, no_throw); + if (!no_throw) { + BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); + } test.produce_block(); return res; } @@ -248,6 +251,7 @@ transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, #define CALL_TEST_FUNCTION(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_api_action{}, DATA) #define CALL_TEST_FUNCTION_SYSTEM(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_chain_action{}, DATA, {config::system_account_name} ) #define CALL_TEST_FUNCTION_SCOPE(_TESTER, CLS, MTH, DATA, ACCOUNT) CallFunction(_TESTER, test_api_action{}, DATA, ACCOUNT) +#define CALL_TEST_FUNCTION_NO_THROW(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_api_action{}, DATA, {N(testapi)}, true) #define CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION(_TESTER, CLS, MTH, DATA, EXC, EXC_MESSAGE) \ BOOST_CHECK_EXCEPTION( \ CALL_TEST_FUNCTION( _TESTER, CLS, MTH, DATA), \ @@ -330,25 +334,25 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { auto result = push_reqauth( config::system_account_name, "active" ); BOOST_REQUIRE_EQUAL( result->receipt->status, transaction_receipt::executed ); - BOOST_REQUIRE( result->action_traces[0].receipt.auth_sequence.find( config::system_account_name ) - != result->action_traces[0].receipt.auth_sequence.end() ); - auto base_global_sequence_num = result->action_traces[0].receipt.global_sequence; - auto base_system_recv_seq_num = result->action_traces[0].receipt.recv_sequence; - auto base_system_auth_seq_num = result->action_traces[0].receipt.auth_sequence[config::system_account_name]; - auto base_system_code_seq_num = result->action_traces[0].receipt.code_sequence.value; - auto 
base_system_abi_seq_num = result->action_traces[0].receipt.abi_sequence.value; + BOOST_REQUIRE( result->action_traces[0].receipt->auth_sequence.find( config::system_account_name ) + != result->action_traces[0].receipt->auth_sequence.end() ); + auto base_global_sequence_num = result->action_traces[0].receipt->global_sequence; + auto base_system_recv_seq_num = result->action_traces[0].receipt->recv_sequence; + auto base_system_auth_seq_num = result->action_traces[0].receipt->auth_sequence[config::system_account_name]; + auto base_system_code_seq_num = result->action_traces[0].receipt->code_sequence.value; + auto base_system_abi_seq_num = result->action_traces[0].receipt->abi_sequence.value; uint64_t base_test_recv_seq_num = 0; uint64_t base_test_auth_seq_num = 0; call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 1 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 1 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 0 ); - base_test_recv_seq_num = res->action_traces[0].receipt.recv_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, 0 ); + base_test_recv_seq_num = res->action_traces[0].receipt->recv_sequence; BOOST_CHECK( base_test_recv_seq_num > 0 ); base_test_recv_seq_num--; - const auto& m = res->action_traces[0].receipt.auth_sequence; + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); base_test_auth_seq_num = m.begin()->second; @@ -361,11 +365,11 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { call_provereset_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 4 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_test_recv_seq_num + 2 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 2 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 0 ); - const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 4 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->recv_sequence, base_test_recv_seq_num + 2 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, 2 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, 0 ); + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 3 ); @@ -377,11 +381,11 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { call_doit_and_check( config::system_account_name, N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 6 ); - BOOST_CHECK_EQUAL( 
res->action_traces[0].receipt.recv_sequence, base_system_recv_seq_num + 4 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, base_system_code_seq_num + 1 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, base_system_abi_seq_num ); - const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 6 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->recv_sequence, base_system_recv_seq_num + 4 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, base_system_code_seq_num + 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, base_system_abi_seq_num ); + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 4 ); @@ -395,11 +399,11 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 11 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_test_recv_seq_num + 3 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 4 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 1 ); - const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 11 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->recv_sequence, base_test_recv_seq_num + 3 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, 4 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, 1 ); + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 8 ); @@ -586,25 +590,31 @@ BOOST_FIXTURE_TEST_CASE(require_notice_tests, TESTER) { try { } FC_LOG_AND_RETHROW() } -BOOST_FIXTURE_TEST_CASE(ram_billing_in_notify_tests, TESTER) { try { - produce_blocks(2); - create_account( N(testapi) ); - create_account( N(testapi2) ); - produce_blocks(10); - set_code( N(testapi), contracts::test_api_wasm() ); - produce_blocks(1); - set_code( N(testapi2), contracts::test_api_wasm() ); - produce_blocks(1); - - BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi) ) ), - subjective_block_production_exception, fc_exception_message_is("Cannot charge RAM to other accounts during notify.") ); +BOOST_AUTO_TEST_CASE(ram_billing_in_notify_tests) { try { + validating_tester chain( validating_tester::default_config() ); + chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); + + chain.produce_blocks(2); + chain.create_account( N(testapi) ); + chain.create_account( N(testapi2) ); + chain.produce_blocks(10); + chain.set_code( N(testapi), contracts::test_api_wasm() ); + chain.produce_blocks(1); + chain.set_code( N(testapi2), contracts::test_api_wasm() ); + chain.produce_blocks(1); + + BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", + 
fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi) ) ), + subjective_block_production_exception, + fc_exception_message_is("Cannot charge RAM to other accounts during notify.") + ); - CALL_TEST_FUNCTION( *this, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | 0 ) ); + CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | 0 ) ); - CALL_TEST_FUNCTION( *this, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi2) ) ); + CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi2) ) ); - BOOST_REQUIRE_EQUAL( validate(), true ); + BOOST_REQUIRE_EQUAL( chain.validate(), true ); } FC_LOG_AND_RETHROW() } /************************************************************************************* @@ -701,12 +711,12 @@ BOOST_FIXTURE_TEST_CASE(cf_action_tests, TESTER) { try { // test send context free action auto ttrace = CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action", {} ); - BOOST_CHECK_EQUAL(ttrace->action_traces.size(), 1); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces.size(), 1); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].receipt.receiver, account_name("dummy")); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].act.account, account_name("dummy")); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].act.name, account_name("event1")); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].act.authorization.size(), 0); + BOOST_REQUIRE_EQUAL(ttrace->action_traces.size(), 2); + BOOST_CHECK_EQUAL((int)(ttrace->action_traces[1].creator_action_ordinal), 1); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].receiver, account_name("dummy")); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.account, account_name("dummy")); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.name, account_name("event1")); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.authorization.size(), 0); BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action_fail", {} ), eosio_assert_message_exception, @@ -1079,7 +1089,10 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try { { produce_blocks(10); transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } + } ); // test error handling on deferred transaction failure CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {}); @@ -1125,7 +1138,10 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { //schedule { transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t->scheduled) { trace = t; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {} ); BOOST_CHECK(!trace); produce_block( fc::seconds(2) ); @@ -1145,7 +1161,10 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { { 
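// applied_transaction now delivers its arguments as a tuple; std::get<0>(x) below extracts the transaction_trace_ptr 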
transaction_trace_ptr trace; uint32_t count = 0; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->scheduled) { trace = t; ++count; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->scheduled) { trace = t; ++count; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {}); BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {}), deferred_tx_duplicate); produce_blocks( 3 ); @@ -1164,13 +1183,14 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks(10); -#warning re-enable deferred transaction replacement test after bug has been fixed - #if 0 //schedule twice with replace_existing flag (second deferred transaction should replace first one) { transaction_trace_ptr trace; uint32_t count = 0; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->scheduled) { trace = t; ++count; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->scheduled) { trace = t; ++count; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction_replace", {}); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction_replace", {}); produce_blocks( 3 ); @@ -1186,14 +1206,16 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { BOOST_CHECK_EQUAL( 1, trace->action_traces.size() ); c.disconnect(); } - #endif produce_blocks(10); //schedule and cancel { transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->scheduled) { trace = t; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->scheduled) { trace = t; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {}); CALL_TEST_FUNCTION(*this, "test_transaction", "cancel_deferred_transaction_success", {}); produce_block( fc::seconds(2) ); @@ -1213,7 +1235,8 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { //repeated deferred transactions { vector traces; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); if (t && t->scheduled) { traces.push_back( t ); } @@ -1235,7 +1258,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { // Payer is alice in this case, this tx should fail since we don't have the authorization of alice dtt_action dtt_act1; dtt_act1.payer = N(alice); - BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act1)), missing_auth_exception); + BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act1)), action_validate_exception); // Send a tx which in turn sends a deferred tx with the deferred tx's receiver != this tx receiver // This will include the authorization of the receiver, and impose any related delay associated with the authority @@ -1266,7 +1289,8 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act2)); // If the deferred tx receiver == this tx receiver, the authorization checking would originally be 
bypassed. - // But not anymore. Now it should subjectively fail because testapi@additional permission is not unilaterally satisfied by testapi@eosio.code. + // But not anymore. With the RESTRICT_ACTION_TO_SELF protocol feature activated, it should now objectively + // fail because testapi@additional permission is not unilaterally satisfied by testapi@eosio.code. dtt_action dtt_act3; dtt_act3.deferred_account = N(testapi); dtt_act3.permission_name = N(additional); @@ -1275,7 +1299,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { ("code", name(dtt_act3.deferred_account)) ("type", name(dtt_act3.deferred_action)) ("requirement", name(dtt_act3.permission_name))); - BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act3)), subjective_block_production_exception); + BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act3)), unsatisfied_authorization); // But it should again work if the deferred transaction has a sufficient delay. dtt_act3.delay_sec = 10; @@ -1294,6 +1318,186 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(more_deferred_transaction_tests) { try { + auto cfg = validating_tester::default_config(); + cfg.contracts_console = true; + validating_tester chain( cfg ); + chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = chain.control->get_protocol_feature_manager(); + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::replace_deferred ); + BOOST_REQUIRE( d ); + + chain.preactivate_protocol_features( {*d} ); + chain.produce_block(); + + const auto& index = chain.control->db().get_index(); + + auto print_deferred = [&index]() { + for( const auto& gto : index ) { + wlog("id = ${id}, trx_id = ${trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); + } + }; + + const auto& contract_account = account_name("tester"); + const auto& test_account = account_name("alice"); + + chain.create_accounts( {contract_account, test_account} ); + chain.set_code( contract_account, contracts::deferred_test_wasm() ); + chain.set_abi( contract_account, contracts::deferred_test_abi().data() ); + chain.produce_block(); + + BOOST_REQUIRE_EQUAL(0, index.size()); + + chain.push_action( contract_account, N(delayedcall), test_account, fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 0) + ("contract", contract_account) + ("payload", 42) + ("delay_sec", 1000) + ("replace_existing", false) + ); + + BOOST_REQUIRE_EQUAL(1, index.size()); + print_deferred(); + + signed_transaction trx; + trx.actions.emplace_back( + chain.get_action( contract_account, N(delayedcall), + vector{{test_account, config::active_name}}, + fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 0) + ("contract", contract_account) + ("payload", 13) + ("delay_sec", 1000) + ("replace_existing", true) + ) + ); + trx.actions.emplace_back( + chain.get_action( contract_account, N(delayedcall), + vector{{test_account, config::active_name}}, + fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 1) + ("contract", contract_account) + ("payload", 42) + ("delay_sec", 1000) + ("replace_existing", false) + ) + ); + trx.actions.emplace_back( + chain.get_action( contract_account, N(fail), + vector{}, + fc::mutable_variant_object() + ) + ); + chain.set_transaction_headers(trx); + trx.sign( 
chain.get_private_key( test_account, "active" ), chain.control->get_chain_id() ); + BOOST_REQUIRE_EXCEPTION( + chain.push_transaction( trx ), + eosio_assert_message_exception, + eosio_assert_message_is("fail") + ); + + BOOST_REQUIRE_EQUAL(1, index.size()); + print_deferred(); + + chain.produce_blocks(2); + + chain.push_action( contract_account, N(delayedcall), test_account, fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 1) + ("contract", contract_account) + ("payload", 101) + ("delay_sec", 1000) + ("replace_existing", false) + ); + + chain.push_action( contract_account, N(delayedcall), test_account, fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 2) + ("contract", contract_account) + ("payload", 102) + ("delay_sec", 1000) + ("replace_existing", false) + ); + + BOOST_REQUIRE_EQUAL(3, index.size()); + print_deferred(); + + BOOST_REQUIRE_THROW( + chain.push_action( contract_account, N(delayedcall), test_account, fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 2) + ("contract", contract_account) + ("payload", 101) + ("delay_sec", 1000) + ("replace_existing", true) + ), + fc::exception + ); + + BOOST_REQUIRE_EQUAL(3, index.size()); + print_deferred(); + + signed_transaction trx2; + trx2.actions.emplace_back( + chain.get_action( contract_account, N(delayedcall), + vector<permission_level>{{test_account, config::active_name}}, + fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 1) + ("contract", contract_account) + ("payload", 100) + ("delay_sec", 1000) + ("replace_existing", true) + ) + ); + trx2.actions.emplace_back( + chain.get_action( contract_account, N(delayedcall), + vector<permission_level>{{test_account, config::active_name}}, + fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 2) + ("contract", contract_account) + ("payload", 101) + ("delay_sec", 1000) + ("replace_existing", true) + ) + ); + trx2.actions.emplace_back( + chain.get_action( contract_account, N(delayedcall), + vector<permission_level>{{test_account, config::active_name}}, + fc::mutable_variant_object() + ("payer", test_account) + ("sender_id", 1) + ("contract", contract_account) + ("payload", 102) + ("delay_sec", 1000) + ("replace_existing", true) + ) + ); + trx2.actions.emplace_back( + chain.get_action( contract_account, N(fail), + vector<permission_level>{}, + fc::mutable_variant_object() + ) + ); + chain.set_transaction_headers(trx2); + trx2.sign( chain.get_private_key( test_account, "active" ), chain.control->get_chain_id() ); + BOOST_REQUIRE_EXCEPTION( + chain.push_transaction( trx2 ), + eosio_assert_message_exception, + eosio_assert_message_is("fail") + ); + + BOOST_REQUIRE_EQUAL(3, index.size()); + print_deferred(); + + BOOST_REQUIRE_EQUAL( chain.validate(), true ); +} FC_LOG_AND_RETHROW() } + template struct setprod_act { static account_name get_account() { @@ -2045,6 +2249,17 @@ BOOST_FIXTURE_TEST_CASE(eosio_assert_code_tests, TESTER) { try { BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_action", "test_assert_code", fc::raw::pack((uint64_t)42) ), eosio_assert_code_exception, eosio_assert_code_is(42) ); + + auto trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_assert_code", fc::raw::pack((uint64_t)42) ); + BOOST_REQUIRE( trace ); + BOOST_REQUIRE( trace->except ); + BOOST_REQUIRE( trace->error_code ); + BOOST_REQUIRE_EQUAL( *trace->error_code, 42 ); + BOOST_REQUIRE_EQUAL( trace->action_traces.size(), 1 ); + BOOST_REQUIRE( trace->action_traces[0].except ); + BOOST_REQUIRE( trace->action_traces[0].error_code ); + BOOST_REQUIRE_EQUAL(
*trace->action_traces[0].error_code, 42 ); + produce_block(); auto omsg1 = abis.get_error_message(1); @@ -2060,7 +2275,490 @@ BOOST_FIXTURE_TEST_CASE(eosio_assert_code_tests, TESTER) { try { produce_block(); + auto trace2 = CALL_TEST_FUNCTION_NO_THROW( + *this, "test_action", "test_assert_code", + fc::raw::pack( static_cast<uint64_t>(system_error_code::generic_system_error) ) + ); + BOOST_REQUIRE( trace2 ); + BOOST_REQUIRE( trace2->except ); + BOOST_REQUIRE( trace2->error_code ); + BOOST_REQUIRE_EQUAL( *trace2->error_code, static_cast<uint64_t>(system_error_code::contract_restricted_error_code) ); + BOOST_REQUIRE_EQUAL( trace2->action_traces.size(), 1 ); + BOOST_REQUIRE( trace2->action_traces[0].except ); + BOOST_REQUIRE( trace2->action_traces[0].error_code ); + BOOST_REQUIRE_EQUAL( *trace2->action_traces[0].error_code, static_cast<uint64_t>(system_error_code::contract_restricted_error_code) ); + + produce_block(); + BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } +/************************************************************************************* + * action_ordinal_test test cases + *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_SCOPE( *this, "test_action", "test_action_ordinal1", + {}, vector<account_name>{ N(testapi)}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 11); + + auto &atrace = txn_trace->action_traces; + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); + int start_gseq = atrace[0].receipt->global_sequence; + + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); + + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value,
N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); + + BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); + BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); + BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[3].receipt->global_sequence, start_gseq + 8); + + BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); + + BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); + BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); + BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[5].receipt->global_sequence, start_gseq + 9); + + BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); + BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); + + BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); + BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); + BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[7].receipt->global_sequence, start_gseq + 10); + + BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); + BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[8].closest_unnotified_ancestor_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), 
true); + BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); + + BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); + BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[9].closest_unnotified_ancestor_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); + BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); + + BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); + BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[10].closest_unnotified_ancestor_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); + BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); +} FC_LOG_AND_RETHROW() } + + +/************************************************************************************* + * action_ordinal_failtest1 test cases + *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + create_account(N(fail1) ); // <- make the first action fail in the middle + produce_blocks(1); + + transaction_trace_ptr txn_trace = + CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 3); + + auto &atrace = txn_trace->action_traces; + + // fails here after creating one notify action and one inline action + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[0].except->code(), 3050003); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); +
BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[1].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); + +} FC_LOG_AND_RETHROW() } + +/************************************************************************************* + * action_ordinal_failtest2 test cases + *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + create_account(N(fail3) ); // <- make action 3 fail in the middle + produce_blocks(1); + + transaction_trace_ptr txn_trace = + CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 8); + + auto &atrace = txn_trace->action_traces; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); + int start_gseq = atrace[0].receipt->global_sequence; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); +
BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); + BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); + BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), false); + + // the exception occurs here + BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[4].except.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[4].except->code(), 3050003); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); + BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); + BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); + BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[6].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); + BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); + BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); + +} FC_LOG_AND_RETHROW() } + +/************************************************************************************* + * action_ordinal_failtest3 test cases + *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code(
N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + create_account(N(failnine) ); // <- make action 9 fail in the middle + produce_blocks(1); + + transaction_trace_ptr txn_trace = + CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 11); + + auto &atrace = txn_trace->action_traces; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); + int start_gseq = atrace[0].receipt->global_sequence; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); + + // fails here + BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); + BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); + BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[3].except->code(), 3050003); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receiver.value,
N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); + BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); + BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); + BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); + BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); + BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); + BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[8].closest_unnotified_ancestor_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); + BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[9].closest_unnotified_ancestor_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); + BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); + BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[10].closest_unnotified_ancestor_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); + 
BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); + BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index a238c7246a3..b944b532423 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -8,7 +8,6 @@ #include #include -#include #ifdef NON_VALIDATING_TEST #define TESTER tester @@ -371,7 +370,8 @@ BOOST_AUTO_TEST_CASE( any_auth ) { try { BOOST_AUTO_TEST_CASE(no_double_billing) { try { - TESTER chain; + validating_tester chain( validating_tester::default_config() ); + chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); chain.produce_block(); @@ -490,7 +490,7 @@ BOOST_AUTO_TEST_CASE( linkauth_special ) { try { chain.create_account(N(tester)); chain.create_account(N(tester2)); chain.produce_blocks(); - + chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() ("account", "tester") ("permission", "first") diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp index 74c074bd9bb..fab79a306a1 100644 --- a/unittests/block_tests.cpp +++ b/unittests/block_tests.cpp @@ -30,13 +30,13 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test) // Re-sign the transaction signed_tx.signatures.clear(); signed_tx.sign(main.get_private_key(config::system_account_name, "active"), main.control->get_chain_id()); - // Replace the valid transaction with the invalid transaction + // Replace the valid transaction with the invalid transaction auto invalid_packed_tx = packed_transaction(signed_tx); copy_b->transactions.back().trx = invalid_packed_tx; // Re-sign the block auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) ); - auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule_hash) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) ); copy_b->producer_signature = main.get_private_key(config::system_account_name, "active").sign(sig_digest); // Push block with invalid transaction to other chain @@ -77,7 +77,7 @@ std::pair corrupt_trx_in_block(validating_te // Re-sign the block auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) ); - auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule_hash) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) ); copy_b->producer_signature = main.get_private_key(b->producer, "active").sign(sig_digest); return std::pair(b, copy_b); } @@ -163,4 +163,32 @@ BOOST_AUTO_TEST_CASE(untrusted_producer_test) }) ; } +/** + * Ensure that the block broadcasted by producing node and receiving node is identical + */ +BOOST_AUTO_TEST_CASE(broadcasted_block_test) +{ + + tester producer_node; + tester receiving_node; + + signed_block_ptr bcasted_blk_by_prod_node; + signed_block_ptr bcasted_blk_by_recv_node; + + producer_node.control->accepted_block.connect( [&](const block_state_ptr& bs) { + bcasted_blk_by_prod_node = bs->block; + }); + 
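+ // mirror the capture above: record the receiving node's view of the accepted block so the two serialized forms can be compared byte-for-byte below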
receiving_node.control->accepted_block.connect( [&](const block_state_ptr& bs) { + bcasted_blk_by_recv_node = bs->block; + }); + + auto b = producer_node.produce_block(); + receiving_node.push_block(b); + + bytes bcasted_blk_by_prod_node_packed = fc::raw::pack(*bcasted_blk_by_prod_node); + bytes bcasted_blk_by_recv_node_packed = fc::raw::pack(*bcasted_blk_by_recv_node); + BOOST_CHECK(std::equal(bcasted_blk_by_prod_node_packed.begin(), bcasted_blk_by_prod_node_packed.end(), bcasted_blk_by_recv_node_packed.begin())); + +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/bootseq_tests.cpp b/unittests/bootseq_tests.cpp index a3df02b652c..561a04622cc 100644 --- a/unittests/bootseq_tests.cpp +++ b/unittests/bootseq_tests.cpp @@ -213,10 +213,10 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { set_privileged(N(eosio.token)); // Verify eosio.msig and eosio.token is privileged - const auto& eosio_msig_acc = get<account_object, by_name>(N(eosio.msig)); - BOOST_TEST(eosio_msig_acc.privileged == true); - const auto& eosio_token_acc = get<account_object, by_name>(N(eosio.token)); - BOOST_TEST(eosio_token_acc.privileged == true); + const auto& eosio_msig_acc = get<account_metadata_object, by_name>(N(eosio.msig)); + BOOST_TEST(eosio_msig_acc.is_privileged() == true); + const auto& eosio_token_acc = get<account_metadata_object, by_name>(N(eosio.token)); + BOOST_TEST(eosio_token_acc.is_privileged() == true); // Create SYS tokens in eosio.token, set its manager as eosio @@ -279,7 +279,7 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { // Total Stakes = b1 + whale2 + whale3 stake = (100,000,000 - 1,000) + (20,000,000 - 1,000) + (30,000,000 - 1,000) vector<char> data = get_row_by_account( config::system_account_name, config::system_account_name, N(global), N(global) ); - + BOOST_TEST(get_global_state()["total_activated_stake"].as<int64_t>() == 1499999997000); // No producers will be set, since the total activated stake is less than 150,000,000 diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 8dd1f2b4dcf..efafa1846c8 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -36,17 +36,23 @@ namespace eosio { MAKE_READ_WASM_ABI(eosio_token, eosio.token, contracts) MAKE_READ_WASM_ABI(eosio_wrap, eosio.wrap, contracts) + MAKE_READ_WASM_ABI(before_preactivate_eosio_bios, eosio.bios, contracts/old_versions/v1.6.0-rc3) + // Contracts in `eos/unittests/unittests/test-contracts' directory - MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) - MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) - MAKE_READ_WASM_ABI(noop, noop, test-contracts) - MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) - MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) - MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) - MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) - MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) - MAKE_READ_WASM_ABI(test_api_multi_index, test_api_multi_index, test-contracts) - MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) + MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) + MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) + MAKE_READ_WASM_ABI(get_sender_test, get_sender_test, test-contracts) + MAKE_READ_WASM_ABI(noop, noop, test-contracts) + MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) + MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) + MAKE_READ_WASM_ABI(ram_restrictions_test, ram_restrictions_test, test-contracts) + MAKE_READ_WASM_ABI(reject_all, reject_all, test-contracts) + MAKE_READ_WASM_ABI(restrict_action_test, restrict_action_test, test-contracts) +
MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) + MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) + MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) + MAKE_READ_WASM_ABI(test_api_multi_index, test_api_multi_index, test-contracts) + MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) }; } /// eosio::testing } /// eosio diff --git a/unittests/contracts/CMakeLists.txt b/unittests/contracts/CMakeLists.txt index 59ea1c1ca26..f64c79de062 100644 --- a/unittests/contracts/CMakeLists.txt +++ b/unittests/contracts/CMakeLists.txt @@ -6,3 +6,5 @@ file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.msig/ DESTINATION ${CMAKE_CURRENT_BI file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.system/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.system/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.token/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.token/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.wrap/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.wrap/) + +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/old_versions/v1.6.0-rc3/eosio.bios/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/old_versions/v1.6.0-rc3/eosio.bios/) diff --git a/unittests/contracts/eosio.bios/eosio.bios.abi b/unittests/contracts/eosio.bios/eosio.bios.abi index 0d5749b981b..01f62c976f5 100644 --- a/unittests/contracts/eosio.bios/eosio.bios.abi +++ b/unittests/contracts/eosio.bios/eosio.bios.abi @@ -1,6 +1,7 @@ { - "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Mon Dec 3 17:06:17 2018", + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", "version": "eosio::abi/1.1", + "types": [], "structs": [ { "name": "abi_hash", @@ -16,6 +17,16 @@ } ] }, + { + "name": "activate", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, { "name": "authority", "base": "", @@ -254,6 +265,16 @@ } ] }, + { + "name": "reqactivated", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, { "name": "reqauth", "base": "", @@ -429,8 +450,12 @@ ] } ], - "types": [], "actions": [ + { + "name": "activate", + "type": "activate", + "ricardian_contract": "" + }, { "name": "canceldelay", "type": "canceldelay", @@ -456,6 +481,11 @@ "type": "onerror", "ricardian_contract": "" }, + { + "name": "reqactivated", + "type": "reqactivated", + "ricardian_contract": "" + }, { "name": "reqauth", "type": "reqauth", @@ -517,6 +547,5 @@ } ], "ricardian_clauses": [], - "variants": [], - "abi_extensions": [] + "variants": [] } \ No newline at end of file diff --git a/unittests/contracts/eosio.bios/eosio.bios.wasm b/unittests/contracts/eosio.bios/eosio.bios.wasm index ea62431344e..968bd1529dc 100755 Binary files a/unittests/contracts/eosio.bios/eosio.bios.wasm and b/unittests/contracts/eosio.bios/eosio.bios.wasm differ diff --git a/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi new file mode 100644 index 00000000000..ebdfccd0704 --- /dev/null +++ b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi @@ -0,0 +1,522 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT Mon Mar 11 20:20:06 2019", + "version": "eosio::abi/1.1", + "structs": [ + { + "name": "abi_hash", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "hash", + "type": "checksum256" + } + ] + }, + { + "name": "authority", + "base": "", + "fields": [ + { + "name": "threshold", + "type": "uint32" + }, + { + "name": "keys", + "type": "key_weight[]" + }, + { + "name": "accounts", + "type": "permission_level_weight[]" + }, + { + "name": "waits", + "type": "wait_weight[]" + } + ] + }, + { + "name": "blockchain_parameters", + "base": "", + "fields": [ + { + "name": "max_block_net_usage", + "type": "uint64" + }, + { + "name": "target_block_net_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_net_usage", + "type": "uint32" + }, + { + "name": "base_per_transaction_net_usage", + "type": "uint32" + }, + { + "name": "net_usage_leeway", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_num", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_den", + "type": "uint32" + }, + { + "name": "max_block_cpu_usage", + "type": "uint32" + }, + { + "name": "target_block_cpu_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "min_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "max_transaction_lifetime", + "type": "uint32" + }, + { + "name": "deferred_trx_expiration_window", + "type": "uint32" + }, + { + "name": "max_transaction_delay", + "type": "uint32" + }, + { + "name": "max_inline_action_size", + "type": "uint32" + }, + { + "name": "max_inline_action_depth", + "type": "uint16" + }, + { + "name": "max_authority_depth", + "type": "uint16" + } + ] + }, + { + "name": "canceldelay", + "base": "", + "fields": [ + { + "name": "canceling_auth", + "type": "permission_level" + }, + { + "name": "trx_id", + "type": "checksum256" + } + ] + }, + { + "name": "deleteauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "key_weight", + "base": "", + "fields": [ + { + "name": "key", + "type": "public_key" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "linkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + }, + { + "name": "requirement", + "type": "name" + } + ] + }, + { + "name": "newaccount", + "base": "", + "fields": [ + { + "name": "creator", + "type": "name" + }, + { + "name": "name", + "type": "name" + }, + { + "name": "owner", + "type": "authority" + }, + { + "name": "active", + "type": "authority" + } + ] + }, + { + "name": "onerror", + "base": "", + "fields": [ + { + "name": "sender_id", + "type": "uint128" + }, + { + "name": "sent_trx", + "type": "bytes" + } + ] + }, + { + "name": "permission_level", + "base": "", + "fields": [ + { + "name": "actor", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "permission_level_weight", + "base": "", + "fields": [ + { + "name": "permission", + "type": "permission_level" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "producer_key", + "base": "", + "fields": [ + { + "name": "producer_name", + "type": "name" + }, + { + "name": "block_signing_key", + "type": "public_key" + } + ] + }, + { + "name": "reqauth", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + } + ] + }, + 
{ + "name": "setabi", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "abi", + "type": "bytes" + } + ] + }, + { + "name": "setalimits", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "ram_bytes", + "type": "int64" + }, + { + "name": "net_weight", + "type": "int64" + }, + { + "name": "cpu_weight", + "type": "int64" + } + ] + }, + { + "name": "setcode", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "vmtype", + "type": "uint8" + }, + { + "name": "vmversion", + "type": "uint8" + }, + { + "name": "code", + "type": "bytes" + } + ] + }, + { + "name": "setglimits", + "base": "", + "fields": [ + { + "name": "ram", + "type": "uint64" + }, + { + "name": "net", + "type": "uint64" + }, + { + "name": "cpu", + "type": "uint64" + } + ] + }, + { + "name": "setparams", + "base": "", + "fields": [ + { + "name": "params", + "type": "blockchain_parameters" + } + ] + }, + { + "name": "setpriv", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "is_priv", + "type": "uint8" + } + ] + }, + { + "name": "setprods", + "base": "", + "fields": [ + { + "name": "schedule", + "type": "producer_key[]" + } + ] + }, + { + "name": "unlinkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + } + ] + }, + { + "name": "updateauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + }, + { + "name": "parent", + "type": "name" + }, + { + "name": "auth", + "type": "authority" + } + ] + }, + { + "name": "wait_weight", + "base": "", + "fields": [ + { + "name": "wait_sec", + "type": "uint32" + }, + { + "name": "weight", + "type": "uint16" + } + ] + } + ], + "types": [], + "actions": [ + { + "name": "canceldelay", + "type": "canceldelay", + "ricardian_contract": "" + }, + { + "name": "deleteauth", + "type": "deleteauth", + "ricardian_contract": "" + }, + { + "name": "linkauth", + "type": "linkauth", + "ricardian_contract": "" + }, + { + "name": "newaccount", + "type": "newaccount", + "ricardian_contract": "" + }, + { + "name": "onerror", + "type": "onerror", + "ricardian_contract": "" + }, + { + "name": "reqauth", + "type": "reqauth", + "ricardian_contract": "" + }, + { + "name": "setabi", + "type": "setabi", + "ricardian_contract": "" + }, + { + "name": "setalimits", + "type": "setalimits", + "ricardian_contract": "" + }, + { + "name": "setcode", + "type": "setcode", + "ricardian_contract": "" + }, + { + "name": "setglimits", + "type": "setglimits", + "ricardian_contract": "" + }, + { + "name": "setparams", + "type": "setparams", + "ricardian_contract": "" + }, + { + "name": "setpriv", + "type": "setpriv", + "ricardian_contract": "" + }, + { + "name": "setprods", + "type": "setprods", + "ricardian_contract": "" + }, + { + "name": "unlinkauth", + "type": "unlinkauth", + "ricardian_contract": "" + }, + { + "name": "updateauth", + "type": "updateauth", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "abihash", + "type": "abi_hash", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [], + "abi_extensions": [] +} \ No newline at end of file diff --git a/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm new file mode 100755 index 
00000000000..fd6478776ed Binary files /dev/null and b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm differ diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp index 21dabc36c56..bd00e7b60eb 100644 --- a/unittests/database_tests.cpp +++ b/unittests/database_tests.cpp @@ -64,37 +64,23 @@ BOOST_AUTO_TEST_SUITE(database_tests) BOOST_TEST(test.control->fetch_block_by_number(i + 1)->id() == block_ids.back()); } - // Utility function to check expected irreversible block - auto calc_exp_last_irr_block_num = [&](uint32_t head_block_num) -> uint32_t { - const auto producers_size = test.control->head_block_state()->active_schedule.producers.size(); - const auto max_reversible_rounds = EOS_PERCENT(producers_size, config::percent_100 - config::irreversible_threshold_percent); - if( max_reversible_rounds == 0) { - return head_block_num; - } else { - const auto current_round = head_block_num / config::producer_repetitions; - const auto irreversible_round = current_round - max_reversible_rounds; - return (irreversible_round + 1) * config::producer_repetitions - 1; - } - }; - - // Check the last irreversible block number is set correctly - const auto expected_last_irreversible_block_number = calc_exp_last_irr_block_num(num_of_blocks_to_prod); + // Check that the last irreversible block number is set correctly; with a single producer, irreversibility should trail the head block by only one block + const auto expected_last_irreversible_block_number = test.control->head_block_num() - 1; BOOST_TEST(test.control->head_block_state()->dpos_irreversible_blocknum == expected_last_irreversible_block_number); - // Check that block 201 cannot be found (only 20 blocks exist) - BOOST_TEST(test.control->fetch_block_by_number(num_of_blocks_to_prod + 1 + 1) == nullptr); + // Ensure that the future block doesn't exist yet + const auto nonexisting_future_block_num = test.control->head_block_num() + 1; + BOOST_TEST(test.control->fetch_block_by_number(nonexisting_future_block_num) == nullptr); const uint32_t next_num_of_blocks_to_prod = 100; - // Produce 100 blocks and check their IDs should match the above test.produce_blocks(next_num_of_blocks_to_prod); - const auto next_expected_last_irreversible_block_number = calc_exp_last_irr_block_num( - num_of_blocks_to_prod + next_num_of_blocks_to_prod); + const auto next_expected_last_irreversible_block_number = test.control->head_block_num() - 1; // Check the last irreversible block number is updated correctly BOOST_TEST(test.control->head_block_state()->dpos_irreversible_blocknum == next_expected_last_irreversible_block_number); - // Check that block 201 can now be found - BOOST_CHECK_NO_THROW(test.control->fetch_block_by_number(num_of_blocks_to_prod + 1)); + // The previously nonexistent future block should exist by now + BOOST_CHECK_NO_THROW(test.control->fetch_block_by_number(nonexisting_future_block_num)); // Check the latest head block match - BOOST_TEST(test.control->fetch_block_by_number(num_of_blocks_to_prod + next_num_of_blocks_to_prod + 1)->id() == + BOOST_TEST(test.control->fetch_block_by_number(test.control->head_block_num())->id() == test.control->head_block_id()); } FC_LOG_AND_RETHROW() } diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 913f3395e8e..2d6f77c7ee9 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -4,7 +4,6 @@ */ #include #include -#include #include #include @@ -261,7 +260,7 @@ BOOST_AUTO_TEST_CASE(delete_auth_test) { try { expect_assert_message(e, "permission_query_exception: Permission Query
Exception\nFailed to retrieve permission"); return true; }); - + // update auth chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() ("account", "tester") diff --git a/unittests/fork_test_utilities.cpp b/unittests/fork_test_utilities.cpp new file mode 100644 index 00000000000..a8caaeeb233 --- /dev/null +++ b/unittests/fork_test_utilities.cpp @@ -0,0 +1,42 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "fork_test_utilities.hpp" + +private_key_type get_private_key( name keyname, string role ) { + return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); +} + +public_key_type get_public_key( name keyname, string role ){ + return get_private_key( keyname, role ).get_public_key(); +} + +void push_blocks( tester& from, tester& to, uint32_t block_num_limit ) { + while( to.control->fork_db_pending_head_block_num() + < std::min( from.control->fork_db_pending_head_block_num(), block_num_limit ) ) + { + auto fb = from.control->fetch_block_by_number( to.control->fork_db_pending_head_block_num()+1 ); + to.push_block( fb ); + } +} + +bool produce_empty_blocks_until( tester& t, + account_name last_producer, + account_name next_producer, + uint32_t max_num_blocks_to_produce ) +{ + auto condition_satisfied = [&t, last_producer, next_producer]() { + return t.control->pending_block_producer() == next_producer && t.control->head_block_producer() == last_producer; + }; + + for( uint32_t blocks_produced = 0; + blocks_produced < max_num_blocks_to_produce; + t.produce_block(), ++blocks_produced ) + { + if( condition_satisfied() ) + return true; + } + + return condition_satisfied(); +} diff --git a/unittests/fork_test_utilities.hpp b/unittests/fork_test_utilities.hpp new file mode 100644 index 00000000000..f5ae33ae718 --- /dev/null +++ b/unittests/fork_test_utilities.hpp @@ -0,0 +1,21 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include <eosio/testing/tester.hpp> + +using namespace eosio::chain; +using namespace eosio::testing; + +private_key_type get_private_key( name keyname, string role ); + +public_key_type get_public_key( name keyname, string role ); + +void push_blocks( tester& from, tester& to, uint32_t block_num_limit = std::numeric_limits<uint32_t>::max() ); + +bool produce_empty_blocks_until( tester& t, + account_name last_producer, + account_name next_producer, + uint32_t max_num_blocks_to_produce = std::numeric_limits<uint32_t>::max() ); diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 5dbc4863a49..920d759e951 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -16,24 +16,11 @@ #include +#include "fork_test_utilities.hpp" + using namespace eosio::chain; using namespace eosio::testing; -private_key_type get_private_key( name keyname, string role ) { - return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); -} - -public_key_type get_public_key( name keyname, string role ){ - return get_private_key( keyname, role ).get_public_key(); -} - -void push_blocks( tester& from, tester& to ) { - while( to.control->fork_db_head_block_num() < from.control->fork_db_head_block_num() ) { - auto fb = from.control->fetch_block_by_number( to.control->fork_db_head_block_num()+1 ); - to.push_block( fb ); - } -} - BOOST_AUTO_TEST_SUITE(forked_tests) BOOST_AUTO_TEST_CASE( irrblock ) try { @@ -49,7 +36,7 @@ BOOST_AUTO_TEST_CASE( irrblock ) try { wlog("set producer schedule to [dan,sam,pam]"); c.produce_blocks(50); -} FC_LOG_AND_RETHROW() +} FC_LOG_AND_RETHROW()
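+// fork_tracker collects the signed blocks built on one speculative fork, together with its block merkle state, so a corrupted variant of that fork can be replayed against the main chain later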
struct fork_tracker { vector blocks; @@ -66,9 +53,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { auto res = bios.set_producers( {N(a),N(b),N(c),N(d),N(e)} ); // run until the producers are installed and its the start of "a's" round - while( bios.control->pending_block_state()->header.producer.to_string() != "a" || bios.control->head_block_state()->header.producer.to_string() != "e") { - bios.produce_block(); - } + BOOST_REQUIRE( produce_empty_blocks_until( bios, N(e), N(a) ) ); // sync remote node tester remote; @@ -106,7 +91,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { // re-sign the block auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), fork.block_merkle.get_root() ) ); - auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule_hash) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) ); copy_b->producer_signature = remote.get_private_key(N(b), "active").sign(sig_digest); // add this new block to our corrupted block merkle @@ -134,7 +119,9 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { } // push the block which should attempt the corrupted fork and fail - BOOST_REQUIRE_THROW(bios.push_block(fork.blocks.back()), fc::exception); + BOOST_REQUIRE_EXCEPTION( bios.push_block(fork.blocks.back()), fc::exception, + fc_exception_message_is( "Block ID does not match" ) + ); } } @@ -149,8 +136,9 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { BOOST_AUTO_TEST_CASE( forking ) try { tester c; - c.produce_block(); - c.produce_block(); + while (c.control->head_block_num() < 3) { + c.produce_block(); + } auto r = c.create_accounts( {N(dan),N(sam),N(pam)} ); wdump((fc::json::to_pretty_string(r))); c.produce_block(); @@ -174,15 +162,18 @@ BOOST_AUTO_TEST_CASE( forking ) try { ("maximum_supply", core_from_string("10000000.0000")) ); - wdump((fc::json::to_pretty_string(cr))); - cr = c.push_action( N(eosio.token), N(issue), config::system_account_name, mutable_variant_object() - ("to", "dan" ) + ("to", "eosio" ) ("quantity", core_from_string("100.0000")) ("memo", "") ); - wdump((fc::json::to_pretty_string(cr))); + cr = c.push_action( N(eosio.token), N(transfer), config::system_account_name, mutable_variant_object() + ("from", "eosio") + ("to", "dan" ) + ("quantity", core_from_string("100.0000")) + ("memo", "") + ); tester c2; @@ -300,7 +291,9 @@ BOOST_AUTO_TEST_CASE( forking ) try { */ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { tester c; - c.produce_blocks(10); + while (c.control->head_block_num() < 11) { + c.produce_block(); + } auto r = c.create_accounts( {N(dan),N(sam),N(pam),N(scott)} ); auto res = c.set_producers( {N(dan),N(sam),N(pam),N(scott)} ); wlog("set producer schedule to [dan,sam,pam,scott]"); @@ -319,7 +312,7 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { auto nextproducer = [](tester &c, int skip_interval) ->account_name { auto head_time = c.control->head_block_time(); auto next_time = head_time + fc::milliseconds(config::block_interval_ms * skip_interval); - return c.control->head_block_state()->get_scheduled_producer(next_time).producer_name; + return c.control->head_block_state()->get_scheduled_producer(next_time).producer_name; }; // fork c: 2 producers: dan, sam @@ -329,18 +322,18 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { account_name next1 = nextproducer(c, skip1); if (next1 == N(dan) || next1 == N(sam)) { c.produce_block(fc::milliseconds(config::block_interval_ms 
* skip1)); skip1 = 1; - } + } else ++skip1; account_name next2 = nextproducer(c2, skip2); if (next2 == N(scott)) { c2.produce_block(fc::milliseconds(config::block_interval_ms * skip2)); skip2 = 1; - } + } else ++skip2; } BOOST_REQUIRE_EQUAL(87u, c.control->head_block_num()); BOOST_REQUIRE_EQUAL(73u, c2.control->head_block_num()); - + // push fork from c2 => c size_t p = fork_num; @@ -351,8 +344,45 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { BOOST_REQUIRE_EQUAL(73u, c.control->head_block_num()); -} FC_LOG_AND_RETHROW() +} FC_LOG_AND_RETHROW() + +/** + * Tests that a validating node does not accept a block which is considered invalid by another node. + */ +BOOST_AUTO_TEST_CASE( validator_accepts_valid_blocks ) try { + + tester n1; + tester n2; + tester n3; + + n1.produce_block(); + + auto id = n1.control->head_block_id(); + + block_state_ptr first_block; + + auto c = n2.control->accepted_block.connect( [&]( const block_state_ptr& bsp) { + first_block = bsp; + } ); + + push_blocks( n1, n2 ); + + BOOST_CHECK_EQUAL( n2.control->head_block_id(), id ); + + BOOST_REQUIRE( first_block ); + first_block->verify_signee( first_block->signee() ); + BOOST_CHECK_EQUAL( first_block->header.id(), first_block->block->id() ); + BOOST_CHECK( first_block->header.producer_signature == first_block->block->producer_signature ); + + c.disconnect(); + + n3.push_block( first_block->block ); + + BOOST_CHECK_EQUAL( n3.control->head_block_id(), id ); + + +} FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE( read_modes ) try { tester c; @@ -363,21 +393,187 @@ BOOST_AUTO_TEST_CASE( read_modes ) try { auto res = c.set_producers( {N(dan),N(sam),N(pam)} ); c.produce_blocks(200); auto head_block_num = c.control->head_block_num(); + auto last_irreversible_block_num = c.control->last_irreversible_block_num(); - tester head(true, db_read_mode::HEAD); + tester head(setup_policy::old_bios_only, db_read_mode::HEAD); push_blocks(c, head); - BOOST_REQUIRE_EQUAL(head_block_num, head.control->fork_db_head_block_num()); - BOOST_REQUIRE_EQUAL(head_block_num, head.control->head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, head.control->fork_db_head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, head.control->head_block_num()); - tester read_only(false, db_read_mode::READ_ONLY); + tester read_only(setup_policy::none, db_read_mode::READ_ONLY); push_blocks(c, read_only); - BOOST_REQUIRE_EQUAL(head_block_num, read_only.control->fork_db_head_block_num()); - BOOST_REQUIRE_EQUAL(head_block_num, read_only.control->head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, read_only.control->fork_db_head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, read_only.control->head_block_num()); - tester irreversible(true, db_read_mode::IRREVERSIBLE); + tester irreversible(setup_policy::old_bios_only, db_read_mode::IRREVERSIBLE); push_blocks(c, irreversible); - BOOST_REQUIRE_EQUAL(head_block_num, irreversible.control->fork_db_head_block_num()); - BOOST_REQUIRE_EQUAL(head_block_num - 49, irreversible.control->head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, irreversible.control->fork_db_pending_head_block_num()); + BOOST_CHECK_EQUAL(last_irreversible_block_num, irreversible.control->fork_db_head_block_num()); + BOOST_CHECK_EQUAL(last_irreversible_block_num, irreversible.control->head_block_num()); + +} FC_LOG_AND_RETHROW() + + +BOOST_AUTO_TEST_CASE( irreversible_mode ) try { + auto does_account_exist = []( const tester& t, account_name n ) { + const auto& db = t.control->db(); + return (db.find( n ) != nullptr); + }; + + tester main; + + 
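+ // a two-producer schedule keeps the last irreversible block trailing well behind the head; with a single producer each block would become irreversible almost immediately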
main.create_accounts( {N(producer1), N(producer2)} ); + main.produce_block(); + main.set_producers( {N(producer1), N(producer2)} ); + main.produce_block(); + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer1), N(producer2), 26) ); + + main.create_accounts( {N(alice)} ); + main.produce_block(); + auto hbn1 = main.control->head_block_num(); + auto lib1 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer2), N(producer1), 11) ); + + auto hbn2 = main.control->head_block_num(); + auto lib2 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( lib2 < hbn1 ); + + tester other; + + push_blocks( main, other ); + BOOST_CHECK_EQUAL( other.control->head_block_num(), hbn2 ); + + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer1), N(producer2), 12) ); + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer2), N(producer1), 12) ); + + auto hbn3 = main.control->head_block_num(); + auto lib3 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( lib3 >= hbn1 ); + + BOOST_CHECK_EQUAL( does_account_exist( main, N(alice) ), true ); + + // other forks away from main after hbn2 + BOOST_REQUIRE_EQUAL( other.control->head_block_producer().to_string(), "producer2" ); + + other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round + BOOST_REQUIRE_EQUAL( other.control->head_block_producer().to_string(), "producer2" ); + auto fork_first_block_id = other.control->head_block_id(); + wlog( "{w}", ("w", fork_first_block_id)); + + BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + BOOST_REQUIRE_EQUAL( other.control->pending_block_producer().to_string(), "producer1" ); + + // Repeat two more times to ensure other has a longer chain than main + other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round + BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + + other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round + BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + + auto hbn4 = other.control->head_block_num(); + auto lib4 = other.control->last_irreversible_block_num(); + + BOOST_REQUIRE( hbn4 > hbn3 ); + BOOST_REQUIRE( lib4 < hbn1 ); + + tester irreversible(setup_policy::none, db_read_mode::IRREVERSIBLE); + + push_blocks( main, irreversible, hbn1 ); + + BOOST_CHECK_EQUAL( irreversible.control->fork_db_pending_head_block_num(), hbn1 ); + BOOST_CHECK_EQUAL( irreversible.control->head_block_num(), lib1 ); + BOOST_CHECK_EQUAL( does_account_exist( irreversible, N(alice) ), false ); + + push_blocks( other, irreversible, hbn4 ); + + BOOST_CHECK_EQUAL( irreversible.control->fork_db_pending_head_block_num(), hbn4 ); + BOOST_CHECK_EQUAL( irreversible.control->head_block_num(), lib4 ); + BOOST_CHECK_EQUAL( does_account_exist( irreversible, N(alice) ), false ); + + // force push blocks from main to irreversible creating a new branch in irreversible's fork database + for( uint32_t n = hbn2 + 1; n <= hbn3; ++n ) { + auto fb = main.control->fetch_block_by_number( n ); + irreversible.push_block( fb ); + } + + BOOST_CHECK_EQUAL( irreversible.control->fork_db_pending_head_block_num(), hbn3 ); + BOOST_CHECK_EQUAL( irreversible.control->head_block_num(), lib3 ); + BOOST_CHECK_EQUAL( does_account_exist( 
irreversible, N(alice) ), true ); + + { + auto bs = irreversible.control->fetch_block_state_by_id( fork_first_block_id ); + BOOST_REQUIRE( bs && bs->id == fork_first_block_id ); + } + + main.produce_block(); + auto hbn5 = main.control->head_block_num(); + auto lib5 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( lib5 > lib3 ); + + push_blocks( main, irreversible, hbn5 ); + + { + auto bs = irreversible.control->fetch_block_state_by_id( fork_first_block_id ); + BOOST_REQUIRE( !bs ); + } + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( reopen_forkdb ) try { + tester c1; + + c1.create_accounts( {N(alice),N(bob),N(carol)} ); + c1.produce_block(); + + auto res = c1.set_producers( {N(alice),N(bob),N(carol)} ); + + c1.produce_blocks(2); + + BOOST_REQUIRE_EQUAL( c1.control->active_producers().version, 1u ); + + produce_empty_blocks_until( c1, N(carol), N(alice) ); + c1.produce_block(); + produce_empty_blocks_until( c1, N(carol), N(alice) ); + + tester c2; + + push_blocks( c1, c2 ); + + auto fork1_lib_before = c1.control->last_irreversible_block_num(); + + // alice produces a block on fork 1 causing LIB to advance + c1.produce_block(); + + auto fork1_head_block_id = c1.control->head_block_id(); + + auto fork1_lib_after = c1.control->last_irreversible_block_num(); + BOOST_REQUIRE( fork1_lib_after > fork1_lib_before ); + + auto fork2_lib_before = c2.control->last_irreversible_block_num(); + BOOST_REQUIRE_EQUAL( fork1_lib_before, fork2_lib_before ); + + // carol produces a block on fork 2 skipping over the slots of alice and bob + c2.produce_block( fc::milliseconds(config::block_interval_ms * 25) ); + auto fork2_start_block = c2.control->head_block_num(); + c2.produce_block(); + + auto fork2_lib_after = c2.control->last_irreversible_block_num(); + BOOST_REQUIRE_EQUAL( fork2_lib_before, fork2_lib_after ); + + for( uint32_t block_num = fork2_start_block; block_num < c2.control->head_block_num(); ++block_num ) { + auto fb = c2.control->fetch_block_by_number( block_num ); + c1.push_block( fb ); + } + + BOOST_REQUIRE( fork1_head_block_id == c1.control->head_block_id() ); // new blocks should not cause fork switch + + c1.close(); + + c1.open( nullptr ); } FC_LOG_AND_RETHROW() diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 5ed82c742c0..611d9f1f40e 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -7,11 +7,13 @@ #include #include #include +#include #include #include +#include +#include -#include #include #ifdef NON_VALIDATING_TEST @@ -828,20 +830,20 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(trx.id(), mtrx->id); BOOST_CHECK_EQUAL(trx.id(), mtrx2->id); - boost::asio::thread_pool thread_pool(5); + named_thread_pool thread_pool( "misc", 5 ); BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::start_recover_keys( 
mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() );
-   transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() );
+   transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() );
+   transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() );
    auto keys = mtrx->recover_keys( test.control->get_chain_id() );
    BOOST_CHECK_EQUAL(1u, keys.second.size());
@@ -868,6 +870,8 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try {
    BOOST_CHECK_EQUAL(1u, keys5.second.size());
    BOOST_CHECK_EQUAL(public_key, *keys5.second.begin());
 
+   thread_pool.stop();
+
 } FC_LOG_AND_RETHROW() }
 
 BOOST_AUTO_TEST_CASE(reflector_init_test) {
@@ -1067,6 +1071,59 @@ BOOST_AUTO_TEST_CASE(reflector_init_test) {
    } FC_LOG_AND_RETHROW()
 }
 
+// Verify appbase::execution_priority_queue uses a stable priority queue so that jobs are executed
+// in order, FIFO, as submitted.
+BOOST_AUTO_TEST_CASE(stable_priority_queue_test) {
+  try {
+     using namespace std::chrono_literals;
+
+     appbase::execution_priority_queue pri_queue;
+     auto io_serv = std::make_shared<boost::asio::io_service>();
+     auto work_ptr = std::make_unique<boost::asio::io_service::work>(*io_serv);
+     std::atomic<int> posted{0};
+
+     std::thread t( [io_serv, &pri_queue, &posted]() {
+        while( posted < 100 && io_serv->run_one() ) {
+           ++posted;
+        }
+        bool more = true;
+        while( more || io_serv->run_one() ) {
+           while( io_serv->poll_one() ) {}
+           // execute the highest priority item
+           more = pri_queue.execute_highest();
+        }
+     } );
+     std::atomic<int> ran{0};
+     std::mutex mx;
+     std::vector<int> results;
+     for( int i = 0; i < 50; ++i ) {
+        boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::low, [io_serv, &mx, &ran, &results, i](){
+           std::lock_guard<std::mutex> g(mx);
+           results.push_back( 50 + i );
+           ++ran;
+        }));
+        boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){
+           std::lock_guard<std::mutex> g(mx);
+           results.push_back( i );
+           ++ran;
+        }));
+     }
+
+     while( ran < 100 ) std::this_thread::sleep_for( 5us );
+
+     work_ptr.reset();
+     io_serv->stop();
+     t.join();
+
+     std::lock_guard<std::mutex> g(mx);
+     BOOST_CHECK_EQUAL( 100, results.size() );
+     for( int i = 0; i < 100; ++i ) {
+        BOOST_CHECK_EQUAL( i, results.at( i ) );
+     }
+
+  } FC_LOG_AND_RETHROW()
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp
index ede5a905620..bec63376ff5 100644
--- a/unittests/producer_schedule_tests.cpp
+++ b/unittests/producer_schedule_tests.cpp
@@ -5,9 +5,10 @@
 #include
 #include
-#include
 #include
+#include "fork_test_utilities.hpp"
+
 #ifdef NON_VALIDATING_TEST
 #define TESTER tester
 #else
@@ -204,7 +205,9 @@ BOOST_AUTO_TEST_SUITE(producer_schedule_tests)
 BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try {
    create_accounts( {N(alice),N(bob),N(carol)} );
-   produce_block();
+   while (control->head_block_num() < 3) {
+      produce_block();
+   }
 
    auto compare_schedules = [&]( const vector<producer_key>& a, const producer_schedule_type& b ) {
       return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() );
@@ -228,7 +231,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try {
    produce_block(); // Starts new block which promotes the pending schedule to active
    BOOST_CHECK_EQUAL( control->active_producers().version, 1u );
    BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) );
-
produce_blocks(7); + produce_blocks(6); res = set_producers( {N(alice),N(bob),N(carol)} ); vector sch2 = { @@ -244,7 +247,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_blocks(23); // Alice produces the last block of her first round. // Bob's first block (which advances LIB to Alice's last block) is started but not finalized. BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(alice) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(bob) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(bob) ); BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); produce_blocks(12); // Bob produces his first 11 blocks @@ -252,7 +255,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_blocks(12); // Bob produces his 12th block. // Alice's first block of the second round is started but not finalized (which advances LIB to Bob's last block). BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(alice) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(bob) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(bob) ); BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); @@ -267,7 +270,9 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { create_accounts( {N(alice),N(bob),N(carol)} ); - produce_block(); + while (control->head_block_num() < 3) { + produce_block(); + } auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); @@ -291,7 +296,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_block(); // Starts new block which promotes the pending schedule to active BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(7); + produce_blocks(6); res = set_producers( {N(alice),N(bob)} ); vector sch2 = { @@ -304,7 +309,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_blocks(48); BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(bob) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(carol) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(carol) ); BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); produce_blocks(47); @@ -312,7 +317,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_blocks(1); BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(carol) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(alice) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(alice) ); BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); @@ -322,79 +327,179 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() -BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { - create_accounts( {N(alice),N(bob),N(carol)} ); - produce_block(); +BOOST_AUTO_TEST_CASE( empty_producer_schedule_has_no_effect ) try { + validating_tester c( validating_tester::default_config() ); + c.execute_setup_policy( 
setup_policy::preactivate_feature_and_new_bios ); + + c.create_accounts( {N(alice),N(bob),N(carol)} ); + while (c.control->head_block_num() < 3) { + c.produce_block(); + } auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); }; - auto res = set_producers( {N(alice),N(bob)} ); + auto res = c.set_producers( {N(alice),N(bob)} ); vector sch1 = { {N(alice), get_public_key(N(alice), "active")}, {N(bob), get_public_key(N(bob), "active")} }; wlog("set producer schedule to [alice,bob]"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->pending_producers().producers.size(), 0u ); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().producers.size(), 0u ); // Start a new block which promotes the proposed schedule to pending - produce_block(); - BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->pending_producers() ) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 0u ); + c.produce_block(); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->pending_producers() ) ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 0u ); // Start a new block which promotes the pending schedule to active - produce_block(); - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(7); + c.produce_block(); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); + c.produce_blocks(6); - res = set_producers( {} ); + res = c.set_producers( {} ); wlog("set producer schedule to []"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( control->proposed_producers()->producers.size(), 0u ); - BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2u ); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( c.control->proposed_producers()->producers.size(), 0u ); + BOOST_CHECK_EQUAL( c.control->proposed_producers()->version, 2u ); - produce_blocks(12); - BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); + c.produce_blocks(12); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); // Empty producer schedule does get promoted from proposed to pending - produce_block(); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); - BOOST_CHECK_EQUAL( false, control->proposed_producers().valid() ); + c.produce_block(); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( false, c.control->proposed_producers().valid() ); // However it should not get promoted from pending to active - produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); + c.produce_blocks(24); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); // Setting a new producer 
schedule should still use version 2 - res = set_producers( {N(alice),N(bob),N(carol)} ); + res = c.set_producers( {N(alice),N(bob),N(carol)} ); vector sch2 = { {N(alice), get_public_key(N(alice), "active")}, {N(bob), get_public_key(N(bob), "active")}, {N(carol), get_public_key(N(carol), "active")} }; wlog("set producer schedule to [alice,bob,carol]"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2u ); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *c.control->proposed_producers() ) ); + BOOST_CHECK_EQUAL( c.control->proposed_producers()->version, 2u ); // Produce enough blocks to promote the proposed schedule to pending, which it can do because the existing pending has zero producers - produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->pending_producers() ) ); + c.produce_blocks(24); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, c.control->pending_producers() ) ); // Produce enough blocks to promote the pending schedule to active - produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); + c.produce_blocks(24); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 2u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, c.control->active_producers() ) ); + + BOOST_REQUIRE_EQUAL( c.validate(), true ); +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { + tester c; + + c.create_accounts( {N(alice),N(bob),N(carol)} ); + c.produce_block(); + + auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { + return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); + }; + + auto res = c.set_producers( {N(alice),N(bob),N(carol)} ); + vector sch1 = { + {N(alice), c.get_public_key(N(alice), "active")}, + {N(bob), c.get_public_key(N(bob), "active")}, + {N(carol), c.get_public_key(N(carol), "active")} + }; + wlog("set producer schedule to [alice,bob,carol]"); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 0u ); + c.produce_block(); // Starts new block which promotes the proposed schedule to pending + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->pending_producers() ) ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 0u ); + c.produce_block(); + c.produce_block(); // Starts new block which promotes the pending schedule to active + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); + + produce_empty_blocks_until( c, N(carol), N(alice) ); + c.produce_block(); + produce_empty_blocks_until( c, N(carol), N(alice) ); + + res = c.set_producers( {N(alice),N(bob)} ); + vector sch2 = { + {N(alice), 
c.get_public_key(N(alice), "active")}, + {N(bob), c.get_public_key(N(bob), "active")} + }; + wlog("set producer schedule to [alice,bob]"); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *c.control->proposed_producers() ) ); + + produce_empty_blocks_until( c, N(bob), N(carol) ); + produce_empty_blocks_until( c, N(alice), N(bob) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + + produce_empty_blocks_until( c, N(carol), N(alice) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + + produce_empty_blocks_until( c, N(bob), N(carol) ); + BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(carol) ); + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); + + auto carol_last_produced_block_num = c.control->head_block_num() + 1; + wdump((carol_last_produced_block_num)); + + c.produce_block(); + BOOST_CHECK( c.control->pending_block_producer() == N(alice) ); + + res = c.set_producers( {N(alice),N(bob),N(carol)} ); + wlog("set producer schedule to [alice,bob,carol]"); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); + + produce_empty_blocks_until( c, N(bob), N(alice) ); + produce_empty_blocks_until( c, N(alice), N(bob) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 3u ); + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); + + produce_empty_blocks_until( c, N(bob), N(alice) ); + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 3u ); + + produce_empty_blocks_until( c, N(alice), N(bob) ); + c.produce_blocks(11); + BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(bob) ); + c.finish_block(); + + auto carol_block_num = c.control->head_block_num() + 1; + auto carol_block_time = c.control->head_block_time() + fc::milliseconds(config::block_interval_ms); + auto confirmed = carol_block_num - carol_last_produced_block_num - 1; + + c.control->start_block( carol_block_time, confirmed ); + BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(carol) ); + c.produce_block(); + auto h = c.control->head_block_header(); + + BOOST_CHECK_EQUAL( h.producer, N(carol) ); + BOOST_CHECK_EQUAL( h.confirmed, confirmed ); + + produce_empty_blocks_until( c, N(carol), N(alice) ); - BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp new file mode 100644 index 00000000000..cd20e6cd74f --- /dev/null +++ b/unittests/protocol_feature_tests.cpp @@ -0,0 +1,1331 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#include +#include +#include +#include + +#include + +#include + +#include + +#include + +#include "fork_test_utilities.hpp" + +using namespace eosio::chain; +using namespace eosio::testing; + +BOOST_AUTO_TEST_SUITE(protocol_feature_tests) + +BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { + tester c( setup_policy::none ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + c.produce_block(); + + // Cannot set latest bios contract since it requires intrinsics that have not yet been whitelisted. 
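+   // (The new bios guards feature-dependent actions with the is_feature_activated
+   //  intrinsic, conceptually: eosio::check( is_feature_activated( digest ),
+   //  "protocol feature is not activated" ); until PREACTIVATE_FEATURE is live that
+   //  import cannot be linked, so instantiating the contract fails as checked below.)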
+ BOOST_CHECK_EXCEPTION( c.set_code( config::system_account_name, contracts::eosio_bios_wasm() ), + wasm_exception, fc_exception_message_is("env.is_feature_activated unresolveable") + ); + + // But the old bios contract can still be set. + c.set_code( config::system_account_name, contracts::before_preactivate_eosio_bios_wasm() ); + c.set_abi( config::system_account_name, contracts::before_preactivate_eosio_bios_abi().data() ); + + auto t = c.control->pending_block_time(); + c.control->abort_block(); + BOOST_REQUIRE_EXCEPTION( c.control->start_block( t, 0, {digest_type()} ), protocol_feature_exception, + fc_exception_message_is( "protocol feature with digest '0000000000000000000000000000000000000000000000000000000000000000' is unrecognized" ) + ); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::preactivate_feature ); + + BOOST_REQUIRE( d ); + + // Activate PREACTIVATE_FEATURE. + c.schedule_protocol_features_wo_preactivation({ *d }); + c.produce_block(); + + // Now the latest bios contract can be set. + c.set_code( config::system_account_name, contracts::eosio_bios_wasm() ); + c.set_abi( config::system_account_name, contracts::eosio_bios_abi().data() ); + + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(reqactivated), config::system_account_name, + mutable_variant_object()("feature_digest", digest_type()) ), + eosio_assert_message_exception, + eosio_assert_message_is( "protocol feature is not activated" ) + ); + + c.push_action( config::system_account_name, N(reqactivated), config::system_account_name, mutable_variant_object() + ("feature_digest", *d ) + ); + + c.produce_block(); + + // Ensure validator node accepts the blockchain + + tester c2(setup_policy::none, db_read_mode::SPECULATIVE); + push_blocks( c, c2 ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( activate_and_restart ) try { + tester c( setup_policy::none ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto pfs = pfm.get_protocol_feature_set(); // make copy of protocol feature set + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::preactivate_feature ); + BOOST_REQUIRE( d ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + + // Activate PREACTIVATE_FEATURE. 
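+   // (schedule_protocol_features_wo_preactivation hands the digest straight to the
+   //  next start_block call instead of going through on-chain preactivation;
+   //  conceptually the tester ends up doing something like the start_block call shown
+   //  earlier: control->start_block( next_block_time, 0 /*confirm_count*/, { *d } );
+   //  a sketch of the mechanism, not the tester's literal code.)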
+ c.schedule_protocol_features_wo_preactivation({ *d }); + c.produce_blocks(2); + + auto head_block_num = c.control->head_block_num(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + + c.close(); + c.open( std::move( pfs ), nullptr ); + + BOOST_CHECK_EQUAL( head_block_num, c.control->head_block_num() ); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( double_preactivation ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + BOOST_REQUIRE( d ); + + c.push_action( config::system_account_name, N(activate), config::system_account_name, + fc::mutable_variant_object()("feature_digest", *d), 10 ); + + std::string expected_error_msg("protocol feature with digest '"); + { + fc::variant v; + to_variant( *d, v ); + expected_error_msg += v.get_string(); + expected_error_msg += "' is already pre-activated"; + } + + BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(activate), config::system_account_name, + fc::mutable_variant_object()("feature_digest", *d), 20 ), + protocol_feature_exception, + fc_exception_message_is( expected_error_msg ) + ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( double_activation ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + BOOST_REQUIRE( d ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.preactivate_protocol_features( {*d} ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.schedule_protocol_features_wo_preactivation( {*d} ); + + BOOST_CHECK_EXCEPTION( c.produce_block();, + block_validate_exception, + fc_exception_message_starts_with( "attempted duplicate activation within a single block:" ) + ); + + c.protocol_features_to_be_activated_wo_preactivation.clear(); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.produce_block(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.produce_block(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( require_preactivation_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + BOOST_REQUIRE( d ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.schedule_protocol_features_wo_preactivation( {*d} ); + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_starts_with( "attempted to activate protocol feature without prior required preactivation:" ) + ); + + c.protocol_features_to_be_activated_wo_preactivation.clear(); + + BOOST_CHECK( !c.control->is_builtin_activated( 
builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.preactivate_protocol_features( {*d} ); + c.finish_block(); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + BOOST_CHECK_EXCEPTION( c.control->start_block( + c.control->head_block_time() + fc::milliseconds(config::block_interval_ms), + 0, + {} + ), + block_validate_exception, + fc_exception_message_is( "There are pre-activated protocol features that were not activated at the start of this block" ) + ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.produce_block(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + BOOST_REQUIRE( d ); + + c.create_accounts( {N(alice), N(bob), N(charlie)} ); + + BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(linkauth), N(bob), fc::mutable_variant_object() + ("account", "bob") + ("code", name(config::system_account_name)) + ("type", "") + ("requirement", "test" ) + ), permission_query_exception, + fc_exception_message_is( "Failed to retrieve permission: test" ) + ); + + BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(linkauth), N(charlie), fc::mutable_variant_object() + ("account", "charlie") + ("code", name(config::system_account_name)) + ("type", "") + ("requirement", "test" ) + ), permission_query_exception, + fc_exception_message_is( "Failed to retrieve permission: test" ) + ); + + c.push_action( config::system_account_name, N(updateauth), N(alice), fc::mutable_variant_object() + ("account", "alice") + ("permission", "test") + ("parent", "active") + ("auth", authority(get_public_key("testapi", "test"))) + ); + + c.produce_block(); + + // Verify the incorrect behavior prior to ONLY_LINK_TO_EXISTING_PERMISSION activation. + c.push_action( config::system_account_name, N(linkauth), N(bob), fc::mutable_variant_object() + ("account", "bob") + ("code", name(config::system_account_name)) + ("type", "") + ("requirement", "test" ) + ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // Verify the correct behavior after ONLY_LINK_TO_EXISTING_PERMISSION activation. 
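+   // (bob was able to link to the nonexistent "test" permission above; with the
+   //  feature active, the identical linkauth for charlie must now be rejected up
+   //  front with a permission_query_exception.)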
+ BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(linkauth), N(charlie), fc::mutable_variant_object() + ("account", "charlie") + ("code", name(config::system_account_name)) + ("type", "") + ("requirement", "test" ) + ), permission_query_exception, + fc_exception_message_is( "Failed to retrieve permission: test" ) + ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { + tester c( setup_policy::none ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto restart_with_new_pfs = [&c]( protocol_feature_set&& pfs ) { + c.close(); + c.open(std::move(pfs), nullptr); + }; + + auto get_builtin_digest = [&pfm]( builtin_protocol_feature_t codename ) -> digest_type { + auto res = pfm.get_builtin_digest( codename ); + BOOST_REQUIRE( res ); + return *res; + }; + + auto preactivate_feature_digest = get_builtin_digest( builtin_protocol_feature_t::preactivate_feature ); + auto only_link_to_existing_permission_digest = get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + + auto invalid_act_time = fc::time_point::from_iso_string( "2200-01-01T00:00:00" ); + auto valid_act_time = fc::time_point{}; + + // First, test subjective_restrictions on feature that can be activated WITHOUT preactivation (PREACTIVATE_FEATURE) + + c.schedule_protocol_features_wo_preactivation({ preactivate_feature_digest }); + // schedule PREACTIVATE_FEATURE activation (persists until next successful start_block) + + subjective_restriction_map custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {invalid_act_time, false, true} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // When a block is produced, the protocol feature activation should fail and throw an error + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_starts_with( + std::string(c.control->head_block_time()) + + " is too early for the earliest allowed activation time of the protocol feature" + ) + ); + BOOST_CHECK_EQUAL( c.protocol_features_to_be_activated_wo_preactivation.size(), 1 ); + + // Revert to the valid earliest allowed activation time, however with enabled == false + custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, false} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // This should also fail, but with different exception + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_is( + std::string("protocol feature with digest '") + + std::string(preactivate_feature_digest) + + "' is disabled" + ) + ); + BOOST_CHECK_EQUAL( c.protocol_features_to_be_activated_wo_preactivation.size(), 1 ); + + // Revert to the valid earliest allowed activation time, however with subjective_restrictions enabled == true + custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, true} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // Now it should be fine, the feature should be activated after the block is produced + BOOST_CHECK_NO_THROW( c.produce_block() ); + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + BOOST_CHECK_EQUAL( c.protocol_features_to_be_activated_wo_preactivation.size(), 0 ); + + // Second, test subjective_restrictions on feature that need to 
be activated WITH preactivation (ONLY_LINK_TO_EXISTING_PERMISSION) + + c.set_bios_contract(); + c.produce_block(); + + custom_subjective_restrictions = { + { builtin_protocol_feature_t::only_link_to_existing_permission, {invalid_act_time, true, true} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // It should fail + BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), + subjective_block_production_exception, + fc_exception_message_starts_with( + std::string(c.control->head_block_time() + fc::milliseconds(config::block_interval_ms)) + + " is too early for the earliest allowed activation time of the protocol feature" + ) + ); + + // Revert with valid time and subjective_restrictions enabled == false + custom_subjective_restrictions = { + { builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, false} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // It should fail but with different exception + BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), + subjective_block_production_exception, + fc_exception_message_is( + std::string("protocol feature with digest '") + + std::string(only_link_to_existing_permission_digest)+ + "' is disabled" + ) + ); + + // Revert with valid time and subjective_restrictions enabled == true + custom_subjective_restrictions = { + { builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, true} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // Should be fine now, and activated in the next block + BOOST_CHECK_NO_THROW( c.preactivate_protocol_features({only_link_to_existing_permission_digest}) ); + c.produce_block(); + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + c.create_accounts( {N(alice), N(bob), N(test)} ); + c.set_code( N(test), contracts::deferred_test_wasm() ); + c.set_abi( N(test), contracts::deferred_test_abi().data() ); + c.produce_block(); + + auto alice_ram_usage0 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100) + ); + + auto alice_ram_usage1 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + + // Verify subjective mitigation is in place + BOOST_CHECK_EXCEPTION( + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 101 ) + ), + subjective_block_production_exception, + fc_exception_message_is( "Replacing a deferred transaction is temporarily disabled." 
) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + c.control->abort_block(); + + c.close(); + auto cfg = c.get_config(); + cfg.disable_all_subjective_mitigations = true; + c.init( cfg, nullptr ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage0 ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + auto dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + auto first_dtrx_id = dtrxs[0]; + + // With the subjective mitigation disabled, replacing the deferred transaction is allowed. + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 101) + ); + + auto alice_ram_usage2 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + BOOST_CHECK_EQUAL( alice_ram_usage2, alice_ram_usage1 + (alice_ram_usage1 - alice_ram_usage0) ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + BOOST_CHECK_EQUAL( first_dtrx_id, dtrxs[0] ); // Incorrectly kept as the old transaction ID. + + c.produce_block(); + + auto alice_ram_usage3 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + BOOST_CHECK_EQUAL( alice_ram_usage3, alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 0 ); + + c.produce_block(); + + c.close(); + cfg.disable_all_subjective_mitigations = false; + c.init( cfg, nullptr ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::replace_deferred ); + BOOST_REQUIRE( d ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage0 ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + auto first_dtrx_id2 = dtrxs[0]; + + // With REPLACE_DEFERRED activated, replacing the deferred transaction is allowed and now should work properly. 
+ c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 101) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + BOOST_CHECK( first_dtrx_id2 != dtrxs[0] ); + + // Replace again with a deferred transaction identical to the first one + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100), + 100 // Needed to make this input transaction unique + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + BOOST_CHECK_EQUAL( first_dtrx_id2, dtrxs[0] ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + tester c2( setup_policy::none ); + + c.create_accounts( {N(alice), N(test)} ); + c.set_code( N(test), contracts::deferred_test_wasm() ); + c.set_abi( N(test), contracts::deferred_test_abi().data() ); + c.produce_block(); + + push_blocks( c, c2 ); + + c2.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", "test") + ("payload", 50) + ); + + c2.finish_block(); + + BOOST_CHECK_EXCEPTION( + c2.produce_block(), + fc::exception, + fc_exception_message_is( "no transaction extensions supported yet for deferred transactions" ) + ); + + c2.produce_empty_block( fc::minutes(10) ); + + transaction_trace_ptr trace0; + auto h2 = c2.control->applied_transaction.connect( [&](std::tuple x) { + auto& t = std::get<0>(x); + if( t && t->receipt && t->receipt->status == transaction_receipt::expired) { + trace0 = t; + } + } ); + + c2.produce_block(); + + h2.disconnect(); + + BOOST_REQUIRE( trace0 ); + + c.produce_block(); + + const auto& index = c.control->db().get_index(); + + transaction_trace_ptr trace1; + auto h = c.control->applied_transaction.connect( [&](std::tuple x) { + auto& t = std::get<0>(x); + if( t && t->receipt && t->receipt->status == transaction_receipt::executed) { + trace1 = t; + } + } ); + + BOOST_REQUIRE_EQUAL(0, index.size()); + + c.push_action( config::system_account_name, N(reqauth), N(alice), fc::mutable_variant_object() + ("from", "alice"), + 5, 2 + ); + + BOOST_REQUIRE_EQUAL(1, index.size()); + + c.produce_block(); + + BOOST_REQUIRE_EQUAL(1, index.size()); + + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d1 = pfm.get_builtin_digest( builtin_protocol_feature_t::replace_deferred ); + BOOST_REQUIRE( d1 ); + auto d2 = pfm.get_builtin_digest( builtin_protocol_feature_t::no_duplicate_deferred_id ); + BOOST_REQUIRE( d2 ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", "test") + ("payload", 42) + ); + BOOST_REQUIRE_EQUAL(2, index.size()); + + c.preactivate_protocol_features( {*d1, *d2} ); + c.produce_block(); + // The deferred transaction with payload 42 that was scheduled prior to the activation of the protocol features should now be retired. + + BOOST_REQUIRE( trace1 ); + BOOST_REQUIRE_EQUAL(1, index.size()); + + trace1 = nullptr; + + // Retire the delayed eosio::reqauth transaction. 
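+   // (The reqauth above was pushed with a short delay; the trailing 5, 2 arguments
+   //  are the tester's expiration and delay_sec. Producing a few more blocks lets it
+   //  execute and empties the generated transaction index again.)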
+ c.produce_blocks(5); + BOOST_REQUIRE( trace1 ); + BOOST_REQUIRE_EQUAL(0, index.size()); + + h.disconnect(); + + auto check_generation_context = []( auto&& data, + const transaction_id_type& sender_trx_id, + unsigned __int128 sender_id, + account_name sender ) + { + transaction trx; + fc::datastream ds1( data.data(), data.size() ); + fc::raw::unpack( ds1, trx ); + BOOST_REQUIRE_EQUAL( trx.transaction_extensions.size(), 1 ); + BOOST_REQUIRE_EQUAL( trx.transaction_extensions.back().first, 0 ); + + fc::datastream ds2( trx.transaction_extensions.back().second.data(), + trx.transaction_extensions.back().second.size() ); + + transaction_id_type actual_sender_trx_id; + fc::raw::unpack( ds2, actual_sender_trx_id ); + BOOST_CHECK_EQUAL( actual_sender_trx_id, sender_trx_id ); + + unsigned __int128 actual_sender_id; + fc::raw::unpack( ds2, actual_sender_id ); + BOOST_CHECK( actual_sender_id == sender_id ); + + uint64_t actual_sender; + fc::raw::unpack( ds2, actual_sender ); + BOOST_CHECK_EQUAL( account_name(actual_sender), sender ); + }; + + BOOST_CHECK_EXCEPTION( + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", "test") + ("payload", 77 ) + ), + ill_formed_deferred_transaction_generation_context, + fc_exception_message_is( "deferred transaction generaction context contains mismatching sender" ) + ); + + BOOST_REQUIRE_EQUAL(0, index.size()); + + auto trace2 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", "test") + ("payload", 40) + ); + + BOOST_REQUIRE_EQUAL(1, index.size()); + + check_generation_context( index.begin()->packed_trx, + trace2->id, + ((static_cast(N(alice)) << 64) | 1), + N(test) ); + + c.produce_block(); + + BOOST_REQUIRE_EQUAL(0, index.size()); + + auto trace3 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", "test") + ("payload", 50) + ); + + BOOST_REQUIRE_EQUAL(1, index.size()); + + check_generation_context( index.begin()->packed_trx, + trace3->id, + ((static_cast(N(alice)) << 64) | 1), + N(test) ); + + c.produce_block(); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { + tester chain( setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester_account = N(tester); + + chain.produce_blocks(); + chain.create_account(N(currency)); + chain.create_account(tester_account); + chain.produce_blocks(); + + chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", name(tester_account).to_string()) + ("permission", "first") + ("parent", "active") + ("auth", authority(chain.get_public_key(tester_account, "first"), 5)) + ); + + auto validate_disallow = [&] (const char *code, const char *type) { + BOOST_REQUIRE_EXCEPTION( + chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", name(tester_account).to_string()) + ("code", code) + ("type", type) + ("requirement", "first")), + action_validate_exception, + fc_exception_message_is(std::string("Cannot link eosio::") + std::string(type) + std::string(" to a minimum permission")) + ); + }; + + validate_disallow("eosio", "linkauth"); + validate_disallow("eosio", "unlinkauth"); + validate_disallow("eosio", "deleteauth"); + validate_disallow("eosio", "updateauth"); + validate_disallow("eosio", "canceldelay"); + + 
validate_disallow("currency", "linkauth"); + validate_disallow("currency", "unlinkauth"); + validate_disallow("currency", "deleteauth"); + validate_disallow("currency", "updateauth"); + validate_disallow("currency", "canceldelay"); + + const auto& pfm = chain.control->get_protocol_feature_manager(); + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::fix_linkauth_restriction ); + BOOST_REQUIRE( d ); + + chain.preactivate_protocol_features( {*d} ); + chain.produce_block(); + + auto validate_allowed = [&] (const char *code, const char *type) { + chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", name(tester_account).to_string()) + ("code", code) + ("type", type) + ("requirement", "first")); + }; + + validate_disallow("eosio", "linkauth"); + validate_disallow("eosio", "unlinkauth"); + validate_disallow("eosio", "deleteauth"); + validate_disallow("eosio", "updateauth"); + validate_disallow("eosio", "canceldelay"); + + validate_allowed("currency", "linkauth"); + validate_allowed("currency", "unlinkauth"); + validate_allowed("currency", "deleteauth"); + validate_allowed("currency", "updateauth"); + validate_allowed("currency", "canceldelay"); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( disallow_empty_producer_schedule_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::disallow_empty_producer_schedule ); + BOOST_REQUIRE( d ); + + // Before activation, it is allowed to set empty producer schedule + c.set_producers( {} ); + + // After activation, it should not be allowed + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + BOOST_REQUIRE_EXCEPTION( c.set_producers( {} ), + wasm_execution_error, + fc_exception_message_is( "Producer schedule cannot be empty" ) ); + + // Setting non empty producer schedule should still be fine + vector producer_names = {N(alice),N(bob),N(carol)}; + c.create_accounts( producer_names ); + c.set_producers( producer_names ); + c.produce_blocks(2); + const auto& schedule = c.get_producer_keys( producer_names ); + BOOST_CHECK( std::equal( schedule.begin(), schedule.end(), c.control->active_producers().producers.begin()) ); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( restrict_action_to_self_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::restrict_action_to_self ); + BOOST_REQUIRE( d ); + + c.create_accounts( {N(testacc), N(acctonotify), N(alice)} ); + c.set_code( N(testacc), contracts::restrict_action_test_wasm() ); + c.set_abi( N(testacc), contracts::restrict_action_test_abi().data() ); + + c.set_code( N(acctonotify), contracts::restrict_action_test_wasm() ); + c.set_abi( N(acctonotify), contracts::restrict_action_test_abi().data() ); + + // Before the protocol feature is preactivated + // - Sending inline action to self = no problem + // - Sending deferred trx to self = throw subjective exception + // - Sending inline action to self from notification = throw subjective exception + // - Sending deferred trx to self from notification = throw subjective exception + BOOST_CHECK_NO_THROW( c.push_action( N(testacc), N(sendinline), N(alice), mutable_variant_object()("authorizer", "alice")) ); + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), 
N(senddefer), N(alice), + mutable_variant_object()("authorizer", "alice")("senderid", 0)), + subjective_block_production_exception, + fc_exception_message_starts_with( "Authorization failure with sent deferred transaction" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifyinline), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice")), + subjective_block_production_exception, + fc_exception_message_starts_with( "Authorization failure with inline action sent to self" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifydefer), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice")("senderid", 1)), + subjective_block_production_exception, + fc_exception_message_starts_with( "Authorization failure with sent deferred transaction" ) ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // After the protocol feature is preactivated, all the 4 cases will throw an objective unsatisfied_authorization exception + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(sendinline), N(alice), mutable_variant_object()("authorizer", "alice") ), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(senddefer), N(alice), + mutable_variant_object()("authorizer", "alice")("senderid", 3)), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifyinline), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice") ), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifydefer), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice")("senderid", 4)), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( only_bill_to_first_authorizer ) { try { + tester chain( setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester_account = N(tester); + const auto& tester_account2 = N(tester2); + + chain.produce_blocks(); + chain.create_account(tester_account); + chain.create_account(tester_account2); + + chain.push_action(config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", name(tester_account).to_string()) + ("ram_bytes", 10000) + ("net_weight", 1000) + ("cpu_weight", 1000)); + + chain.push_action(config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", name(tester_account2).to_string()) + ("ram_bytes", 10000) + ("net_weight", 1000) + ("cpu_weight", 1000)); + + const resource_limits_manager& mgr = chain.control->get_resource_limits_manager(); + + chain.produce_blocks(); + + { + action act; + act.account = tester_account; + act.name = N(null); + act.authorization = vector{ + {tester_account, config::active_name}, + {tester_account2, config::active_name} + }; + + signed_transaction trx; + trx.actions.emplace_back(std::move(act)); + chain.set_transaction_headers(trx); + + trx.sign(get_private_key(tester_account, "active"), chain.control->get_chain_id()); + trx.sign(get_private_key(tester_account2, "active"), chain.control->get_chain_id()); + + + auto tester_cpu_limit0 = 
mgr.get_account_cpu_limit_ex(tester_account); + auto tester2_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account2); + auto tester_net_limit0 = mgr.get_account_net_limit_ex(tester_account); + auto tester2_net_limit0 = mgr.get_account_net_limit_ex(tester_account2); + + chain.push_transaction(trx); + + auto tester_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account); + auto tester2_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account2); + auto tester_net_limit1 = mgr.get_account_net_limit_ex(tester_account); + auto tester2_net_limit1 = mgr.get_account_net_limit_ex(tester_account2); + + BOOST_CHECK(tester_cpu_limit1.used > tester_cpu_limit0.used); + BOOST_CHECK(tester2_cpu_limit1.used > tester2_cpu_limit0.used); + BOOST_CHECK(tester_net_limit1.used > tester_net_limit0.used); + BOOST_CHECK(tester2_net_limit1.used > tester2_net_limit0.used); + + BOOST_CHECK_EQUAL(tester_cpu_limit1.used - tester_cpu_limit0.used, tester2_cpu_limit1.used - tester2_cpu_limit0.used); + BOOST_CHECK_EQUAL(tester_net_limit1.used - tester_net_limit0.used, tester2_net_limit1.used - tester2_net_limit0.used); + } + + const auto& pfm = chain.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_bill_first_authorizer ); + BOOST_REQUIRE( d ); + + chain.preactivate_protocol_features( {*d} ); + chain.produce_blocks(); + + { + action act; + act.account = tester_account; + act.name = N(null2); + act.authorization = vector{ + {tester_account, config::active_name}, + {tester_account2, config::active_name} + }; + + signed_transaction trx; + trx.actions.emplace_back(std::move(act)); + chain.set_transaction_headers(trx); + + trx.sign(get_private_key(tester_account, "active"), chain.control->get_chain_id()); + trx.sign(get_private_key(tester_account2, "active"), chain.control->get_chain_id()); + + auto tester_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account); + auto tester2_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account2); + auto tester_net_limit0 = mgr.get_account_net_limit_ex(tester_account); + auto tester2_net_limit0 = mgr.get_account_net_limit_ex(tester_account2); + + chain.push_transaction(trx); + + auto tester_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account); + auto tester2_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account2); + auto tester_net_limit1 = mgr.get_account_net_limit_ex(tester_account); + auto tester2_net_limit1 = mgr.get_account_net_limit_ex(tester_account2); + + BOOST_CHECK(tester_cpu_limit1.used > tester_cpu_limit0.used); + BOOST_CHECK(tester2_cpu_limit1.used == tester2_cpu_limit0.used); + BOOST_CHECK(tester_net_limit1.used > tester_net_limit0.used); + BOOST_CHECK(tester2_net_limit1.used == tester2_net_limit0.used); + } + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( forward_setcode_test ) { try { + tester c( setup_policy::preactivate_feature_only ); + + const auto& tester1_account = N(tester1); + const auto& tester2_account = N(tester2); + c.create_accounts( {tester1_account, tester2_account} ); + + // Deploy contract that rejects all actions dispatched to it with the following exceptions: + // * eosio::setcode to set code on the eosio is allowed (unless the rejectall account exists) + // * eosio::newaccount is allowed only if it creates the rejectall account. + c.set_code( config::system_account_name, contracts::reject_all_wasm() ); + c.produce_block(); + + // Before activation, deploying a contract should work since setcode won't be forwarded to the WASM on eosio. 
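+   // (Conceptually, FORWARD_SETCODE makes the chain dispatch every eosio::setcode
+   //  action to the contract deployed on the eosio account as well, after the native
+   //  handler; without the feature only the native handler runs, so reject_all never
+   //  sees it.)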
+ c.set_code( tester1_account, contracts::noop_wasm() ); + + // Activate FORWARD_SETCODE protocol feature and then return contract on eosio back to what it was. + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::forward_setcode ); + BOOST_REQUIRE( d ); + c.set_bios_contract(); + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + c.set_code( config::system_account_name, contracts::reject_all_wasm() ); + c.produce_block(); + + // After activation, deploying a contract causes setcode to be dispatched to the WASM on eosio, + // and in this case the contract is configured to reject the setcode action. + BOOST_REQUIRE_EXCEPTION( c.set_code( tester2_account, contracts::noop_wasm() ), + eosio_assert_message_exception, + eosio_assert_message_is( "rejecting all actions" ) ); + + + tester c2(setup_policy::none); + push_blocks( c, c2 ); // make a backup of the chain to enable testing further conditions. + + c.set_bios_contract(); // To allow pushing further actions for setting up the other part of the test. + c.create_account( N(rejectall) ); + c.produce_block(); + // The existence of the rejectall account will make the reject_all contract reject all actions without exception. + + // It will no longer be possible to deploy the reject_all contract to the eosio account, + // because after the native setcode function installs it, the contract is immediately invoked on the same setcode action and rejects the transaction. + BOOST_REQUIRE_EXCEPTION( c.set_code( config::system_account_name, contracts::reject_all_wasm() ), + eosio_assert_message_exception, + eosio_assert_message_is( "rejecting all actions" ) ); + + + // Going back to the backup chain, we can create the rejectall account while the reject_all contract is + // already deployed on eosio. + c2.create_account( N(rejectall) ); + c2.produce_block(); + // Now all actions dispatched to the eosio account should be rejected. + + // However, it should still be possible to set the bios contract because the WASM on eosio is called after the + // native setcode function completes.
+ c2.set_bios_contract(); + c2.produce_block(); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( get_sender_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester1_account = account_name("tester1"); + const auto& tester2_account = account_name("tester2"); + c.create_accounts( {tester1_account, tester2_account} ); + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.set_code( tester1_account, contracts::get_sender_test_wasm() ), + wasm_exception, + fc_exception_message_is( "env.get_sender unresolveable" ) ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::get_sender ); + BOOST_REQUIRE( d ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + c.set_code( tester1_account, contracts::get_sender_test_wasm() ); + c.set_abi( tester1_account, contracts::get_sender_test_abi().data() ); + c.set_code( tester2_account, contracts::get_sender_test_wasm() ); + c.set_abi( tester2_account, contracts::get_sender_test_abi().data() ); + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.push_action( tester1_account, N(sendinline), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", account_name{}) ), + eosio_assert_message_exception, + eosio_assert_message_is( "sender did not match" ) ); + + c.push_action( tester1_account, N(sendinline), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", tester1_account.to_string()) + ); + + c.push_action( tester1_account, N(notify), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", tester1_account.to_string()) + ("send_inline", false) + ); + + c.push_action( tester1_account, N(notify), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", tester2_account.to_string()) + ("send_inline", true) + ); + + c.push_action( tester1_account, N(assertsender), tester1_account, mutable_variant_object() + ("expected_sender", account_name{}) + ); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( ram_restrictions_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester1_account = account_name("tester1"); + const auto& tester2_account = account_name("tester2"); + const auto& alice_account = account_name("alice"); + const auto& bob_account = account_name("bob"); + c.create_accounts( {tester1_account, tester2_account, alice_account, bob_account} ); + c.produce_block(); + c.set_code( tester1_account, contracts::ram_restrictions_test_wasm() ); + c.set_abi( tester1_account, contracts::ram_restrictions_test_abi().data() ); + c.produce_block(); + c.set_code( tester2_account, contracts::ram_restrictions_test_wasm() ); + c.set_abi( tester2_account, contracts::ram_restrictions_test_abi().data() ); + c.produce_block(); + + // Basic setup + c.push_action( tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 10) + ("len2", 0) + ("payer", alice_account) + ); + + // Cannot bill more RAM to another account that has not authorized the action. 
+ BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object() + ("len1", 20) + ("len2", 0) + ("payer", alice_account) + ), + missing_auth_exception, + fc_exception_message_starts_with( "missing authority" ) + ); + + // Cannot migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way without the authority of that account. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ), + missing_auth_exception, + fc_exception_message_starts_with( "missing authority" ) + ); + + // Cannot bill more RAM to another account within a notification + // even if the account authorized the original action. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 20) + ("len2", 0) + ("payer", alice_account) + ), + subjective_block_production_exception, + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); + + // Cannot migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way within a notification. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ), + subjective_block_production_exception, + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); + + // Cannot send deferred transaction paid by another account that has not authorized the action. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(senddefer), bob_account, mutable_variant_object() + ("senderid", 123) + ("payer", alice_account) + ), + missing_auth_exception, + fc_exception_message_starts_with( "missing authority" ) + ); + + // Cannot send deferred transaction paid by another account within a notification + // even if the account authorized the original action. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifydefer), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("senderid", 123) + ("payer", alice_account) + ), + subjective_block_production_exception, + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); + + // Can send deferred transaction paid by another account if it has authorized the action. + c.push_action( tester1_account, N(senddefer), alice_account, mutable_variant_object() + ("senderid", 123) + ("payer", alice_account) + ); + c.produce_block(); + + // Can migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way with the authority of that account. + c.push_action( tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ); + + c.produce_block(); + + // Disable the subjective mitigation + c.close(); + auto cfg = c.get_config(); + cfg.disable_all_subjective_mitigations = true; + c.init( cfg, nullptr ); + + c.produce_block(); + + // Without the subjective mitigation, it is now possible to bill more RAM to another account + // within a notification if the account authorized the original action. 
+ c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 10) + ("len2", 10) + ("payer", alice_account) + ); + + // Reset back to the original state. + c.push_action( tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 10) + ("len2", 0) + ("payer", alice_account) + ); + c.produce_block(); + + // Re-enable the subjective mitigation + c.close(); + cfg.disable_all_subjective_mitigations = false; + c.init( cfg, nullptr ); + + c.produce_block(); + + // Still cannot bill more RAM to another account within a notification + // even if the account authorized the original action. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 10) + ("len2", 10) + ("payer", alice_account) + ), + subjective_block_production_exception, + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::ram_restrictions ); + BOOST_REQUIRE( d ); + + // Activate RAM_RESTRICTIONS protocol feature (this would also disable the subjective mitigation). + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // Cannot send deferred transaction paid by another account that has not authorized the action. + // This still fails objectively, but now with another error message. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(senddefer), bob_account, mutable_variant_object() + ("senderid", 123) + ("payer", alice_account) + ), + action_validate_exception, + fc_exception_message_starts_with( "cannot bill RAM usage of deferred transaction to another account that has not authorized the action" ) + ); + + // Cannot send deferred transaction paid by another account within a notification + // even if the account authorized the original action. + // This now fails with an objective error. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifydefer), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("senderid", 123) + ("payer", alice_account) + ), + action_validate_exception, + fc_exception_message_is( "cannot bill RAM usage of deferred transactions to another account within notify context" ) + ); + + // Cannot bill more RAM to another account within a notification + // even if the account authorized the original action. + // This now fails with an objective error. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 20) + ("len2", 0) + ("payer", alice_account) + ), + unauthorized_ram_usage_increase, + fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account within a notify context" ) + ); + + // Cannot bill more RAM to another account that has not authorized the action. + // This still fails objectively, but now with another error message.
+ BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object() + ("len1", 20) + ("len2", 0) + ("payer", alice_account) + ), + unauthorized_ram_usage_increase, + fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account that has not authorized the action" ) + ); + + // Still can send deferred transaction paid by another account if it has authorized the action. + c.push_action( tester1_account, N(senddefer), alice_account, mutable_variant_object() + ("senderid", 123) + ("payer", alice_account) + ); + c.produce_block(); + + // Now can migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way without the authority of that account. + c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ); + + // Now can also migrate data from table2 to table1 paid by another account + // in a RAM usage neutral way even within a notification. + c.push_action( tester2_account, N(notifysetdat), bob_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 10) + ("len2", 0) + ("payer", "alice") + ); + + // Of course it should also be possible to migrate data from table1 to table2 paid by another account + // in a way that reduces RAM usage as well, even within a notification. + c.push_action( tester2_account, N(notifysetdat), bob_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 0) + ("len2", 5) + ("payer", "alice") + ); + + // It should also still be possible for the receiver to take over payment of the RAM + // if it is necessary to increase RAM usage without the authorization of the original payer. + // This should all be possible to do even within a notification.
+ c.push_action( tester2_account, N(notifysetdat), bob_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 10) + ("len2", 10) + ("payer", "tester1") + ); + + c.produce_block(); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/resource_limits_test.cpp b/unittests/resource_limits_test.cpp index 3bcd8582e4d..9040c5bdc5f 100644 --- a/unittests/resource_limits_test.cpp +++ b/unittests/resource_limits_test.cpp @@ -14,7 +14,7 @@ using namespace eosio::chain::resource_limits; using namespace eosio::testing; using namespace eosio::chain; -class resource_limits_fixture: private chainbase_fixture<512*1024>, public resource_limits_manager +class resource_limits_fixture: private chainbase_fixture<1024*1024>, public resource_limits_manager { public: resource_limits_fixture() diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index c3578e15750..a3749f9656a 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -49,13 +49,13 @@ class snapshotted_tester : public base_tester { init(copied_config, snapshot); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { - return _produce_block(skip_time, false, skip_flag); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - return _produce_block(skip_time, true, skip_flag); + return _produce_block(skip_time, true); } signed_block_ptr finish_block()override { diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 59f4ec0c28d..1aded520712 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -9,10 +9,14 @@ endif() add_subdirectory( asserter ) add_subdirectory( deferred_test ) +add_subdirectory( get_sender_test ) add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) +add_subdirectory( ram_restrictions_test ) +add_subdirectory( reject_all ) +add_subdirectory( restrict_action_test ) add_subdirectory( snapshot_test ) add_subdirectory( test_api ) add_subdirectory( test_api_db ) diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md index 8b03cc131cd..13937d4b107 100644 --- a/unittests/test-contracts/README.md +++ b/unittests/test-contracts/README.md @@ -2,4 +2,6 @@ test_ram_limit contract was compiled with eosio.cdt v1.4.1 That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. +deferred_test, get_sender_test, proxy, reject_all, and test_api contracts were compiled with eosio.cdt v1.6.1 + The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4). 
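Every test case above follows the same activation idiom: look up the digest of the builtin protocol feature in the protocol feature manager, preactivate it, and produce a block so it takes effect. A minimal sketch of that idiom, using ram_restrictions as the example builtin (any other builtin_protocol_feature_t value works the same way):

   tester c( setup_policy::preactivate_feature_and_new_bios );

   // get_builtin_digest returns an optional digest; it is empty if the
   // builtin is unknown to the configured protocol feature set.
   const auto& pfm = c.control->get_protocol_feature_manager();
   const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::ram_restrictions );
   BOOST_REQUIRE( d );

   // Queue activation through the preactivate_feature intrinsic, then
   // produce a block; the feature is active from that block onward.
   c.preactivate_protocol_features( {*d} );
   c.produce_block();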
diff --git a/unittests/test-contracts/deferred_test/deferred_test.abi b/unittests/test-contracts/deferred_test/deferred_test.abi index 37c9eff9492..857e725efec 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.abi +++ b/unittests/test-contracts/deferred_test/deferred_test.abi @@ -35,6 +35,41 @@ } ] }, + { + "name": "delayedcall", + "base": "", + "fields": [ + { + "name": "payer", + "type": "name" + }, + { + "name": "sender_id", + "type": "uint64" + }, + { + "name": "contract", + "type": "name" + }, + { + "name": "payload", + "type": "uint64" + }, + { + "name": "delay_sec", + "type": "uint32" + }, + { + "name": "replace_existing", + "type": "bool" + } + ] + }, + { + "name": "fail", + "base": "", + "fields": [] + }, { "name": "inlinecall", "base": "", @@ -65,6 +100,16 @@ "type": "deferfunc", "ricardian_contract": "" }, + { + "name": "delayedcall", + "type": "delayedcall", + "ricardian_contract": "" + }, + { + "name": "fail", + "type": "fail", + "ricardian_contract": "" + }, { "name": "inlinecall", "type": "inlinecall", diff --git a/unittests/test-contracts/deferred_test/deferred_test.cpp b/unittests/test-contracts/deferred_test/deferred_test.cpp index 54c02ed4a95..1ff307d8426 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.cpp +++ b/unittests/test-contracts/deferred_test/deferred_test.cpp @@ -4,6 +4,8 @@ */ #include "deferred_test.hpp" #include <eosio/transaction.hpp> +#include <eosio/crypto.hpp> +#include <eosio/datastream.hpp> using namespace eosio; @@ -15,7 +17,43 @@ void deferred_test::defercall( name payer, uint64_t sender_id, name contract, ui transaction trx; deferfunc_action a( contract, {get_self(), "active"_n} ); trx.actions.emplace_back( a.to_action( payload ) ); - trx.send( (static_cast<uint128_t>(payer.value) << 64) | sender_id, payer ); + bool replace_existing = (payload >= 100); + + if( (50 <= payload && payload < 100) || payload >= 150 ) { + size_t tx_size = transaction_size(); + char* buffer = (char*)malloc( tx_size ); + read_transaction( buffer, tx_size ); + auto tx_id = sha256( buffer, tx_size ); + char context_buffer[56]; + trx.transaction_extensions.emplace_back( (uint16_t)0, std::vector<char>() ); + auto& context_vector = std::get<1>( trx.transaction_extensions.back() ); + context_vector.resize(56); + datastream<char*> ds( context_vector.data(), 56 ); + ds << tx_id.extract_as_byte_array(); + ds << ((static_cast<uint128_t>(payer.value) << 64) | sender_id); + if( payload != 77 ) + ds << get_self(); + else + ds << payer; + } + + trx.send( (static_cast<uint128_t>(payer.value) << 64) | sender_id, payer, replace_existing ); } + +void deferred_test::delayedcall( name payer, uint64_t sender_id, name contract, + uint64_t payload, uint32_t delay_sec, bool replace_existing ) +{ + print( "delayedcall called on ", get_self(), "\n" ); + require_auth( payer ); + + print( "deferred send of deferfunc action (with delay of ", delay_sec, " sec) to ", contract, " by ", payer, + " with sender id ", sender_id, " and payload ", payload ); + transaction trx; + trx.delay_sec = delay_sec; + deferfunc_action a( contract, {get_self(), "active"_n} ); + trx.actions.emplace_back( a.to_action( payload ) ); + + trx.send( sender_id, payer, replace_existing ); } void deferred_test::deferfunc( uint64_t payload ) { @@ -28,6 +66,10 @@ void deferred_test::inlinecall( name contract, name authorizer, uint64_t payload a.send( payload ); } +void deferred_test::fail() { + check( false, "fail" ); +} + void deferred_test::on_error( uint128_t sender_id, ignore<std::vector<char>> sent_trx ) { print( "onerror called on ", get_self(), "\n" ); } diff --git 
a/unittests/test-contracts/deferred_test/deferred_test.hpp b/unittests/test-contracts/deferred_test/deferred_test.hpp index 1e5aa22681b..ba7b5c26730 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.hpp +++ b/unittests/test-contracts/deferred_test/deferred_test.hpp @@ -14,6 +14,10 @@ class [[eosio::contract]] deferred_test : public eosio::contract { [[eosio::action]] void defercall( eosio::name payer, uint64_t sender_id, eosio::name contract, uint64_t payload ); + [[eosio::action]] + void delayedcall( eosio::name payer, uint64_t sender_id, eosio::name contract, + uint64_t payload, uint32_t delay_sec, bool replace_existing ); + [[eosio::action]] void deferfunc( uint64_t payload ); using deferfunc_action = eosio::action_wrapper<"deferfunc"_n, &deferred_test::deferfunc>; @@ -21,6 +25,9 @@ class [[eosio::contract]] deferred_test : public eosio::contract { [[eosio::action]] void inlinecall( eosio::name contract, eosio::name authorizer, uint64_t payload ); + [[eosio::action]] + void fail(); + [[eosio::on_notify("eosio::onerror")]] void on_error( uint128_t sender_id, eosio::ignore<std::vector<char>> sent_trx ); }; diff --git a/unittests/test-contracts/deferred_test/deferred_test.wasm b/unittests/test-contracts/deferred_test/deferred_test.wasm index eea70b8dc6e..416dad9fd5f 100755 Binary files a/unittests/test-contracts/deferred_test/deferred_test.wasm and b/unittests/test-contracts/deferred_test/deferred_test.wasm differ diff --git a/unittests/test-contracts/get_sender_test/CMakeLists.txt b/unittests/test-contracts/get_sender_test/CMakeLists.txt new file mode 100644 index 00000000000..cd633da3bae --- /dev/null +++ b/unittests/test-contracts/get_sender_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( get_sender_test get_sender_test get_sender_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/get_sender_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/get_sender_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/get_sender_test.abi ${CMAKE_CURRENT_BINARY_DIR}/get_sender_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.abi b/unittests/test-contracts/get_sender_test/get_sender_test.abi new file mode 100644 index 00000000000..0048a2c7eeb --- /dev/null +++ b/unittests/test-contracts/get_sender_test/get_sender_test.abi @@ -0,0 +1,69 @@ +{ + "____comment": "This file was generated with eosio-abigen.
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "assertsender", + "base": "", + "fields": [ + { + "name": "expected_sender", + "type": "name" + } + ] + }, + { + "name": "notify", + "base": "", + "fields": [ + { + "name": "to", + "type": "name" + }, + { + "name": "expected_sender", + "type": "name" + }, + { + "name": "send_inline", + "type": "bool" + } + ] + }, + { + "name": "sendinline", + "base": "", + "fields": [ + { + "name": "to", + "type": "name" + }, + { + "name": "expected_sender", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "assertsender", + "type": "assertsender", + "ricardian_contract": "" + }, + { + "name": "notify", + "type": "notify", + "ricardian_contract": "" + }, + { + "name": "sendinline", + "type": "sendinline", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.cpp b/unittests/test-contracts/get_sender_test/get_sender_test.cpp new file mode 100644 index 00000000000..a3574dfae4c --- /dev/null +++ b/unittests/test-contracts/get_sender_test/get_sender_test.cpp @@ -0,0 +1,30 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "get_sender_test.hpp" +#include + +using namespace eosio; + +void get_sender_test::assertsender( name expected_sender ) { + check( get_sender() == expected_sender, "sender did not match" ); +} + +void get_sender_test::sendinline( name to, name expected_sender ) { + assertsender_action a( to, std::vector{} ); + a.send( expected_sender ); +} + +void get_sender_test::notify( name to, name expected_sender, bool send_inline ) { + require_recipient( to ); +} + +void get_sender_test::on_notify( name to, name expected_sender, bool send_inline ) { + if( send_inline ) { + assertsender_action a( get_first_receiver(), std::vector{} ); + a.send( expected_sender ); + } else { + check( get_sender() == expected_sender, "sender did not match" ); + } +} diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.hpp b/unittests/test-contracts/get_sender_test/get_sender_test.hpp new file mode 100644 index 00000000000..632c2905326 --- /dev/null +++ b/unittests/test-contracts/get_sender_test/get_sender_test.hpp @@ -0,0 +1,43 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { + namespace internal_use_do_not_use { + extern "C" { + __attribute__((eosio_wasm_import)) + uint64_t get_sender(); + } + } +} + +namespace eosio { + name get_sender() { + return name( internal_use_do_not_use::get_sender() ); + } +} + +class [[eosio::contract]] get_sender_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void assertsender( eosio::name expected_sender ); + using assertsender_action = eosio::action_wrapper<"assertsender"_n, &get_sender_test::assertsender>; + + [[eosio::action]] + void sendinline( eosio::name to, eosio::name expected_sender ); + + [[eosio::action]] + void notify( eosio::name to, eosio::name expected_sender, bool send_inline ); + + // eosio.cdt 1.6.1 has a problem with "*::notify" so hardcode to tester1 for now. + // TODO: Change it back to "*::notify" when the bug is fixed in eosio.cdt. 
+ [[eosio::on_notify("tester1::notify")]] + void on_notify( eosio::name to, eosio::name expected_sender, bool send_inline ); + +}; diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.wasm b/unittests/test-contracts/get_sender_test/get_sender_test.wasm new file mode 100755 index 00000000000..89dc670d5a6 Binary files /dev/null and b/unittests/test-contracts/get_sender_test/get_sender_test.wasm differ diff --git a/unittests/test-contracts/proxy/proxy.cpp b/unittests/test-contracts/proxy/proxy.cpp index 1a199c4a5ba..c9fc324cad5 100644 --- a/unittests/test-contracts/proxy/proxy.cpp +++ b/unittests/test-contracts/proxy/proxy.cpp @@ -59,6 +59,7 @@ void proxy::on_error( uint128_t sender_id, eosio::ignore> ) { get_datastream() >> packed_trx_size; transaction trx; get_datastream() >> trx; + trx.transaction_extensions.clear(); trx.delay_sec = cfg.delay; trx.send( id, get_self() ); diff --git a/unittests/test-contracts/proxy/proxy.wasm b/unittests/test-contracts/proxy/proxy.wasm index c09311385be..b40249782ba 100755 Binary files a/unittests/test-contracts/proxy/proxy.wasm and b/unittests/test-contracts/proxy/proxy.wasm differ diff --git a/unittests/test-contracts/ram_restrictions_test/CMakeLists.txt b/unittests/test-contracts/ram_restrictions_test/CMakeLists.txt new file mode 100644 index 00000000000..d7fc5e71eeb --- /dev/null +++ b/unittests/test-contracts/ram_restrictions_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( ram_restrictions_test ram_restrictions_test ram_restrictions_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/ram_restrictions_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/ram_restrictions_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/ram_restrictions_test.abi ${CMAKE_CURRENT_BINARY_DIR}/ram_restrictions_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi new file mode 100644 index 00000000000..6a12751077a --- /dev/null +++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi @@ -0,0 +1,143 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "data", + "base": "", + "fields": [ + { + "name": "key", + "type": "uint64" + }, + { + "name": "value", + "type": "bytes" + } + ] + }, + { + "name": "noop", + "base": "", + "fields": [] + }, + { + "name": "notifydefer", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "senderid", + "type": "uint64" + }, + { + "name": "payer", + "type": "name" + } + ] + }, + { + "name": "notifysetdat", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "len1", + "type": "uint32" + }, + { + "name": "len2", + "type": "uint32" + }, + { + "name": "payer", + "type": "name" + } + ] + }, + { + "name": "senddefer", + "base": "", + "fields": [ + { + "name": "senderid", + "type": "uint64" + }, + { + "name": "payer", + "type": "name" + } + ] + }, + { + "name": "setdata", + "base": "", + "fields": [ + { + "name": "len1", + "type": "uint32" + }, + { + "name": "len2", + "type": "uint32" + }, + { + "name": "payer", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "noop", + "type": "noop", + "ricardian_contract": "" + }, + { + "name": "notifydefer", + "type": "notifydefer", + "ricardian_contract": "" + }, + { + "name": "notifysetdat", + "type": "notifysetdat", + "ricardian_contract": "" + }, + { + "name": "senddefer", + "type": "senddefer", + "ricardian_contract": "" + }, + { + "name": "setdata", + "type": "setdata", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "tablea", + "type": "data", + "index_type": "i64", + "key_names": [], + "key_types": [] + }, + { + "name": "tableb", + "type": "data", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp new file mode 100644 index 00000000000..d24cdbdbc24 --- /dev/null +++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp @@ -0,0 +1,62 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "ram_restrictions_test.hpp" +#include + +using namespace eosio; + +template +void _setdata(name self, int len, name payer) { + Table ta(self, 0); + std::vector data; + data.resize(len, 0); + auto it = ta.find(0); + if (it == ta.end()) { + ta.emplace(payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); + } else { + ta.modify(it, payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); + } +} + +void ram_restrictions_test::noop( ) { +} + +void ram_restrictions_test::setdata( uint32_t len1, uint32_t len2, name payer ) { + _setdata(get_self(), len1, payer); + _setdata(get_self(), len2, payer); +} + +void ram_restrictions_test::notifysetdat( name acctonotify, uint32_t len1, uint32_t len2, name payer ) { + require_recipient(acctonotify); +} + +void ram_restrictions_test::on_notify_setdata( name acctonotify, uint32_t len1, uint32_t len2, name payer) { + setdata(len1, len2, payer); +} + +void ram_restrictions_test::senddefer( uint64_t senderid, name payer ) { + transaction trx; + trx.actions.emplace_back( + std::vector{{_self, "active"_n}}, + get_self(), + "noop"_n, + std::make_tuple() + ); + trx.send( senderid, payer ); +} + +void ram_restrictions_test::notifydefer( name acctonotify, uint64_t senderid, name payer ) { + require_recipient(acctonotify); +} + +void ram_restrictions_test::on_notifydefer( 
name acctonotify, uint64_t senderid, name payer ) { + senddefer(senderid, payer); +} diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp new file mode 100644 index 00000000000..34a988fe5d5 --- /dev/null +++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp @@ -0,0 +1,45 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include <eosio/eosio.hpp> + +class [[eosio::contract]] ram_restrictions_test : public eosio::contract { +public: + struct [[eosio::table]] data { + uint64_t key; + std::vector<char> value; + + uint64_t primary_key() const { return key; } + }; + + typedef eosio::multi_index<"tablea"_n, data> tablea; + typedef eosio::multi_index<"tableb"_n, data> tableb; + +public: + using eosio::contract::contract; + + [[eosio::action]] + void noop(); + + [[eosio::action]] + void setdata( uint32_t len1, uint32_t len2, eosio::name payer ); + + [[eosio::action]] + void notifysetdat( eosio::name acctonotify, uint32_t len1, uint32_t len2, eosio::name payer ); + + [[eosio::on_notify("tester2::notifysetdat")]] + void on_notify_setdata( eosio::name acctonotify, uint32_t len1, uint32_t len2, eosio::name payer ); + + [[eosio::action]] + void senddefer( uint64_t senderid, eosio::name payer ); + + [[eosio::action]] + void notifydefer( eosio::name acctonotify, uint64_t senderid, eosio::name payer ); + + [[eosio::on_notify("tester2::notifydefer")]] + void on_notifydefer( eosio::name acctonotify, uint64_t senderid, eosio::name payer ); + +}; diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.wasm b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.wasm new file mode 100644 index 00000000000..74be3b18fdf Binary files /dev/null and b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.wasm differ diff --git a/unittests/test-contracts/reject_all/CMakeLists.txt b/unittests/test-contracts/reject_all/CMakeLists.txt new file mode 100644 index 00000000000..027aa487fa1 --- /dev/null +++ b/unittests/test-contracts/reject_all/CMakeLists.txt @@ -0,0 +1,5 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( reject_all reject_all reject_all.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/reject_all.wasm ${CMAKE_CURRENT_BINARY_DIR}/reject_all.wasm COPYONLY ) +endif() diff --git a/unittests/test-contracts/reject_all/reject_all.cpp b/unittests/test-contracts/reject_all/reject_all.cpp new file mode 100644 index 00000000000..40f26fd827b --- /dev/null +++ b/unittests/test-contracts/reject_all/reject_all.cpp @@ -0,0 +1,31 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include <eosio/eosio.hpp> + +using namespace eosio; + +extern "C" { + void apply( uint64_t receiver, uint64_t first_receiver, uint64_t action ) { + check( receiver == first_receiver, "rejecting all notifications" ); + + // reject all actions with only the following exceptions: + // * do not reject an eosio::setcode that sets code on the eosio account unless the rejectall account exists; + // * do not reject an eosio::newaccount that creates the rejectall account.
+ + if( first_receiver == "eosio"_n.value ) { + if( action == "setcode"_n.value ) { + auto accnt = unpack_action_data<name>(); + if( accnt == "eosio"_n && !is_account("rejectall"_n) ) + return; + } else if( action == "newaccount"_n.value ) { + auto accnts = unpack_action_data< std::pair<name, name> >(); + if( accnts.second == "rejectall"_n ) + return; + } + } + + check( false, "rejecting all actions" ); + } +} diff --git a/unittests/test-contracts/reject_all/reject_all.wasm b/unittests/test-contracts/reject_all/reject_all.wasm new file mode 100755 index 00000000000..ee794557a98 Binary files /dev/null and b/unittests/test-contracts/reject_all/reject_all.wasm differ diff --git a/unittests/test-contracts/restrict_action_test/CMakeLists.txt b/unittests/test-contracts/restrict_action_test/CMakeLists.txt new file mode 100644 index 00000000000..5ffe32ae7da --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( restrict_action_test restrict_action_test restrict_action_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/restrict_action_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/restrict_action_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/restrict_action_test.abi ${CMAKE_CURRENT_BINARY_DIR}/restrict_action_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.abi b/unittests/test-contracts/restrict_action_test/restrict_action_test.abi new file mode 100644 index 00000000000..37db4926071 --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.abi @@ -0,0 +1,98 @@ +{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "noop", + "base": "", + "fields": [] + }, + { + "name": "notifydefer", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "authorizer", + "type": "name" + }, + { + "name": "senderid", + "type": "uint32" + } + ] + }, + { + "name": "notifyinline", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "authorizer", + "type": "name" + } + ] + }, + { + "name": "senddefer", + "base": "", + "fields": [ + { + "name": "authorizer", + "type": "name" + }, + { + "name": "senderid", + "type": "uint32" + } + ] + }, + { + "name": "sendinline", + "base": "", + "fields": [ + { + "name": "authorizer", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "noop", + "type": "noop", + "ricardian_contract": "" + }, + { + "name": "notifydefer", + "type": "notifydefer", + "ricardian_contract": "" + }, + { + "name": "notifyinline", + "type": "notifyinline", + "ricardian_contract": "" + }, + { + "name": "senddefer", + "type": "senddefer", + "ricardian_contract": "" + }, + { + "name": "sendinline", + "type": "sendinline", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp b/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp new file mode 100644 index 00000000000..5c8f2b596b5 --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp @@ -0,0 +1,48 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "restrict_action_test.hpp" +#include <eosio/transaction.hpp> + +using namespace eosio; + +void restrict_action_test::noop( ) { + +} + +void 
restrict_action_test::sendinline( name authorizer ) { + action( + permission_level{authorizer,"active"_n}, + get_self(), + "noop"_n, + std::make_tuple() + ).send(); +} + +void restrict_action_test::senddefer( name authorizer, uint32_t senderid ) { + transaction trx; + trx.actions.emplace_back( + permission_level{authorizer,"active"_n}, + get_self(), + "noop"_n, + std::make_tuple() + ); + trx.send(senderid, get_self()); +} + +void restrict_action_test::notifyinline( name acctonotify, name authorizer ) { + require_recipient(acctonotify); +} + +void restrict_action_test::notifydefer( name acctonotify, name authorizer, uint32_t senderid ) { + require_recipient(acctonotify); +} + +void restrict_action_test::on_notify_inline( name acctonotify, name authorizer ) { + sendinline(authorizer); +} + +void restrict_action_test::on_notify_defer( name acctonotify, name authorizer, uint32_t senderid ) { + senddefer(authorizer, senderid); +} diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp b/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp new file mode 100644 index 00000000000..f5ab48e385b --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp @@ -0,0 +1,34 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] restrict_action_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void noop( ); + + [[eosio::action]] + void sendinline( eosio::name authorizer ); + + [[eosio::action]] + void senddefer( eosio::name authorizer, uint32_t senderid ); + + + [[eosio::action]] + void notifyinline( eosio::name acctonotify, eosio::name authorizer ); + + [[eosio::action]] + void notifydefer( eosio::name acctonotify, eosio::name authorizer, uint32_t senderid ); + + [[eosio::on_notify("testacc::notifyinline")]] + void on_notify_inline( eosio::name acctonotify, eosio::name authorizer ); + + [[eosio::on_notify("testacc::notifydefer")]] + void on_notify_defer( eosio::name acctonotify, eosio::name authorizer, uint32_t senderid ); +}; diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.wasm b/unittests/test-contracts/restrict_action_test/restrict_action_test.wasm new file mode 100755 index 00000000000..31acb952b19 Binary files /dev/null and b/unittests/test-contracts/restrict_action_test/restrict_action_test.wasm differ diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index bf1985ae3ef..65a8d31554c 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ b/unittests/test-contracts/test_api/test_action.cpp @@ -259,3 +259,86 @@ void test_action::test_ram_billing_in_notify( uint64_t receiver, uint64_t code, db_store_i64( "notifytest"_n.value, "notifytest"_n.value, payer, "notifytest"_n.value, &to_notify, sizeof(to_notify) ); } } + +void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_t action) { + uint64_t _self = receiver; + if (receiver == "testapi"_n.value) { + print("exec 1"); + eosio::require_recipient( "bob"_n ); //-> exec 2 which would then cause execution of 4, 10 + + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal2")), + std::tuple<>()); + act1.send(); // -> exec 5 which would then cause execution of 6, 7, 8 + + if (is_account("fail1"_n)) { + eosio_assert(false, "fail at point 1"); + } + + eosio::action act2({name(_self), 
"active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal3")), + std::tuple<>()); + act2.send(); // -> exec 9 + + eosio::require_recipient( "charlie"_n ); // -> exec 3 which would then cause execution of 11 + + } else if (receiver == "bob"_n.value) { + print("exec 2"); + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal_foo")), + std::tuple<>()); + act1.send(); // -> exec 10 + + eosio::require_recipient( "david"_n ); // -> exec 4 + } else if (receiver == "charlie"_n.value) { + print("exec 3"); + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal_bar")), + std::tuple<>()); // exec 11 + act1.send(); + + if (is_account("fail3"_n)) { + eosio_assert(false, "fail at point 3"); + } + + } else if (receiver == "david"_n.value) { + print("exec 4"); + } else { + eosio_assert(false, "assert failed at test_action::test_action_ordinal1"); + } +} +void test_action::test_action_ordinal2(uint64_t receiver, uint64_t code, uint64_t action) { + uint64_t _self = receiver; + if (receiver == "testapi"_n.value) { + print("exec 5"); + eosio::require_recipient( "david"_n ); // -> exec 6 + eosio::require_recipient( "erin"_n ); // -> exec 7 + + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal4")), + std::tuple<>()); + act1.send(); // -> exec 8 + } else if (receiver == "david"_n.value) { + print("exec 6"); + } else if (receiver == "erin"_n.value) { + print("exec 7"); + } else { + eosio_assert(false, "assert failed at test_action::test_action_ordinal2"); + } +} +void test_action::test_action_ordinal4(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 8"); +} +void test_action::test_action_ordinal3(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 9"); + + if (is_account("failnine"_n)) { + eosio_assert(false, "fail at point 9"); + } +} +void test_action::test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 10"); +} +void test_action::test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 11"); +} diff --git a/unittests/test-contracts/test_api/test_api.cpp b/unittests/test-contracts/test_api/test_api.cpp index 598990dc1a7..417c89a5da4 100644 --- a/unittests/test-contracts/test_api/test_api.cpp +++ b/unittests/test-contracts/test_api/test_api.cpp @@ -41,7 +41,8 @@ extern "C" { } WASM_TEST_HANDLER( test_action, assert_true_cf ); - if ( action != WASM_TEST_ACTION("test_transaction", "stateful_api") && action != WASM_TEST_ACTION("test_transaction", "context_free_api") ) + if ( action != WASM_TEST_ACTION("test_transaction", "stateful_api") && + action != WASM_TEST_ACTION("test_transaction", "context_free_api") ) require_auth(code); //test_types @@ -64,6 +65,12 @@ extern "C" { WASM_TEST_HANDLER ( test_action, test_publication_time ); WASM_TEST_HANDLER ( test_action, test_assert_code ); WASM_TEST_HANDLER_EX( test_action, test_ram_billing_in_notify ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal1 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal2 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal3 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal4 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_foo ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_bar ); // test named actions // We enforce action name matches action data type name, 
so name mangling will not work for these tests. diff --git a/unittests/test-contracts/test_api/test_api.hpp b/unittests/test-contracts/test_api/test_api.hpp index 865923fcfb2..bbcf9965352 100644 --- a/unittests/test-contracts/test_api/test_api.hpp +++ b/unittests/test-contracts/test_api/test_api.hpp @@ -69,6 +69,12 @@ struct test_action { static void test_publication_time(); static void test_assert_code(); static void test_ram_billing_in_notify(uint64_t receiver, uint64_t code, uint64_t action); + static void test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_t action); + static void test_action_ordinal2(uint64_t receiver, uint64_t code, uint64_t action); + static void test_action_ordinal3(uint64_t receiver, uint64_t code, uint64_t action); + static void test_action_ordinal4(uint64_t receiver, uint64_t code, uint64_t action); + static void test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint64_t action); + static void test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action); }; struct test_db { diff --git a/unittests/test-contracts/test_api/test_api.wasm b/unittests/test-contracts/test_api/test_api.wasm index c7a7601ace1..9d81f87f7aa 100755 Binary files a/unittests/test-contracts/test_api/test_api.wasm and b/unittests/test-contracts/test_api/test_api.wasm differ diff --git a/unittests/test-contracts/test_api/test_transaction.cpp b/unittests/test-contracts/test_api/test_transaction.cpp index 065828ea307..535f9896e9b 100644 --- a/unittests/test-contracts/test_api/test_transaction.cpp +++ b/unittests/test-contracts/test_api/test_transaction.cpp @@ -283,7 +283,6 @@ void test_transaction::send_deferred_tx_with_dtt_action() { auto trx = transaction(); trx.actions.emplace_back(deferred_act); trx.delay_sec = dtt_act.delay_sec; - cancel_deferred( 0xffffffffffffffff ); // TODO: Remove this line after fixing deferred trx replacement RAM bug trx.send( 0xffffffffffffffff, name{dtt_act.payer}, true ); } diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp index 5335ee037c4..c29ec8da8c4 100644 --- a/unittests/wasm_tests.cpp +++ b/unittests/wasm_tests.cpp @@ -92,7 +92,7 @@ BOOST_FIXTURE_TEST_CASE( basic_test, TESTER ) try { auto result = push_transaction( trx ); BOOST_CHECK_EQUAL(result->receipt->status, transaction_receipt::executed); BOOST_CHECK_EQUAL(result->action_traces.size(), 1u); - BOOST_CHECK_EQUAL(result->action_traces.at(0).receipt.receiver.to_string(), name(N(asserter)).to_string() ); + BOOST_CHECK_EQUAL(result->action_traces.at(0).receiver.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.account.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.name.to_string(), name(N(procassert)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.authorization.size(), 1u ); diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index 5df32c19f79..4a372000082 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -498,7 +498,8 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { tester2.chain->push_block( b ); } - auto log_trxs = [&]( const transaction_trace_ptr& t) { + auto log_trxs = [&](std::tuple<const transaction_trace_ptr&, const signed_transaction&> x) { + auto& t = std::get<0>(x); if( !t || t->action_traces.size() == 0 ) return; const auto& act = t->action_traces[0].act;
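The log_trxs change in whitelist_blacklist_tests.cpp reflects the applied_transaction signal now carrying a tuple rather than a bare trace pointer. A minimal sketch of a subscriber under that assumption (the tuple element types follow the lambda signature above; the connection call mirrors how the test's tester objects expose the controller, and the variable names are illustrative):

   // Assumes controller::applied_transaction emits
   // std::tuple<const transaction_trace_ptr&, const signed_transaction&>.
   auto conn = tester1.chain->control->applied_transaction.connect(
      [&]( std::tuple<const transaction_trace_ptr&, const signed_transaction&> x ) {
         const auto& trace = std::get<0>( x ); // unpack the trace first
         if( !trace || trace->action_traces.empty() ) return;
         const auto& act = trace->action_traces[0].act;
         // inspect act.account / act.name here, as log_trxs does
      } );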