diff --git a/.github/workflows/build-ton-linux-x86-64-shared.yml b/.github/workflows/build-ton-linux-x86-64-shared.yml index 34f92d93e..fcef8afd5 100644 --- a/.github/workflows/build-ton-linux-x86-64-shared.yml +++ b/.github/workflows/build-ton-linux-x86-64-shared.yml @@ -7,7 +7,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-20.04, ubuntu-22.04] + os: [ubuntu-20.04, ubuntu-22.04, ubuntu-24.04] runs-on: ${{ matrix.os }} steps: @@ -21,7 +21,7 @@ jobs: sudo apt-get update sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev liblz4-dev libjemalloc-dev - - name: Install clang-16 + - if: matrix.os != 'ubuntu-24.04' run: | wget https://apt.llvm.org/llvm.sh chmod +x llvm.sh diff --git a/.github/workflows/build-ton-wasm-emscripten.yml b/.github/workflows/build-ton-wasm-emscripten.yml index 92107ffdf..534d312fd 100644 --- a/.github/workflows/build-ton-wasm-emscripten.yml +++ b/.github/workflows/build-ton-wasm-emscripten.yml @@ -19,7 +19,7 @@ jobs: - name: Build TON WASM artifacts run: | - cd assembly/wasm + cp assembly/wasm/fift-func-wasm-build-ubuntu.sh . chmod +x fift-func-wasm-build-ubuntu.sh ./fift-func-wasm-build-ubuntu.sh -a diff --git a/.github/workflows/docker-ubuntu-image.yml b/.github/workflows/docker-ubuntu-image.yml index 449711d86..48c553efd 100644 --- a/.github/workflows/docker-ubuntu-image.yml +++ b/.github/workflows/docker-ubuntu-image.yml @@ -20,22 +20,49 @@ jobs: submodules: 'recursive' - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and export to Docker + uses: docker/build-push-action@v6 + with: + load: true + context: ./ + tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test + + - name: Test + run: | + docker run --rm -e "TEST=1" ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test + + - name: Get next tag + id: tag + run: | + git fetch --all --tags + git tag -l + NEW_TAG=v$(date +'%Y.%m') + FOUND=$(git tag -l | grep $NEW_TAG | wc -l) + if [ $FOUND -eq 0 ]; then + echo "TAG=$NEW_TAG" >> $GITHUB_OUTPUT + else + echo "TAG=$NEW_TAG-$FOUND" >> $GITHUB_OUTPUT + fi + - name: Build and push id: docker_build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v6 with: + platforms: linux/amd64,linux/arm64 push: true context: ./ - tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest + tags: | + ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest + ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.TAG }} diff --git a/.github/workflows/ton-x86-64-windows.yml b/.github/workflows/ton-x86-64-windows.yml index 670261839..d5c9c243c 100644 --- a/.github/workflows/ton-x86-64-windows.yml +++ b/.github/workflows/ton-x86-64-windows.yml @@ -9,7 +9,7 @@ defaults: jobs: build: - runs-on: windows-2022 + runs-on: windows-2019 steps: - name: Get Current OS version @@ -23,9 +23,9 @@ jobs: - name: Build TON run: | - copy assembly\native\build-windows-github.bat . - copy assembly\native\build-windows.bat . - build-windows-github.bat Enterprise + copy assembly\native\build-windows-github-2019.bat . + copy assembly\native\build-windows-2019.bat . 
+ build-windows-github-2019.bat Enterprise - name: Upload artifacts uses: actions/upload-artifact@master diff --git a/CMake/FindSodium.cmake b/CMake/FindSodium.cmake index 85194ee2f..c11e46f1d 100644 --- a/CMake/FindSodium.cmake +++ b/CMake/FindSodium.cmake @@ -37,12 +37,14 @@ if (NOT DEFINED SODIUM_USE_STATIC_LIBS) option(SODIUM_USE_STATIC_LIBS "enable to statically link against sodium" OFF) endif() if(NOT (SODIUM_USE_STATIC_LIBS EQUAL SODIUM_USE_STATIC_LIBS_LAST)) - unset(sodium_LIBRARY CACHE) - unset(SODIUM_LIBRARY_DEBUG CACHE) - unset(SODIUM_LIBRARY_RELEASE CACHE) - unset(sodium_DLL_DEBUG CACHE) - unset(sodium_DLL_RELEASE CACHE) - set(SODIUM_USE_STATIC_LIBS_LAST ${SODIUM_USE_STATIC_LIBS} CACHE INTERNAL "internal change tracking variable") + if (NOT SODIUM_LIBRARY_RELEASE) + unset(sodium_LIBRARY CACHE) + unset(SODIUM_LIBRARY_DEBUG CACHE) + unset(SODIUM_LIBRARY_RELEASE CACHE) + unset(sodium_DLL_DEBUG CACHE) + unset(sodium_DLL_RELEASE CACHE) + set(SODIUM_USE_STATIC_LIBS_LAST ${SODIUM_USE_STATIC_LIBS} CACHE INTERNAL "internal change tracking variable") + endif() endif() @@ -295,4 +297,4 @@ else() ) endif() endif() -endif() \ No newline at end of file +endif() diff --git a/CMakeLists.txt b/CMakeLists.txt index 658eab70c..b92ff6f1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -539,6 +539,9 @@ target_link_libraries(test-ton-collator overlay tdutils tdactor adnl tl_api dht add_executable(test-http test/test-http.cpp) target_link_libraries(test-http PRIVATE tonhttp) +add_executable(test-emulator test/test-td-main.cpp emulator/test/emulator-tests.cpp) +target_link_libraries(test-emulator PRIVATE emulator) + get_directory_property(HAS_PARENT PARENT_DIRECTORY) if (HAS_PARENT) set(ALL_TEST_SOURCE @@ -570,6 +573,7 @@ add_test(test-cells test-cells ${TEST_OPTIONS}) add_test(test-smartcont test-smartcont) add_test(test-net test-net) add_test(test-actors test-tdactor) +add_test(test-emulator test-emulator) #BEGIN tonlib add_test(test-tdutils test-tdutils) diff --git a/Changelog.md b/Changelog.md index effe339f4..e5df2f979 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,3 +1,20 @@ +## 2024.08 Update + +1. Introduction of dispatch queues, message envelopes with transaction chain metadata, and explicitly stored msg_queue size, which will be activated by `Config8.version >= 8` and new `Config8.capabilities` bits: `capStoreOutMsgQueueSize`, `capMsgMetadata`, `capDeferMessages`. +2. A number of changes to the transaction executor, which will be activated for `Config8.version >= 8`: + - Check mode on invalid `action_send_msg`. Ignore the action if the `IGNORE_ERROR` (+2) bit is set, bounce if the `BOUNCE_ON_FAIL` (+16) bit is set. + - Slightly change random seed generation to fix the mix of `addr_rewrite` and `addr`. + - Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent. + - Allow unfreeze through external messages. + - Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages. +3. A few issues with broadcasts were fixed: stop on receiving the last piece, respond to AdnlMessageCreateChannel. +4. A number of fixes and improvements for the emulator and tonlib: correct work with config_addr, not-accepted externals, bounces, debug ops gas consumption, added version and c5 dump, fixed tonlib crashes. +5. 
Added new flags and commands to the node, in particular `--fast-state-serializer`, `getcollatoroptionsjson`, `setcollatoroptionsjson` + +Besides the work of the core team, this update is based on the efforts of @krigga (emulator), stonfi team, in particular @dbaranovstonfi and @hey-researcher (emulator), and @loeul, @xiaoxianBoy, @simlecode (typos in comments and docs). + + + ## 2024.06 Update 1. Make Jemalloc default allocator diff --git a/Dockerfile b/Dockerfile index c5120e83e..cf4187630 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,15 @@ -FROM ubuntu:22.04 as builder +FROM ubuntu:22.04 AS builder RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev pkg-config autoconf automake libtool libjemalloc-dev && \ - rm -rf /var/lib/apt/lists/* -ENV CC clang -ENV CXX clang++ -ENV CCACHE_DISABLE 1 + DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev pkg-config autoconf automake libtool libjemalloc-dev lsb-release software-properties-common gnupg + +RUN wget https://apt.llvm.org/llvm.sh && \ + chmod +x llvm.sh && \ + ./llvm.sh 16 all && \ + rm -rf /var/lib/apt/lists/* + +ENV CC=/usr/bin/clang-16 +ENV CXX=/usr/bin/clang++-16 +ENV CCACHE_DISABLE=1 WORKDIR / RUN mkdir ton @@ -13,17 +18,16 @@ WORKDIR /ton COPY ./ ./ RUN mkdir build && \ - cd build && \ - cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DTON_USE_JEMALLOC=ON .. && \ - ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client + cd build && \ + cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DTON_USE_JEMALLOC=ON .. 
&& \ + ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client FROM ubuntu:22.04 RUN apt-get update && \ - apt-get install -y wget libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev libjemalloc-dev && \ + apt-get install -y wget curl libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev libjemalloc-dev htop net-tools netcat iptraf-ng jq tcpdump pv plzip && \ rm -rf /var/lib/apt/lists/* -RUN mkdir -p /var/ton-work/db && \ - mkdir -p /var/ton-work/db/static +RUN mkdir -p /var/ton-work/db /var/ton-work/scripts COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon /usr/local/bin/ COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon-cli /usr/local/bin/ @@ -33,7 +37,7 @@ COPY --from=builder /ton/build/validator-engine-console/validator-engine-console COPY --from=builder /ton/build/utils/generate-random-id /usr/local/bin/ WORKDIR /var/ton-work/db -COPY ./docker/init.sh ./docker/control.template ./ -RUN chmod +x init.sh +COPY ./docker/init.sh ./docker/control.template /var/ton-work/scripts/ +RUN chmod +x /var/ton-work/scripts/init.sh -ENTRYPOINT ["/var/ton-work/db/init.sh"] +ENTRYPOINT ["/var/ton-work/scripts/init.sh"] diff --git a/adnl/adnl-ext-server.cpp b/adnl/adnl-ext-server.cpp index ed04469cb..162a53afb 100644 --- a/adnl/adnl-ext-server.cpp +++ b/adnl/adnl-ext-server.cpp @@ -91,7 +91,7 @@ td::Status AdnlInboundConnection::process_custom_packet(td::BufferSlice &data, b auto F = fetch_tl_object(data.clone(), true); if (F.is_ok()) { if (nonce_.size() > 0 || !remote_id_.is_zero()) { - return td::Status::Error(ErrorCode::protoviolation, "duplicate authentificate"); + return td::Status::Error(ErrorCode::protoviolation, "duplicate authenticate"); } auto f = F.move_as_ok(); nonce_ = td::SecureString{f->nonce_.size() + 256}; diff --git a/adnl/adnl-peer.cpp b/adnl/adnl-peer.cpp index 3e21a7f53..febbdac6e 100644 --- a/adnl/adnl-peer.cpp +++ b/adnl/adnl-peer.cpp @@ -504,6 +504,12 @@ void AdnlPeerPairImpl::create_channel(pubkeys::Ed25519 pub, td::uint32 date) { void AdnlPeerPairImpl::process_message(const adnlmessage::AdnlMessageCreateChannel &message) { create_channel(message.key(), message.date()); + if (respond_to_channel_create_after_.is_in_past()) { + respond_to_channel_create_after_ = td::Timestamp::in(td::Random::fast(1.0, 2.0)); + std::vector messages; + messages.emplace_back(adnlmessage::AdnlMessageNop{}, 0); + send_messages(std::move(messages)); + } } void AdnlPeerPairImpl::process_message(const adnlmessage::AdnlMessageConfirmChannel &message) { diff --git a/adnl/adnl-peer.hpp b/adnl/adnl-peer.hpp index 12ee01c6c..e9a5d428e 100644 --- a/adnl/adnl-peer.hpp +++ b/adnl/adnl-peer.hpp @@ -214,6 +214,7 @@ class AdnlPeerPairImpl : public AdnlPeerPair { pubkeys::Ed25519 channel_pub_; td::int32 channel_pk_date_; td::actor::ActorOwn channel_; + td::Timestamp respond_to_channel_create_after_; td::uint64 in_seqno_ = 0; td::uint64 out_seqno_ = 0; diff --git a/assembly/cicd/jenkins/test-builds.groovy b/assembly/cicd/jenkins/test-builds.groovy index a959d75ab..0b5ab7a38 100644 --- a/assembly/cicd/jenkins/test-builds.groovy +++ b/assembly/cicd/jenkins/test-builds.groovy @@ -1,4 +1,5 @@ pipeline { + agent none stages { stage('Run Builds') { @@ -12,7 +13,7 @@ pipeline { sh ''' cp assembly/native/build-ubuntu-shared.sh . 
chmod +x build-ubuntu-shared.sh - ./build-ubuntu-shared.sh -t -a + ./build-ubuntu-shared.sh -a ''' sh ''' cd artifacts @@ -31,7 +32,7 @@ pipeline { sh ''' cp assembly/nix/build-linux-x86-64-nix.sh . chmod +x build-linux-x86-64-nix.sh - ./build-linux-x86-64-nix.sh -t + ./build-linux-x86-64-nix.sh ''' sh ''' cd artifacts @@ -50,7 +51,7 @@ pipeline { sh ''' cp assembly/native/build-ubuntu-shared.sh . chmod +x build-ubuntu-shared.sh - ./build-ubuntu-shared.sh -t -a + ./build-ubuntu-shared.sh -a ''' sh ''' cd artifacts @@ -69,7 +70,7 @@ pipeline { sh ''' cp assembly/nix/build-linux-arm64-nix.sh . chmod +x build-linux-arm64-nix.sh - ./build-linux-arm64-nix.sh -t + ./build-linux-arm64-nix.sh ''' sh ''' cd artifacts @@ -88,7 +89,7 @@ pipeline { sh ''' cp assembly/native/build-macos-shared.sh . chmod +x build-macos-shared.sh - ./build-macos-shared.sh -t -a + ./build-macos-shared.sh -a ''' sh ''' cd artifacts @@ -107,7 +108,7 @@ pipeline { sh ''' cp assembly/nix/build-macos-nix.sh . chmod +x build-macos-nix.sh - ./build-macos-nix.sh -t + ./build-macos-nix.sh ''' sh ''' cd artifacts @@ -126,7 +127,7 @@ pipeline { sh ''' cp assembly/native/build-macos-shared.sh . chmod +x build-macos-shared.sh - ./build-macos-shared.sh -t -a + ./build-macos-shared.sh -a ''' sh ''' cd artifacts @@ -145,7 +146,7 @@ pipeline { sh ''' cp assembly/nix/build-macos-nix.sh . chmod +x build-macos-nix.sh - ./build-macos-nix.sh -t + ./build-macos-nix.sh ''' sh ''' cd artifacts @@ -164,7 +165,7 @@ pipeline { sh ''' cp assembly/native/build-macos-shared.sh . chmod +x build-macos-shared.sh - ./build-macos-shared.sh -t -a + ./build-macos-shared.sh -a ''' sh ''' cd artifacts @@ -182,7 +183,7 @@ pipeline { timeout(time: 180, unit: 'MINUTES') { bat ''' copy assembly\\native\\build-windows.bat . - build-windows.bat -t + build-windows.bat ''' bat ''' cd artifacts @@ -218,7 +219,7 @@ pipeline { steps { timeout(time: 180, unit: 'MINUTES') { sh ''' - cd assembly/wasm + cp assembly/wasm/fift-func-wasm-build-ubuntu.sh . chmod +x fift-func-wasm-build-ubuntu.sh ./fift-func-wasm-build-ubuntu.sh -a ''' diff --git a/assembly/native/build-macos-portable.sh b/assembly/native/build-macos-portable.sh index 32a09e452..b296d3393 100644 --- a/assembly/native/build-macos-portable.sh +++ b/assembly/native/build-macos-portable.sh @@ -158,7 +158,7 @@ if [ "$with_tests" = true ]; then http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \ test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \ - test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state test-emulator test $? 
-eq 0 || { echo "Can't compile ton"; exit 1; } else ninja storage-daemon storage-daemon-cli blockchain-explorer \ diff --git a/assembly/native/build-macos-shared.sh b/assembly/native/build-macos-shared.sh index 0f16eeda6..7574f481a 100644 --- a/assembly/native/build-macos-shared.sh +++ b/assembly/native/build-macos-shared.sh @@ -86,7 +86,7 @@ if [ "$with_tests" = true ]; then http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \ test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \ - test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state test-emulator test $? -eq 0 || { echo "Can't compile ton"; exit 1; } else ninja storage-daemon storage-daemon-cli blockchain-explorer \ diff --git a/assembly/native/build-ubuntu-portable.sh b/assembly/native/build-ubuntu-portable.sh index a3a11f1b2..b5a167626 100644 --- a/assembly/native/build-ubuntu-portable.sh +++ b/assembly/native/build-ubuntu-portable.sh @@ -150,7 +150,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \ test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \ test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \ - test-fec test-tddb test-db test-validator-session-state + test-fec test-tddb test-db test-validator-session-state test-emulator test $? -eq 0 || { echo "Can't compile ton"; exit 1; } else ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ diff --git a/assembly/native/build-ubuntu-shared.sh b/assembly/native/build-ubuntu-shared.sh index ec868ecd2..4ce86d81f 100644 --- a/assembly/native/build-ubuntu-shared.sh +++ b/assembly/native/build-ubuntu-shared.sh @@ -58,7 +58,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \ test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \ test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \ - test-fec test-tddb test-db test-validator-session-state + test-fec test-tddb test-db test-validator-session-state test-emulator test $? -eq 0 || { echo "Can't compile ton"; exit 1; } else ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ diff --git a/assembly/native/build-windows-2019.bat b/assembly/native/build-windows-2019.bat new file mode 100644 index 000000000..b528b05a2 --- /dev/null +++ b/assembly/native/build-windows-2019.bat @@ -0,0 +1,221 @@ +REM execute this script inside elevated (Run as Administrator) console "x64 Native Tools Command Prompt for VS 2019" + +echo off + +echo Installing chocolatey windows package manager... +@"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin" +choco -? +IF %errorlevel% NEQ 0 ( + echo Can't install chocolatey + exit /b %errorlevel% +) + +choco feature enable -n allowEmptyChecksums + +echo Installing pkgconfiglite... 
+choco install -y pkgconfiglite +IF %errorlevel% NEQ 0 ( + echo Can't install pkgconfiglite + exit /b %errorlevel% +) + +echo Installing ninja... +choco install -y ninja +IF %errorlevel% NEQ 0 ( + echo Can't install ninja + exit /b %errorlevel% +) + +if not exist "zlib" ( +git clone https://github.com/madler/zlib.git +cd zlib +git checkout v1.3.1 +cd contrib\vstudio\vc14 +msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142 + +IF %errorlevel% NEQ 0 ( + echo Can't install zlib + exit /b %errorlevel% +) +cd ..\..\..\.. +) else ( +echo Using zlib... +) + +if not exist "lz4" ( +git clone https://github.com/lz4/lz4.git +cd lz4 +git checkout v1.9.4 +cd build\VS2017\liblz4 +msbuild liblz4.vcxproj /p:Configuration=Release /p:platform=x64 -p:PlatformToolset=v142 + +IF %errorlevel% NEQ 0 ( + echo Can't install lz4 + exit /b %errorlevel% +) +cd ..\..\..\.. +) else ( +echo Using lz4... +) + +if not exist "secp256k1" ( +git clone https://github.com/bitcoin-core/secp256k1.git +cd secp256k1 +git checkout v0.3.2 +cmake -G "Visual Studio 16 2019" -A x64 -S . -B build -DSECP256K1_ENABLE_MODULE_RECOVERY=ON -DBUILD_SHARED_LIBS=OFF +IF %errorlevel% NEQ 0 ( + echo Can't configure secp256k1 + exit /b %errorlevel% +) +cmake --build build --config Release +IF %errorlevel% NEQ 0 ( + echo Can't install secp256k1 + exit /b %errorlevel% +) +cd .. +) else ( +echo Using secp256k1... +) + +if not exist "libsodium" ( +curl --retry 5 --retry-delay 10 -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip +IF %errorlevel% NEQ 0 ( + echo Can't download libsodium + exit /b %errorlevel% +) +unzip libsodium-1.0.18-stable-msvc.zip +) else ( +echo Using libsodium... +) + +if not exist "openssl-3.1.4" ( +curl -Lo openssl-3.1.4.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-3.1.4.zip +IF %errorlevel% NEQ 0 ( + echo Can't download OpenSSL + exit /b %errorlevel% +) +unzip -q openssl-3.1.4.zip +) else ( +echo Using openssl... +) + +if not exist "libmicrohttpd-0.9.77-w32-bin" ( +curl -Lo libmicrohttpd-0.9.77-w32-bin.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip +IF %errorlevel% NEQ 0 ( + echo Can't download libmicrohttpd + exit /b %errorlevel% +) +unzip -q libmicrohttpd-0.9.77-w32-bin.zip +) else ( +echo Using libmicrohttpd... +) + +if not exist "readline-5.0-1-lib" ( +curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip +IF %errorlevel% NEQ 0 ( + echo Can't download readline + exit /b %errorlevel% +) +unzip -q -d readline-5.0-1-lib readline-5.0-1-lib.zip +) else ( +echo Using readline... 
+) + + +set root=%cd% +echo %root% +set SODIUM_DIR=%root%\libsodium + +mkdir build +cd build +cmake -GNinja -DCMAKE_BUILD_TYPE=Release ^ +-DPORTABLE=1 ^ +-DSODIUM_USE_STATIC_LIBS=1 ^ +-DSECP256K1_FOUND=1 ^ +-DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include ^ +-DSECP256K1_LIBRARY=%root%\secp256k1\build\src\Release\libsecp256k1.lib ^ +-DLZ4_FOUND=1 ^ +-DLZ4_INCLUDE_DIRS=%root%\lz4\lib ^ +-DLZ4_LIBRARIES=%root%\lz4\build\VS2017\liblz4\bin\x64_Release\liblz4_static.lib ^ +-DMHD_FOUND=1 ^ +-DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib ^ +-DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static ^ +-DZLIB_FOUND=1 ^ +-DZLIB_INCLUDE_DIR=%root%\zlib ^ +-DZLIB_LIBRARIES=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib ^ +-DOPENSSL_FOUND=1 ^ +-DOPENSSL_INCLUDE_DIR=%root%\openssl-3.1.4\x64\include ^ +-DOPENSSL_CRYPTO_LIBRARY=%root%\openssl-3.1.4\x64\lib\libcrypto_static.lib ^ +-DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include ^ +-DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib ^ +-DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" .. +IF %errorlevel% NEQ 0 ( + echo Can't configure TON + exit /b %errorlevel% +) + +IF "%1"=="-t" ( +ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^ +tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^ +json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^ +test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^ +test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^ +test-fec test-tddb test-db test-validator-session-state test-emulator +IF %errorlevel% NEQ 0 ( + echo Can't compile TON + exit /b %errorlevel% +) +) else ( +ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^ +tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^ +json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator +IF %errorlevel% NEQ 0 ( + echo Can't compile TON + exit /b %errorlevel% +) +) + +copy validator-engine\validator-engine.exe test +IF %errorlevel% NEQ 0 ( + echo validator-engine.exe does not exist + exit /b %errorlevel% +) + +IF "%1"=="-t" ( + echo Running tests... +REM ctest -C Release --output-on-failure -E "test-catchain|test-actors|test-validator-session-state" + ctest -C Release --output-on-failure -E "test-bigint" --timeout 1800 + IF %errorlevel% NEQ 0 ( + echo Some tests failed + exit /b %errorlevel% + ) +) + + +echo Creating artifacts... +cd .. 
+mkdir artifacts +mkdir artifacts\smartcont +mkdir artifacts\lib + +for %%I in (build\storage\storage-daemon\storage-daemon.exe ^ +build\storage\storage-daemon\storage-daemon-cli.exe ^ +build\blockchain-explorer\blockchain-explorer.exe ^ +build\crypto\fift.exe ^ +build\crypto\tlbc.exe ^ +build\crypto\func.exe ^ +build\crypto\create-state.exe ^ +build\validator-engine-console\validator-engine-console.exe ^ +build\tonlib\tonlib-cli.exe ^ +build\tonlib\tonlibjson.dll ^ +build\http\http-proxy.exe ^ +build\rldp-http-proxy\rldp-http-proxy.exe ^ +build\dht-server\dht-server.exe ^ +build\lite-client\lite-client.exe ^ +build\validator-engine\validator-engine.exe ^ +build\utils\generate-random-id.exe ^ +build\utils\json2tlo.exe ^ +build\adnl\adnl-proxy.exe ^ +build\emulator\emulator.dll) do (strip -g %%I & copy %%I artifacts\) +xcopy /e /k /h /i crypto\smartcont artifacts\smartcont +xcopy /e /k /h /i crypto\fift\lib artifacts\lib diff --git a/assembly/native/build-windows-github-2019.bat b/assembly/native/build-windows-github-2019.bat new file mode 100644 index 000000000..4f7eee056 --- /dev/null +++ b/assembly/native/build-windows-github-2019.bat @@ -0,0 +1,2 @@ +call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\VC\Auxiliary\Build\vcvars64.bat" +call build-windows-2019.bat -t diff --git a/assembly/native/build-windows-github.bat b/assembly/native/build-windows-github.bat index 7cad8c7e5..bfa2d3362 100644 --- a/assembly/native/build-windows-github.bat +++ b/assembly/native/build-windows-github.bat @@ -1,2 +1,2 @@ call "C:\Program Files\Microsoft Visual Studio\2022\%1\VC\Auxiliary\Build\vcvars64.bat" -call build-windows.bat -t \ No newline at end of file +call build-windows.bat -t diff --git a/assembly/native/build-windows.bat b/assembly/native/build-windows.bat index 9b7322e1d..a9871aa96 100644 --- a/assembly/native/build-windows.bat +++ b/assembly/native/build-windows.bat @@ -161,7 +161,7 @@ tonlib-cli validator-engine lite-client pow-miner validator-engine-console gener json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^ test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^ test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^ -test-fec test-tddb test-db test-validator-session-state +test-fec test-tddb test-db test-validator-session-state test-emulator IF %errorlevel% NEQ 0 ( echo Can't compile TON exit /b %errorlevel% diff --git a/assembly/wasm/fift-func-wasm-build-ubuntu.sh b/assembly/wasm/fift-func-wasm-build-ubuntu.sh index 6daf2d4cd..e7a54d16f 100644 --- a/assembly/wasm/fift-func-wasm-build-ubuntu.sh +++ b/assembly/wasm/fift-func-wasm-build-ubuntu.sh @@ -1,5 +1,3 @@ -# The script builds funcfift compiler to WASM - # Execute these prerequisites first # sudo apt update # sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev \ @@ -11,10 +9,12 @@ # sudo ./llvm.sh 16 all with_artifacts=false +scratch_new=false -while getopts 'a' flag; do +while getopts 'af' flag; do case "${flag}" in a) with_artifacts=true ;; + f) scratch_new=true ;; *) break ;; esac @@ -24,108 +24,139 @@ export CC=$(which clang-16) export CXX=$(which clang++-16) export CCACHE_DISABLE=1 -cd ../.. 
-rm -rf openssl zlib emsdk secp256k1 libsodium build echo `pwd` +if [ "$scratch_new" = true ]; then + echo Compiling openssl zlib lz4 emsdk secp256k1 libsodium emsdk ton + rm -rf openssl zlib lz4 emsdk secp256k1 libsodium build +fi -git clone https://github.com/openssl/openssl.git -cd openssl -git checkout checkout openssl-3.1.4 -./config -make -j16 -OPENSSL_DIR=`pwd` -cd .. - -git clone https://github.com/madler/zlib.git -cd zlib -ZLIB_DIR=`pwd` -cd .. - -git clone https://github.com/lz4/lz4.git -cd lz4 -LZ4_DIR=`pwd` -cd .. - -git clone https://github.com/bitcoin-core/secp256k1.git -cd secp256k1 -./autogen.sh -SECP256K1_DIR=`pwd` -cd .. - -git clone https://github.com/jedisct1/libsodium --branch stable -cd libsodium -SODIUM_DIR=`pwd` -cd .. - -mkdir build -cd build -cmake -GNinja -DCMAKE_BUILD_TYPE=Release \ --DCMAKE_CXX_STANDARD=17 \ --DOPENSSL_FOUND=1 \ --DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ --DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ --DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \ --DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.so \ --DTON_USE_ABSEIL=OFF .. - -test $? -eq 0 || { echo "Can't configure TON build"; exit 1; } - -ninja fift smc-envelope -test $? -eq 0 || { echo "Can't compile fift "; exit 1; } +if [ ! -d "openssl" ]; then + git clone https://github.com/openssl/openssl.git + cd openssl + git checkout openssl-3.1.4 + ./config + make -j16 + OPENSSL_DIR=`pwd` + cd .. +else + OPENSSL_DIR=`pwd`/openssl + echo Using compiled openssl at $OPENSSL_DIR +fi -rm -rf * +if [ ! -d "build" ]; then + mkdir build + cd build + cmake -GNinja -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=17 \ + -DOPENSSL_FOUND=1 \ + -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ + -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \ + -DTON_USE_ABSEIL=OFF .. + + test $? -eq 0 || { echo "Can't configure TON build"; exit 1; } + ninja fift smc-envelope + test $? -eq 0 || { echo "Can't compile fift "; exit 1; } + rm -rf * + cd .. +else + echo cleaning build... + rm -rf build/* +fi -cd .. +if [ ! -d "emsdk" ]; then + git clone https://github.com/emscripten-core/emsdk.git +echo + echo Using cloned emsdk +fi -git clone https://github.com/emscripten-core/emsdk.git cd emsdk ./emsdk install 3.1.19 ./emsdk activate 3.1.19 EMSDK_DIR=`pwd` -ls $EMSDK_DIR . $EMSDK_DIR/emsdk_env.sh export CC=$(which emcc) export CXX=$(which em++) export CCACHE_DISABLE=1 -cd ../openssl - -make clean -emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test -sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile -sed -i 's/-ldl//g' Makefile -sed -i 's/-O3/-Os/g' Makefile -emmake make depend -emmake make -j16 -test $? -eq 0 || { echo "Can't compile OpenSSL with emmake "; exit 1; } - -cd ../zlib - -emconfigure ./configure --static -emmake make -j16 -test $? -eq 0 || { echo "Can't compile zlib with emmake "; exit 1; } +cd .. -cd ../lz4 -emmake make -j16 -test $? -eq 0 || { echo "Can't compile lz4 with emmake "; exit 1; } +if [ ! -f "openssl/openssl_em" ]; then + cd openssl + make clean + emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test + sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile + sed -i 's/-ldl//g' Makefile + sed -i 's/-O3/-Os/g' Makefile + emmake make depend + emmake make -j16 + test $? -eq 0 || { echo "Can't compile OpenSSL with emmake "; exit 1; } + touch openssl_em + cd .. +else + echo Using compiled openssl with emscripten +fi -cd ../secp256k1 +if [ ! 
-d "zlib" ]; then + git clone https://github.com/madler/zlib.git + cd zlib + git checkout v1.3.1 + ZLIB_DIR=`pwd` + emconfigure ./configure --static + emmake make -j16 + test $? -eq 0 || { echo "Can't compile zlib with emmake "; exit 1; } + cd .. +else + ZLIB_DIR=`pwd`/zlib + echo Using compiled zlib with emscripten at $ZLIB_DIR +fi -emconfigure ./configure --enable-module-recovery -emmake make -j16 -test $? -eq 0 || { echo "Can't compile secp256k1 with emmake "; exit 1; } +if [ ! -d "lz4" ]; then + git clone https://github.com/lz4/lz4.git + cd lz4 + git checkout v1.9.4 + LZ4_DIR=`pwd` + emmake make -j16 + test $? -eq 0 || { echo "Can't compile lz4 with emmake "; exit 1; } + cd .. +else + LZ4_DIR=`pwd`/lz4 + echo Using compiled lz4 with emscripten at $LZ4_DIR +fi -cd ../libsodium +if [ ! -d "secp256k1" ]; then + git clone https://github.com/bitcoin-core/secp256k1.git + cd secp256k1 + git checkout v0.3.2 + ./autogen.sh + SECP256K1_DIR=`pwd` + emconfigure ./configure --enable-module-recovery + emmake make -j16 + test $? -eq 0 || { echo "Can't compile secp256k1 with emmake "; exit 1; } + cd .. +else + SECP256K1_DIR=`pwd`/secp256k1 + echo Using compiled secp256k1 with emscripten at $SECP256K1_DIR +fi -emconfigure ./configure --disable-ssp -emmake make -j16 -test $? -eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; } +if [ ! -d "libsodium" ]; then + git clone https://github.com/jedisct1/libsodium + cd libsodium + git checkout 1.0.18-RELEASE + SODIUM_DIR=`pwd` + emconfigure ./configure --disable-ssp + emmake make -j16 + test $? -eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; } + cd .. +else + SODIUM_DIR=`pwd`/libsodium + echo Using compiled libsodium with emscripten at $SODIUM_DIR +fi -cd ../build +cd build -emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \ +emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \ -DZLIB_FOUND=1 \ -DZLIB_LIBRARIES=$ZLIB_DIR/libz.a \ -DZLIB_INCLUDE_DIR=$ZLIB_DIR \ @@ -133,18 +164,15 @@ emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \ -DLZ4_LIBRARIES=$LZ4_DIR/lib/liblz4.a \ -DLZ4_INCLUDE_DIRS=$LZ4_DIR/lib \ -DOPENSSL_FOUND=1 \ --DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a \ --DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a \ -DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake \ -DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" \ --DSECP256K1_FOUND=1 \ -DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include \ -DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a \ -DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include \ -DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \ --DSODIUM_USE_STATIC_LIBS=ON .. +.. test $? 
-eq 0 || { echo "Can't configure TON with emmake "; exit 1; } cp -R ../crypto/smartcont ../crypto/fift/lib crypto diff --git a/common/global-version.h b/common/global-version.h index a6775ffa4..a3032ebf2 100644 --- a/common/global-version.h +++ b/common/global-version.h @@ -19,6 +19,6 @@ namespace ton { // See doc/GlobalVersions.md -const int SUPPORTED_VERSION = 7; +const int SUPPORTED_VERSION = 8; } diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index 306194408..e21f18cbe 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -358,7 +358,8 @@ target_link_libraries(test-ed25519-crypto PUBLIC ton_crypto) add_library(fift-lib STATIC ${FIFT_SOURCE}) target_include_directories(fift-lib PUBLIC $) -target_link_libraries(fift-lib PUBLIC ton_crypto ton_db tdutils ton_block) +target_link_libraries(fift-lib PUBLIC ton_crypto tdutils ton_block) + if (USE_EMSCRIPTEN) target_link_options(fift-lib PRIVATE -fexceptions) target_compile_options(fift-lib PRIVATE -fexceptions) diff --git a/crypto/block/block-parse.cpp b/crypto/block/block-parse.cpp index 7d51b2e23..50851c795 100644 --- a/crypto/block/block-parse.cpp +++ b/crypto/block/block-parse.cpp @@ -813,19 +813,45 @@ int IntermediateAddress::get_size(const vm::CellSlice& cs) const { const IntermediateAddress t_IntermediateAddress; bool MsgEnvelope::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const { - return cs.fetch_ulong(4) == 4 // msg_envelope#4 - && t_IntermediateAddress.validate_skip(ops, cs, weak) // cur_addr:IntermediateAddress - && t_IntermediateAddress.validate_skip(ops, cs, weak) // next_addr:IntermediateAddress - && t_Grams.validate_skip(ops, cs, weak) // fwd_fee_remaining:Grams - && t_Ref_Message.validate_skip(ops, cs, weak); // msg:^Message + switch (get_tag(cs)) { + case 4: + return cs.fetch_ulong(4) == 4 // msg_envelope#4 + && t_IntermediateAddress.validate_skip(ops, cs, weak) // cur_addr:IntermediateAddress + && t_IntermediateAddress.validate_skip(ops, cs, weak) // next_addr:IntermediateAddress + && t_Grams.validate_skip(ops, cs, weak) // fwd_fee_remaining:Grams + && t_Ref_Message.validate_skip(ops, cs, weak); // msg:^Message + case 5: + return cs.fetch_ulong(4) == 5 // msg_envelope_v2#5 + && t_IntermediateAddress.validate_skip(ops, cs, weak) // cur_addr:IntermediateAddress + && t_IntermediateAddress.validate_skip(ops, cs, weak) // next_addr:IntermediateAddress + && t_Grams.validate_skip(ops, cs, weak) // fwd_fee_remaining:Grams + && t_Ref_Message.validate_skip(ops, cs, weak) // msg:^Message + && Maybe(64).validate_skip(ops, cs, weak) // emitted_lt:(Maybe uint64) + && Maybe().validate_skip(ops, cs, weak); // metadata:(Maybe MsgMetadata) + default: + return false; + } } bool MsgEnvelope::skip(vm::CellSlice& cs) const { - return cs.advance(4) // msg_envelope#4 - && t_IntermediateAddress.skip(cs) // cur_addr:IntermediateAddress - && t_IntermediateAddress.skip(cs) // next_addr:IntermediateAddress - && t_Grams.skip(cs) // fwd_fee_remaining:Grams - && t_Ref_Message.skip(cs); // msg:^Message + switch (get_tag(cs)) { + case 4: + return cs.advance(4) // msg_envelope#4 + && t_IntermediateAddress.skip(cs) // cur_addr:IntermediateAddress + && t_IntermediateAddress.skip(cs) // next_addr:IntermediateAddress + && t_Grams.skip(cs) // fwd_fee_remaining:Grams + && t_Ref_Message.skip(cs); // msg:^Message + case 5: + return cs.advance(4) // msg_envelope_v2#5 + && t_IntermediateAddress.skip(cs) // cur_addr:IntermediateAddress + && t_IntermediateAddress.skip(cs) // next_addr:IntermediateAddress + && t_Grams.skip(cs) // 
fwd_fee_remaining:Grams + && t_Ref_Message.skip(cs) // msg:^Message + && Maybe(64).skip(cs) // emitted_lt:(Maybe uint64) + && Maybe().skip(cs); // metadata:(Maybe MsgMetadata) + default: + return false; + } } bool MsgEnvelope::extract_fwd_fees_remaining(vm::CellSlice& cs) const { @@ -833,34 +859,101 @@ bool MsgEnvelope::extract_fwd_fees_remaining(vm::CellSlice& cs) const { } bool MsgEnvelope::unpack(vm::CellSlice& cs, MsgEnvelope::Record& data) const { - return cs.fetch_ulong(4) == 4 // msg_envelope#4 - && t_IntermediateAddress.fetch_to(cs, data.cur_addr) // cur_addr:IntermediateAddress - && t_IntermediateAddress.fetch_to(cs, data.next_addr) // next_addr:IntermediateAddress - && t_Grams.fetch_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams - && cs.fetch_ref_to(data.msg); // msg:^Message + switch (get_tag(cs)) { + case 4: + return cs.fetch_ulong(4) == 4 // msg_envelope#4 + && t_IntermediateAddress.fetch_to(cs, data.cur_addr) // cur_addr:IntermediateAddress + && t_IntermediateAddress.fetch_to(cs, data.next_addr) // next_addr:IntermediateAddress + && t_Grams.fetch_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams + && cs.fetch_ref_to(data.msg); // msg:^Message + case 5: + return cs.fetch_ulong(4) == 5 // msg_envelope_v2#5 + && t_IntermediateAddress.fetch_to(cs, data.cur_addr) // cur_addr:IntermediateAddress + && t_IntermediateAddress.fetch_to(cs, data.next_addr) // next_addr:IntermediateAddress + && t_Grams.fetch_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams + && cs.fetch_ref_to(data.msg) // msg:^Message + && Maybe(64).skip(cs) // emitted_lt:(Maybe uint64) + && Maybe().skip(cs); // metadata:(Maybe MsgMetadata) + default: + return false; + } } bool MsgEnvelope::unpack(vm::CellSlice& cs, MsgEnvelope::Record_std& data) const { - return cs.fetch_ulong(4) == 4 // msg_envelope#4 - && t_IntermediateAddress.fetch_regular(cs, data.cur_addr) // cur_addr:IntermediateAddress - && t_IntermediateAddress.fetch_regular(cs, data.next_addr) // next_addr:IntermediateAddress - && t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams - && cs.fetch_ref_to(data.msg); // msg:^Message + data.emitted_lt = {}; + data.metadata = {}; + switch (get_tag(cs)) { + case 4: + return cs.fetch_ulong(4) == 4 // msg_envelope#4 + && t_IntermediateAddress.fetch_regular(cs, data.cur_addr) // cur_addr:IntermediateAddress + && t_IntermediateAddress.fetch_regular(cs, data.next_addr) // next_addr:IntermediateAddress + && t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams + && cs.fetch_ref_to(data.msg); // msg:^Message + case 5: { + bool with_metadata, with_emitted_lt; + return cs.fetch_ulong(4) == 5 // msg_envelope_v2#5 + && t_IntermediateAddress.fetch_regular(cs, data.cur_addr) // cur_addr:IntermediateAddress + && t_IntermediateAddress.fetch_regular(cs, data.next_addr) // next_addr:IntermediateAddress + && t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams + && cs.fetch_ref_to(data.msg) // msg:^Message + && cs.fetch_bool_to(with_emitted_lt) && + (!with_emitted_lt || cs.fetch_uint_to(64, data.emitted_lt.value_force())) // emitted_lt:(Maybe uint64) + && cs.fetch_bool_to(with_metadata) && + (!with_metadata || data.metadata.value_force().unpack(cs)); // metadata:(Maybe MsgMetadata) + } + default: + return false; + } +} + +bool MsgEnvelope::pack(vm::CellBuilder& cb, const Record_std& data) const { + bool v2 = (bool)data.metadata || (bool)data.emitted_lt; + if (!(cb.store_long_bool(v2 ? 
5 : 4, 4) && // msg_envelope#4 / msg_envelope_v2#5 + cb.store_long_bool(data.cur_addr, 8) && // cur_addr:IntermediateAddress + cb.store_long_bool(data.next_addr, 8) && // next_addr:IntermediateAddress + t_Grams.store_integer_ref(cb, data.fwd_fee_remaining) && // fwd_fee_remaining:Grams + cb.store_ref_bool(data.msg))) { // msg:^Message + return false; + } + if (v2) { + if (!(cb.store_bool_bool((bool)data.emitted_lt) && + (!data.emitted_lt || cb.store_long_bool(data.emitted_lt.value(), 64)))) { // emitted_lt:(Maybe uint64) + return false; + } + if (!(cb.store_bool_bool((bool)data.metadata) && + (!data.metadata || data.metadata.value().pack(cb)))) { // metadata:(Maybe MsgMetadata) + return false; + } + } + return true; } -bool MsgEnvelope::unpack_std(vm::CellSlice& cs, int& cur_a, int& nhop_a, Ref& msg) const { - return cs.fetch_ulong(4) == 4 // msg_envelope#4 - && t_IntermediateAddress.fetch_regular(cs, cur_a) // cur_addr:IntermediateAddress - && t_IntermediateAddress.fetch_regular(cs, nhop_a) // next_addr:IntermediateAddress - && cs.fetch_ref_to(msg); +bool MsgEnvelope::pack_cell(td::Ref& cell, const Record_std& data) const { + vm::CellBuilder cb; + return pack(cb, data) && cb.finalize_to(cell); } -bool MsgEnvelope::get_created_lt(const vm::CellSlice& cs, unsigned long long& created_lt) const { +bool MsgEnvelope::get_emitted_lt(const vm::CellSlice& cs, unsigned long long& emitted_lt) const { + // Emitted lt is emitted_lt from MsgEnvelope (if present), otherwise created_lt if (!cs.size_refs()) { return false; } + if (get_tag(cs) == 5) { + vm::CellSlice cs2 = cs; + // msg_envelope_v2#5 cur_addr:IntermediateAddress + // next_addr:IntermediateAddress fwd_fee_remaining:Grams + // msg:^(Message Any) emitted_lt:(Maybe uint64) ... + bool have_emitted_lt; + if (!(cs2.skip_first(4) && t_IntermediateAddress.skip(cs2) && t_IntermediateAddress.skip(cs2) && + t_Grams.skip(cs2) && t_Ref_Message.skip(cs2) && cs2.fetch_bool_to(have_emitted_lt))) { + return false; + } + if (have_emitted_lt) { + return cs2.fetch_ulong_bool(64, emitted_lt); + } + } auto msg_cs = load_cell_slice(cs.prefetch_ref()); - return t_Message.get_created_lt(msg_cs, created_lt); + return t_Message.get_created_lt(msg_cs, emitted_lt); } const MsgEnvelope t_MsgEnvelope; @@ -1692,6 +1785,15 @@ bool InMsg::skip(vm::CellSlice& cs) const { && cs.advance(64) // transaction_id:uint64 && t_Grams.skip(cs) // fwd_fee:Grams && t_RefCell.skip(cs); // proof_delivered:^Cell + case msg_import_deferred_fin: + return cs.advance(5) // msg_import_deferred_fin$00100 + && t_Ref_MsgEnvelope.skip(cs) // in_msg:^MsgEnvelope + && t_Ref_Transaction.skip(cs) // transaction:^Transaction + && t_Grams.skip(cs); // fwd_fee:Grams + case msg_import_deferred_tr: + return cs.advance(5) // msg_import_deferred_tr$00101 + && t_Ref_MsgEnvelope.skip(cs) // in_msg:^MsgEnvelope + && t_Ref_MsgEnvelope.skip(cs); // out_msg:^MsgEnvelope } return false; } @@ -1734,12 +1836,22 @@ bool InMsg::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const { && cs.advance(64) // transaction_id:uint64 && t_Grams.validate_skip(ops, cs, weak) // fwd_fee:Grams && t_RefCell.validate_skip(ops, cs, weak); // proof_delivered:^Cell + case msg_import_deferred_fin: + return cs.advance(5) // msg_import_deferred_fin$00100 + && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // in_msg:^MsgEnvelope + && t_Ref_Transaction.validate_skip(ops, cs, weak) // transaction:^Transaction + && t_Grams.validate_skip(ops, cs, weak); // fwd_fee:Grams + case msg_import_deferred_tr: + return cs.advance(5) // 
msg_import_deferred_tr$00101 + && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // in_msg:^MsgEnvelope + && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak); // out_msg:^MsgEnvelope } return false; } bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const { - switch (get_tag(cs)) { + int tag = get_tag(cs); + switch (tag) { case msg_import_ext: // inbound external message return t_ImportFees.null_value(cb); // external messages have no value and no import fees case msg_import_ihr: // IHR-forwarded internal message to its final destination @@ -1765,8 +1877,9 @@ bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const { && t_CurrencyCollection.null_value(cb); // value_imported := 0 } return false; - case msg_import_fin: // internal message delivered to its final destination in this block - if (cs.advance(3) && cs.size_refs() >= 2) { + case msg_import_fin: // internal message delivered to its final destination in this block + case msg_import_deferred_fin: // internal message from DispatchQueue to its final destination in this block + if (cs.advance(tag == msg_import_fin ? 3 : 5) && cs.size_refs() >= 2) { auto msg_env_cs = load_cell_slice(cs.fetch_ref()); MsgEnvelope::Record in_msg; td::RefInt256 fwd_fee, fwd_fee_remaining, value_grams, ihr_fee; @@ -1787,13 +1900,14 @@ bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const { msg_info.value.write()); // value_imported = msg.value + msg.ihr_fee + fwd_fee_remaining } return false; - case msg_import_tr: // transit internal message - if (cs.advance(3) && cs.size_refs() >= 2) { + case msg_import_tr: // transit internal message + case msg_import_deferred_tr: // internal message from DispatchQueue to OutMsgQueue + if (cs.advance(tag == msg_import_tr ? 3 : 5) && cs.size_refs() >= 2) { auto msg_env_cs = load_cell_slice(cs.fetch_ref()); MsgEnvelope::Record in_msg; - td::RefInt256 transit_fee, fwd_fee_remaining, value_grams, ihr_fee; + td::RefInt256 transit_fee = td::zero_refint(), fwd_fee_remaining, value_grams, ihr_fee; if (!(t_MsgEnvelope.unpack(msg_env_cs, in_msg) && cs.fetch_ref().not_null() && - t_Grams.as_integer_skip_to(cs, transit_fee) && + (tag == msg_import_deferred_tr || t_Grams.as_integer_skip_to(cs, transit_fee)) && (fwd_fee_remaining = t_Grams.as_integer(in_msg.fwd_fee_remaining)).not_null() && cmp(transit_fee, fwd_fee_remaining) <= 0)) { return false; @@ -1871,6 +1985,14 @@ bool OutMsg::skip(vm::CellSlice& cs) const { return cs.advance(3) // msg_export_tr_req$111 && t_Ref_MsgEnvelope.skip(cs) // out_msg:^MsgEnvelope && RefTo{}.skip(cs); // imported:^InMsg + case msg_export_new_defer: + return cs.advance(5) // msg_export_new_defer$10100 + && t_Ref_MsgEnvelope.skip(cs) // out_msg:^MsgEnvelope + && t_Ref_Transaction.skip(cs); // transaction:^Transaction + case msg_export_deferred_tr: + return cs.advance(5) // msg_export_deferred_tr$10101 + && t_Ref_MsgEnvelope.skip(cs) // out_msg:^MsgEnvelope + && RefTo{}.skip(cs); // imported:^InMsg } return false; } @@ -1910,12 +2032,21 @@ bool OutMsg::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const { return cs.advance(3) // msg_export_tr_req$111 && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // out_msg:^MsgEnvelope && RefTo{}.validate_skip(ops, cs, weak); // imported:^InMsg + case msg_export_new_defer: + return cs.advance(5) // msg_export_new_defer$10100 + && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // out_msg:^MsgEnvelope + && t_Ref_Transaction.validate_skip(ops, cs, weak); // transaction:^Transaction + case msg_export_deferred_tr: + 
return cs.advance(5) // msg_export_deferred_tr$10101 + && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // out_msg:^MsgEnvelope + && RefTo{}.validate_skip(ops, cs, weak); // imported:^InMsg } return false; } bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const { - switch (get_tag(cs)) { + auto tag = get_tag(cs); + switch (tag) { case msg_export_ext: // external outbound message carries no value if (cs.have(3, 2)) { return t_CurrencyCollection.null_value(cb); @@ -1929,10 +2060,13 @@ bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const { return cs.have(4 + 63, 1) && t_CurrencyCollection.null_value(cb); case msg_export_deq_short: // dequeueing record for outbound message, no exported value return cs.have(4 + 256 + 32 + 64 + 64) && t_CurrencyCollection.null_value(cb); - case msg_export_new: // newly-generated outbound internal message, queued - case msg_export_tr: // transit internal message, queued - case msg_export_tr_req: // transit internal message, re-queued from this shardchain - if (cs.advance(3) && cs.size_refs() >= 2) { + case msg_export_new: // newly-generated outbound internal message, queued + case msg_export_tr: // transit internal message, queued + case msg_export_tr_req: // transit internal message, re-queued from this shardchain + case msg_export_new_defer: // newly-generated outbound internal message, deferred + case msg_export_deferred_tr: // internal message from DispatchQueue, queued + int tag_len = (tag == msg_export_new_defer || tag == msg_export_deferred_tr) ? 5 : 3; + if (cs.advance(tag_len) && cs.size_refs() >= 2) { auto msg_env_cs = load_cell_slice(cs.fetch_ref()); MsgEnvelope::Record out_msg; if (!(cs.fetch_ref().not_null() && t_MsgEnvelope.unpack(msg_env_cs, out_msg))) { @@ -1954,12 +2088,12 @@ bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const { return false; } -bool OutMsg::get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) const { +bool OutMsg::get_emitted_lt(vm::CellSlice& cs, unsigned long long& emitted_lt) const { switch (get_tag(cs)) { case msg_export_ext: if (cs.have(3, 1)) { auto msg_cs = load_cell_slice(cs.prefetch_ref()); - return t_Message.get_created_lt(msg_cs, created_lt); + return t_Message.get_created_lt(msg_cs, emitted_lt); } else { return false; } @@ -1970,9 +2104,11 @@ bool OutMsg::get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) c case msg_export_deq_short: case msg_export_deq_imm: case msg_export_tr_req: + case msg_export_new_defer: + case msg_export_deferred_tr: if (cs.have(3, 1)) { auto out_msg_cs = load_cell_slice(cs.prefetch_ref()); - return t_MsgEnvelope.get_created_lt(out_msg_cs, created_lt); + return t_MsgEnvelope.get_emitted_lt(out_msg_cs, emitted_lt); } else { return false; } @@ -2003,26 +2139,53 @@ bool Aug_OutMsgQueue::eval_empty(vm::CellBuilder& cb) const { bool Aug_OutMsgQueue::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const { Ref msg_env; - unsigned long long created_lt; - return cs.fetch_ref_to(msg_env) && t_MsgEnvelope.get_created_lt(load_cell_slice(std::move(msg_env)), created_lt) && - cb.store_ulong_rchk_bool(created_lt, 64); + unsigned long long emitted_lt; + return cs.fetch_ref_to(msg_env) && t_MsgEnvelope.get_emitted_lt(load_cell_slice(std::move(msg_env)), emitted_lt) && + cb.store_ulong_rchk_bool(emitted_lt, 64); +} + +bool Aug_DispatchQueue::eval_fork(vm::CellBuilder& cb, vm::CellSlice& left_cs, vm::CellSlice& right_cs) const { + unsigned long long x, y; + return left_cs.fetch_ulong_bool(64, x) && 
right_cs.fetch_ulong_bool(64, y) && + cb.store_ulong_rchk_bool(std::min(x, y), 64); +} + +bool Aug_DispatchQueue::eval_empty(vm::CellBuilder& cb) const { + return cb.store_long_bool(0, 64); +} + +bool Aug_DispatchQueue::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const { + Ref messages_root; + if (!cs.fetch_maybe_ref(messages_root)) { + return false; + } + vm::Dictionary messages{std::move(messages_root), 64}; + td::BitArray<64> key_buffer; + td::uint64 key; + if (messages.get_minmax_key(key_buffer.bits(), 64).is_null()) { + key = (td::uint64)-1; + } else { + key = key_buffer.to_ulong(); + } + return cb.store_long_bool(key, 64); } const Aug_OutMsgQueue aug_OutMsgQueue; +const Aug_DispatchQueue aug_DispatchQueue; const OutMsgQueue t_OutMsgQueue; const ProcessedUpto t_ProcessedUpto; const HashmapE t_ProcessedInfo{96, t_ProcessedUpto}; const HashmapE t_IhrPendingInfo{256, t_uint128}; -// _ out_queue:OutMsgQueue proc_info:ProcessedInfo = OutMsgQueueInfo; +// _ out_queue:OutMsgQueue proc_info:ProcessedInfo extra:(Maybe OutMsgQueueExtra) = OutMsgQueueInfo; bool OutMsgQueueInfo::skip(vm::CellSlice& cs) const { - return t_OutMsgQueue.skip(cs) && t_ProcessedInfo.skip(cs) && t_IhrPendingInfo.skip(cs); + return t_OutMsgQueue.skip(cs) && t_ProcessedInfo.skip(cs) && Maybe().skip(cs); } bool OutMsgQueueInfo::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const { return t_OutMsgQueue.validate_skip(ops, cs, weak) && t_ProcessedInfo.validate_skip(ops, cs, weak) && - t_IhrPendingInfo.validate_skip(ops, cs, weak); + Maybe().validate_skip(ops, cs, weak); } const OutMsgQueueInfo t_OutMsgQueueInfo; diff --git a/crypto/block/block-parse.h b/crypto/block/block-parse.h index c0b117452..65f8b91fe 100644 --- a/crypto/block/block-parse.h +++ b/crypto/block/block-parse.h @@ -28,6 +28,7 @@ #include "td/utils/bits.h" #include "td/utils/StringBuilder.h" #include "ton/ton-types.h" +#include "block-auto.h" namespace block { @@ -469,11 +470,17 @@ struct MsgEnvelope final : TLB_Complex { int cur_addr, next_addr; td::RefInt256 fwd_fee_remaining; Ref msg; + td::optional emitted_lt; + td::optional metadata; }; bool unpack(vm::CellSlice& cs, Record& data) const; bool unpack(vm::CellSlice& cs, Record_std& data) const; - bool unpack_std(vm::CellSlice& cs, int& cur_a, int& nhop_a, Ref& msg) const; - bool get_created_lt(const vm::CellSlice& cs, unsigned long long& created_lt) const; + bool pack(vm::CellBuilder& cb, const Record_std& data) const; + bool pack_cell(td::Ref& cell, const Record_std& data) const; + bool get_emitted_lt(const vm::CellSlice& cs, unsigned long long& emitted_lt) const; + int get_tag(const vm::CellSlice& cs) const override { + return (int)cs.prefetch_ulong(4); + } }; extern const MsgEnvelope t_MsgEnvelope; @@ -801,12 +808,18 @@ struct InMsg final : TLB_Complex { msg_import_fin = 4, msg_import_tr = 5, msg_discard_fin = 6, - msg_discard_tr = 7 + msg_discard_tr = 7, + msg_import_deferred_fin = 8, + msg_import_deferred_tr = 9 }; bool skip(vm::CellSlice& cs) const override; bool validate_skip(int* ops, vm::CellSlice& cs, bool weak = false) const override; int get_tag(const vm::CellSlice& cs) const override { - return (int)cs.prefetch_ulong(3); + int tag = (int)cs.prefetch_ulong(3); + if (tag != 1) { + return tag; + } + return (int)cs.prefetch_ulong(5) - 0b00100 + 8; } bool get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const; }; @@ -822,16 +835,24 @@ struct OutMsg final : TLB_Complex { msg_export_deq_imm = 4, msg_export_deq = 12, msg_export_deq_short = 13, - msg_export_tr_req = 7 + 
msg_export_tr_req = 7, + msg_export_new_defer = 20, // 0b10100 + msg_export_deferred_tr = 21 // 0b10101 }; bool skip(vm::CellSlice& cs) const override; bool validate_skip(int* ops, vm::CellSlice& cs, bool weak = false) const override; int get_tag(const vm::CellSlice& cs) const override { int t = (int)cs.prefetch_ulong(3); - return t != 6 ? t : (int)cs.prefetch_ulong(4); + if (t == 6) { + return (int)cs.prefetch_ulong(4); + } + if (t == 5) { + return (int)cs.prefetch_ulong(5); + } + return t; } bool get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const; - bool get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) const; + bool get_emitted_lt(vm::CellSlice& cs, unsigned long long& emitted_lt) const; }; extern const OutMsg t_OutMsg; @@ -909,6 +930,16 @@ struct Aug_OutMsgQueue final : AugmentationCheckData { extern const Aug_OutMsgQueue aug_OutMsgQueue; +struct Aug_DispatchQueue final : AugmentationCheckData { + Aug_DispatchQueue() : AugmentationCheckData(gen::t_AccountDispatchQueue, t_uint64) { + } + bool eval_fork(vm::CellBuilder& cb, vm::CellSlice& left_cs, vm::CellSlice& right_cs) const override; + bool eval_empty(vm::CellBuilder& cb) const override; + bool eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const override; +}; + +extern const Aug_DispatchQueue aug_DispatchQueue; + struct OutMsgQueue final : TLB_Complex { HashmapAugE dict_type; OutMsgQueue() : dict_type(32 + 64 + 256, aug_OutMsgQueue){}; diff --git a/crypto/block/block.cpp b/crypto/block/block.cpp index a22fd1e56..cb371fa0b 100644 --- a/crypto/block/block.cpp +++ b/crypto/block/block.cpp @@ -28,6 +28,7 @@ #include "td/utils/tl_storers.h" #include "td/utils/misc.h" #include "td/utils/Random.h" +#include "vm/fmt.hpp" namespace block { using namespace std::literals::string_literals; @@ -642,7 +643,11 @@ bool EnqueuedMsgDescr::unpack(vm::CellSlice& cs) { } cur_prefix_ = interpolate_addr(src_prefix_, dest_prefix_, env.cur_addr); next_prefix_ = interpolate_addr(src_prefix_, dest_prefix_, env.next_addr); - lt_ = info.created_lt; + unsigned long long lt; + if (!tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(enq.out_msg), lt)) { + return invalidate(); + } + lt_ = lt; enqueued_lt_ = enq.enqueued_lt; hash_ = env.msg->get_hash().bits(); msg_ = std::move(env.msg); @@ -858,12 +863,20 @@ td::Status ShardState::unpack_out_msg_queue_info(Ref out_msg_queue_inf return td::Status::Error( -666, "ProcessedInfo in the state of "s + id_.to_str() + " is invalid according to automated validity checks"); } - if (!block::gen::t_IhrPendingInfo.validate_csr(1024, qinfo.ihr_pending)) { - return td::Status::Error( - -666, "IhrPendingInfo in the state of "s + id_.to_str() + " is invalid according to automated validity checks"); - } processed_upto_ = block::MsgProcessedUptoCollection::unpack(ton::ShardIdFull(id_), std::move(qinfo.proc_info)); - ihr_pending_ = std::make_unique(std::move(qinfo.ihr_pending), 320); + ihr_pending_ = std::make_unique(320); + if (qinfo.extra.write().fetch_long(1)) { + block::gen::OutMsgQueueExtra::Record extra; + if (!block::tlb::csr_unpack(qinfo.extra, extra)) { + return td::Status::Error(-666, "cannot unpack OutMsgQueueExtre in the state of "s + id_.to_str()); + } + dispatch_queue_ = std::make_unique(extra.dispatch_queue, 256, tlb::aug_DispatchQueue); + if (extra.out_queue_size.write().fetch_long(1)) { + out_msg_queue_size_ = extra.out_queue_size->prefetch_ulong(48); + } + } else { + dispatch_queue_ = std::make_unique(256, tlb::aug_DispatchQueue); + } auto shard1 = id_.shard_full(); td::BitArray<64> 
pfx{(long long)shard1.shard}; int pfx_len = shard_prefix_length(shard1); @@ -994,6 +1007,17 @@ td::Status ShardState::merge_with(ShardState& sib) { underload_history_ = overload_history_ = 0; // 10. compute vert_seqno vert_seqno_ = std::max(vert_seqno_, sib.vert_seqno_); + // 11. merge dispatch_queue (same as account dict) + if (!dispatch_queue_->combine_with(*sib.dispatch_queue_)) { + return td::Status::Error(-666, "cannot merge dispatch queues of the two ancestors"); + } + sib.dispatch_queue_.reset(); + // 11. merge out_msg_queue_size + if (out_msg_queue_size_ && sib.out_msg_queue_size_) { + out_msg_queue_size_.value() += sib.out_msg_queue_size_.value(); + } else { + out_msg_queue_size_ = {}; + } // Anything else? add here // ... @@ -1009,8 +1033,8 @@ td::Status ShardState::merge_with(ShardState& sib) { return td::Status::OK(); } -td::Result> ShardState::compute_split_out_msg_queue(ton::ShardIdFull subshard, - td::uint32* queue_size) { +td::Result> ShardState::compute_split_out_msg_queue( + ton::ShardIdFull subshard) { auto shard = id_.shard_full(); if (!ton::shard_is_parent(shard, subshard)) { return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() + @@ -1018,7 +1042,7 @@ td::Result> ShardState::compute_split_o } CHECK(out_msg_queue_); auto subqueue = std::make_unique(*out_msg_queue_); - int res = block::filter_out_msg_queue(*subqueue, shard, subshard, queue_size); + int res = block::filter_out_msg_queue(*subqueue, shard, subshard); if (res < 0) { return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str()); } @@ -1040,7 +1064,7 @@ td::Result> ShardState::compu return std::move(sub_processed_upto); } -td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) { +td::Status ShardState::split(ton::ShardIdFull subshard) { if (!ton::shard_is_parent(id_.shard_full(), subshard)) { return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() + " because it is not a parent"); @@ -1058,10 +1082,12 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) auto shard1 = id_.shard_full(); CHECK(ton::shard_is_parent(shard1, subshard)); CHECK(out_msg_queue_); - int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, queue_size); + td::uint64 queue_size; + int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, &queue_size); if (res1 < 0) { return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str()); } + out_msg_queue_size_ = queue_size; LOG(DEBUG) << "split counters: " << res1; // 3. processed_upto LOG(DEBUG) << "splitting ProcessedUpto"; @@ -1091,6 +1117,11 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) // NB: if total_fees_extra will be allowed to be non-empty, split it here too // 7. reset overload/underload history overload_history_ = underload_history_ = 0; + // 8. split dispatch_queue (same as account dict) + LOG(DEBUG) << "splitting dispatch_queue"; + CHECK(dispatch_queue_); + CHECK(dispatch_queue_->cut_prefix_subdict(pfx.bits(), pfx_len)); + CHECK(dispatch_queue_->has_common_prefix(pfx.bits(), pfx_len)); // 999. anything else? 
id_.id.shard = subshard.shard; id_.file_hash.set_zero(); @@ -1099,7 +1130,7 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) } int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard, - td::uint32* queue_size) { + td::uint64* queue_size) { if (queue_size) { *queue_size = 0; } @@ -1390,7 +1421,7 @@ bool ValueFlow::store(vm::CellBuilder& cb) const { && exported.store(cb2) // exported:CurrencyCollection && cb.store_ref_bool(cb2.finalize()) // ] && fees_collected.store(cb) // fees_collected:CurrencyCollection - && (burned.is_zero() || burned.store(cb)) // fees_burned:CurrencyCollection + && (burned.is_zero() || burned.store(cb)) // fees_burned:CurrencyCollection && fees_imported.store(cb2) // ^[ fees_imported:CurrencyCollection && recovered.store(cb2) // recovered:CurrencyCollection && created.store(cb2) // created:CurrencyCollection @@ -1419,8 +1450,7 @@ bool ValueFlow::fetch(vm::CellSlice& cs) { from_prev_blk.validate_unpack(std::move(f2.r1.from_prev_blk)) && to_next_blk.validate_unpack(std::move(f2.r1.to_next_blk)) && imported.validate_unpack(std::move(f2.r1.imported)) && exported.validate_unpack(std::move(f2.r1.exported)) && - fees_collected.validate_unpack(std::move(f2.fees_collected)) && - burned.validate_unpack(std::move(f2.burned)) && + fees_collected.validate_unpack(std::move(f2.fees_collected)) && burned.validate_unpack(std::move(f2.burned)) && fees_imported.validate_unpack(std::move(f2.r2.fees_imported)) && recovered.validate_unpack(std::move(f2.r2.recovered)) && created.validate_unpack(std::move(f2.r2.created)) && minted.validate_unpack(std::move(f2.r2.minted))) { @@ -2305,4 +2335,132 @@ bool parse_block_id_ext(td::Slice str, ton::BlockIdExt& blkid) { return parse_block_id_ext(str.begin(), str.end(), blkid); } +bool unpack_account_dispatch_queue(Ref csr, vm::Dictionary& dict, td::uint64& dict_size) { + if (csr.not_null()) { + block::gen::AccountDispatchQueue::Record rec; + if (!block::tlb::csr_unpack(std::move(csr), rec)) { + return false; + } + dict = vm::Dictionary{rec.messages, 64}; + dict_size = rec.count; + if (dict_size == 0 || dict.is_empty()) { + return false; + } + } else { + dict = vm::Dictionary{64}; + dict_size = 0; + } + return true; +} + +Ref pack_account_dispatch_queue(const vm::Dictionary& dict, td::uint64 dict_size) { + if (dict_size == 0) { + return {}; + } + // _ messages:(HashmapE 64 EnqueuedMsg) count:uint48 = AccountDispatchQueue; + vm::CellBuilder cb; + CHECK(dict.append_dict_to_bool(cb)); + cb.store_long(dict_size, 48); + return cb.as_cellslice_ref(); +} + +Ref get_dispatch_queue_min_lt_account(const vm::AugmentedDictionary& dispatch_queue, + ton::StdSmcAddress& addr) { + // TODO: This can be done more effectively + vm::AugmentedDictionary queue{dispatch_queue.get_root(), 256, tlb::aug_DispatchQueue}; + if (queue.is_empty()) { + return {}; + } + auto root_extra = queue.get_root_extra(); + if (root_extra.is_null()) { + return {}; + } + ton::LogicalTime min_lt = root_extra->prefetch_long(64); + while (true) { + td::Bits256 key; + int pfx_len = queue.get_common_prefix(key.bits(), 256); + if (pfx_len < 0) { + return {}; + } + if (pfx_len == 256) { + addr = key; + return queue.lookup(key); + } + key[pfx_len] = false; + vm::AugmentedDictionary queue_cut{queue.get_root(), 256, tlb::aug_DispatchQueue}; + if (!queue_cut.cut_prefix_subdict(key.bits(), pfx_len + 1)) { + return {}; + } + root_extra = queue_cut.get_root_extra(); + if (root_extra.is_null()) { + return {}; + } + 
ton::LogicalTime cut_min_lt = root_extra->prefetch_long(64); + if (cut_min_lt != min_lt) { + key[pfx_len] = true; + } + if (!queue.cut_prefix_subdict(key.bits(), pfx_len + 1)) { + return {}; + } + } +} + +bool remove_dispatch_queue_entry(vm::AugmentedDictionary& dispatch_queue, const ton::StdSmcAddress& addr, + ton::LogicalTime lt) { + auto account_dispatch_queue = dispatch_queue.lookup(addr); + if (account_dispatch_queue.is_null()) { + return false; + } + vm::Dictionary dict{64}; + td::uint64 dict_size; + if (!unpack_account_dispatch_queue(std::move(account_dispatch_queue), dict, dict_size)) { + return false; + } + td::BitArray<64> key; + key.store_ulong(lt); + auto entry = dict.lookup_delete(key); + if (entry.is_null()) { + return false; + } + --dict_size; + account_dispatch_queue = pack_account_dispatch_queue(dict, dict_size); + if (account_dispatch_queue.not_null()) { + dispatch_queue.set(addr, account_dispatch_queue); + } else { + dispatch_queue.lookup_delete(addr); + } + return true; +} + +bool MsgMetadata::unpack(vm::CellSlice& cs) { + // msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata; + int tag; + return cs.fetch_int_to(4, tag) && tag == 0 && cs.fetch_uint_to(32, depth) && + cs.prefetch_ulong(3) == 0b100 && // std address, no anycast + tlb::t_MsgAddressInt.extract_std_address(cs, initiator_wc, initiator_addr) && + cs.fetch_uint_to(64, initiator_lt); +} + +bool MsgMetadata::pack(vm::CellBuilder& cb) const { + // msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata; + return cb.store_long_bool(0, 4) && cb.store_long_bool(depth, 32) && + tlb::t_MsgAddressInt.store_std_address(cb, initiator_wc, initiator_addr) && + cb.store_long_bool(initiator_lt, 64); +} + +std::string MsgMetadata::to_str() const { + return PSTRING() << "[ depth=" << depth << " init=" << initiator_wc << ":" << initiator_addr.to_hex() << ":" + << initiator_lt << " ]"; +} + +bool MsgMetadata::operator==(const MsgMetadata& other) const { + return depth == other.depth && initiator_wc == other.initiator_wc && initiator_addr == other.initiator_addr && + initiator_lt == other.initiator_lt; +} + +bool MsgMetadata::operator!=(const MsgMetadata& other) const { + return !(*this == other); +} + + } // namespace block diff --git a/crypto/block/block.h b/crypto/block/block.h index c54949f43..5f3dadff4 100644 --- a/crypto/block/block.h +++ b/crypto/block/block.h @@ -417,6 +417,8 @@ struct ShardState { std::unique_ptr ihr_pending_; std::unique_ptr block_create_stats_; std::shared_ptr processed_upto_; + std::unique_ptr dispatch_queue_; + td::optional out_msg_queue_size_; bool is_valid() const { return id_.is_valid(); @@ -433,11 +435,10 @@ struct ShardState { ton::BlockSeqno prev_mc_block_seqno, bool after_split, bool clear_history, std::function for_each_mcseqno); td::Status merge_with(ShardState& sib); - td::Result> compute_split_out_msg_queue(ton::ShardIdFull subshard, - td::uint32* queue_size = nullptr); + td::Result> compute_split_out_msg_queue(ton::ShardIdFull subshard); td::Result> compute_split_processed_upto( ton::ShardIdFull subshard); - td::Status split(ton::ShardIdFull subshard, td::uint32* queue_size = nullptr); + td::Status split(ton::ShardIdFull subshard); td::Status unpack_out_msg_queue_info(Ref out_msg_queue_info); bool clear_load_history() { overload_history_ = underload_history_ = 0; @@ -658,7 +659,7 @@ class MtCarloComputeShare { }; int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, 
ton::ShardIdFull subshard, - td::uint32* queue_size = nullptr); + td::uint64* queue_size = nullptr); std::ostream& operator<<(std::ostream& os, const ShardId& shard_id); @@ -749,4 +750,25 @@ bool parse_hex_hash(td::Slice str, td::Bits256& hash); bool parse_block_id_ext(const char* str, const char* end, ton::BlockIdExt& blkid); bool parse_block_id_ext(td::Slice str, ton::BlockIdExt& blkid); +bool unpack_account_dispatch_queue(Ref csr, vm::Dictionary& dict, td::uint64& dict_size); +Ref pack_account_dispatch_queue(const vm::Dictionary& dict, td::uint64 dict_size); +Ref get_dispatch_queue_min_lt_account(const vm::AugmentedDictionary& dispatch_queue, + ton::StdSmcAddress& addr); +bool remove_dispatch_queue_entry(vm::AugmentedDictionary& dispatch_queue, const ton::StdSmcAddress& addr, + ton::LogicalTime lt); + +struct MsgMetadata { + td::uint32 depth; + ton::WorkchainId initiator_wc; + ton::StdSmcAddress initiator_addr; + ton::LogicalTime initiator_lt; + + bool unpack(vm::CellSlice& cs); + bool pack(vm::CellBuilder& cb) const; + std::string to_str() const; + + bool operator==(const MsgMetadata& other) const; + bool operator!=(const MsgMetadata& other) const; +}; + } // namespace block diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index 3ae542399..a3684f563 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -172,6 +172,12 @@ interm_addr_ext$11 workchain_id:int32 addr_pfx:uint64 msg_envelope#4 cur_addr:IntermediateAddress next_addr:IntermediateAddress fwd_fee_remaining:Grams msg:^(Message Any) = MsgEnvelope; +msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata; +msg_envelope_v2#5 cur_addr:IntermediateAddress + next_addr:IntermediateAddress fwd_fee_remaining:Grams + msg:^(Message Any) + emitted_lt:(Maybe uint64) + metadata:(Maybe MsgMetadata) = MsgEnvelope; // msg_import_ext$000 msg:^(Message Any) transaction:^Transaction = InMsg; @@ -187,6 +193,9 @@ msg_discard_fin$110 in_msg:^MsgEnvelope transaction_id:uint64 fwd_fee:Grams = InMsg; msg_discard_tr$111 in_msg:^MsgEnvelope transaction_id:uint64 fwd_fee:Grams proof_delivered:^Cell = InMsg; +msg_import_deferred_fin$00100 in_msg:^MsgEnvelope + transaction:^Transaction fwd_fee:Grams = InMsg; +msg_import_deferred_tr$00101 in_msg:^MsgEnvelope out_msg:^MsgEnvelope = InMsg; // import_fees$_ fees_collected:Grams value_imported:CurrencyCollection = ImportFees; @@ -210,6 +219,10 @@ msg_export_tr_req$111 out_msg:^MsgEnvelope imported:^InMsg = OutMsg; msg_export_deq_imm$100 out_msg:^MsgEnvelope reimport:^InMsg = OutMsg; +msg_export_new_defer$10100 out_msg:^MsgEnvelope + transaction:^Transaction = OutMsg; +msg_export_deferred_tr$10101 out_msg:^MsgEnvelope + imported:^InMsg = OutMsg; _ enqueued_lt:uint64 out_msg:^MsgEnvelope = EnqueuedMsg; @@ -224,8 +237,15 @@ _ (HashmapE 96 ProcessedUpto) = ProcessedInfo; ihr_pending$_ import_lt:uint64 = IhrPendingSince; _ (HashmapE 320 IhrPendingSince) = IhrPendingInfo; +// key - created_lt +_ messages:(HashmapE 64 EnqueuedMsg) count:uint48 = AccountDispatchQueue; +// key - sender address, aug - min created_lt +_ (HashmapAugE 256 AccountDispatchQueue uint64) = DispatchQueue; + +out_msg_queue_extra#0 dispatch_queue:DispatchQueue out_queue_size:(Maybe uint48) = OutMsgQueueExtra; + _ out_queue:OutMsgQueue proc_info:ProcessedInfo - ihr_pending:IhrPendingInfo = OutMsgQueueInfo; + extra:(Maybe OutMsgQueueExtra) = OutMsgQueueInfo; // storage_used$_ cells:(VarUInteger 7) bits:(VarUInteger 7) public_cells:(VarUInteger 7) = StorageUsed; @@ -781,7 +801,7 @@ 
size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig; size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 - max_acc_public_libraries:uint32 = SizeLimitsConfig; + max_acc_public_libraries:uint32 defer_out_queue_size_limit:uint32 = SizeLimitsConfig; _ SizeLimitsConfig = ConfigParam 43; // key is [ wc:int32 addr:uint256 ] diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 1dbfeaedc..6da0f034d 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -1956,6 +1956,7 @@ td::Result Config::do_get_size_limits_config(td::Ref& rand_seed, const ComputeP // if the smart contract wants to randomize further, it can use RANDOMIZE instruction td::BitArray<256 + 256> data; data.bits().copy_from(cfg.block_rand_seed.cbits(), 256); - (data.bits() + 256).copy_from(account.addr_rewrite.cbits(), 256); + if (cfg.global_version >= 8) { + (data.bits() + 256).copy_from(account.addr.cbits(), 256); + } else { + (data.bits() + 256).copy_from(account.addr_rewrite.cbits(), 256); + } rand_seed.clear(); data.compute_sha256(rand_seed); return true; @@ -1600,12 +1604,22 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { cp.skip_reason = in_msg_state.not_null() ? ComputePhase::sk_bad_state : ComputePhase::sk_no_state; return true; } else if (in_msg_state.not_null()) { + if (cfg.allow_external_unfreeze) { + if (in_msg_extern && account.addr != in_msg_state->get_hash().bits()) { + // only for external messages with non-zero initstate in active accounts + LOG(DEBUG) << "in_msg_state hash mismatch in external message"; + cp.skip_reason = ComputePhase::sk_bad_state; + return true; + } + } unpack_msg_state(cfg, true); // use only libraries } - if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) { - LOG(DEBUG) << "in_msg_state hash mismatch in external message"; - cp.skip_reason = ComputePhase::sk_bad_state; - return true; + if (!cfg.allow_external_unfreeze) { + if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) { + LOG(DEBUG) << "in_msg_state hash mismatch in external message"; + cp.skip_reason = ComputePhase::sk_bad_state; + return true; + } } td::optional precompiled; @@ -1647,7 +1661,12 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { std::unique_ptr logger; auto vm_log = vm::VmLog(); if (cfg.with_vm_log) { - size_t log_max_size = cfg.vm_log_verbosity > 0 ? 
1024 * 1024 : 256; + size_t log_max_size = 256; + if (cfg.vm_log_verbosity > 4) { + log_max_size = 32 << 20; + } else if (cfg.vm_log_verbosity > 0) { + log_max_size = 1 << 20; + } logger = std::make_unique(log_max_size); vm_log.log_interface = logger.get(); vm_log.log_options = td::LogOptions(VERBOSITY_NAME(DEBUG), true, false); @@ -1659,6 +1678,7 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { vm_log.log_mask |= vm::VmLog::DumpStack; if (cfg.vm_log_verbosity > 4) { vm_log.log_mask |= vm::VmLog::DumpStackVerbose; + vm_log.log_mask |= vm::VmLog::DumpC5; } } } @@ -1826,6 +1846,26 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { for (int i = n - 1; i >= 0; --i) { ap.result_arg = n - 1 - i; if (!block::gen::t_OutListNode.validate_ref(ap.action_list[i])) { + if (cfg.message_skip_enabled) { + // try to read mode from action_send_msg even if out_msg scheme is violated + // action should at least contain 40 bits: 32bit tag and 8 bit mode + // if (mode & 2), that is ignore error mode, skip action even for invalid message + // if there is no (mode & 2) but (mode & 16) presents - enable bounce if possible + bool special = true; + auto cs = load_cell_slice_special(ap.action_list[i], special); + if (!special) { + if ((cs.size() >= 40) && ((int)cs.fetch_ulong(32) == 0x0ec3c86d)) { + int mode = (int)cs.fetch_ulong(8); + if (mode & 2) { + ap.skipped_actions++; + ap.action_list[i] = {}; + continue; + } else if ((mode & 16) && cfg.bounce_on_fail_enabled) { + ap.bounce = true; + } + } + } + } ap.result_code = 34; // action #i invalid or unsupported ap.action_list_invalid = true; LOG(DEBUG) << "invalid action " << ap.result_arg << " found while preprocessing action list: error code " @@ -1835,6 +1875,9 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { } ap.valid = true; for (int i = n - 1; i >= 0; --i) { + if(ap.action_list[i].is_null()) { + continue; + } ap.result_arg = n - 1 - i; vm::CellSlice cs = load_cell_slice(ap.action_list[i]); CHECK(cs.fetch_ref().not_null()); @@ -1872,7 +1915,7 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { ap.no_funds = true; } LOG(DEBUG) << "invalid action " << ap.result_arg << " in action list: error code " << ap.result_code; - // This is reuqired here because changes to libraries are applied even if actipn phase fails + // This is required here because changes to libraries are applied even if actipn phase fails enforce_state_limits(); if (cfg.action_fine_enabled) { ap.action_fine = std::min(ap.action_fine, balance.grams); @@ -2280,6 +2323,15 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, return -1; } bool skip_invalid = (act_rec.mode & 2); + auto check_skip_invalid = [&](unsigned error_code) -> unsigned int { + if (skip_invalid) { + if (cfg.message_skip_enabled) { + ap.skipped_actions++; + } + return 0; + } + return error_code; + }; // try to parse suggested message in act_rec.out_msg td::RefInt256 fwd_fee, ihr_fee; block::gen::MessageRelaxed::Record msg; @@ -2346,8 +2398,12 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (!tlb::csr_unpack(msg.info, info) || !block::tlb::t_CurrencyCollection.validate_csr(info.value)) { return -1; } - fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee); - ihr_fee = block::tlb::t_Grams.as_integer(info.ihr_fee); + if (cfg.disable_custom_fess) { + fwd_fee = ihr_fee = td::zero_refint(); + } else { + fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee); + ihr_fee = 
block::tlb::t_Grams.as_integer(info.ihr_fee); + } } // set created_at and created_lt to correct values info.created_at = now; @@ -2363,7 +2419,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, bool to_mc = false; if (!check_rewrite_dest_addr(info.dest, cfg, &to_mc)) { LOG(DEBUG) << "invalid destination address in a proposed outbound message"; - return skip_invalid ? 0 : 36; // invalid destination address + return check_skip_invalid(36); // invalid destination address } // fetch message pricing info @@ -2378,7 +2434,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (!ext_msg && !(act_rec.mode & 0x80) && !(act_rec.mode & 1)) { if (!block::tlb::t_CurrencyCollection.validate_csr(info.value)) { LOG(DEBUG) << "invalid value:CurrencyCollection in proposed outbound message"; - return skip_invalid ? 0 : 37; + return check_skip_invalid(37); } block::CurrencyCollection value; CHECK(value.unpack(info.value)); @@ -2395,7 +2451,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (new_funds->sgn() < 0) { LOG(DEBUG) << "not enough value to transfer with the message: all of the inbound message value has been consumed"; - return skip_invalid ? 0 : 37; + return check_skip_invalid(37); } } funds = std::min(funds, new_funds); @@ -2433,17 +2489,17 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (sstat.cells > max_cells && max_cells < cfg.size_limits.max_msg_cells) { LOG(DEBUG) << "not enough funds to process a message (max_cells=" << max_cells << ")"; collect_fine(); - return skip_invalid ? 0 : 40; + return check_skip_invalid(40); } if (sstat.bits > cfg.size_limits.max_msg_bits || sstat.cells > max_cells) { LOG(DEBUG) << "message too large, invalid"; collect_fine(); - return skip_invalid ? 0 : 40; + return check_skip_invalid(40); } if (max_merkle_depth > max_allowed_merkle_depth) { LOG(DEBUG) << "message has too big merkle depth, invalid"; collect_fine(); - return skip_invalid ? 0 : 40; + return check_skip_invalid(40); } LOG(DEBUG) << "storage paid for a message: " << sstat.cells << " cells, " << sstat.bits << " bits"; @@ -2475,7 +2531,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (!block::tlb::t_CurrencyCollection.validate_csr(info.value)) { LOG(DEBUG) << "invalid value:CurrencyCollection in proposed outbound message"; collect_fine(); - return skip_invalid ? 0 : 37; + return check_skip_invalid(37); } if (info.ihr_disabled) { // if IHR is disabled, IHR fees will be always zero @@ -2502,7 +2558,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, LOG(DEBUG) << "not enough value to transfer with the message: all of the inbound message value has been consumed"; collect_fine(); - return skip_invalid ? 0 : 37; + return check_skip_invalid(37); } } } @@ -2518,7 +2574,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, LOG(DEBUG) << "not enough value attached to the message to pay forwarding fees : have " << req.grams << ", need " << fees_total; collect_fine(); - return skip_invalid ? 
0 : 37; // not enough grams + return check_skip_invalid(37); // not enough grams } else { // decrease message value req.grams -= fees_total; @@ -2529,7 +2585,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, LOG(DEBUG) << "not enough grams to transfer with the message : remaining balance is " << ap.remaining_balance.to_str() << ", need " << req_grams_brutto << " (including forwarding fees)"; collect_fine(); - return skip_invalid ? 0 : 37; // not enough grams + return check_skip_invalid(37); // not enough grams } Ref new_extra; @@ -2539,7 +2595,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, << block::CurrencyCollection{0, req.extra}.to_str() << " required, only " << block::CurrencyCollection{0, ap.remaining_balance.extra}.to_str() << " available"; collect_fine(); - return skip_invalid ? 0 : 38; // not enough (extra) funds + return check_skip_invalid(38); // not enough (extra) funds } if (ap.remaining_balance.extra.not_null() || req.extra.not_null()) { LOG(DEBUG) << "subtracting extra currencies: " @@ -2563,7 +2619,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, LOG(DEBUG) << "outbound message does not fit into a cell after rewriting"; if (redoing == 2) { collect_fine(); - return skip_invalid ? 0 : 39; + return check_skip_invalid(39); } return -2; } @@ -2588,7 +2644,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (ap.remaining_balance.grams < fwd_fee) { LOG(DEBUG) << "not enough funds to pay for an outbound external message"; collect_fine(); - return skip_invalid ? 0 : 37; // not enough grams + return check_skip_invalid(37); // not enough grams } // repack message // ext_out_msg_info$11 constructor of CommonMsgInfo @@ -2603,7 +2659,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, LOG(DEBUG) << "outbound message does not fit into a cell after rewriting"; if (redoing == 2) { collect_fine(); - return (skip_invalid ? 0 : 39); + return check_skip_invalid(39); } return -2; } @@ -3514,7 +3570,7 @@ LtCellRef Transaction::extract_out_msg(unsigned i) { * @returns A triple of the logical time, the extracted output message and the transaction root. 
*/ NewOutMsg Transaction::extract_out_msg_ext(unsigned i) { - return {start_lt + i + 1, std::move(out_msgs.at(i)), root}; + return {start_lt + i + 1, std::move(out_msgs.at(i)), root, i}; } /** @@ -3684,6 +3740,7 @@ td::Status FetchConfigParams::fetch_config_params( compute_phase_cfg->suspended_addresses = config.get_suspended_addresses(now); compute_phase_cfg->size_limits = size_limits; compute_phase_cfg->precompiled_contracts = config.get_precompiled_contracts_config(); + compute_phase_cfg->allow_external_unfreeze = compute_phase_cfg->global_version >= 8; } { // compute action_phase_cfg @@ -3707,6 +3764,8 @@ td::Status FetchConfigParams::fetch_config_params( action_phase_cfg->size_limits = size_limits; action_phase_cfg->action_fine_enabled = config.get_global_version() >= 4; action_phase_cfg->bounce_on_fail_enabled = config.get_global_version() >= 4; + action_phase_cfg->message_skip_enabled = config.get_global_version() >= 8; + action_phase_cfg->disable_custom_fess = config.get_global_version() >= 8; action_phase_cfg->mc_blackhole_addr = config.get_burning_config().blackhole_addr; } { diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 6d8e8a29f..20d7cb291 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -66,8 +66,11 @@ struct NewOutMsg { ton::LogicalTime lt; Ref msg; Ref trans; - NewOutMsg(ton::LogicalTime _lt, Ref _msg, Ref _trans) - : lt(_lt), msg(std::move(_msg)), trans(std::move(_trans)) { + unsigned msg_idx; + td::optional metadata; + td::Ref msg_env_from_dispatch_queue; // Not null if from dispatch queue; in this case lt is emitted_lt + NewOutMsg(ton::LogicalTime _lt, Ref _msg, Ref _trans, unsigned _msg_idx) + : lt(_lt), msg(std::move(_msg)), trans(std::move(_trans)), msg_idx(_msg_idx) { } bool operator<(const NewOutMsg& other) const& { return lt < other.lt || (lt == other.lt && msg->get_hash() < other.msg->get_hash()); @@ -126,6 +129,7 @@ struct ComputePhaseConfig { bool stop_on_accept_message = false; PrecompiledContractsConfig precompiled_contracts; bool dont_run_precompiled_ = false; + bool allow_external_unfreeze{false}; ComputePhaseConfig() : gas_price(0), gas_limit(0), special_gas_limit(0), gas_credit(0) { compute_threshold(); @@ -163,6 +167,8 @@ struct ActionPhaseConfig { const WorkchainSet* workchains{nullptr}; bool action_fine_enabled{false}; bool bounce_on_fail_enabled{false}; + bool message_skip_enabled{false}; + bool disable_custom_fess{false}; td::optional mc_blackhole_addr; const MsgPrices& fetch_msg_prices(bool is_masterchain) const { return is_masterchain ? fwd_mc : fwd_std; diff --git a/crypto/common/bitstring.cpp b/crypto/common/bitstring.cpp index 5135cdf0d..52e57c9a8 100644 --- a/crypto/common/bitstring.cpp +++ b/crypto/common/bitstring.cpp @@ -640,11 +640,11 @@ long parse_bitstring_hex_literal(unsigned char* buff, std::size_t buff_size, con return bits; } -long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size, const char* str, const char* str_end) { +long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size_bits, const char* str, const char* str_end) { const char* ptr = str; - while (ptr < str_end && buff_size && (*ptr == '0' || *ptr == '1')) { + while (ptr < str_end && buff_size_bits && (*ptr == '0' || *ptr == '1')) { *buff++ = (bool)(*ptr++ & 1); - --buff_size; + --buff_size_bits; } return td::narrow_cast(ptr == str_end ? 
ptr - str : str - ptr - 1); } diff --git a/crypto/common/bitstring.h b/crypto/common/bitstring.h index dc3a2fa5b..257764788 100644 --- a/crypto/common/bitstring.h +++ b/crypto/common/bitstring.h @@ -58,7 +58,7 @@ unsigned long long bits_load_long_top(ConstBitPtr from, unsigned top_bits); long long bits_load_long(ConstBitPtr from, unsigned bits); unsigned long long bits_load_ulong(ConstBitPtr from, unsigned bits); long parse_bitstring_hex_literal(unsigned char* buff, std::size_t buff_size, const char* str, const char* str_end); -long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size, const char* str, const char* str_end); +long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size_bits, const char* str, const char* str_end); void bits_sha256(BitPtr to, ConstBitPtr from, std::size_t size); diff --git a/crypto/fift/Fift.cpp b/crypto/fift/Fift.cpp index 85511a38f..e0faca57c 100644 --- a/crypto/fift/Fift.cpp +++ b/crypto/fift/Fift.cpp @@ -49,7 +49,6 @@ td::Result Fift::interpret_istream(std::istream& stream, std::string curren } td::Result Fift::do_interpret(IntCtx& ctx, bool is_interactive) { - ctx.ton_db = &config_.ton_db; ctx.source_lookup = &config_.source_lookup; ctx.dictionary = ctx.main_dictionary = ctx.context = config_.dictionary; ctx.output_stream = config_.output_stream; diff --git a/crypto/fift/Fift.h b/crypto/fift/Fift.h index ebcf2ef4d..a17727993 100644 --- a/crypto/fift/Fift.h +++ b/crypto/fift/Fift.h @@ -19,7 +19,6 @@ #pragma once #include "SourceLookup.h" -#include "vm/db/TonDb.h" #include "Dictionary.h" #include "td/utils/Status.h" @@ -31,13 +30,11 @@ struct Fift { public: struct Config { fift::SourceLookup source_lookup; - vm::TonDb ton_db; fift::Dictionary dictionary; std::ostream* output_stream{&std::cout}; std::ostream* error_stream{&std::cerr}; bool show_backtrace{true}; }; - // Fift must own ton_db and dictionary, no concurrent access is allowed explicit Fift(Config config); td::Result interpret_file(std::string fname, std::string current_dir, bool interactive = false); diff --git a/crypto/fift/fift-main.cpp b/crypto/fift/fift-main.cpp index fd424e8cf..cdc36fc07 100644 --- a/crypto/fift/fift-main.cpp +++ b/crypto/fift/fift-main.cpp @@ -46,8 +46,6 @@ #include "SourceLookup.h" #include "words.h" -#include "vm/db/TonDb.h" - #include "td/utils/logging.h" #include "td/utils/misc.h" #include "td/utils/Parser.h" @@ -65,7 +63,6 @@ void usage(const char* progname) { "\t-I\tSets colon-separated (unix) or at-separated (windows) library source include path. 
If not indicated, " "$FIFTPATH is used instead.\n" "\t-L\tPre-loads a library source file\n" - "\t-d\tUse a ton database\n" "\t-s\tScript mode: use first argument as a fift source file and import remaining arguments as $n)\n" "\t-v\tSet verbosity level\n" "\t-V\tShow fift build information\n"; @@ -94,13 +91,12 @@ int main(int argc, char* const argv[]) { bool script_mode = false; std::vector library_source_files, source_list; std::vector source_include_path; - std::string ton_db_path; fift::Fift::Config config; int i; int new_verbosity_level = VERBOSITY_NAME(INFO); - while (!script_mode && (i = getopt(argc, argv, "hinI:L:d:sv:V")) != -1) { + while (!script_mode && (i = getopt(argc, argv, "hinI:L:sv:V")) != -1) { switch (i) { case 'i': interactive = true; @@ -115,9 +111,6 @@ int main(int argc, char* const argv[]) { case 'L': library_source_files.emplace_back(optarg); break; - case 'd': - ton_db_path = optarg; - break; case 's': script_mode = true; break; @@ -158,16 +151,6 @@ int main(int argc, char* const argv[]) { config.source_lookup.add_include_path(path); } - if (!ton_db_path.empty()) { - auto r_ton_db = vm::TonDbImpl::open(ton_db_path); - if (r_ton_db.is_error()) { - LOG(ERROR) << "Error opening ton database: " << r_ton_db.error().to_string(); - std::exit(2); - } - config.ton_db = r_ton_db.move_as_ok(); - // FIXME //std::atexit([&] { config.ton_db.reset(); }); - } - fift::init_words_common(config.dictionary); fift::init_words_vm(config.dictionary, true); // enable vm debug fift::init_words_ton(config.dictionary); diff --git a/crypto/fift/utils.cpp b/crypto/fift/utils.cpp index 68fc18c03..f37766a72 100644 --- a/crypto/fift/utils.cpp +++ b/crypto/fift/utils.cpp @@ -22,6 +22,8 @@ #include "td/utils/filesystem.h" #include "td/utils/misc.h" #include "td/utils/port/path.h" +#include "vm/boc.h" +#include namespace fift { namespace { diff --git a/crypto/fift/words.cpp b/crypto/fift/words.cpp index 8d652afcc..324f492c0 100644 --- a/crypto/fift/words.cpp +++ b/crypto/fift/words.cpp @@ -43,8 +43,6 @@ #include "vm/box.hpp" #include "vm/atom.h" -#include "vm/db/TonDb.h" // only for interpret_db_run_vm{,_parallel} - #include "block/block.h" #include "common/global-version.h" @@ -2077,23 +2075,23 @@ void interpret_bitstring_hex_literal(IntCtx& ctx) { auto s = ctx.parser->scan_word_to('}'); unsigned char buff[128]; int bits = (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), s.begin(), s.end()); - if (bits < 0) { + vm::CellBuilder cb; + if (bits < 0 || !cb.store_bits_bool(td::ConstBitPtr{buff}, bits)) { throw IntError{"Invalid hex bitstring constant"}; } - auto cs = Ref{true, vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize()}; - ctx.stack.push(std::move(cs)); + ctx.stack.push(cb.as_cellslice_ref()); push_argcount(ctx, 1); } void interpret_bitstring_binary_literal(IntCtx& ctx) { auto s = ctx.parser->scan_word_to('}'); unsigned char buff[128]; - int bits = (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff), s.begin(), s.end()); - if (bits < 0) { + int bits = (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, s.begin(), s.end()); + vm::CellBuilder cb; + if (bits < 0 || !cb.store_bits_bool(td::ConstBitPtr{buff}, bits)) { throw IntError{"Invalid binary bitstring constant"}; } - auto cs = Ref{true, vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize()}; - ctx.stack.push(std::move(cs)); + ctx.stack.push(cb.as_cellslice_ref()); push_argcount(ctx, 1); } @@ -2721,114 +2719,6 @@ void interpret_vmop_dump(vm::Stack& 
stack) { stack.push_string(std::move(dump)); } -void do_interpret_db_run_vm_parallel(std::ostream* stream, vm::Stack& stack, vm::TonDb* ton_db_ptr, int threads_n, - int tasks_n) { - if (!ton_db_ptr || !*ton_db_ptr) { - throw vm::VmError{vm::Excno::fatal, "Ton database is not available"}; - } - auto& ton_db = *ton_db_ptr; - auto txn = ton_db->begin_transaction(); - auto txn_abort = td::ScopeExit() + [&] { ton_db->abort_transaction(std::move(txn)); }; - - struct Task { - vm::Ref code; - vm::SmartContractDb smart; - td::optional diff; - td::unique_ptr guard; - Ref stack; - int res{0}; - Ref data; - std::string log; - }; - std::vector tasks(tasks_n); - std::vector threads(threads_n); - - for (auto& task : tasks) { - task.code = stack.pop_cellslice(); - auto smart_hash = td::serialize(stack.pop_smallint_range(1000000000)); - task.smart = txn->begin_smartcontract(smart_hash); - task.guard = td::create_lambda_guard([&] { txn->abort_smartcontract(std::move(task.smart)); }); - auto argsn = stack.pop_smallint_range(100); - task.stack = stack.split_top(argsn); - } - - std::atomic next_task_i{0}; - auto run_tasks = [&] { - while (true) { - auto task_i = next_task_i++; - if (task_i >= tasks_n) { - break; - } - auto& task = tasks[task_i]; - auto data = task.smart->get_root(); - - StringLogger logger; - vm::VmLog log = create_vm_log(stream ? &logger : nullptr); - - task.res = vm::run_vm_code(task.code, task.stack, 3, &data, std::move(log)); - task.smart->set_root(data); - task.diff = vm::SmartContractDiff(std::move(task.smart)); - task.data = std::move(data); - task.log = std::move(logger.res); - } - }; - - td::Timer timer; - for (auto& thread : threads) { - thread = td::thread(run_tasks); - } - run_tasks(); - for (auto& thread : threads) { - thread.join(); - } - - if (stream) { - int id = 0; - for (auto& task : tasks) { - id++; - *stream << "Task #" << id << " vm_log begin" << std::endl; - *stream << task.log; - *stream << "Task #" << id << " vm_log end" << std::endl; - } - } - - LOG(ERROR) << timer; - timer = {}; - - for (auto& task : tasks) { - auto retn = task.stack.write().pop_smallint_range(100, -1); - if (retn == -1) { - retn = task.stack->depth(); - } - stack.push_from_stack(std::move(*task.stack), retn); - stack.push_smallint(task.res); - stack.push_cell(std::move(task.data)); - task.guard->dismiss(); - if (task.diff) { - txn->commit_smartcontract(std::move(task.diff.value())); - } else { - txn->commit_smartcontract(std::move(task.smart)); - } - } - LOG(ERROR) << timer; - timer = {}; - - txn_abort.dismiss(); - ton_db->commit_transaction(std::move(txn)); - timer = {}; - LOG(INFO) << "TonDB stats: \n" << ton_db->stats(); -} - -void interpret_db_run_vm(IntCtx& ctx) { - do_interpret_db_run_vm_parallel(ctx.error_stream, ctx.stack, ctx.ton_db, 0, 1); -} - -void interpret_db_run_vm_parallel(IntCtx& ctx) { - auto threads_n = ctx.stack.pop_smallint_range(32, 0); - auto tasks_n = ctx.stack.pop_smallint_range(1000000000); - do_interpret_db_run_vm_parallel(ctx.error_stream, ctx.stack, ctx.ton_db, threads_n, tasks_n); -} - void interpret_store_vm_cont(vm::Stack& stack) { auto vmcont = stack.pop_cont(); auto cb = stack.pop_builder(); @@ -3518,8 +3408,6 @@ void init_words_vm(Dictionary& d, bool enable_debug) { // d.def_ctx_word("runvmcode ", std::bind(interpret_run_vm, _1, 0x40)); // d.def_ctx_word("runvm ", std::bind(interpret_run_vm, _1, 0x45)); d.def_ctx_word("runvmx ", std::bind(interpret_run_vm, _1, -1)); - d.def_ctx_word("dbrunvm ", interpret_db_run_vm); - d.def_ctx_word("dbrunvm-parallel ", 
interpret_db_run_vm_parallel); d.def_stack_word("vmcont, ", interpret_store_vm_cont); d.def_stack_word("vmcont@ ", interpret_fetch_vm_cont); d.def_stack_word("(vmoplen) ", interpret_vmop_len); diff --git a/crypto/func/analyzer.cpp b/crypto/func/analyzer.cpp index ec6931af0..fb05bbb4b 100644 --- a/crypto/func/analyzer.cpp +++ b/crypto/func/analyzer.cpp @@ -17,6 +17,7 @@ Copyright 2017-2020 Telegram Systems LLP */ #include "func.h" +#include "vm/boc.h" namespace funC { diff --git a/crypto/funcfiftlib/funcfiftlib.cpp b/crypto/funcfiftlib/funcfiftlib.cpp index a041c25dd..0bef9eac7 100644 --- a/crypto/funcfiftlib/funcfiftlib.cpp +++ b/crypto/funcfiftlib/funcfiftlib.cpp @@ -33,6 +33,7 @@ #include "td/utils/Status.h" #include #include +#include "vm/boc.h" td::Result compile_internal(char *config_json) { TRY_RESULT(input_json, td::json_decode(td::MutableSlice(config_json))) diff --git a/crypto/smartcont/stdlib.fc b/crypto/smartcont/stdlib.fc index 978b94738..8fb27a7ea 100644 --- a/crypto/smartcont/stdlib.fc +++ b/crypto/smartcont/stdlib.fc @@ -244,7 +244,7 @@ cont bless(slice s) impure asm "BLESS"; ;;; In other words, the current smart contract agrees to buy some gas to finish the current transaction. ;;; This action is required to process external messages, which bring no value (hence no gas) with themselves. ;;; -;;; For more details check [accept_message effects](https://ton.org/docs/#/smart-contracts/accept). +;;; For more details check [accept_message effects](https://docs.ton.org/develop/smart-contracts/guidelines/accept). () accept_message() impure asm "ACCEPT"; ;;; Sets current gas limit `gl` to the minimum of limit and `gm`, and resets the gas credit `gc` to zero. @@ -282,10 +282,10 @@ int abs(int x) asm "ABS"; It is said that a primitive _loads_ some data, if it returns the data and the remainder of the slice - (so it can also be used as [modifying method](https://ton.org/docs/#/func/statements?id=modifying-methods)). + (so it can also be used as [modifying method](https://docs.ton.org/develop/func/statements#modifying-methods)). It is said that a primitive _preloads_ some data, if it returns only the data - (it can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods)). + (it can be used as [non-modifying method](https://docs.ton.org/develop/func/statements#non-modifying-methods)). Unless otherwise stated, loading and preloading primitives read the data from a prefix of the slice. -} @@ -416,7 +416,7 @@ int builder_depth(builder b) asm "BDEPTH"; # Builder primitives It is said that a primitive _stores_ a value `x` into a builder `b` if it returns a modified version of the builder `b'` with the value `x` stored at the end of it. - It can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods). + It can be used as [non-modifying method](https://docs.ton.org/develop/func/statements#non-modifying-methods). All the primitives below first check whether there is enough space in the `builder`, and only then check the range of the value being serialized. 
diff --git a/crypto/smc-envelope/ManualDns.cpp b/crypto/smc-envelope/ManualDns.cpp index 40ce3bac5..617ab9156 100644 --- a/crypto/smc-envelope/ManualDns.cpp +++ b/crypto/smc-envelope/ManualDns.cpp @@ -105,7 +105,7 @@ td::Result> DnsInterface::EntryData::as_cell() const { return error; } if (res.is_null()) { - return td::Status::Error("Entry data is emtpy"); + return td::Status::Error("Entry data is empty"); } return res; //dns_text#1eda _:Text = DNSRecord; diff --git a/crypto/test/fift/testdb.fif b/crypto/test/fift/testdb.fif deleted file mode 100644 index cbaa98178..000000000 --- a/crypto/test/fift/testdb.fif +++ /dev/null @@ -1,102 +0,0 @@ -"Asm.fif" include - -PROGRAM{ - -NEWPROC load_dict -NEWPROC generate_dict -NEWPROC save_dict - -NEWPROC do_get -NEWPROC do_set -NEWPROC do_erase - -main PROC:<{ - DUP 1 INT EQUAL IF:<{ - DROP - do_get CALL - }>ELSE<{ - DUP 2 INT EQUAL IF:<{ - DROP - do_set CALL - }>ELSE<{ - DUP 3 INT EQUAL IF:<{ - DROP - do_erase CALL - }> }> }> - -1 INT -}> - -do_get PROC:<{ - load_dict CALL - 32 INT - DICTIGET -}> - -do_set PROC:<{ - load_dict CALL - 32 INT - DICTISET - save_dict CALL -}> - -do_erase PROC:<{ - load_dict CALL - 32 INT - DICTIDEL - DROP - save_dict CALL -}> - -generate_dict PROC:<{ - 4 INT 100 INT REPEAT:<{ - DUP 2DUP MUL ROT 617 INT ADD 1000 INT MOD - }> - DROP 100 INT - NEWDICT - SWAP REPEAT:<{ - s0 s2 XCHG - NEWC - 16 STU - s0 s2 XCHG - 32 INT - DICTISETB - }> -}> - -load_dict PROC:<{ - PUSHROOT - CTOS DUP SEMPTY IF:<{ - DROP - generate_dict CALL - }> -}> - -save_dict PROC:<{ - NEWC - STSLICE - ENDC - POPROOT -}> - -}END>s constant pmc_prog - -{ 1 2 rot pmc_prog } : task_pmc_get -{ 2 3 rot pmc_prog } : task_pmc_set -{ 3 2 rot pmc_prog } : task_pmc_erase - -{ task_pmc_get dbrunvm 2drop } : pmc_get -{ task_pmc_set dbrunvm 2drop } : pmc_set -{ task_pmc_erase dbrunvm 2drop } : pmc_erase - - > DataCell::create(td::ConstBitPtr data, unsigned bits, if (bits != 8 + hash_bytes * 8) { return td::Status::Error("Not enouch data for a Library special cell"); } + if (!refs.empty()) { + return td::Status::Error("Library special cell has a cell reference"); + } break; } diff --git a/crypto/vm/log.h b/crypto/vm/log.h index dc0199b55..c8486324e 100644 --- a/crypto/vm/log.h +++ b/crypto/vm/log.h @@ -31,7 +31,7 @@ namespace vm { struct VmLog { td::LogInterface *log_interface{td::log_interface}; td::LogOptions log_options{td::log_options}; - enum { DumpStack = 2, ExecLocation = 4, GasRemaining = 8, DumpStackVerbose = 16 }; + enum { DumpStack = 2, ExecLocation = 4, GasRemaining = 8, DumpStackVerbose = 16, DumpC5 = 32 }; int log_mask{1}; static VmLog Null() { VmLog res; diff --git a/crypto/vm/stack.cpp b/crypto/vm/stack.cpp index 697605244..aac1b6b53 100644 --- a/crypto/vm/stack.cpp +++ b/crypto/vm/stack.cpp @@ -83,6 +83,14 @@ std::string StackEntry::to_lisp_string() const { return std::move(os).str(); } +static std::string cell_to_hex(const td::Ref &cell) { + auto boc = vm::std_boc_serialize(cell); + if (boc.is_ok()) { + return td::buffer_to_hex(boc.move_as_ok().as_slice()); + } + return "???"; +} + void StackEntry::dump(std::ostream& os, bool verbose) const { switch (tp) { case t_null: @@ -94,12 +102,7 @@ void StackEntry::dump(std::ostream& os, bool verbose) const { case t_cell: if (ref.not_null()) { if (verbose) { - std::string serialized = "???"; - auto boc = vm::std_boc_serialize(as_cell()); - if (boc.is_ok()) { - serialized = td::buffer_to_hex(boc.move_as_ok().as_slice()); - } - os << "C{" << serialized << "}"; + os << "C{" << cell_to_hex(as_cell()) << "}"; } else 
{ os << "C{" << *as_cell() << "}"; } @@ -109,7 +112,12 @@ void StackEntry::dump(std::ostream& os, bool verbose) const { break; case t_builder: if (ref.not_null()) { - os << "BC{" << *as_builder() << "}"; + if (verbose) { + Ref cb = as_builder(); + os << "BC{" << cell_to_hex(cb.write().finalize_novm()) << "}"; + } else { + os << "BC{" << *as_builder() << "}"; + } } else { os << "BC{null}"; } @@ -117,7 +125,13 @@ void StackEntry::dump(std::ostream& os, bool verbose) const { case t_slice: { if (ref.not_null()) { os << "CS{"; - static_cast>(ref)->dump(os, 1, false); + if (verbose) { + CellBuilder cb; + cb.append_cellslice(as_slice()); + os << cell_to_hex(cb.finalize_novm()); + } else { + static_cast>(ref)->dump(os, 1, false); + } os << '}'; } else { os << "CS{null}"; diff --git a/crypto/vm/utils.cpp b/crypto/vm/utils.cpp index 783bf1327..52bfb0d43 100644 --- a/crypto/vm/utils.cpp +++ b/crypto/vm/utils.cpp @@ -96,10 +96,10 @@ td::Result convert_stack_entry(td::Slice str) { } if (l >= 3 && (str[0] == 'x' || str[0] == 'b') && str[1] == '{' && str.back() == '}') { unsigned char buff[128]; - int bits = - (str[0] == 'x') - ? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1) - : (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1); + int bits = (str[0] == 'x') + ? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1) + : (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, str.begin() + 2, + str.end() - 1); if (bits < 0) { return td::Status::Error("failed to parse raw b{...}/x{...} number"); } diff --git a/crypto/vm/vm.cpp b/crypto/vm/vm.cpp index 3f595a00e..fb774f80a 100644 --- a/crypto/vm/vm.cpp +++ b/crypto/vm/vm.cpp @@ -441,10 +441,16 @@ int VmState::step() { if (log.log_mask & vm::VmLog::DumpStackVerbose) { mode += 4; } + std::unique_ptr tmp_ctx; + // install temporary dummy vm state interface to prevent charging for cell load operations during dump + VmStateInterface::Guard guard(tmp_ctx.get()); stack->dump(ss, mode); VM_LOG(this) << "stack:" << ss.str(); } if (stack_trace) { + std::unique_ptr tmp_ctx; + // install temporary dummy vm state interface to prevent charging for cell load operations during dump + VmStateInterface::Guard guard(tmp_ctx.get()); stack->dump(std::cerr, 3); } ++steps; @@ -523,6 +529,13 @@ int VmState::run() { res = vmoog.get_errno(); // no ~ for unhandled exceptions (to make their faking impossible) } if (!parent) { + if ((log.log_mask & VmLog::DumpC5) && cstate.committed) { + std::stringstream ss; + ss << "final c5: "; + StackEntry::maybe(cstate.c5).dump(ss, true); + ss << "\n"; + VM_LOG(this) << ss.str(); + } return res; } restore_parent = true; diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index 6c176552f..e649c009a 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -96,4 +96,17 @@ Operations for working with Merkle proofs, where cells can have non-zero level a ### Other changes * `GLOBALID` gets `ConfigParam 19` from the tuple, not from the config dict. This decreases gas usage. -* `SENDMSG` gets `ConfigParam 24/25` (message prices) from the tuple, not from the config dict, and also uses `ConfigParam 43` to get max_msg_cells. \ No newline at end of file +* `SENDMSG` gets `ConfigParam 24/25` (message prices) from the tuple, not from the config dict, and also uses `ConfigParam 43` to get max_msg_cells. 
+ + +## Version 7 + +[Explicitly nullify](https://github.com/ton-blockchain/ton/pull/957/files) `due_payment` after due reimbursement. + +## Version 8 + +- Check mode on invalid `action_send_msg`. Ignore action if `IGNORE_ERROR` (+2) bit is set, bounce if `BOUNCE_ON_FAIL` (+16) bit is set. +- Slightly change random seed generation to fix mix of `addr_rewrite` and `addr`. +- Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent. +- Allow unfreeze through external messages. +- Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages. \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index fd98374b6..47e109dbf 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,28 +1,525 @@ -# The Open Network Node -Dockerfile for The Open Network Node +# Official TON Docker image -#### Install +1. [Dockerfile](#docker) +2. [Kubernetes deployment on-premises](#deploy-on-premises-with-metallb-load-balancer-) +3. [Kubernetes deployment on AWS](#deploy-on-aws-cloud-amazon-web-services) +4. [Kubernetes deployment on GCP](#deploy-on-gcp-google-cloud-platform) +5. [Kubernetes deployment on AliCloud](#deploy-on-ali-cloud) +6. [Troubleshooting](#troubleshooting) +## Prerequisites + +The TON node, whether it is a validator or a fullnode, requires a public IP address. +If your server is within an internal network or Kubernetes, you have to make sure that the required ports are available from the outside. + +Also pay attention to the [hardware requirements](https://docs.ton.org/participate/run-nodes/full-node) for TON fullnodes and validators. Pods and StatefulSets in this guide imply these requirements. + +It is recommended that everyone read the Docker chapter first in order to get a better understanding of the TON Docker image and its parameters. + +## Docker + +### Installation ```docker pull ghcr.io/ton-blockchain/ton:latest``` -#### Create volume -```docker volume create ton-db``` -#### Run -```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=" -e "CONSOLE_PORT=" -e "LITESERVER=true" -e "LITE_PORT=" -it ghcr.io/ton-blockchain/ton``` +### Configuration +TON validator-engine supports a number of command line parameters; +these parameters can be handed over to the container via environment variables. +Below is the list of supported arguments and their default values: + +| Argument | Description | Mandatory? | Default value | +|:------------------|:------------|:----------:|:-------------:| +| PUBLIC_IP | This will be a public IP address of your TON node. Normally it is the same IP address as your server's external IP. This also can be your proxy server or load balancer IP address. | yes | | +| GLOBAL_CONFIG_URL | TON global configuration file. Mainnet - https://ton.org/global-config.json, Testnet - https://ton.org/testnet-global.config.json | no | https://api.tontech.io/ton/wallet-mainnet.autoconf.json | +| DUMP_URL | URL to TON dump. Specify dump from https://dump.ton.org. If you are using testnet dump, make sure to download global config for testnet. | no | | +| VALIDATOR_PORT | UDP port that must be available from the outside. Used for communication with other nodes. | no | 30001 | +| CONSOLE_PORT | This TCP port is used to access validator's console.
Not necessarily to be opened for external access. | no | 30002 | +| LITE_PORT | Lite-server's TCP port. Used by lite-client. | no | 30003 | +| LITESERVER | true or false. Set to true if you want up and running lite-server. | no | false | +| STATE_TTL | Node's state will be gc'd after this time (in seconds). | no | 86400 | +| ARCHIVE_TTL | Node's archived blocks will be deleted after this time (in seconds). | no | 86400 | +| THREADS | Number of threads used by validator-engine. | no | 8 | +| VERBOSITY | Verbosity level. | no | 3 | +| CUSTOM_ARG | validator-engine might have some undocumented arguments. This is reserved for the test purposes.
For example you can pass **--logname /var/ton-work/log** in order to have log files. | no | | + +### Run the node - the quick way +The command below runs a docker container with a TON node that will start the synchronization process. + +Notice the **--network host** option: it means that the Docker container will use the network namespace of the host machine. +In this case there is no need to map ports between the host and the container. The container will use the same IP address and ports as the host. +This approach simplifies networking configuration for the container and is usually used on a dedicated server with an assigned public IP. + +Keep in mind that this option can also introduce security concerns because the container has access to the host's network interfaces directly, which might not be desirable in a multi-tenant environment. + +Check your firewall configuration and make sure that at least UDP port 43677 is publicly available. +Find out your PUBLIC_IP: +``` +curl -4 ifconfig.me +``` +and replace it in the command below: +``` +docker run -d --name ton-node -v /data/db:/var/ton-work/db \ +-e "PUBLIC_IP=" \ +-e "LITESERVER=true" \ +-e "DUMP_URL=https://dump.ton.org/dumps/latest.tar.lz" \ +--network host \ +-it ghcr.io/ton-blockchain/ton +``` +If you don't need Lite-server, then remove -e "LITESERVER=true". + +### Run the node - isolated way +In production environments it is recommended to use the **Port mapping** feature of Docker's default bridge network. +When you use port mapping, Docker allocates a specific port on the host to forward traffic to a port inside the container. +This is ideal for running multiple containers with isolated networks on the same host. +``` +docker run -d --name ton-node -v /data/db:/var/ton-work/db \ +-e "PUBLIC_IP=" \ +-e "DUMP_URL=https://dump.ton.org/dumps/latest.tar.lz" \ +-e "VALIDATOR_PORT=443" \ +-e "CONSOLE_PORT=88" \ +-e "LITE_PORT=443" \ +-e "LITESERVER=true" \ +-p 443:443/udp \ +-p 88:88/tcp \ +-p 443:443/tcp \ +-it ghcr.io/ton-blockchain/ton +``` +Adjust the ports to your needs. +Check your firewall configuration and make sure that customized ports (443/udp, 88/tcp and 443/tcp in this example) are publicly available.
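Before relying on the isolated setup, it may be worth confirming from a machine outside your network that the remapped ports are actually reachable. The snippet below is only a sketch using standard tools and the example ports above; replace PUBLIC_IP with your node's address:

```
# TCP ports can be probed directly (console on 88, lite-server on 443 in this example)
nc -vz PUBLIC_IP 88
nc -vz PUBLIC_IP 443
# UDP probes are not always conclusive, but a firewalled port usually reports unreachable
nc -vzu PUBLIC_IP 443
```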
+ +### Verify if TON node is operating correctly +After executing above command check the log files: + +```docker logs ton-node``` + +This is totally fine if in the log output for some time (up to 15 minutes) you see messages like: + +```log +failed to download proof link: [Error : 651 : no nodes] +``` + +After some time you should be able to see multiple messages similar to these below: +```log +failed to download key blocks: [Error : 652 : adnl query timeout] +last key block is [ w=-1 s=9223372036854775808 seq=34879845 rcEsfLF3E80PqQPWesW+rlOY2EpXd5UDrW32SzRWgus= C1Hs+q2Vew+WxbGL6PU1P6R2iYUJVJs4032CTS/DQzI= ] +getnextkey: [Error : 651 : not inited] +downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=1442840576 (71MB/s)downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=1442840576 (71MB/s) +finished downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=4520747390 +getnextkey: [Error : 651 : not inited] +getnextkey: [Error : 651 : not inited] +``` +As you noticed we have mounted docker volume to a local folder **/data/db**. +Go inside this folder on your server and check if its size is growing (```sudo du -h .*```) + +Now connect to the running container: +``` +docker exec -ti ton-node /bin/bash +``` +and try to connect and execute **getconfig** command via validator-engine-console: +``` +validator-engine-console -k client -p server.pub -a localhost:$(jq .control[].port <<< cat /var/ton-work/db/config.json) -c getconfig +``` +if you see a json output that means that validator-engine is up, now execute **last** command with a lite-client: +``` +lite-client -a localhost:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json) -p liteserver.pub -c last +``` +if you see the following output: +``` +conn ready +failed query: [Error : 652 : adnl query timeout] +cannot get server version and time (server too old?) +server version is too old (at least 1.1 with capabilities 1 required), some queries are unavailable +fatal error executing command-line queries, skipping the rest +``` +it means that the lite-server is up, but the node is not synchronized yet. +Once the node is synchronized, the output of **last** command will be similar to this one: + +``` +conn ready +server version is 1.1, capabilities 7 +server time is 1719306580 (delta 0) +last masterchain block is (-1,8000000000000000,20435927):47A517265B25CE4F2C8B3058D46343C070A4B31C5C37745390CE916C7D1CE1C5:279F9AA88C8146257E6C9B537905238C26E37DC2E627F2B6F1D558CB29A6EC82 +server time is 1719306580 (delta 0) +zerostate id set to -1:823F81F306FF02694F935CF5021548E3CE2B86B529812AF6A12148879E95A128:67E20AC184B9E039A62667ACC3F9C00F90F359A76738233379EFA47604980CE8 +``` +If you can't make it working, refer to the [Troubleshooting](#troubleshooting) section below. 
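To keep an eye on synchronization progress without retyping the command, the lite-client query can be re-run in a loop from inside the container. This is just a convenience sketch built from the same commands shown above:

```
# Run inside the container (docker exec -ti ton-node /bin/bash).
# The seqno in "last masterchain block" should keep growing while the node syncs.
while true; do
  lite-client -a localhost:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json) \
    -p liteserver.pub -c last 2>&1 | grep 'last masterchain block'
  sleep 60
done
```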
+### Use validator-engine-console
+```docker exec -ti ton-node /bin/bash```
+
+```validator-engine-console -k client -p server.pub -a 127.0.0.1:$(jq .control[].port <<< cat /var/ton-work/db/config.json)```
+
+### Use lite-client
+```docker exec -ti ton-node /bin/bash```
+
+```lite-client -p liteserver.pub -a 127.0.0.1:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json)```
+
+If you use lite-client outside the Docker container, copy **liteserver.pub** from the container:
+
+```docker cp ton-node:/var/ton-work/db/liteserver.pub /your/path```
+
+```lite-client -p /your/path/liteserver.pub -a <PUBLIC_IP>:<LITE_PORT>```
+
+### Stop TON docker container
+```
+docker stop ton-node
+```
+
+## Kubernetes
+### Deploy in a quick way (without load balancer)
+If the nodes within your Kubernetes cluster have external IPs,
+make sure that the PUBLIC_IP used for validator-engine matches the node's external IP.
+If all Kubernetes nodes are inside a DMZ, skip this section.
+
+#### Prepare
+If you are using the **flannel** network driver, you can find the node's IP this way:
+```yaml
+kubectl get nodes
+kubectl describe node | grep public-ip
+```
+For the **calico** driver use:
+```yaml
+kubectl describe node | grep IPv4Address
+```
+Double-check that your Kubernetes node's external IP coincides with the host's IP address:
+```
+kubectl run --image=ghcr.io/ton-blockchain/ton:latest validator-engine-pod --env="HOST_IP=1.1.1.1" --env="PUBLIC_IP=1.1.1.1"
+kubectl exec -it validator-engine-pod -- curl -4 ifconfig.me
+kubectl delete pod validator-engine-pod
+```
+If the IPs do not match, refer to the sections where load balancers are used.
+
+Now do the following:
+* Add a label to this particular node. By this label our pod will know where to be deployed and what storage to use:
+```
+kubectl label nodes <your-node-name> node_type=ton-validator
+```
+* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-node-port.yaml](ton-node-port.yaml).
+* Replace **<local-storage-path>** with a real path on the host for the Persistent Volume.
+* If you change the ports, make sure you specify the appropriate env vars in the Pod section.
+* If you want to use dynamic storage provisioning via volumeClaimTemplates, feel free to create your own StorageClass.
+
+#### Install
+```yaml
+kubectl apply -f ton-node-port.yaml
+```
+
+This deployment uses the host's network stack (**hostNetwork: true**) and a service of **NodePort** type.
+Alternatively, you can use a service of type **LoadBalancer**;
+this way the service will get a public IP assigned to the endpoints.
+
+#### Verify installation
+Check that the service endpoints were correctly created:
+
+```yaml
+kubectl get endpoints
+
+NAME                   ENDPOINTS
+validator-engine-srv   <PUBLIC_IP>:30002,<PUBLIC_IP>:30001,<PUBLIC_IP>:30003
+```
+Check the logs for the deployment status:
+```yaml
+kubectl logs validator-engine-pod
+```
+or go inside the pod and check whether the blockchain size is growing:
+```yaml
+kubectl exec --stdin --tty validator-engine-pod -- /bin/bash
+du -h .
+```
+### Deploy on-premises with MetalLB load balancer
+
+Often the Kubernetes cluster is located in a DMZ, behind a corporate firewall, with access controlled via a proxy configuration.
+In this case we can't use the host's network stack (**hostNetwork: true**) within a Pod and must proxy access to the pod manually.
+
+A **LoadBalancer** service type automatically provisions an external load balancer (such as those provided by cloud providers like AWS, GCP, Azure) and assigns a public IP address to your service. In a non-cloud environment or in a DMZ setup, you need to manually configure the load balancer.
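+
+Before choosing between the NodePort and MetalLB approaches, you can quickly check whether your cluster is able to provision a LoadBalancer at all. This is a hypothetical smoke test (the lb-probe deployment and service names are placeholders); if EXTERNAL-IP never leaves the pending state, an external load balancer implementation such as MetalLB is required:
+```
+kubectl create deployment lb-probe --image=nginx
+kubectl expose deployment lb-probe --type=LoadBalancer --port=80
+kubectl get service lb-probe --watch    # wait to see whether an EXTERNAL-IP gets assigned
+kubectl delete service lb-probe && kubectl delete deployment lb-probe
+```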
+
+If you are running your Kubernetes cluster on-premises or in an environment where an external load balancer is not automatically provided, you can use a load balancer implementation like MetalLB.
+
+#### Prepare
+Select the node where the persistent storage for the TON validator will be located.
+* Add a label to this particular node. By this label our pod will know where to be deployed:
+```
+kubectl label nodes <your-node-name> node_type=ton-validator
+```
+* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-metal-lb.yaml](ton-metal-lb.yaml).
+* Replace **<local-storage-path>** with a real path on the host for the Persistent Volume.
+* If you change the ports, make sure you specify the appropriate env vars in the Pod section.
+* If you want to use dynamic storage provisioning via volumeClaimTemplates, feel free to create your own StorageClass.
+
+* Install MetalLB:
+```yaml
+kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.5/config/manifests/metallb-native.yaml
+```
+
+* Configure MetalLB:
+Create an IPAddressPool resource to define the IP address range that MetalLB can use for external load balancer services.
+```yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  name: first-pool
+  namespace: metallb-system
+spec:
+  addresses:
+  - 10.244.1.0/24 <-- your CIDR address
+```
+Apply the configuration:
+```yaml
+kubectl apply -f metallb-config.yaml
+```
+#### Install
+
+```yaml
+kubectl apply -f ton-metal-lb.yaml
+```
+We do not use Pod Node Affinity here, since the Pod will remember the host with local storage it was bound to.
+
+#### Verify installation
+Assuming your pod network CIDR (**--pod-network-cidr**) within the cluster is 10.244.1.0/24, you can compare the output with the one below:
+```yaml
+kubectl get service
+
+NAME                   TYPE           CLUSTER-IP   EXTERNAL-IP   PORT(S)                                           AGE
+kubernetes             ClusterIP                                 443/TCP                                           28h
+validator-engine-srv   LoadBalancer   10.244.1.1                 30001:30001/UDP,30002:30002/TCP,30003:30003/TCP   60m
+```
+You can see that the endpoints are pointing to the MetalLB subnet:
+```
+kubectl get endpoints
+
+NAME                   ENDPOINTS
+kubernetes             :6443
+validator-engine-srv   10.244.1.10:30002,10.244.1.10:30001,10.244.1.10:30003
+```
+and MetalLB itself operates with the right endpoint:
+```
+kubectl describe service metallb-webhook-service -n metallb-system
+
+Name:              metallb-webhook-service
+Namespace:         metallb-system
+Selector:          component=controller
+Type:              ClusterIP
+IP:
+IPs:
+Port:              443/TCP
+TargetPort:        9443/TCP
+Endpoints:         10.244.2.3:9443   <-- CIDR
+```
+
+Use the commands from the previous chapter to check that the node operates properly.
+
+### Deploy on AWS cloud (Amazon Web Services)
+
+#### Prepare
+* AWS EKS is configured with worker nodes and the following add-ons:
+  * CoreDNS - Enable service discovery within your cluster.
+  * kube-proxy - Enable service networking within your cluster.
+  * Amazon VPC CNI - Enable pod networking within your cluster.
+* Allocate an Elastic IP.
+* Replace **<PUBLIC_IP>** with the newly created Elastic IP in [ton-aws.yaml](ton-aws.yaml).
+* Replace **<EIP_ALLOCATION_ID>** with the Elastic IP allocation ID (see the AWS console).
+* Adjust the StorageClass name. Make sure you are providing fast storage.
+
+#### Install
+
+```kubectl apply -f ton-aws.yaml```
+
+#### Verify installation
+Use the instructions from the previous sections.
+
+### Deploy on GCP (Google Cloud Platform)
+
+#### Prepare
+* Kubernetes cluster of type Standard (not Autopilot).
+* Premium static IP address.
+* Adjust firewall rules and security groups to allow ports 30001/udp, 30002/tcp and 30003/tcp (the default ones).
+* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-gcp.yaml](ton-gcp.yaml).
+* Adjust the StorageClass name. Make sure you are providing fast storage.
+
+* The load balancer will be created automatically according to the Kubernetes service in the yaml file.
+
+#### Install
+```kubectl apply -f ton-gcp.yaml```
+
+#### Verify installation
+Use the instructions from the previous sections.
+
+### Deploy on Ali Cloud
+
+#### Prepare
+* An AliCloud Kubernetes cluster.
+* An Elastic IP.
+* Replace **<EIP_ALLOCATION_ID>** with the Elastic IP allocation ID (see the AliCloud console).
+* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-ali.yaml](ton-ali.yaml) with the Elastic IP attached to your CLB.
+* Adjust the StorageClass name. Make sure you are providing fast storage.
+
+#### Install
+```kubectl apply -f ton-ali.yaml```
+
+As a result, a CLB (classic internal load balancer) will be created automatically with an assigned external IP.
+
+#### Verify installation
+Use the instructions from the previous sections.
+
+## Troubleshooting
+## Docker
+### TON node cannot synchronize and you constantly see [Error : 651 : no nodes] messages in the log
+
+Start a new container without starting validator-engine:
+
+```
+docker run -it -v /data/db:/var/ton-work/db \
+-e "HOST_IP=<PUBLIC_IP>" \
+-e "PUBLIC_IP=<PUBLIC_IP>" \
+-e "LITESERVER=true" \
+-p 30001:30001/udp \
+-p 30002:30002/tcp \
+-p 30003:30003/tcp \
+--entrypoint /bin/bash \
+ghcr.io/ton-blockchain/ton
+```
+Identify your PUBLIC_IP:
+```
+curl -4 ifconfig.me
+```
+and compare whether the resulting IP coincides with your <PUBLIC_IP>.
+If it doesn't, exit the container and launch it with the correct public IP.
+Then, inside the container, open the UDP port you plan to allocate for the TON node using the netcat utility:
+```
+nc -ul 30001
+```
+and from any **other** Linux machine check if you can reach this UDP port by sending a test message to it:
+```
+echo "test" | nc -u <PUBLIC_IP> 30001
+```
+As a result, you should receive the "test" message inside the container.
+
+If you don't get the message inside the Docker container, it means that your firewall, load balancer, NAT or proxy is blocking it.
+Ask your system administrator for assistance.
+
+In the same way you can check whether a TCP port is available:
+
+Execute ```nc -l 30003``` inside the container and test the connection from another server:
+```nc -vz <PUBLIC_IP> 30003```
+
+### Can't connect to lite-server
+* Check that the lite-server was enabled on start by passing the **"LITESERVER=true"** argument;
+* Check that the TCP port (LITE_PORT) is available from the outside. From any other Linux machine execute:
+```
+nc -vz <PUBLIC_IP> <LITE_PORT>
+```
+### How to see what traffic is generated inside the TON docker container?
+A traffic monitoring utility is available inside the container; just execute:
+```
+iptraf-ng
+```
+Other tools like **tcpdump**, **nc**, **wget**, **curl**, **ifconfig**, **pv**, **plzip**, **jq** and **netstat** are also available.
+
+### How to build the TON docker image from sources?
+```
+git clone --recursive https://github.com/ton-blockchain/ton.git
+cd ton
+docker build .
+```
+
+## Kubernetes
+### AWS
+#### After installing the AWS LB, the load balancer is still not available (pending):
+```
+kubectl get deployment -n kube-system aws-load-balancer-controller
+```
+Solution:
+
+Try to install the AWS Load Balancer Controller via ```Helm```.
+
+---
+
+#### After installing the AWS LB and running the TON node, the service shows an error:
+
+```kubectl describe service validator-engine-srv```
+
+```log
+Failed build model due to unable to resolve at least one subnet (0 match VPC and tags: [kubernetes.io/role/elb])
+```
+Solution:
+
+You haven't labeled the AWS subnets with the correct resource tags.
+
+* Public Subnets should be resource tagged with: kubernetes.io/role/elb: 1
+* Private Subnets should be tagged with: kubernetes.io/role/internal-elb: 1
+* Both private and public subnets should be tagged with: kubernetes.io/cluster/${your-cluster-name}: owned
+* or, if the subnets are also used by non-EKS resources, with: kubernetes.io/cluster/${your-cluster-name}: shared
+
+So create tags for at least one subnet:
+```
+kubernetes.io/role/elb: 1
+kubernetes.io/cluster/${your-cluster-name}: owned
+```
+---
+#### AWS Load Balancer works, but I still see ```[no nodes]``` in the validator's log
+It is required to add the security group for the EC2 instances to the load balancer along with the default security group.
+It is misleading to assume that the default security group has "everything open."
+
+Add the security group (its default name is usually something like 'launch-wizard-1'),
+and make sure you allow the ports you specified or the default ports 30001/udp, 30002/tcp and 30003/tcp.
+
+For testing purposes you can also set the inbound and outbound rules of the new security group to allow ALL ports, ALL protocols and source CIDR 0.0.0.0/0.
+
+---
+
+#### Pending PersistentVolumeClaim ```Waiting for a volume to be created either by the external provisioner 'ebs.csi.aws.com' or manually by the system administrator.```
+
+Solution:
+
+Configure the Amazon EBS CSI driver to get working PersistentVolumes in EKS.
+
+1. Enable the IAM OIDC provider:
+```
+eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=k8s-my --approve
+```
+
+2. Create the Amazon EBS CSI driver IAM role:
+```
+eksctl create iamserviceaccount \
+--region us-west-2 \
+--name ebs-csi-controller-sa \
+--namespace kube-system \
+--cluster k8s-my \
+--attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
+--approve \
+--role-only \
+--role-name AmazonEKS_EBS_CSI_DriverRole
+```
+
+3. Add the Amazon EBS CSI add-on:
+```yaml
+eksctl create addon --name aws-ebs-csi-driver --cluster k8s-my --service-account-role-arn arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):role/AmazonEKS_EBS_CSI_DriverRole --force
+```
+### Google Cloud
+#### Load Balancer cannot obtain external IP (pending)
-If you don't need Liteserver, then remove -e "LITESERVER=true".
+```
+kubectl describe service validator-engine-srv
-#### Use
-```docker exec -ti  /bin/bash```
+Events:
+Type     Reason                                 Age                  From                Message
+ ----    ------                                 ----                 ----                -------
+Warning  LoadBalancerMixedProtocolNotSupported  7m8s                 g-cloudprovider     LoadBalancers with multiple protocols are not supported.
+Normal   EnsuringLoadBalancer                   113s (x7 over 7m8s)  service-controller  Ensuring load balancer
+Warning  SyncLoadBalancerFailed                 113s (x7 over 7m8s)  service-controller  Error syncing load balancer: failed to ensure load balancer: mixed protocol is not supported for LoadBalancer
+```
+Solution:
-```./validator-engine-console -k client -p server.pub -a :```
+Create a static IP address of type Premium in the GCP console and use it as the value of the ```loadBalancerIP``` field in the Kubernetes service.
-IP:PORT is shown at start of container.
+### Ali Cloud
-#### Lite-client
-To use lite-client you need to get liteserver.pub from container.
+#### Validator logs always show
+```
+Client got error [PosixError : Connection reset by peer : 104 : Error on [fd:45]]
+[!NetworkManager][&ADNL_WARNING]	[networkmanager]: received too small proxy packet of size 21
+```
+Solution:
-```docker cp :/var/ton-work/db/liteserver.pub /your/path```
+The node is synchronizing, but very slowly.
+Try to use Network Load Balancer (NLB) instead of default CLB. -Then you can connect to it, but be sure you use right port, it's different from fullnode console port. -```lite-client -a : -p liteserver.pub``` diff --git a/docker/control.template b/docker/control.template index 857bcebcd..13b9b6b76 100644 --- a/docker/control.template +++ b/docker/control.template @@ -6,4 +6,4 @@ "permissions" : 15 } ] - } \ No newline at end of file + } diff --git a/docker/init.sh b/docker/init.sh index 1fe4f5e10..2d64690c5 100644 --- a/docker/init.sh +++ b/docker/init.sh @@ -1,30 +1,93 @@ #!/usr/bin/env bash -# global config -if [ ! -z "$GCONFURL" ]; then +if [ ! -z "$TEST" ]; then + echo -e "Running simple validator-engine test..." + validator-engine -h + test $? -eq 2 || { echo "simple validator-engine test failed"; exit 1; } + exit 0; +fi + +# global config +if [ ! -z "$GLOBAL_CONFIG_URL" ]; then echo -e "\e[1;32m[+]\e[0m Downloading provided global config." - wget -q $GCONFURL -O /var/ton-work/db/ton-global.config + wget -q $GLOBAL_CONFIG_URL -O /var/ton-work/db/ton-global.config else - echo -e "\e[1;33m[=]\e[0m No global config provided, downloading default." + echo -e "\e[1;33m[=]\e[0m No global config provided, downloading mainnet default." wget -q https://api.tontech.io/ton/wallet-mainnet.autoconf.json -O /var/ton-work/db/ton-global.config fi +if [ -z "$VALIDATOR_PORT" ]; then + VALIDATOR_PORT=30001 + echo -e "\e[1;33m[=]\e[0m Using default VALIDATOR_PORT $VALIDATOR_PORT udp" +else + echo -e "\e[1;33m[=]\e[0m Using VALIDATOR_PORT $VALIDATOR_PORT udp" +fi + # Init local config with IP:PORT if [ ! -z "$PUBLIC_IP" ]; then - if [ -z "$CONSOLE_PORT" ]; then - CONSOLE_PORT="43678" - fi - echo -e "\e[1;32m[+]\e[0m Using provided IP: $PUBLIC_IP:$CONSOLE_PORT" - validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$CONSOLE_PORT" + echo -e "\e[1;32m[+]\e[0m Using provided IP: $PUBLIC_IP:$VALIDATOR_PORT" else - echo -e "\e[1;31m[!]\e[0m No IP:PORT provided, exiting" + echo -e "\e[1;31m[!]\e[0m No PUBLIC_IP provided, exiting..." exit 1 fi +if [ ! -f "/var/ton-work/db/config.json" ]; then + echo -e "\e[1;32m[+]\e[0m Initializing validator-engine:" + echo validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$VALIDATOR_PORT" + validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$VALIDATOR_PORT" + test $? -eq 0 || { echo "Cannot initialize validator-engine"; exit 2; } +fi + +if [ ! -z "$DUMP_URL" ]; then + echo -e "\e[1;32m[+]\e[0m Using provided dump $DUMP_URL" + if [ ! -f "dump_downloaded" ]; then + echo -e "\e[1;32m[+]\e[0m Downloading dump..." + curl --retry 10 --retry-delay 30 -Ls $DUMP_URL | pv | plzip -d -n8 | tar -xC /var/ton-work/db + touch dump_downloaded + else + echo -e "\e[1;32m[+]\e[0m Dump has been already used." 
+ fi +fi + +if [ -z "$STATE_TTL" ]; then + STATE_TTL=86400 + echo -e "\e[1;33m[=]\e[0m Using default STATE_TTL $STATE_TTL" +else + echo -e "\e[1;33m[=]\e[0m Using STATE_TTL $STATE_TTL" +fi + +if [ -z "$ARCHIVE_TTL" ]; then + ARCHIVE_TTL=86400 + echo -e "\e[1;33m[=]\e[0m Using default ARCHIVE_TTL $ARCHIVE_TTL" +else + echo -e "\e[1;33m[=]\e[0m Using ARCHIVE_TTL $ARCHIVE_TTL" +fi + +if [ -z "$THREADS" ]; then + THREADS=8 + echo -e "\e[1;33m[=]\e[0m Using default THREADS $THREADS" +else + echo -e "\e[1;33m[=]\e[0m Using THREADS $THREADS" +fi + +if [ -z "$VERBOSITY" ]; then + VERBOSITY=3 + echo -e "\e[1;33m[=]\e[0m Using default VERBOSITY $VERBOSITY" +else + echo -e "\e[1;33m[=]\e[0m Using VERBOSITY $VERBOSITY" +fi + +if [ -z "$CONSOLE_PORT" ]; then + CONSOLE_PORT=30002 + echo -e "\e[1;33m[=]\e[0m Using default CONSOLE_PORT $CONSOLE_PORT tcp" +else + echo -e "\e[1;33m[=]\e[0m Using CONSOLE_PORT $CONSOLE_PORT tcp" +fi + # Generating server certificate if [ -f "./server" ]; then echo -e "\e[1;33m[=]\e[0m Found existing server certificate, skipping" -else +else echo -e "\e[1;32m[+]\e[0m Generating and installing server certificate for remote control" read -r SERVER_ID1 SERVER_ID2 <<< $(generate-random-id -m keys -n server) echo "Server IDs: $SERVER_ID1 $SERVER_ID2" @@ -32,16 +95,16 @@ else fi # Generating client certificate -if [ -f "./client" ]; then +if [ -f "./client" ]; then echo -e "\e[1;33m[=]\e[0m Found existing client certificate, skipping" else read -r CLIENT_ID1 CLIENT_ID2 <<< $(generate-random-id -m keys -n client) echo -e "\e[1;32m[+]\e[0m Generated client private certificate $CLIENT_ID1 $CLIENT_ID2" echo -e "\e[1;32m[+]\e[0m Generated client public certificate" # Adding client permissions - sed -e "s/CONSOLE-PORT/\"$(printf "%q" $CONSOLE_PORT)\"/g" -e "s~SERVER-ID~\"$(printf "%q" $SERVER_ID2)\"~g" -e "s~CLIENT-ID~\"$(printf "%q" $CLIENT_ID2)\"~g" control.template > control.new - sed -e "s~\"control\"\ \:\ \[~$(printf "%q" $(cat control.new))~g" config.json > config.json.new - mv config.json.new config.json + sed -e "s/CONSOLE-PORT/\"$(printf "%q" $CONSOLE_PORT)\"/g" -e "s~SERVER-ID~\"$(printf "%q" $SERVER_ID2)\"~g" -e "s~CLIENT-ID~\"$(printf "%q" $CLIENT_ID2)\"~g" /var/ton-work/scripts/control.template > control.new + sed -e "s~\"control\"\ \:\ \[~$(printf "%q" $(cat control.new))~g" /var/ton-work/db/config.json > config.json.new + mv config.json.new /var/ton-work/db/config.json fi # Liteserver @@ -50,20 +113,25 @@ if [ -z "$LITESERVER" ]; then else if [ -f "./liteserver" ]; then echo -e "\e[1;33m[=]\e[0m Found existing liteserver certificate, skipping" - else + else echo -e "\e[1;32m[+]\e[0m Generating and installing liteserver certificate for remote control" read -r LITESERVER_ID1 LITESERVER_ID2 <<< $(generate-random-id -m keys -n liteserver) echo "Liteserver IDs: $LITESERVER_ID1 $LITESERVER_ID2" cp liteserver /var/ton-work/db/keyring/$LITESERVER_ID1 + if [ -z "$LITE_PORT" ]; then - LITE_PORT="43679" + LITE_PORT=30003 + echo -e "\e[1;33m[=]\e[0m Using default LITE_PORT $LITE_PORT tcp" + else + echo -e "\e[1;33m[=]\e[0m Using LITE_PORT $LITE_PORT tcp" fi + LITESERVERS=$(printf "%q" "\"liteservers\":[{\"id\":\"$LITESERVER_ID2\",\"port\":\"$LITE_PORT\"}") - sed -e "s~\"liteservers\"\ \:\ \[~$LITESERVERS~g" config.json > config.json.liteservers - mv config.json.liteservers config.json + sed -e "s~\"liteservers\"\ \:\ \[~$LITESERVERS~g" /var/ton-work/db/config.json > config.json.liteservers + mv config.json.liteservers /var/ton-work/db/config.json fi fi -echo -e "\e[1;32m[+]\e[0m 
Running validator-engine" - -exec validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db \ No newline at end of file +echo -e "\e[1;32m[+]\e[0m Starting validator-engine:" +echo validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --state-ttl $STATE_TTL --archive-ttl $ARCHIVE_TTL --threads $THREADS --verbosity $VERBOSITY $CUSTOM_ARG +exec validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --state-ttl $STATE_TTL --archive-ttl $ARCHIVE_TTL --threads $THREADS --verbosity $VERBOSITY $CUSTOM_ARG diff --git a/docker/ton-ali.yaml b/docker/ton-ali.yaml new file mode 100644 index 000000000..03ffbdb0f --- /dev/null +++ b/docker/ton-ali.yaml @@ -0,0 +1,121 @@ +apiVersion: "apps/v1" +kind: StatefulSet +metadata: + name: validator-engine-pod + labels: + name: validator-engine-pod +spec: + volumeClaimTemplates: + - metadata: + name: validator-engine-pvc + spec: + storageClassName: alicloud-disk-ssd + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 800Gi + serviceName: validator-engine-srv-headless + replicas: 1 + selector: + matchLabels: + name: validator-engine-pod + template: + metadata: + labels: + name: validator-engine-pod + spec: + containers: + - name: validator-engine-container + image: ghcr.io/neodix42/ton:latest + env: + - name: PUBLIC_IP + value: "" + - name: GLOBAL_CONFIG_URL + value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json" + - name: DUMP_URL + value: "https://dump.ton.org/dumps/latest.tar.lz" + - name: LITESERVER + value: "true" + - name: VALIDATOR_PORT + value: "30001" + - name: CONSOLE_PORT + value: "30002" + - name: LITE_PORT + value: "30003" + - name: STATE_TTL + value: "86400" + - name: ARCHIVE_TTL + value: "86400" + - name: THREADS + value: "8" + - name: VERBOSITY + value: "3" + ports: + - containerPort: 30001 + protocol: UDP + - containerPort: 30002 + protocol: TCP + - containerPort: 30003 + protocol: TCP + volumeMounts: + - mountPath: "/var/ton-work/db" + name: validator-engine-pvc + resources: + requests: + memory: "64Gi" + cpu: "16" + limits: + memory: "128Gi" + cpu: "32" +--- +kind: Service +apiVersion: v1 +metadata: + name: validator-engine-srv + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-eip-ids: "" + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet" +spec: + type: LoadBalancer + externalTrafficPolicy: Local + ports: + - name: validator-udp + nodePort: 30001 + port: 30001 + targetPort: 30001 + protocol: UDP + - name: console-tcp + nodePort: 30002 + port: 30002 + targetPort: 30002 + protocol: TCP + - name: ls-tcp + nodePort: 30003 + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod +--- +apiVersion: v1 +kind: Service +metadata: + name: validator-engine-srv-headless +spec: + clusterIP: None + ports: + - name: validator-udp + port: 30001 + targetPort: 30001 + protocol: UDP + - name: console-tcp + port: 30002 + targetPort: 30002 + protocol: TCP + - name: ls-tcp + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod diff --git a/docker/ton-aws.yaml b/docker/ton-aws.yaml new file mode 100644 index 000000000..da16cbae9 --- /dev/null +++ b/docker/ton-aws.yaml @@ -0,0 +1,122 @@ +apiVersion: "apps/v1" +kind: StatefulSet +metadata: + name: validator-engine-pod + labels: + name: validator-engine-pod +spec: + volumeClaimTemplates: + - metadata: + name: 
validator-engine-pvc + spec: + storageClassName: gp2 + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 800Gi + serviceName: validator-engine-srv-headless + replicas: 1 + selector: + matchLabels: + name: validator-engine-pod + template: + metadata: + labels: + name: validator-engine-pod + spec: + containers: + - name: validator-engine-container + image: ghcr.io/neodix42/ton:latest + env: + - name: PUBLIC_IP + value: "" + - name: GLOBAL_CONFIG_URL + value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json" + - name: DUMP_URL + value: "https://dump.ton.org/dumps/latest.tar.lz" + - name: LITESERVER + value: "true" + - name: VALIDATOR_PORT + value: "30001" + - name: CONSOLE_PORT + value: "30002" + - name: LITE_PORT + value: "30003" + - name: STATE_TTL + value: "86400" + - name: ARCHIVE_TTL + value: "86400" + - name: THREADS + value: "8" + - name: VERBOSITY + value: "3" + ports: + - containerPort: 30001 + protocol: UDP + - containerPort: 30002 + protocol: TCP + - containerPort: 30003 + protocol: TCP + volumeMounts: + - mountPath: "/var/ton-work/db" + name: validator-engine-pvc + resources: + requests: + memory: "64Gi" + cpu: "16" + limits: + memory: "128Gi" + cpu: "32" +--- +kind: Service +apiVersion: v1 +metadata: + name: validator-engine-srv + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-eip-allocations: "" # Replace with your EIP allocation ID +spec: + type: LoadBalancer + ports: + - name: validator-udp + nodePort: 30001 + port: 30001 + targetPort: 30001 + protocol: UDP + - name: console-tcp + nodePort: 30002 + port: 30002 + targetPort: 30002 + protocol: TCP + - name: ls-tcp + nodePort: 30003 + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod +--- +apiVersion: v1 +kind: Service +metadata: + name: validator-engine-srv-headless +spec: + clusterIP: None + ports: + - name: validator-udp + port: 30001 + targetPort: 30001 + protocol: UDP + - name: console-tcp + port: 30002 + targetPort: 30002 + protocol: TCP + - name: ls-tcp + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod diff --git a/docker/ton-gcp.yaml b/docker/ton-gcp.yaml new file mode 100644 index 000000000..0ded5a794 --- /dev/null +++ b/docker/ton-gcp.yaml @@ -0,0 +1,134 @@ +apiVersion: "apps/v1" +kind: StatefulSet +metadata: + name: validator-engine-pod + labels: + name: validator-engine-pod +spec: + volumeClaimTemplates: + - metadata: + name: validator-engine-pvc + spec: + storageClassName: standard-rwo + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 800Gi + serviceName: validator-engine-srv-headless + replicas: 1 + selector: + matchLabels: + name: validator-engine-pod + template: + metadata: + labels: + name: validator-engine-pod + spec: + containers: + - name: validator-engine-container + image: ghcr.io/neodix42/ton:latest + env: + - name: PUBLIC_IP + value: "" + - name: GLOBAL_CONFIG_URL + value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json" + - name: DUMP_URL + value: "https://dump.ton.org/dumps/latest.tar.lz" + - name: LITESERVER + value: "true" + - name: VALIDATOR_PORT + value: "30001" + - name: CONSOLE_PORT + value: "30002" + - name: LITE_PORT + value: "30003" + - name: STATE_TTL + value: "86400" + - name: ARCHIVE_TTL + value: "86400" + - name: THREADS + value: "8" + - name: 
VERBOSITY + value: "3" + ports: + - containerPort: 30001 + protocol: UDP + - containerPort: 30002 + protocol: TCP + - containerPort: 30003 + protocol: TCP + volumeMounts: + - mountPath: "/var/ton-work/db" + name: validator-engine-pvc + resources: + requests: + memory: "64Gi" + cpu: "16" + limits: + memory: "128Gi" + cpu: "32" +--- +kind: Service +apiVersion: v1 +metadata: + name: validator-engine-srv +spec: + type: LoadBalancer + loadBalancerIP: + ports: + - port: 30001 + targetPort: 30001 + protocol: UDP + selector: + name: validator-engine-pod +--- +kind: Service +apiVersion: v1 +metadata: + name: validator-engine-console-srv +spec: + type: LoadBalancer + loadBalancerIP: + ports: + - port: 30002 + targetPort: 30002 + protocol: TCP + selector: + name: validator-engine-pod +--- +kind: Service +apiVersion: v1 +metadata: + name: lite-server-srv +spec: + type: LoadBalancer + loadBalancerIP: + ports: + - port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod +--- +apiVersion: v1 +kind: Service +metadata: + name: validator-engine-srv-headless +spec: + clusterIP: None + ports: + - name: validator-udp + port: 30001 + targetPort: 30001 + protocol: UDP + - name: console-tcp + port: 30002 + targetPort: 30002 + protocol: TCP + - name: ls-tcp + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod diff --git a/docker/ton-metal-lb.yaml b/docker/ton-metal-lb.yaml new file mode 100644 index 000000000..ceaf3a7c0 --- /dev/null +++ b/docker/ton-metal-lb.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: Pod +metadata: + name: validator-engine-pod + labels: + name: validator-engine-pod +spec: + volumes: + - name: validator-engine-pv + persistentVolumeClaim: + claimName: validator-engine-pvc + containers: + - name: validator-engine-container + image: ghcr.io/neodix42/ton:latest + env: + - name: PUBLIC_IP + value: "" + - name: GLOBAL_CONFIG_URL + value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json" + - name: DUMP_URL + value: "https://dump.ton.org/dumps/latest.tar.lz" + - name: LITESERVER + value: "true" + - name: VALIDATOR_PORT + value: "30001" + - name: CONSOLE_PORT + value: "30002" + - name: LITE_PORT + value: "30003" + - name: STATE_TTL + value: "86400" + - name: ARCHIVE_TTL + value: "86400" + - name: THREADS + value: "8" + - name: VERBOSITY + value: "3" + volumeMounts: + - mountPath: "/var/ton-work/db" + name: validator-engine-pv + resources: + requests: + memory: "64Gi" + cpu: "16" + limits: + memory: "128Gi" + cpu: "32" +--- +kind: Service +apiVersion: v1 +metadata: + name: validator-engine-srv + annotations: + metallb.universe.tf/address-pool: first-pool +spec: + type: LoadBalancer + ports: + - name: validator-engine-public-udp-port + nodePort: 30001 + port: 30001 + targetPort: 30001 + protocol: UDP + - name: validator-console-tcp-port + nodePort: 30002 + port: 30002 + targetPort: 30002 + protocol: TCP + - name: lite-server-tcp-port + nodePort: 30003 + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: validator-engine-pv + labels: + type: local +spec: + storageClassName: local-storage + capacity: + storage: 800Gi + accessModes: + - ReadWriteOnce + - ReadOnlyMany + persistentVolumeReclaimPolicy: Retain + local: + path: + nodeAffinity: + required: + 
nodeSelectorTerms: + - matchExpressions: + - key: node_type + operator: In + values: + - ton-validator +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: validator-engine-pvc +spec: + storageClassName: local-storage + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 800Gi diff --git a/docker/ton-node-port.yaml b/docker/ton-node-port.yaml new file mode 100644 index 000000000..ec594031f --- /dev/null +++ b/docker/ton-node-port.yaml @@ -0,0 +1,126 @@ +apiVersion: v1 +kind: Pod +metadata: + name: validator-engine-pod + labels: + name: validator-engine-pod +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node_type + operator: In + values: + - ton-validator + hostNetwork: true + volumes: + - name: validator-engine-pv + persistentVolumeClaim: + claimName: validator-engine-pvc + containers: + - name: validator-engine-container + image: ghcr.io/neodix42/ton:latest + env: + - name: PUBLIC_IP + value: "" + - name: GLOBAL_CONFIG_URL + value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json" + - name: DUMP_URL + value: "https://dump.ton.org/dumps/latest.tar.lz" + - name: LITESERVER + value: "true" + - name: VALIDATOR_PORT + value: "30001" + - name: CONSOLE_PORT + value: "30002" + - name: LITE_PORT + value: "30003" + - name: STATE_TTL + value: "86400" + - name: ARCHIVE_TTL + value: "86400" + - name: THREADS + value: "8" + - name: VERBOSITY + value: "3" + volumeMounts: + - mountPath: "/var/ton-work/db" + name: validator-engine-pv + resources: + requests: + memory: "64Gi" + cpu: "16" + limits: + memory: "128Gi" + cpu: "32" +--- +kind: Service +apiVersion: v1 +metadata: + name: validator-engine-srv +spec: + type: NodePort + ports: + - name: validator-engine-public-udp-port + nodePort: 30001 + port: 30001 + targetPort: 30001 + protocol: UDP + - name: validator-console-tcp-port + nodePort: 30002 + port: 30002 + targetPort: 30002 + protocol: TCP + - name: lite-server-tcp-port + nodePort: 30003 + port: 30003 + targetPort: 30003 + protocol: TCP + selector: + name: validator-engine-pod +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: validator-engine-pv + labels: + type: local +spec: + storageClassName: local-storage + capacity: + storage: 800Gi + accessModes: + - ReadWriteOnce + - ReadOnlyMany + persistentVolumeReclaimPolicy: Retain + local: + path: + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: node_type + operator: In + values: + - ton-validator +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: validator-engine-pvc +spec: + storageClassName: local-storage + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 800Gi diff --git a/emulator/CMakeLists.txt b/emulator/CMakeLists.txt index 7a4b7676b..dc8cbf62b 100644 --- a/emulator/CMakeLists.txt +++ b/emulator/CMakeLists.txt @@ -35,7 +35,7 @@ else() add_library(emulator STATIC ${EMULATOR_SOURCE} ${EMULATOR_HEADERS}) endif() -target_link_libraries(emulator PUBLIC emulator_static) +target_link_libraries(emulator PUBLIC emulator_static git) generate_export_header(emulator EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/emulator_export.h) target_include_directories(emulator PUBLIC $ @@ -48,7 +48,7 @@ if (USE_EMSCRIPTEN) add_executable(emulator-emscripten ${EMULATOR_EMSCRIPTEN_SOURCE}) 
target_link_libraries(emulator-emscripten PUBLIC emulator) target_link_options(emulator-emscripten PRIVATE -sEXPORTED_RUNTIME_METHODS=_malloc,free,UTF8ToString,stringToUTF8,allocate,ALLOC_NORMAL,lengthBytesUTF8) - target_link_options(emulator-emscripten PRIVATE -sEXPORTED_FUNCTIONS=_emulate,_free,_run_get_method,_create_emulator,_destroy_emulator,_emulate_with_emulator) + target_link_options(emulator-emscripten PRIVATE -sEXPORTED_FUNCTIONS=_emulate,_free,_run_get_method,_create_emulator,_destroy_emulator,_emulate_with_emulator,_version) target_link_options(emulator-emscripten PRIVATE -sEXPORT_NAME=EmulatorModule) target_link_options(emulator-emscripten PRIVATE -sERROR_ON_UNDEFINED_SYMBOLS=0) target_link_options(emulator-emscripten PRIVATE -Oz) diff --git a/emulator/emulator-emscripten.cpp b/emulator/emulator-emscripten.cpp index e76607c62..17639d280 100644 --- a/emulator/emulator-emscripten.cpp +++ b/emulator/emulator-emscripten.cpp @@ -254,4 +254,8 @@ const char *run_get_method(const char *params, const char* stack, const char* co return output; } +const char *version() { + return emulator_version(); +} + } \ No newline at end of file diff --git a/emulator/emulator-extern.cpp b/emulator/emulator-extern.cpp index 6d38ae59b..52c374edb 100644 --- a/emulator/emulator-extern.cpp +++ b/emulator/emulator-extern.cpp @@ -9,6 +9,7 @@ #include "tvm-emulator.hpp" #include "crypto/vm/stack.hpp" #include "crypto/vm/memo.h" +#include "git.h" td::Result> boc_b64_to_cell(const char *boc) { TRY_RESULT_PREFIX(boc_decoded, td::base64_decode(td::Slice(boc)), "Can't decode base64 boc: "); @@ -65,7 +66,18 @@ const char *external_not_accepted_response(std::string&& vm_log, int vm_exit_cod td::Result decode_config(const char* config_boc) { TRY_RESULT_PREFIX(config_params_cell, boc_b64_to_cell(config_boc), "Can't deserialize config params boc: "); - auto global_config = block::Config(config_params_cell, td::Bits256::zero(), block::Config::needWorkchainInfo | block::Config::needSpecialSmc | block::Config::needCapabilities); + auto config_dict = std::make_unique(config_params_cell, 32); + auto config_addr_cell = config_dict->lookup_ref(td::BitArray<32>::zero()); + if (config_addr_cell.is_null()) { + return td::Status::Error("Can't find config address (param 0) is missing in config params"); + } + auto config_addr_cs = vm::load_cell_slice(std::move(config_addr_cell)); + if (config_addr_cs.size() != 0x100) { + return td::Status::Error(PSLICE() << "configuration parameter 0 with config address has wrong size"); + } + ton::StdSmcAddress config_addr; + config_addr_cs.fetch_bits_to(config_addr); + auto global_config = block::Config(config_params_cell, std::move(config_addr), block::Config::needWorkchainInfo | block::Config::needSpecialSmc | block::Config::needCapabilities); TRY_STATUS_PREFIX(global_config.unpack(), "Can't unpack config params: "); return global_config; } @@ -76,8 +88,17 @@ void *transaction_emulator_create(const char *config_params_boc, int vm_log_verb LOG(ERROR) << global_config_res.move_as_error().message(); return nullptr; } + auto global_config = std::make_shared(global_config_res.move_as_ok()); + return new emulator::TransactionEmulator(std::move(global_config), vm_log_verbosity); +} - return new emulator::TransactionEmulator(global_config_res.move_as_ok(), vm_log_verbosity); +void *emulator_config_create(const char *config_params_boc) { + auto config = decode_config(config_params_boc); + if (config.is_error()) { + LOG(ERROR) << "Error decoding config: " << config.move_as_error(); + return nullptr; + 
} + return new block::Config(config.move_as_ok()); } const char *transaction_emulator_emulate_transaction(void *transaction_emulator, const char *shard_account_boc, const char *message_boc) { @@ -319,7 +340,21 @@ bool transaction_emulator_set_config(void *transaction_emulator, const char* con return false; } - emulator->set_config(global_config_res.move_as_ok()); + emulator->set_config(std::make_shared(global_config_res.move_as_ok())); + + return true; +} + +void config_deleter(block::Config* ptr) { + // We do not delete the config object, since ownership management is delegated to the caller +} + +bool transaction_emulator_set_config_object(void *transaction_emulator, void* config) { + auto emulator = static_cast(transaction_emulator); + + std::shared_ptr config_ptr(static_cast(config), config_deleter); + + emulator->set_config(config_ptr); return true; } @@ -461,6 +496,13 @@ bool tvm_emulator_set_c7(void *tvm_emulator, const char *address, uint32_t unixt return true; } +bool tvm_emulator_set_config_object(void* tvm_emulator, void* config) { + auto emulator = static_cast(tvm_emulator); + auto global_config = std::shared_ptr(static_cast(config), config_deleter); + emulator->set_config(global_config); + return true; +} + bool tvm_emulator_set_prev_blocks_info(void *tvm_emulator, const char* info_boc) { auto emulator = static_cast(tvm_emulator); @@ -672,3 +714,16 @@ const char *tvm_emulator_send_internal_message(void *tvm_emulator, const char *m void tvm_emulator_destroy(void *tvm_emulator) { delete static_cast(tvm_emulator); } + +void emulator_config_destroy(void *config) { + delete static_cast(config); +} + +const char* emulator_version() { + auto version_json = td::JsonBuilder(); + auto obj = version_json.enter_object(); + obj("emulatorLibCommitHash", GitMetadata::CommitSHA1()); + obj("emulatorLibCommitDate", GitMetadata::CommitDate()); + obj.leave(); + return strdup(version_json.string_builder().as_cslice().c_str()); +} diff --git a/emulator/emulator-extern.h b/emulator/emulator-extern.h index b418e5b0d..e69a9cb0b 100644 --- a/emulator/emulator-extern.h +++ b/emulator/emulator-extern.h @@ -16,6 +16,13 @@ extern "C" { */ EMULATOR_EXPORT void *transaction_emulator_create(const char *config_params_boc, int vm_log_verbosity); +/** + * @brief Creates Config object from base64 encoded BoC + * @param config_params_boc Base64 encoded BoC serialized Config dictionary (Hashmap 32 ^Cell) + * @return Pointer to Config object or nullptr in case of error + */ +EMULATOR_EXPORT void *emulator_config_create(const char *config_params_boc); + /** * @brief Set unixtime for emulation * @param transaction_emulator Pointer to TransactionEmulator object @@ -49,7 +56,7 @@ EMULATOR_EXPORT bool transaction_emulator_set_rand_seed(void *transaction_emulat EMULATOR_EXPORT bool transaction_emulator_set_ignore_chksig(void *transaction_emulator, bool ignore_chksig); /** - * @brief Set unixtime for emulation + * @brief Set config for emulation * @param transaction_emulator Pointer to TransactionEmulator object * @param config_boc Base64 encoded BoC serialized Config dictionary (Hashmap 32 ^Cell) * @return true in case of success, false in case of error @@ -57,7 +64,15 @@ EMULATOR_EXPORT bool transaction_emulator_set_ignore_chksig(void *transaction_em EMULATOR_EXPORT bool transaction_emulator_set_config(void *transaction_emulator, const char* config_boc); /** - * @brief Set unixtime for emulation + * @brief Set config for emulation + * @param transaction_emulator Pointer to TransactionEmulator object + * @param config 
Pointer to Config object + * @return true in case of success, false in case of error + */ +EMULATOR_EXPORT bool transaction_emulator_set_config_object(void *transaction_emulator, void* config); + +/** + * @brief Set libraries for emulation * @param transaction_emulator Pointer to TransactionEmulator object * @param libs_boc Base64 encoded BoC serialized shared libraries dictionary (HashmapE 256 ^Cell). * @return true in case of success, false in case of error @@ -167,6 +182,14 @@ EMULATOR_EXPORT bool tvm_emulator_set_libraries(void *tvm_emulator, const char * */ EMULATOR_EXPORT bool tvm_emulator_set_c7(void *tvm_emulator, const char *address, uint32_t unixtime, uint64_t balance, const char *rand_seed_hex, const char *config); +/** + * @brief Set config for TVM emulator + * @param tvm_emulator Pointer to TVM emulator + * @param config Pointer to Config object + * @return true in case of success, false in case of error + */ +EMULATOR_EXPORT bool tvm_emulator_set_config_object(void* tvm_emulator, void* config); + /** * @brief Set tuple of previous blocks (13th element of c7) * @param tvm_emulator Pointer to TVM emulator @@ -278,6 +301,17 @@ EMULATOR_EXPORT const char *tvm_emulator_send_internal_message(void *tvm_emulato */ EMULATOR_EXPORT void tvm_emulator_destroy(void *tvm_emulator); +/** + * @brief Destroy Config object + * @param tvm_emulator Pointer to Config object + */ +EMULATOR_EXPORT void emulator_config_destroy(void *config); + +/** + * @brief Get git commit hash and date of the library + */ +EMULATOR_EXPORT const char* emulator_version(); + #ifdef __cplusplus } // extern "C" #endif diff --git a/emulator/emulator_export_list b/emulator/emulator_export_list index 93f5dbac8..feb653e2f 100644 --- a/emulator/emulator_export_list +++ b/emulator/emulator_export_list @@ -4,6 +4,7 @@ _transaction_emulator_set_lt _transaction_emulator_set_rand_seed _transaction_emulator_set_ignore_chksig _transaction_emulator_set_config +_transaction_emulator_set_config_object _transaction_emulator_set_libs _transaction_emulator_set_debug_enabled _transaction_emulator_set_prev_blocks_info @@ -11,9 +12,12 @@ _transaction_emulator_emulate_transaction _transaction_emulator_emulate_tick_tock_transaction _transaction_emulator_destroy _emulator_set_verbosity_level +_emulator_config_create +_emulator_config_destroy _tvm_emulator_create _tvm_emulator_set_libraries _tvm_emulator_set_c7 +_tvm_emulator_set_config_object _tvm_emulator_set_prev_blocks_info _tvm_emulator_set_gas_limit _tvm_emulator_set_debug_enabled @@ -22,3 +26,4 @@ _tvm_emulator_send_external_message _tvm_emulator_send_internal_message _tvm_emulator_destroy _tvm_emulator_emulate_run_method +_emulator_version diff --git a/emulator/test/emulator-tests.cpp b/emulator/test/emulator-tests.cpp new file mode 100644 index 000000000..24394b498 --- /dev/null +++ b/emulator/test/emulator-tests.cpp @@ -0,0 +1,402 @@ +#include "td/utils/tests.h" + +#include "block/block-auto.h" +#include "block/block.h" +#include "block/block-parse.h" + +#include "crypto/vm/boc.h" + +#include "td/utils/base64.h" +#include "td/utils/crypto.h" +#include "td/utils/JsonBuilder.h" + +#include "smc-envelope/WalletV3.h" + +#include "emulator/emulator-extern.h" + +// testnet config as of 27.06.24 +const char *config_boc = "te6cckICAl8AAQAANecAAAIBIAABAAICAtgAAwAEAgL1AA0ADgIBIAAFAAYCAUgCPgI/AgEgAAcACAIBSAAJAAoCASAAHgAfAgEgAGUAZgIBSAALAAwCAWoA0gDTAQFI" + "AJIBAUgAsgEDpDMADwIBbgAQABEAQDPAueB1cC0DTaIjG28I/scJsoxoIScEE9LNtuiQoYa2AgOuIAASABMBA7LwABoBASAAFAEBIAAYAQHAABUCAWoAFgAXAIm/VzGV" + 
"o387z8N7BhdH91LBHMMhBLu7nv21jwo9wtTSXQIBABvI0aFLnw2QbZgjMPCLRdtRHxhUyinQudg6sdiohIwgwCAAQ79oJ47o6vzJDO5wV60LQESEyBcI3zuSSKtFQIlz" + "hk86tAMBg+mbgbrrZVY0qEWL8HxF+gYzy9t5jLO50+QkJ2DWbWFHj0Qaw5TPlNDYOnY0A2VNeAnS9bZ98W8X7FTvgVqStlmABAAZAIOgCYiOTH0TnIIa0oSKjkT3CsgH" + "NUU1Iy/5E472ortANeCAAAAAAAAAAAAAAAAROiXXYZuWf8AAi5Oy+xV/i+2JL9ABA6BgABsCASAAHAAdAFur4AAAAAAHGv1JjQAAEeDul1fav9HZ8+939/IsLGZ46E5h" + "3qjR13yIrB8mcfbBAFur/////8AHGv1JjQAAEeDul1fav9HZ8+939/IsLGZ46E5h3qjR13yIrB8mcfbBAgEgACAAIQIBIAAzADQCASAAIgAjAgEgACkAKgIBIAAkACUB" + "AUgAKAEBIAAmAQEgACcAQFVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVAEAzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMwBAAQEBAQEBAQEBAQEB" + "AQEBAQEBAQEBAQEBAQEBAQEBAQECASAAKwAsAQFYAC8BASAALQEBIAAuAEDv5x0Thgr6pq6ur2NvkWhIf4DxAxsL+Nk5rknT6n99oABTAf//////////////////////" + "////////////////////gAAAAIAAAAFAAQHAADACASAAMQAyABW+AAADvLNnDcFVUAAVv////7y9GpSiABACASAANQA2AgEgADcAOAIBIABCAEMCASAATgBPAgEgADkA" + "OgIBIAA+AD8BASAAOwEBIAA9AQHAADwAt9BTLudOzwABAnAAKtiftocOhhpk4QsHt8jHSWwV/O7nxvFyZKUf75zoqiN3Bfb/JZk7D9mvTw7EDHU5BlaNBz2ml2s54kRz" + "l0iBoQAAAAAP////+AAAAAAAAAAEABMaQ7msoAEBIB9IAQEgAEABASAAQQAUa0ZVPxAEO5rKAAAgAAAcIAAACWAAAAC0AAADhAEBIABEAQEgAEUAGsQAAAAGAAAAAAAA" + "AC4CA81AAEYARwIBIABVAEgAA6igAgEgAEkASgIBIABLAEwCASAATQBdAgEgAFsAXgIBIABbAFsCAUgAYQBhAQEgAFABASAAYgIBIABRAFICAtkAUwBUAgm3///wYABf" + "AGACASAAVQBWAgFiAFwAXQIBIABgAFcCAc4AYQBhAgEgAFgAWQIBIABaAF4CASAAXgBbAAFYAgEgAGEAYQIBIABeAF4AAdQAAUgAAfwCAdQAYQBhAAEgAgKRAGMAZAAq" + "NgIGAgUAD0JAAJiWgAAAAAEAAAH0ACo2BAcDBQBMS0ABMS0AAAAAAgAAA+gCASAAZwBoAgEgAHoAewIBIABpAGoCASAAcABxAgEgAGsAbAEBSABvAQEgAG0BASAAbgAM" + "AB4AHgADADFgkYTnKgAHEcN5N+CAAGteYg9IAAAB4AAIAE3QZgAAAAAAAAAAAAAAAIAAAAAAAAD6AAAAAAAAAfQAAAAAAAPQkEACASAAcgBzAgEgAHYAdwEBIAB0AQEg" + "AHUAlNEAAAAAAAAAZAAAAAAAD0JA3gAAAAAnEAAAAAAAAAAPQkAAAAAAAhYOwAAAAAAAACcQAAAAAAAmJaAAAAAABfXhAAAAAAA7msoAAJTRAAAAAAAAAGQAAAAAAACc" + "QN4AAAAAAZAAAAAAAAAAD0JAAAAAAAAPQkAAAAAAAAAnEAAAAAAAmJaAAAAAAAX14QAAAAAAO5rKAAEBIAB4AQEgAHkAUF3DAAIAAAAIAAAAEAAAwwAATiAAAYagAAJJ" + "8MMAAAPoAAATiAAAJxAAUF3DAAIAAAAIAAAAEAAAwwAehIAAmJaAATEtAMMAAABkAAATiAAAJxACAUgAfAB9AgEgAIAAgQEBIAB+AQEgAH8AQuoAAAAAAJiWgAAAAAAn" + "EAAAAAAAD0JAAAAAAYAAVVVVVQBC6gAAAAAABhqAAAAAAAGQAAAAAAAAnEAAAAABgABVVVVVAgEgAIIAgwEBWACGAQEgAIQBASAAhQAkwgEAAAD6AAAA+gAAA+gAAAAP" + "AErZAQMAAAfQAAA+gAAAAAMAAAAIAAAABAAgAAAAIAAAAAQAACcQAQHAAIcCASAAiACJAgFIAIoAiwIBagCQAJEAA9+wAgFYAIwAjQIBIACOAI8AQb7c3f6FapnFy4B4" + "QZnAdwvqMfKODXM49zeESA3vRM2QFABBvrMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzM4AEG+tWede5qpBXVOzaq9SvpqBpwzTJ067Hk01rWZxT5wQ7gAQb8a" + "Yme1MOiTF+EsYXWNG8wYLwlq/ZXmR6g2PgSXaPOEegBBvzSTEofK4j4twU1E7XMbFoxvESypy3LTYwDOK8PDTfsWASsSZn08y2Z9WOsAEAAQD/////////3AAJMCAswA" + "lACVAgEgAJYAlwIBIACkAKUCASAAmACZAgEgAJ4AnwIBIACaAJsCASAAnACdAJsc46BJ4rulpzksHMZaJjfdtBExV1HRdikp9U7VlmJllrEaW2TYAFmAXnBlZIRH4Sqp" + "CbKkE6v60jyawOEYfVWJDgHg5kDaLMWq7kWQy6AAmxzjoEniuRloX7kgG9FNmRyw/AB/KERuToZdY5v8AHv9JJ8bCIKAWYBecGVkhEt/mk7tOEXbKUWuqIz/1NliY9sm" + "KNHFQimyb79WXudTIACbHOOgSeK0/SaSD6j2aEnWfmW/B7LOQBq2QiiBlnaLIzfq+J2HM0BZgF5wZWSEWPYUSh0McOyjsLL8prcsF5RNab+7jLN/5bOme1r98c8gAJsc" + "46BJ4rT4ptGRb52wRyHzhe/A8y/IQOC/W5R5aC6/l1IM4f/EgFmAXnBlZIRmDW7+WN70SpQsfX5DetODFOpW6zjCBx7cDf6E+rEipKACASAAoAChAgEgAKIAowCbHOOg" + "SeKqqZCAjJ16vfAa2GI9Dcp/I9zBTG2CwPqbx22lq00uLoBZgF5wZWSETeqWp7jqIGPuCYnPZSlQ1fMuSS4e1gF/i9uIeD8GEkNgAJsc46BJ4rugeQAFCtwRUJhvWRbx" + "smlpXTdXCio8SJSBdH/6VPCkAFmAXnBlZIRQPeE6JpjzEwkPI2mvCM1sDTcny96f2dhZ2DcBQmmywCAAmxzjoEnimDpTGClVkh/V+/mJmKVKEpdp4MvFgP5onw6saJRD" + 
"QApAWYBecGVkhElWAHSIgIhlXt+lUyQjmndd50temeILBd7WJwjjWBeIIACbHOOgSeKtcjPEr2gq3gMraY11K9Ikv1SPcVaj3veDWrY1o4nxKcBZgF5wZWSEabqKQLtX" + "PIkaYDaKvupB8EOxFDWpuMaJJVqafjw4h4sgAgEgAKYApwIBIACsAK0CASAAqACpAgEgAKoAqwCbHOOgSeK8POt5lMj96a3WrXWw7peFtWWh5oi9wsZqXRsrnHM4eoBZ" + "gF5wZWSEXlJk0ILG3LG9zsmxXf+r2OTayqr9FSKLBt9LJAow+aBgAJsc46BJ4qjb23m1w/0EvFl179XCQUUMk32z0kjSh+t6V2jnnqeFwFmAXnBlZIR2KWk8cqZgC06K" + "AphhfzE3VceQWtppAGEbybk06szO9KAAmxzjoEnihVEG74vb19K1l5o8WtWa0dH/gTPfytoA1LsVXR3ztfgAWYBecGVkhEVHN0AzKnDpKLX5P7Tnay/Ogc4rxeoks/yh" + "U3aWhEnGIACbHOOgSeKNl8PpsnZjGIy1CTzi01K8MhvQAEhGlzUDwj2ACC/yFUALGRulQuFOdHw2ulDcYktF860U0mFOYFaQPC7MVNbEeSsk45C9tSPgAgEgAK4ArwIB" + "IACwALEAmxzjoEnivAzuiTw+hkcXtw4XyJGYavfPayk6ehceV8FqrxrzKbQACMou1fGNuRpwF6ilPaS03+BSsz0YID1gpIkGozQp7gRFcQsyZFvVYACbHOOgSeKsoYF9" + "T9f0ArrtFxbViCRmpw2DsDzrllY35uHzP9DEosAICQwVUUQOx01jZ84Uy8ccqQ90Ml6tj5Sw14wOK055ds2sYSPy532gAJsc46BJ4piyhqkrUrk/KUOony6llV0S+DnZ" + "xDLdccZzKJ7bV+XiAAeBJKPSjdajMGMdZwRvewwnwsyc/7uHN718Pd8cHn7VQG1i9BJSeaAAmxzjoEnihY8aTVKeJnW4JHbfVPfkJwElQXxxqG94pNWmN6n9I5jABA51" + "90xtZChBtmQcmPHlOmtU6aLeZ+HBY7/jW6AMz26cNcymYyIuIAErEmZ9WOtmfXULABAAEA/////////3wACzAgLMALQAtQIBIAC2ALcCASAAxADFAgEgALgAuQIBIAC+" + "AL8CASAAugC7AgEgALwAvQCbHOOgSeK5Nyl3TF7AOD2UwhNOh+y3h9P5e0emd2zjffbNatQR1EBS4qdSDsPAZjIVSudNcsvyCAIbiOyNPYmj/MJG5lMjVLkYt4TIEDCg" + "AJsc46BJ4q0qr9PzfnnT+A41FG5Owo+9L+LsuT6PrQkuoR7XsLMzgFLioMqMr4sLf5pO7ThF2ylFrqiM/9TZYmPbJijRxUIpsm+/Vl7nUyAAmxzjoEnisgCK09re8agW" + "Ee8S6q329jm1WbZoHBHjO9oP0q3qItiAUuKgyoyviwfhKqkJsqQTq/rSPJrA4Rh9VYkOAeDmQNosxaruRZDLoACbHOOgSeKeKPVNUBZ96hhTOP8lp1kiAm2wfuT0HIxn" + "lw/0cyISP8BS4qDKjK+LGPYUSh0McOyjsLL8prcsF5RNab+7jLN/5bOme1r98c8gAgEgAMAAwQIBIADCAMMAmxzjoEnip+PTCe8vsapzyPHm88uO5qKBwt9yvn+S6aJW" + "OlcBqeDAUuKgyoyviyYNbv5Y3vRKlCx9fkN604MU6lbrOMIHHtwN/oT6sSKkoACbHOOgSeKwOTDV9phg7jYWvy7bbTD8N773bX9y1P7lxC7vtvdbvsBS4qDKjK+LDeqW" + "p7jqIGPuCYnPZSlQ1fMuSS4e1gF/i9uIeD8GEkNgAJsc46BJ4opGGis7tEqqLAW2742I2ugw5S5lFxeYpc4D9f/qbOMhwFLioMqMr4sQPeE6JpjzEwkPI2mvCM1sDTcn" + "y96f2dhZ2DcBQmmywCAAmxzjoEniqGUvGQXdvzVXTq/g3DpDkom5aqVipETXzq2o+FZdGDfAUuKgyoyviwlWAHSIgIhlXt+lUyQjmndd50temeILBd7WJwjjWBeIIAIB" + "IADGAMcCASAAzADNAgEgAMgAyQIBIADKAMsAmxzjoEnihA6ouVC73YehzpHoNBKL8q3Gp4YbwxOBhJdxpNWePHwAUuKgyoyviym6ikC7VzyJGmA2ir7qQfBDsRQ1qbjG" + "iSVamn48OIeLIACbHOOgSeKr2ACjLl9IlajrtDqvMLD+lfOMRQvmZAaL2NVDooVPYQBS4qDKjK+LHlJk0ILG3LG9zsmxXf+r2OTayqr9FSKLBt9LJAow+aBgAJsc46BJ" + "4oohDH+XJf2EoPKNkp+gv/WG2UonjUWXV+B/IvWUldUuQFLioMqMr4s2KWk8cqZgC06KAphhfzE3VceQWtppAGEbybk06szO9KAAmxzjoEnilP2IvoMbkK7LwTeBBX8u" + "dYI608SRo4nDIg7XUWQf2CYAUuKgyoyviwVHN0AzKnDpKLX5P7Tnay/Ogc4rxeoks/yhU3aWhEnGIAIBIADOAM8CASAA0ADRAJsc46BJ4qS3beCYCuu47Ohag9xU5wk6" + "/1uLtI/5NZ+VaqSyKsGdAApHFgZLFGK0fDa6UNxiS0XzrRTSYU5gVpA8LsxU1sR5KyTjkL21I+AAmxzjoEnivJI7eg6kFGx7dvMX7Xzoog/s5cwHxrcfec5z8/aP/8kA" + "CFtq86KYH4dNY2fOFMvHHKkPdDJerY+UsNeMDitOeXbNrGEj8ud9oACbHOOgSeKlwkl68jfkl6kGCq/tElh6bM85sFBPnt7exnkRJq68iQAG+mnlyjEXYzBjHWcEb3sM" + "J8LMnP+7hze9fD3fHB5+1UBtYvQSUnmgAJsc46BJ4oYswn2e5gWf+Va6NJ+K8sfz4qIHmVG2ryktqCkE9P8hQAPDhRot06toQbZkHJjx5TprVOmi3mfhwWO/41ugDM9u" + "nDXMpmMiLiABASAA1AEBIAD6AQsAtb0+sEAA1QIBIADWANcCA8H4ANgA2QID4fgA+AD5AgEgAPwA/QIBIADaANsCASAA3ADdAgEgAbgBuQIBIAGQAZECASAA3gDfAgEg" + "AOAA4QIBIADqAOsAQb7edpH5xbuqiZNqTG9H7flTOIfNiYtDxI5AH4T6G4tcVAIBIADiAOMAQb6U4RvTn2B6e+8nmlEv/eZoRz1YKr3qyDudETjcrMFgKAIBIADkAOUC" + "ASAA5gDnAgEgAOgA6QBBvgukN4cHaqlFuawJv/TGaxhU3HU2B5iu8cZPVMOseQOgAEG+K7U1xAKEqaBEZoqjpyAnvSx8Z9jfPTeAR/anR5axvmAAQb4tEpbKJaulevOY" + 
"XQPqlmgiMgHDU6C6X7KRxpFyzPf0YABBvjbzLj0Z1oudyhyW/QhJ0OUxRj9zEM8Y1YUI9Py3ga6gAgFqAOwA7QIBIADuAO8AQb4JmTypqySHVMVJMHWspb3xrs2Lrdy4" + "eJ+M7QxpbS4cIABBvgOb8O+4IZEUWqtnRGQ8JpMkMBocpZyk/do3d/9MYnVgAgEgAPAA8QBBvqQeZ13QP0lszxNKt380fCWuaV94vwC/bfuqmrlg1/fIAgEgAPIA8wIB" + "IAD0APUAQb4G2ph6AS/mD/+cIv4aIYm1z5jAgCW/TTDEr72ygXOP4ABBvhBZkdUWyc1zdg9Fhp9QSsWD+LSyXChKLJOiMF3rVNqgAgEgAPYA9wBBvhsYuojZc90oYnM2" + "WQ+c6cHdiTDRBD2UgxkJlbkZa+mgAEG9wBVbqgGsx1Pog5dkmDyUl4VIe1ZME2BEDY6zMNoQYsAAQb3R4obtqmXfb1H2NxdElqeDuWD4d+Y73ozNJ7dE4jGfQAIBIAHw" + "AfECASACGAIZAQPAwAD7AFWgESjR4FjxyuEAXHMvOQot+HG+D9TtSQavwKbeV09n3G92AAAAAAAAAH0QAgEgAP4A/wIBIAEcAR0CASABAAEBAgEgAR4BHwIBIAECAQMC" + "ASABEAERAgEgAQQBBQIBIAEIAQkCAWIBBgEHAEG+tp/96j2CYcuIRGkfljl5uv/Pilfg3KwCY8xwdr1JdqgAA97wAEG99o5GkuI7pwd5/g4Lt+avHh31l5WoNTndbJgd" + "dTJBicACAUgBCgELAgEgAQwBDQBBvgIKjJdXg0pHrRIfDgYLQ20dIU6mEbDa1FxtUXy9B6rgAEG+Cev2EcR/qY3lMYZ3tIojHR5s+wWySfwNg7XZgP23waACASABDgEP" + "AEG+fZGfOd+cHGx01cd8+xQAwUjfI/VrANsfVPw1jZFJhTAAQb4y2lPdHZUPm695Z+bh0Z1dcta4xXX7fl6dlc2SXOliIABBvhfW5EoZl/I8jARohetHRk6pp1y3mrXR" + "28rFYjHHtJCgAgFqARIBEwIBIAEUARUAQb4zE+Nef80O9dLZy91HfPiOb6EEQ8YqyWKyIU+KeaYLIABBvgPcWeL0jqPxd5IiX7AAYESGqFqZ7o60BjQZJwpPQP1gAgEg" + "ARYBFwBBvofANH7PG2eeTdX5Vr2ZUebxCfwJyzBCE4oriUVRU3jIAgEgARgBGQIBIAEaARsAQb4btDCZEGRAOXaB6WwVqFzYTd1zZgyp15BIuy9n029k4ABBvimf97Kd" + "WV/siLZ3qM/+nVRE+t0X0XdLsOK51DJ6WSPgAEG+CQrglDQDcC3b6lTaIr2tVPRR4RlxVAwxYNcF+6BkvaAAQb4mML93xvUT+iBDJrOfhiRGSs3vOczEy9DJAbuCb7aU" + "4AIBIAFAAUECASABYAFhAgEgASABIQIBIAE0ATUCASABIgEjAgFYAS4BLwIBIAEkASUAQb6L1UE7T5lmGOuEiyPgykuqAW0ENCaxjsi4fdzZq2D0GAICcAEmAScCASAB" + "KAEpAD+9QolK/7nMhu3MO9bzK31P7DqSFoQkLyeYP3RWz5f3KwA/vVaiOV3iXF+2BW0R7uGwqmnXP7y0cjEHibQT6v4MssECASABKgErAgV/rWABLAEtAEG96YUi7d3r" + "hTwVGwv/pocif6dNQ6DcZ3JVzvqdhFltQ0AAQb3zT7C1dlWQlR1QmfrLfaGi5Sj94Guq/gLQXakuFmoVwAA/u8n6yK+GpbUUdG9dja4DHHLGGEu5ZXb6rUHFOFMS7kAA" + "P7v3dUiUhgaZGC+mdUGyJEzagm0IMNe3d2Q1lCRBTK5AAEG+co6LJmQv3h46OSV3KsT2gWyv6MLPKOrfIXFt86dsXVACASABMAExAEG+KQF+kzAAZybpH/1z1zYof09W" + "YAAY6MbQHDj3AO9dCGACASABMgEzAEG9xJZFhUbajV1FgRPu0X8LSHY3DIBRmI4wC6uLpNG5lkAAQb3/+UXNzozn7Eb1PsCLs8NaD2VhG+9qBBlvLJG76KkTQAIBIAE2" + "ATcCASABPgE/AgEgATgBOQIBYgE8AT0AQb5l6UC6/ZmwRTHlWwthzsJcYx+8Vj2vmom9/nu617FmkAIBIAE6ATsAQb4J64Df7Vfb8/jmlGnsZByGAdCsEWA/FfWXyVEU" + "5d6CoABBvhv0Q/VEAfHxjnYRJRxb6xtGetqoO1OgjstzC/3Ok41gAEG964EWqVOQS0JWHUcxnAz6STWs7+BsROmocJCo+xmqe0AAQb3vR9oRALXcwLQPRb70F/gP7SAV" + "WqyMgCIasOqw+b47wABBvpbvxWd5+q2vJUVqR9AlbEIfdFysLR0PXGgVlBf8x5hYAEG+j9bgcxjKxRmfMrJEC6BbHTCQ+WNXqC3H+z591gZw0AgCASABQgFDAgEgAUgB" + "SQIBSAFEAUUAQb7KkreZXaSZXSPGxbgwuJddzpWJly3MFNYwALkyQcIdDABBvnLW0BTZocy0D6h48ehPtgqA0XqNxrqB86bTTks9uvuQAgEgAUYBRwBBvjYzcOXWIfyk" + "HqSDt3m92Hacz/XRoWD5F4yy0AQ/E0ogAEG+AShOVhiiJZ6Itzjs8O75CiiF+eXloz74MSVsHpPAMiACASABSgFLAgEgAVABUQIDeuABTAFNAgFYAU4BTwA/vVuDIbt9" + "1w2Z2FpLSOsyAUPo2ovei28SxaHKDSUdRz0AP71qm4D4evL40x1qJi6AGLh6oOBtxFr5bgc8Xr8jaeWRAEG+HzK7ymUhDh5PL//pLHqwaYidq3sym7hIWC32Rqol+mAA" + "Qb41DOvSox2jnjN40ZFtUSQhSJMCyEWhBRdRERRSltibIAIBIAFSAVMCASABWAFZAgFYAVQBVQIBIAFWAVcAQb3cHJ+brtBSsROnSioWNJqFxZ+5hIGX7ta5KuhleBFn" + "wABBvf/lQA5TJrGDmv6EqacNl5j6ktTzbQOEGqpl45xcekNAAEG+Nve9GdRJhn/t0fgYe7d1pkTBxa2AfiXcWeRYqE1K3yAAQb4jrXHoxDyh1ZYGBdBoQgLaScxW6pZR" + "1hEhJC8BqF+5IAIBIAFaAVsCAVgBXgFfAEG+CdErMSfFYmEK9J9XimJDXyszQjtVELtHIXQt7AvQjKACAUgBXAFdAEC9ivFB4bA7PAP0VXnTs784TO/4CoWLb1QqRdyr" + "0orLAgBAvb5z8xm2yt/HlB1G9TB2Qna4rVgzGxI/n4z3UYr3a7gAQb3f0PQO3/nU5ypuXD5/SaZboj2RhZjd5z47o7VM8AjDwABBvfGIqWXxgi7mCltWrYf4pQa2aRZP" + 
"FvMA8LBV1hmpauDAAgEgAWIBYwIBIAGAAYECASABZAFlAgEgAXIBcwIBIAFmAWcCAVgBcAFxAgFIAWgBaQIBIAFqAWsAQb33dj2qlHUSOf2DkiVrVwhcqy3SkE9YbBfn" + "zU07vK+uwABBvdxiQ8Yt/Lb9BztkNe9dyXuUyTOcKJRlF9BteI2LK99AAgEgAWwBbQBBvjxAsXZAtTQoMwJV27nrzNCyFum1aU1fbygeFMFuYX9gAgFIAW4BbwBBvdro" + "odCnIayUb5VXYFh23qJGAE4Oed7iqqU/L0iFAPpAAD+9QlUpU0rFnXRmWi3ZnIsFtIIm3JDSdtVPEGqGefBt/wA/vWGl+1GrGASEj3GaAizvMOXDl69yZpcU2YUtCHfG" + "jLUAQb4d/oR88TrfAGcKrMn44T3wBnbh3TWVQWr8rVq0bYTnYABBvhpY6fA3+apwMQXdpEMu8s8uFXf+625mtfciMt0dh4LgAgEgAXQBdQIBIAF4AXkAQb5d0CvPvsyC" + "ZxuTbUe5O2PtTudCwtgc3Ou4DMuX2WizEAIBSAF2AXcAQb3BrlEdo+Hw0uZZJxCgCdxWs/njs6bTHuprY7HtqNl0QABBvcSsc0L20So00ByQZ2oo0aUWf4BlreuHcpYk" + "R/C5Av7AAgEgAXoBewIBIAF+AX8CASABfAF9AEG+ErNElODwkPB+KvEKqCtCz8CS5HCcsC8/VoJGV5f0+uAAQb3FCW/Cy20jtvAS0j4k9eQvRg9tcpaQgFnHc5cB7Fdv" + "wABBvc5nMn9h2c6FeqzonvA74SwaTxZXTgLEXOKOIFOki9BAAEG+NkNRDvICKDQNaqBlpx1LnSn5qpShA00BPg8Tfv+LHaAAQb4+0zsN9j+Lxs1EvbGG0fMwbeeqbWlx" + "TzyjV4LE+0uJYAIBIAGCAYMCAUgBigGLAgEgAYQBhQIBIAGGAYcAQb5O+6O6Y7dWb4HOnMBK4fZ7QNo9woEzBIeKd5+K08xlkABBvlwlLor18dZ5/O3AomXxI5hxYM4o" + "J1Xrrx0JChLVxHpQAgFYAYgBiQBBvn9hAM+g43TTR8vOvZfnhX3kPBCgPp3T0+YF+Ai6RFHwAEG99KmZCgwzysLzIR2TNaJdbyX4lKduOMlCmhCp4L9gJEAAQb3Ntnmm" + "W4yzmAdiAYg7sNjoD8sCiWIvgvkpuYpTXcyiQAIBZgGMAY0CAW4BjgGPAEC9hzviVxD170gIZfsWPGFKfbOB6LCP5YhH7I7fWz7wdwBAvaey9kbu3gkPDYYEraB8b3sF" + "UrCgg4ask3C+O8UJ1mkAQL2wAL6FGQaCTbDdEwGUJ82TDpVMLoNr4ZGZWxcofghZAEC9lqzgehIXoMRj58vAWaHnNAi6UXEU5Ce942dJqf4HawIBIAGSAZMCASABqAGp" + "AgEgAZQBlQIBIAGkAaUCASABlgGXAgEgAZwBnQIBagGYAZkCASABmgGbAEC9syAieemf3vF3umY0lCaQxLhwvbTFuL8eQxPYrpeZ8ABAvbl6reyIsCKH2fq2I8+oEnkS" + "4xYy3RUH/7ka152WrisAQb4CJHgAcs+wQzgf/9IPKdknw/ej0Z+Q+n3BtSEKi0hIoABBvgqovnD/owP5nsA4G62765H5klOyA1TV+7jriGf2CtjgAgFYAZ4BnwIBIAGg" + "AaEAQb3dAG8Nta3/iYiTymgGxV0CfKQlN6UlidHeNgbvtMT9wABBve7An2cFgShRoZx3xA7hUDRtwbcLae0x4dPQQlAH8o3AAEG+HDeG9ZNvkzq3wDDpGt0cb5cHHFQ0" + "itHD3s5R2YHy8eACAWIBogGjAD+9ewqjet2JVaCzHa8NXfnW3ZtLEzEASpk9eicyztCrvwA/vXDzaFNMjF1BnqMojulsIHfT2Dj1ltCTVvoe8wu+GKcCASABpgGnAEG+" + "un2oV7CbmRhYGc7tLiCXj/L40+4ZlzvlmEnZPxyuQrgAQb5ElmikSUchX0lT+0ASVhwF0OBnUB8X4TD4m4/v2Dfl0ABBvlBR7mcUQO8IfN+DkkDYHF1reSJZhv08w6k+" + "JIA6ITiwAgEgAaoBqwIBIAG0AbUCAVgBrAGtAgEgAbIBswBBvhX0m4apMW/GEDxtnd+z0ug75voHd+OibSQbA2+tUPigAgEgAa4BrwIBWAGwAbEAQb3WKikPb9a/J2ti" + "V6yOhNUW5BivimV3gM+EI3VAxst6QAA/vUeSH4ZL+7V8eQBEF/0lm/ouIJ+wQs5QTzBpsSHSXLcAP71t4YT+jYHLpx5Gv3HFoOzL5rhg0Ukud8G3adF8AYlRAEG+Zf0n" + "TrwaPPTPlLjegNsGkoz7UV5wz7oYQet9+SNmRfAAQb5m0tqyXFYp4ntucDLTwJV1gxwoh6JoJL1Y0rfwfLQhUABBvqSCHVak+jIc9ANutTAfHpZNM3YdGky7yaDzsTrg" + "0WhIAgN9eAG2AbcAP70AGCAXHtaQJNqiST0rNTs8mUZSo5H6vM7gvA+3q7+iAD+9FgzFlOZUrfRtonCQzjDSFzrRv4l/94TFs9oi+RQ6kgIBIAG6AbsCASAB1gHXAgEg" + "AbwBvQIBIAHKAcsCASABvgG/AgEgAcQBxQBBvqg93lUVxmlCEks5kL8jTFcqg8lElfAi8dSee8j2jFDIAgEgAcABwQICcwHCAcMAQb5gqEQiOqBKE6++9fJCR6LRVtNC" + "cE9MFknXFlF0leXQMAA/vWDgwPyHRVDvZl2iYgjJ3nWePRW2wjoUWAxrbgzB5a8AP71vi5ua8R9Xas7ZJOxnHw9u9q/5yyOmKiac4YXhpzZdAEG+s1A7ERdFjokIunFC" + "SgeOxki+V8FwbGaF2nFzHDuF3TgCASABxgHHAEG+VoZmB1FqSlGFLPm5r9LBLAX67F6BFQLDlwahNArjz1ACAnIByAHJAD+9QiJtY3MezTL7KB0xvFikeKH4EL/XSXL0" + "b7P1FoVCXwA/vWinW8a2SNxgyMi+e0ML00BiBRy4kZh/JQrAHMZZ3Y0CASABzAHNAgEgAdIB0wIBWAHOAc8CBX+rYAHQAdEAQb4MUGwt25IQd3/yHjI03F71G8Kp2GMa" + "MEv2TiWoTKbs4ABBvjfgYNaJyJijra4RuhLyyPeGUpRcBZhwzdStzQ2MIyDgAD+8XsswC94XkGKDsoUR3B73WxXRX2LdrWSok77uwX/c8AA/vF/xbT+aFbepxFKzgZQ9" + "HbF9uy1KEVspm2/20klhldAAQb6ORoMEHrkmcAR+9ntDkAj0Hq6gLGUT0ceglU8Tm9jfuAIBIAHUAdUAQb5A/TMaqnaKx2BBvcxafTpwUxZYRXcKXTAZj80OapRScABB" + 
"vm8iGJqmHDhbx34EGjoh2YHhU4mpC/HVkmnz7NBQA0LwAgEgAdgB2QIBIAHmAecCASAB2gHbAgEgAd4B3wIDeqAB3AHdAEG+rC9orZ39Jto92k4zrR5989Z4qySyANXA" + "U8TLG5+0zfgAP71bgmShTXyEATbw0sECEmtwNtuzKI+S3DHEAPCPRhvTAD+9YC74p2ZuEIcz5A4sE69a7MTFuARvrmQnzUDgc7Mo3QIBIAHgAeECA3jgAeQB5QBBvlnO" + "v0cNQ7XgFJEwo9boghCVUHzfZ+urQtJh6esRW5xQAgFqAeIB4wBAvYY1sTf2ZnuWrkRZ+aijWbaH+q5ZMHkghn/Ys+tCZhoAQL2mLfoqMZw77ln7oAn0Cna+Bkp/snNw" + "xHgR2MTl/uqVAD+9XiSecyAvpnbNK3Z28HAfLhXvbXN59PmK+A7M2VDdAwA/vVcEpETq6AblfmVHtN91B7GNEyGglVc2447ooPciTZMCAUgB6AHpAgEgAe4B7wIBIAHq" + "AesAQb5J79ZyWgm+nqrXs6x0I4wkPiKQBH28C7RWNfPTqAfu8ABBvga7i8W/V7fCfyaKf+LLs48ld6A5hMVDltkVnlrlk+IgAgFYAewB7QBAvZIZkLzw7YHDbLe+Scl6" + "3uhdXfRwOUa0JHwJvuhGG3kAQL2a+QtRGkljjF6hjiME0j7LnnMjJkDh6mYBahv3SgufAEG+q3Z1cONnEXUOq6coX7x0RaK8l2WJj/QViIJee2G6qcgAQb6p4a4p479A" + "eC04K9HUR0x8B9TDrIBoSgVyWXe7xEjGWAIBIAHyAfMCASACBAIFAgEgAfQB9QIBIAH6AfsCAUgB9gH3AEG/JvWFCk64ubdT7k9fADlAADZW2oUeE0F//hNAx5vmQ24C" + "ASAB+AH5AEG+ortA8RL/qsRfVCCcmhh9yV+abEsHsmRmSDIyM5jiKZgAQb52rnetuJmLxwetwRXlQ8SwkzMrIHn9f1t+3vxypn8ikABBvlRRrWQUSUCo75+dTtj6fP1U" + "VTmV5DEujv1TIAc3ZLZQAgFYAfwB/QIBIAH+Af8AQb6OgDPbFGfKzqixWPD2Hmgt4G6KWUdQTJBPH3A9K+TZ6ABBvoMGKypw006AeRYqimLjmY2Ufp+SHk8C0ZJBNgVB" + "lzw4AgFqAgACAQIBWAICAgMAQb4FNJ5NJO4+0QwlVAWckUZXdk+PfYDexDZ1+ju9SxhF4ABBvjxQpfN455vPpJ/T+t2rtlKCE9X6KviHFRV802gCPe5gAEG+eMP12XnW" + "n0wTl6XmbgClnjYFM2JY2UAZYhUaknKJf3AAQb5WLKPfVeykQ1NoeXCT+51aWRbOsYTKmyd3AQSzEZ39EAIBIAIGAgcCASACDAINAgFYAggCCQIBIAIKAgsAQb68pxxy" + "oAcWOvpflv3VjfgrRk9v44uazdxMziPqfc1hGABBvqK0CHqoBidcEUJHx4naV3TtgmUv1oEhGpt3DFLGnncoAEG+xnddXOiUNI6DJEK4qY1Cxoa8Hl6iQkWXMWUwTPTo" + "H6wAQb72G1Ke4q6X03mCI87z+qVMO/gd+xvXv6SSwdWpfbnvjAIBIAIOAg8AQb8B8+e/xOcnn+D3yL8SGkEf/SXAx3pRSH/Lf3UDC6zxGgIBIAIQAhEAQb7an34AE4Mg" + "4PeqZAW6F6j/JbgFl8egPBFDGYC5dIgrvABBvpMd78gzSiVsK0zz0AHtEja8x1UoB/NDZMjn+l86NQK4AgFYAhICEwIBIAIUAhUAQb4zj6RBc4mQ6p3ng7mGJ7tp7Mbz" + "ERhe7obkM9A0wnCCIABBvcdlWZEG0Xj7uGgLfagzT4G4zmtS/JDEdPQBzOA0r99AAgEgAhYCFwBAvYD00VNmocZyrS8LPuogdwJgYw9wWC7QCKaicnWos7IAQL2UR4JV" + "cHfZibOIOqdJm+OTPN6Z1z0bykKu09Up+xc/AgEgAhoCGwIBIAIoAikCASACHAIdAgEgAiYCJwIBWAIeAh8CASACJAIlAEG+pJiW3Qo4nq8pKjVzzfs3/0uJxMmWXYyD" + "sduLHtuy8ggCASACIAIhAEG+VOzUzgqzn6yjJdPd2lOP2LQqiZF7O2/LbcmLzMf+hfACAnICIgIjAD+9bmuGAYNACsk0M2FDu866cYUghqLilNK52oLflBoKXQA/vU+c" + "jkDnrb+NojfOEJpwm2m9hlmHmr3HOWwyl4LEIcEAQb7xrpmUHCzHHfaaDbiK66LDRKeKblhi4QoTVRthJ2OzbABBvu6d/bOGE/iiKiKq5AGCvcetA3Izw45ihY196+ey" + "/BbcAEG/IPVJM6fGP9OC+PczMUdiKPNfwkUrt4eslgzXXEY0qCIAQb8FwRfn4LbYMTzpLsSBuEI3vAaLitADflpdxp+M5JVWtgIBIAIqAisCASACNgI3AEG/OXz/ktGT" + "HClb8arzLt3XEjlJTw9LEYxjGvSJNff79loCASACLAItAgFIAi4CLwIBIAIwAjEAQb5bNqQnT8GAdHDnixf9NzTB5VYvmnvaYs6m53KwbxMzsABBvlGslmQWFAphVxFA" + "GGIJvfuk/oBpngdzy0sJ8WxmWNSQAgN+ugIyAjMCAW4CNAI1AD+84Hccb00HqhGM3lRQZIZ3QmOuWlRDBQ9+uXRKu1L+hAA/vOLc2o+R4+ofOAQzeQiU06F6MN1nTGWW" + "J0eurH869zQAQb36Q2nDRQfZx/XsGJ+z0zYtk4S6OXPZcUASOm420y1FQABBvd9bukINCpKmNEXeA+ve7Mnhp8WSt+MPJFDCUYjDLZ1AAgEgAjgCOQBBvzD0lLSsv1Pi" + "WQ0jVDajeXFbJ/TkSakvdy+g0TPR27KGAgFYAjoCOwIBWAI8Aj0AQb53taVCRMwrV1sky/EE45BOJoTTJ0d6vkLZIb6j4k+G0ABBvlKuPPc+sdv9ffRS/Kj+bSQKZFE7" + "fT/jbtog/5dYYCCQAEG+ZZdBcxF7VCWJS+ti78o7J2qY+aXyKipCl2P0CfXeUhAAQb5gdZIvzW7H8KDz4y1oKMiuAzlXY+TF7PGVAwUvGCn0UAIBIAJAAkEBA6DAAkwB" + "AfwCQgIBIAJDAkQBwbnpmKopRu2n8DHZCDhXCHvJdckI7xw0kBvbb0npdd7jjldXaYBVRMxJsrwBE0/IJ4amdSKh5/Ec0+nZhJr583uAAAAAAAAAAAAAAABtiv/XlkR5" + "bE7cmy0osGrcZKJHU0ACRwEB1AJFAQH0AkYBwcaYme1MOiTF+EsYXWNG8wYLwlq/ZXmR6g2PgSXaPOEeN1Z517mqkFdU7Nqr1K+moGnDNMnTrseTTWtZnFPnBDuAAAAA" + 
"AAAAAAAAAABtiv/XlkR5bE7cmy0osGrcZKJHU0ACRwLFAaUkEAuNdJLBIqJ50rOuJIeLHBBTEnUHFMTTlSvkBfBlTSx/ArBlJBChmMwsWi3fU4ek+WJDvjF7AhFPUcNX" + "4kaAAAAAAAAAAAAAAAAAJ37Hglt9pn14Z9Vgj9pE3L7fXbBAAkcCTgIBIAJIAkkCASACSgJLAIO/z+IwR9x5RqPSfAzguJqFxanKeUhZQgFsmKwj4GuAK2WAAAAAAAAA" + "AAAAAAB7G3oHXwv9lQmh8vd3TonVSERFqMAAgr+jPzrhTYloKgTCsGgEFNx7OdH+sJ98etJnwrIVSsFxHwAAAAAAAAAAAAAAAOsF4basDVdO8s8p/fAcwLo9j5vxAIK/" + "n8LJGSxLhg32E0QLb7fZPphHZGiLJJFDrBMD8NcM15MAAAAAAAAAAAAAAADlTNYxyXvgdnFyrRaQRoiWLQnS/gLFAbUl61s8X25tzWBr7nugeg7IMDUhKEm34FWUmcD2" + "utVNIR8VdL9iPRR4dwjF/dVl4ymiWr+kkJXphEJvGbzwSXSAAAAAAAAAAAAAAAAAWZG0lbam3LV4+pciTNFehvbNeeLAAk0CTgIBIAJPAlAAMEO5rKAEO5rKADehIAPk" + "4cBAX14QA5iWgAIBIAJRAlIAg7/T7quzPdTpPcCght7xTpoi+g9Sw7gtkYDSyaOh0qHc0AAAAAAAAAAAAAAAADavGw+/CvXTnyDIJ6fZU+llAiixQAIBIAJTAlQCASAC" + "WwJcAgEgAlUCVgCBv1wad2ywThLttxU0gcwWuSJSuLNadPm8j3J85ggRzjkGAAAAAAAAAAAAAAAB1xLrLNteGQzkOClxdvv3E/l3M5UAgb8JuDCFQxifbIdTfjd1x7Mq" + "S+Z7dzIUkHtIdVjcVeFT2AAAAAAAAAAAAAAAAiwal03Yl9B7p2fVDSCtlYsZX6m+AgEgAlcCWAIBIAJZAloAgb7jxvbib0yb3DKvQBDcHL/hdg7NjCuqjUQ09t8hgmhV" + "oAAAAAAAAAAAAAAABEGpMZGoNId5F80sBzWgnjo+AP2UAIG+sE8ccijAbmkaBJVfyfgqY5pf4QSO+c5IFGVC9WwlY/AAAAAAAAAAAAAAAAeg08QveVui23B9QhrdMd7a" + "nx/sGACBvqxwYOyAk+H0YGBc70gZFJc6oqUvcHywU+yJNBfSNh+AAAAAAAAAAAAAAAADFU5kDFbQI6mIkEJqJNGncvWjiygCASACXQJeAIG/acxhhr+dznhtppGVCg+k" + "FqjL65rOddHn1mwyRj1rYgQAAAAAAAAAAAAAAACRfpTwfZ9v81WVbRpRYN+1/m9YhwCBvw9fhTm/NqURBT4FuwJczZWe39F575hmpFtt8KVniCwIAAAAAAAAAAAAAAAB" + "DkxuMKeNKjBZpVAjNVjJ/URzwhoAgb8RuD3rFDyNUpuXtBAnWTykKVAuY7UKLrye419st2b25AAAAAAAAAAAAAAAAlUrmS7Amiwb/77tvRUhnpfLLMXeL4vIgQ=="; + + +constexpr td::int64 Ton = 1000000000; + +TEST(Emulator, wallet_int_and_ext_msg) { + td::Ed25519::PrivateKey priv_key = td::Ed25519::generate_private_key().move_as_ok(); + auto pub_key = priv_key.get_public_key().move_as_ok(); + ton::WalletV3::InitData init_data; + init_data.public_key = pub_key.as_octet_string(); + init_data.wallet_id = 239; + auto wallet = ton::WalletV3::create(init_data, 2); + + auto address = wallet->get_address(); + + void *emulator = transaction_emulator_create(config_boc, 3); + const uint64_t lt = 42000000000; + CHECK(transaction_emulator_set_lt(emulator, lt)); + const uint32_t utime = 1337; + transaction_emulator_set_unixtime(emulator, utime); + + std::string shard_account_after_boc_b64; + + // emulate internal message with init state on uninit account + { + td::Ref account_root; + block::gen::Account().cell_pack_account_none(account_root); + auto none_shard_account_cell = vm::CellBuilder().store_ref(account_root).store_bits(td::Bits256::zero().as_bitslice()).store_long(0).finalize(); + auto none_shard_account_boc = td::base64_encode(std_boc_serialize(none_shard_account_cell).move_as_ok()); + + td::Ref int_msg; + { + block::gen::Message::Record message; + block::gen::CommonMsgInfo::Record_int_msg_info msg_info; + msg_info.ihr_disabled = true; + msg_info.bounce = false; + msg_info.bounced = false; + { + block::gen::MsgAddressInt::Record_addr_std src; + src.anycast = vm::CellBuilder().store_zeroes(1).as_cellslice_ref(); + src.workchain_id = 0; + src.address = td::Bits256();; + tlb::csr_pack(msg_info.src, src); + } + { + block::gen::MsgAddressInt::Record_addr_std dest; + dest.anycast = vm::CellBuilder().store_zeroes(1).as_cellslice_ref(); + dest.workchain_id = address.workchain; + dest.address = address.addr; + tlb::csr_pack(msg_info.dest, dest); + } + { + block::CurrencyCollection cc{10 * Ton}; + cc.pack_to(msg_info.value); + } + { + vm::CellBuilder cb; + block::tlb::t_Grams.store_integer_value(cb, 
td::BigInt256(int(0.03 * Ton))); + msg_info.fwd_fee = cb.as_cellslice_ref(); + } + { + vm::CellBuilder cb; + block::tlb::t_Grams.store_integer_value(cb, td::BigInt256(0)); + msg_info.ihr_fee = cb.as_cellslice_ref(); + } + msg_info.created_lt = 0; + msg_info.created_at = static_cast(utime); + tlb::csr_pack(message.info, msg_info); + message.init = vm::CellBuilder() + .store_ones(1) + .store_zeroes(1) + .append_cellslice(vm::load_cell_slice(ton::GenericAccount::get_init_state(wallet->get_state()))) + .as_cellslice_ref(); + message.body = vm::CellBuilder().store_zeroes(1).as_cellslice_ref(); + + tlb::type_pack_cell(int_msg, block::gen::t_Message_Any, message); + } + + CHECK(int_msg.not_null()); + + auto int_msg_boc = td::base64_encode(std_boc_serialize(int_msg).move_as_ok()); + + std::string int_emu_res = transaction_emulator_emulate_transaction(emulator, none_shard_account_boc.c_str(), int_msg_boc.c_str()); + LOG(ERROR) << "int_emu_res = " << int_emu_res; + + auto int_result_json = td::json_decode(td::MutableSlice(int_emu_res)); + CHECK(int_result_json.is_ok()); + auto int_result_value = int_result_json.move_as_ok(); + auto& int_result_obj = int_result_value.get_object(); + + auto success_field = td::get_json_object_field(int_result_obj, "success", td::JsonValue::Type::Boolean, false); + CHECK(success_field.is_ok()); + auto success = success_field.move_as_ok().get_boolean(); + CHECK(success); + + auto transaction_field = td::get_json_object_field(int_result_obj, "transaction", td::JsonValue::Type::String, false); + CHECK(transaction_field.is_ok()); + auto transaction_boc_b64 = transaction_field.move_as_ok().get_string(); + auto transaction_boc = td::base64_decode(transaction_boc_b64); + CHECK(transaction_boc.is_ok()); + auto trans_cell = vm::std_boc_deserialize(transaction_boc.move_as_ok()); + CHECK(trans_cell.is_ok()); + td::Bits256 trans_hash = trans_cell.ok()->get_hash().bits(); + block::gen::Transaction::Record trans; + block::gen::TransactionDescr::Record_trans_ord trans_descr; + CHECK(tlb::unpack_cell(trans_cell.move_as_ok(), trans) && tlb::unpack_cell(trans.description, trans_descr)); + CHECK(trans.outmsg_cnt == 0); + CHECK(trans.account_addr == wallet->get_address().addr); + CHECK(trans_descr.aborted == false); + CHECK(trans_descr.destroyed == false); + CHECK(trans.lt == lt); + CHECK(trans.now == utime); + + auto shard_account_field = td::get_json_object_field(int_result_obj, "shard_account", td::JsonValue::Type::String, false); + CHECK(shard_account_field.is_ok()); + auto shard_account_boc_b64 = shard_account_field.move_as_ok().get_string(); + shard_account_after_boc_b64 = shard_account_boc_b64.str(); + auto shard_account_boc = td::base64_decode(shard_account_boc_b64); + CHECK(shard_account_boc.is_ok()); + auto shard_account_cell = vm::std_boc_deserialize(shard_account_boc.move_as_ok()); + CHECK(shard_account_cell.is_ok()); + block::gen::ShardAccount::Record shard_account; + block::gen::Account::Record_account account; + CHECK(tlb::unpack_cell(shard_account_cell.move_as_ok(), shard_account) && tlb::unpack_cell(shard_account.account, account)); + CHECK(shard_account.last_trans_hash == trans_hash); + CHECK(shard_account.last_trans_lt == lt); + ton::WorkchainId wc; + ton::StdSmcAddress addr; + CHECK(block::tlb::t_MsgAddressInt.extract_std_address(account.addr, wc, addr)); + CHECK(address.workchain == wc); + CHECK(address.addr == addr); + } + + // emulate external message + { + auto ext_body = wallet->make_a_gift_message(priv_key, utime + 60, {ton::WalletV3::Gift{block::StdAddress(0, 
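For orientation, the calls exercised by this test boil down to a short lifecycle around the emulator C API. A compressed sketch follows; the account/message BOCs stand for the base64 strings built in the test, and transaction_emulator_destroy is assumed from emulator-extern.h rather than shown in this diff.

// Illustrative sketch only, not part of this patch.
const char *shard_account_boc = "<base64 ShardAccount BOC>";   // as built in the test above
const char *msg_boc = "<base64 message BOC>";
void *em = transaction_emulator_create(config_boc, /*vm_log_verbosity=*/3);
transaction_emulator_set_lt(em, 42000000000ULL);    // logical time of the emulated transaction
transaction_emulator_set_unixtime(em, 1337);        // unix time of the emulated transaction
std::string res = transaction_emulator_emulate_transaction(em, shard_account_boc, msg_boc);
// res is JSON, e.g. {"success":true,"transaction":"<b64 boc>","shard_account":"<b64 boc>", ...}
transaction_emulator_destroy(em);                   // assumed cleanup call, see emulator-extern.h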
ton::StdSmcAddress()), 1 * Ton}}); + CHECK(ext_body.is_ok()); + auto ext_msg = ton::GenericAccount::create_ext_message(address, {}, ext_body.move_as_ok()); + auto ext_msg_boc = td::base64_encode(std_boc_serialize(ext_msg).move_as_ok()); + std::string ext_emu_res = transaction_emulator_emulate_transaction(emulator, shard_account_after_boc_b64.c_str(), ext_msg_boc.c_str()); + LOG(ERROR) << "ext_emu_res = " << ext_emu_res; + + auto ext_result_json = td::json_decode(td::MutableSlice(ext_emu_res)); + CHECK(ext_result_json.is_ok()); + auto ext_result = ext_result_json.move_as_ok(); + auto &ext_result_obj = ext_result.get_object(); + auto ext_success_field = td::get_json_object_field(ext_result_obj, "success", td::JsonValue::Type::Boolean, false); + CHECK(ext_success_field.is_ok()); + auto ext_success = ext_success_field.move_as_ok().get_boolean(); + CHECK(ext_success); + + auto ext_transaction_field = td::get_json_object_field(ext_result_obj, "transaction", td::JsonValue::Type::String, false); + CHECK(ext_transaction_field.is_ok()); + auto ext_transaction_boc_b64 = ext_transaction_field.move_as_ok().get_string(); + auto ext_transaction_boc = td::base64_decode(ext_transaction_boc_b64); + CHECK(ext_transaction_boc.is_ok()); + auto ext_trans_cell = vm::std_boc_deserialize(ext_transaction_boc.move_as_ok()); + CHECK(ext_trans_cell.is_ok()); + td::Bits256 ext_trans_hash = ext_trans_cell.ok()->get_hash().bits(); + block::gen::Transaction::Record ext_trans; + block::gen::TransactionDescr::Record_trans_ord ext_trans_descr; + CHECK(tlb::unpack_cell(ext_trans_cell.move_as_ok(), ext_trans) && tlb::unpack_cell(ext_trans.description, ext_trans_descr)); + CHECK(ext_trans.outmsg_cnt == 1); + CHECK(ext_trans.account_addr == wallet->get_address().addr); + CHECK(ext_trans_descr.aborted == false); + CHECK(ext_trans_descr.destroyed == false); + + auto ext_shard_account_field = td::get_json_object_field(ext_result_obj, "shard_account", td::JsonValue::Type::String, false); + CHECK(ext_shard_account_field.is_ok()); + auto ext_shard_account_boc_b64 = ext_shard_account_field.move_as_ok().get_string(); + auto ext_shard_account_boc = td::base64_decode(ext_shard_account_boc_b64); + CHECK(ext_shard_account_boc.is_ok()); + auto ext_shard_account_cell = vm::std_boc_deserialize(ext_shard_account_boc.move_as_ok()); + CHECK(ext_shard_account_cell.is_ok()); + block::gen::ShardAccount::Record ext_shard_account; + block::gen::Account::Record_account ext_account; + CHECK(tlb::unpack_cell(ext_shard_account_cell.move_as_ok(), ext_shard_account) && tlb::unpack_cell(ext_shard_account.account, ext_account)); + CHECK(ext_shard_account.last_trans_hash == ext_trans_hash); + CHECK(ext_shard_account.last_trans_lt == ext_trans.lt); + ton::WorkchainId wc; + ton::StdSmcAddress addr; + CHECK(block::tlb::t_MsgAddressInt.extract_std_address(ext_account.addr, wc, addr)); + CHECK(address.workchain == wc); + CHECK(address.addr == addr); + } +} + +TEST(Emulator, tvm_emulator) { + td::Ed25519::PrivateKey priv_key = td::Ed25519::generate_private_key().move_as_ok(); + auto pub_key = priv_key.get_public_key().move_as_ok(); + ton::WalletV3::InitData init_data; + init_data.public_key = pub_key.as_octet_string(); + init_data.wallet_id = 239; + init_data.seqno = 1337; + auto wallet = ton::WalletV3::create(init_data, 2); + + auto code = ton::SmartContractCode::get_code(ton::SmartContractCode::Type::WalletV3, 2); + auto code_boc_b64 = td::base64_encode(std_boc_serialize(code).move_as_ok()); + auto data = ton::WalletV3::get_init_data(init_data); + auto 
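One detail worth calling out before the TVM-emulator test below: get methods are addressed by an id derived from the method name as (crc16(name) & 0xffff) | 0x10000, which keeps name-derived ids above 2^16 and away from the small reserved ids such as 0 (recv_internal) and -1 (recv_external). A minimal sketch using the same td::crc16 helper as the test:

// Sketch: deriving a TVM get-method id from its name (same formula as in the test below).
unsigned get_method_id(td::Slice method_name) {
  return (td::crc16(method_name) & 0xffff) | 0x10000;   // e.g. method_name = "seqno"
}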
data_boc_b64 = td::base64_encode(std_boc_serialize(data).move_as_ok()); + + void *tvm_emulator = tvm_emulator_create(code_boc_b64.c_str(), data_boc_b64.c_str(), 1); + unsigned method_crc = td::crc16("seqno"); + unsigned method_id = (method_crc & 0xffff) | 0x10000; + auto stack = td::make_ref(); + vm::CellBuilder stack_cb; + CHECK(stack->serialize(stack_cb)); + auto stack_cell = stack_cb.finalize(); + auto stack_boc = td::base64_encode(std_boc_serialize(stack_cell).move_as_ok()); + + char addr_buffer[49] = {0}; + CHECK(wallet->get_address().rserialize_to(addr_buffer)); + + auto rand_seed = std::string(64, 'F'); + CHECK(tvm_emulator_set_c7(tvm_emulator, addr_buffer, 1337, 10 * Ton, rand_seed.c_str(), config_boc)); + std::string tvm_res = tvm_emulator_run_get_method(tvm_emulator, method_id, stack_boc.c_str()); + LOG(ERROR) << "tvm_res = " << tvm_res; + + auto result_json = td::json_decode(td::MutableSlice(tvm_res)); + CHECK(result_json.is_ok()); + auto result = result_json.move_as_ok(); + auto& result_obj = result.get_object(); + + auto success_field = td::get_json_object_field(result_obj, "success", td::JsonValue::Type::Boolean, false); + CHECK(success_field.is_ok()); + auto success = success_field.move_as_ok().get_boolean(); + CHECK(success); + + auto stack_field = td::get_json_object_field(result_obj, "stack", td::JsonValue::Type::String, false); + CHECK(stack_field.is_ok()); + auto stack_val = stack_field.move_as_ok(); + auto& stack_obj = stack_val.get_string(); + auto stack_res_boc = td::base64_decode(stack_obj); + CHECK(stack_res_boc.is_ok()); + auto stack_res_cell = vm::std_boc_deserialize(stack_res_boc.move_as_ok()); + CHECK(stack_res_cell.is_ok()); + td::Ref stack_res; + auto stack_res_cs = vm::load_cell_slice(stack_res_cell.move_as_ok()); + CHECK(vm::Stack::deserialize_to(stack_res_cs, stack_res)); + CHECK(stack_res->depth() == 1); + CHECK(stack_res.write().pop_int()->to_long() == init_data.seqno); +} diff --git a/emulator/transaction-emulator.cpp b/emulator/transaction-emulator.cpp index 2e8ba0374..e87b2dfbb 100644 --- a/emulator/transaction-emulator.cpp +++ b/emulator/transaction-emulator.cpp @@ -25,7 +25,7 @@ td::Result> TransactionEmu utime = (unsigned)std::time(nullptr); } - auto fetch_res = block::FetchConfigParams::fetch_config_params(config_, prev_blocks_info_, &old_mparams, + auto fetch_res = block::FetchConfigParams::fetch_config_params(*config_, prev_blocks_info_, &old_mparams, &storage_prices, &storage_phase_cfg, &rand_seed_, &compute_phase_cfg, &action_phase_cfg, &masterchain_create_fee, @@ -130,17 +130,28 @@ td::Result TransactionEmulator::emulate_t } TRY_RESULT(emulation, emulate_transaction(std::move(account), msg_root, utime, lt, trans_type)); + + if (auto emulation_result_ptr = dynamic_cast(emulation.get())) { + auto& emulation_result = *emulation_result_ptr; + + if (td::Bits256(emulation_result.transaction->get_hash().bits()) != td::Bits256(original_trans->get_hash().bits())) { + return td::Status::Error("transaction hash mismatch"); + } - auto emulation_result = dynamic_cast(*emulation); - if (td::Bits256(emulation_result.transaction->get_hash().bits()) != td::Bits256(original_trans->get_hash().bits())) { - return td::Status::Error("transaction hash mismatch"); - } + if (!check_state_update(emulation_result.account, record_trans)) { + return td::Status::Error("account hash mismatch"); + } - if (!check_state_update(emulation_result.account, record_trans)) { - return td::Status::Error("account hash mismatch"); - } + return emulation_result; - return 
emulation_result; + } else if (auto emulation_not_accepted_ptr = dynamic_cast(emulation.get())) { + return td::Status::Error( PSTRING() + << "VM Log: " << emulation_not_accepted_ptr->vm_log + << ", VM Exit Code: " << emulation_not_accepted_ptr->vm_exit_code + << ", Elapsed Time: " << emulation_not_accepted_ptr->elapsed_time); + } else { + return td::Status::Error("emulation failed"); + } } td::Result TransactionEmulator::emulate_transactions_chain(block::Account&& account, std::vector>&& original_transactions) { @@ -227,7 +238,9 @@ td::Result> TransactionEmulator return td::Status::Error(-669,"cannot create action phase of a new transaction for smart contract "s + acc->addr.to_hex()); } - if (trans->bounce_enabled && !trans->compute_phase->success && !trans->prepare_bounce_phase(*action_phase_cfg)) { + if (trans->bounce_enabled + && (!trans->compute_phase->success || trans->action_phase->state_exceeds_limits || trans->action_phase->bounce) + && !trans->prepare_bounce_phase(*action_phase_cfg)) { return td::Status::Error(-669,"cannot create bounce phase of a new transaction for smart contract "s + acc->addr.to_hex()); } @@ -250,8 +263,8 @@ void TransactionEmulator::set_ignore_chksig(bool ignore_chksig) { ignore_chksig_ = ignore_chksig; } -void TransactionEmulator::set_config(block::Config &&config) { - config_ = std::forward(config); +void TransactionEmulator::set_config(std::shared_ptr config) { + config_ = std::move(config); } void TransactionEmulator::set_libs(vm::Dictionary &&libs) { diff --git a/emulator/transaction-emulator.h b/emulator/transaction-emulator.h index 8186a3c4a..eae109f40 100644 --- a/emulator/transaction-emulator.h +++ b/emulator/transaction-emulator.h @@ -9,7 +9,7 @@ namespace emulator { class TransactionEmulator { - block::Config config_; + std::shared_ptr config_; vm::Dictionary libraries_; int vm_log_verbosity_; ton::UnixTime unixtime_; @@ -20,7 +20,7 @@ class TransactionEmulator { td::Ref prev_blocks_info_; public: - TransactionEmulator(block::Config&& config, int vm_log_verbosity = 0) : + TransactionEmulator(std::shared_ptr config, int vm_log_verbosity = 0) : config_(std::move(config)), libraries_(256), vm_log_verbosity_(vm_log_verbosity), unixtime_(0), lt_(0), rand_seed_(td::BitArray<256>::zero()), ignore_chksig_(false), debug_enabled_(false) { } @@ -57,7 +57,7 @@ class TransactionEmulator { }; const block::Config& get_config() { - return config_; + return *config_; } ton::UnixTime get_unixtime() { @@ -74,7 +74,7 @@ class TransactionEmulator { void set_lt(ton::LogicalTime lt); void set_rand_seed(td::BitArray<256>& rand_seed); void set_ignore_chksig(bool ignore_chksig); - void set_config(block::Config &&config); + void set_config(std::shared_ptr config); void set_libs(vm::Dictionary &&libs); void set_debug_enabled(bool debug_enabled); void set_prev_blocks_info(td::Ref prev_blocks_info); diff --git a/emulator/tvm-emulator.hpp b/emulator/tvm-emulator.hpp index a9f248b72..413298c99 100644 --- a/emulator/tvm-emulator.hpp +++ b/emulator/tvm-emulator.hpp @@ -24,12 +24,12 @@ class TvmEmulator { } void set_c7(block::StdAddress address, uint32_t unixtime, uint64_t balance, td::BitArray<256> rand_seed, std::shared_ptr config) { - args_.set_address(address); + args_.set_address(std::move(address)); args_.set_now(unixtime); args_.set_balance(balance); - args_.set_rand_seed(rand_seed); + args_.set_rand_seed(std::move(rand_seed)); if (config) { - args_.set_config(config); + args_.set_config(std::move(config)); } } @@ -37,6 +37,10 @@ class TvmEmulator { 
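The TransactionEmulator change above swaps the owned block::Config for a std::shared_ptr, so one parsed config can back several emulator instances and be replaced at runtime without copying. A minimal sketch under that assumption (signatures as in this diff):

// Sketch, not part of the patch: sharing one parsed config between emulators.
std::shared_ptr<block::Config> cfg = /* parse the config BOC once */ nullptr;
emulator::TransactionEmulator em1(cfg);
emulator::TransactionEmulator em2(cfg);   // same config object, no deep copy
em2.set_config(cfg);                      // can also be swapped later without touching em1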
args_.set_c7(std::move(c7)); } + void set_config(std::shared_ptr config) { + args_.set_config(std::move(config)); + } + void set_prev_blocks_info(td::Ref tuple) { args_.set_prev_blocks_info(std::move(tuple)); } @@ -46,7 +50,8 @@ class TvmEmulator { } Answer run_get_method(int method_id, td::Ref stack) { - return smc_.run_get_method(args_.set_stack(stack).set_method_id(method_id)); + ton::SmartContract::Args args = args_; + return smc_.run_get_method(args.set_stack(stack).set_method_id(method_id)); } Answer send_external_message(td::Ref message_body) { @@ -54,7 +59,8 @@ class TvmEmulator { } Answer send_internal_message(td::Ref message_body, uint64_t amount) { - return smc_.send_internal_message(message_body, args_.set_amount(amount)); + ton::SmartContract::Args args = args_; + return smc_.send_internal_message(message_body, args.set_amount(amount)); } }; } \ No newline at end of file diff --git a/lite-client/lite-client.cpp b/lite-client/lite-client.cpp index 55d46ad1f..020aca705 100644 --- a/lite-client/lite-client.cpp +++ b/lite-client/lite-client.cpp @@ -949,8 +949,8 @@ bool TestNode::show_help(std::string command) { "lasttrans[dump] []\tShows or dumps specified transaction and " "several preceding " "ones\n" - "listblocktrans[rev] [ ]\tLists block transactions, " - "starting immediately after or before the specified one\n" + "listblocktrans[rev][meta] [ ]\tLists block " + "transactions, starting immediately after or before the specified one\n" "blkproofchain[step] []\tDownloads and checks proof of validity of the " "second " "indicated block (or the last known masterchain block) starting from given block\n" @@ -1074,6 +1074,13 @@ bool TestNode::do_parse_line() { return parse_block_id_ext(blkid) && parse_uint32(count) && (seekeoln() || (parse_hash(hash) && parse_lt(lt) && (mode |= 128) && seekeoln())) && get_block_transactions(blkid, mode, count, hash, lt); + } else if (word == "listblocktransmeta" || word == "listblocktransrevmeta") { + lt = 0; + int mode = (word == "listblocktransmeta" ? 
7 : 0x47); + mode |= 256; + return parse_block_id_ext(blkid) && parse_uint32(count) && + (seekeoln() || (parse_hash(hash) && parse_lt(lt) && (mode |= 128) && seekeoln())) && + get_block_transactions(blkid, mode, count, hash, lt); } else if (word == "blkproofchain" || word == "blkproofchainstep") { ton::BlockIdExt blkid2{}; return parse_block_id_ext(blkid) && (seekeoln() || parse_block_id_ext(blkid2)) && seekeoln() && @@ -2493,23 +2500,40 @@ bool TestNode::get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned } else { auto f = F.move_as_ok(); std::vector transactions; + std::vector> metadata; for (auto& id : f->ids_) { transactions.emplace_back(id->account_, id->lt_, id->hash_); + metadata.push_back(std::move(id->metadata_)); } td::actor::send_closure_later(Self, &TestNode::got_block_transactions, ton::create_block_id(f->id_), mode, - f->req_count_, f->incomplete_, std::move(transactions), std::move(f->proof_)); + f->req_count_, f->incomplete_, std::move(transactions), std::move(metadata), + std::move(f->proof_)); } }); } -void TestNode::got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete, - std::vector trans, td::BufferSlice proof) { +void TestNode::got_block_transactions( + ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete, std::vector trans, + std::vector> metadata, td::BufferSlice proof) { LOG(INFO) << "got up to " << req_count << " transactions from block " << blkid.to_str(); auto out = td::TerminalIO::out(); int count = 0; - for (auto& t : trans) { + for (size_t i = 0; i < trans.size(); ++i) { + auto& t = trans[i]; out << "transaction #" << ++count << ": account " << t.acc_addr.to_hex() << " lt " << t.trans_lt << " hash " << t.trans_hash.to_hex() << std::endl; + if (mode & 256) { + auto& meta = metadata.at(i); + if (meta == nullptr) { + out << " metadata: " << std::endl; + } else { + out << " metadata: " + << block::MsgMetadata{(td::uint32)meta->depth_, meta->initiator_->workchain_, meta->initiator_->id_, + (ton::LogicalTime)meta->initiator_lt_} + .to_str() + << std::endl; + } + } } out << (incomplete ? 
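To summarize the mode value assembled by the listblocktrans[rev][meta] commands above (numeric values taken directly from this diff):

// Sketch of the listBlockTransactions mode bits used by the lite-client above.
constexpr int kWantAccount  = 1;      // bit 0: return account id
constexpr int kWantLt       = 2;      // bit 1: return lt
constexpr int kWantHash     = 4;      // bit 2: return hash
constexpr int kReverse      = 0x40;   // bit 6: the *rev variants
constexpr int kAfter        = 128;    // bit 7: start after the given <account> <lt>
constexpr int kWantMetadata = 256;    // bit 8: fill transactionId.metadata (new here)
int mode = kWantAccount | kWantLt | kWantHash | kWantMetadata;   // 7 | 256, i.e. listblocktransmeta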
"(block transaction list incomplete)" : "(end of block transaction list)") << std::endl; } diff --git a/lite-client/lite-client.h b/lite-client/lite-client.h index 219ba7d5c..17680f448 100644 --- a/lite-client/lite-client.h +++ b/lite-client/lite-client.h @@ -258,7 +258,9 @@ class TestNode : public td::actor::Actor { bool get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned count, ton::Bits256 acc_addr, ton::LogicalTime lt); void got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete, - std::vector trans, td::BufferSlice proof); + std::vector trans, + std::vector> metadata, + td::BufferSlice proof); bool get_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode); void got_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode, td::BufferSlice res); bool get_creator_stats(ton::BlockIdExt blkid, int mode, unsigned req_count, ton::Bits256 start_after, diff --git a/overlay/overlay-fec-broadcast.cpp b/overlay/overlay-fec-broadcast.cpp index aed5248b8..cd030742a 100644 --- a/overlay/overlay-fec-broadcast.cpp +++ b/overlay/overlay-fec-broadcast.cpp @@ -78,7 +78,6 @@ td::Status OverlayFecBroadcastPart::check_signature() { } td::Status OverlayFecBroadcastPart::run_checks() { - TRY_STATUS(check_time()); TRY_STATUS(check_duplicate()); TRY_STATUS(check_source()); @@ -94,14 +93,17 @@ void BroadcastFec::broadcast_checked(td::Result R) { overlay_->deliver_broadcast(get_source().compute_short_id(), data_.clone()); auto manager = overlay_->overlay_manager(); while (!parts_.empty()) { - distribute_part(parts_.begin()->first); + distribute_part(parts_.begin()->first); } + + is_checked_ = true; } // Do we need status here?? -td::Status BroadcastFec::distribute_part(td::uint32 seqno) { +td::Status BroadcastFec::distribute_part(td::uint32 seqno) { auto i = parts_.find(seqno); if (i == parts_.end()) { + VLOG(OVERLAY_WARNING) << "not distibuting empty part " << seqno; // should not get here return td::Status::OK(); } @@ -132,7 +134,6 @@ td::Status BroadcastFec::distribute_part(td::uint32 seqno) { } td::Status OverlayFecBroadcastPart::apply() { - if (!bcast_) { bcast_ = overlay_->get_fec_broadcast(broadcast_hash_); } @@ -165,16 +166,20 @@ td::Status OverlayFecBroadcastPart::apply() { return S; } } else { - if(untrusted_) { + if (untrusted_) { auto P = td::PromiseCreator::lambda( - [id = broadcast_hash_, overlay_id = actor_id(overlay_)](td::Result RR) mutable { - td::actor::send_closure(std::move(overlay_id), &OverlayImpl::broadcast_checked, id, std::move(RR)); - }); + [id = broadcast_hash_, overlay_id = actor_id(overlay_)](td::Result RR) mutable { + td::actor::send_closure(std::move(overlay_id), &OverlayImpl::broadcast_checked, id, std::move(RR)); + }); overlay_->check_broadcast(bcast_->get_source().compute_short_id(), R.move_as_ok(), std::move(P)); } else { overlay_->deliver_broadcast(bcast_->get_source().compute_short_id(), R.move_as_ok()); } } + } else { + bcast_->set_overlay(overlay_); + bcast_->set_src_peer_id(src_peer_id_); + TRY_STATUS(bcast_->add_part(seqno_, data_.clone(), export_serialized_short(), export_serialized())); } return td::Status::OK(); } @@ -304,7 +309,8 @@ td::Status OverlayFecBroadcastPart::create_new(OverlayImpl *overlay, td::actor:: auto B = std::make_unique( broadcast_hash, part_hash, PublicKey{}, overlay->get_certificate(local_id), data_hash, size, flags, - part_data_hash, std::move(part), seqno, std::move(fec_type), date, td::BufferSlice{}, false, nullptr, overlay, adnl::AdnlNodeIdShort::zero()); + part_data_hash, 
std::move(part), seqno, std::move(fec_type), date, td::BufferSlice{}, false, nullptr, overlay, + adnl::AdnlNodeIdShort::zero()); auto to_sign = B->to_sign(); auto P = td::PromiseCreator::lambda( diff --git a/overlay/overlay-fec-broadcast.hpp b/overlay/overlay-fec-broadcast.hpp index 612af22fb..85de648e3 100644 --- a/overlay/overlay-fec-broadcast.hpp +++ b/overlay/overlay-fec-broadcast.hpp @@ -82,15 +82,15 @@ class BroadcastFec : public td::ListNode { } } - td::Status add_part(td::uint32 seqno, td::BufferSlice data, - td::BufferSlice serialized_fec_part_short, + td::Status add_part(td::uint32 seqno, td::BufferSlice data, td::BufferSlice serialized_fec_part_short, td::BufferSlice serialized_fec_part) { - CHECK(decoder_); - td::fec::Symbol s; - s.id = seqno; - s.data = std::move(data); + if (decoder_) { + td::fec::Symbol s; + s.id = seqno; + s.data = std::move(data); - decoder_->add_symbol(std::move(s)); + decoder_->add_symbol(std::move(s)); + } parts_[seqno] = std::pair(std::move(serialized_fec_part_short), std::move(serialized_fec_part)); @@ -200,8 +200,13 @@ class BroadcastFec : public td::ListNode { td::Status distribute_part(td::uint32 seqno); + bool is_checked() const { + return is_checked_; + } + private: bool ready_ = false; + bool is_checked_ = false; Overlay::BroadcastHash hash_; Overlay::BroadcastDataHash data_hash_; @@ -281,7 +286,7 @@ class OverlayFecBroadcastPart : public td::ListNode { , signature_(std::move(signature)) , is_short_(is_short) , bcast_(bcast) - , overlay_(overlay) + , overlay_(overlay) , src_peer_id_(src_peer_id) { } @@ -300,7 +305,7 @@ class OverlayFecBroadcastPart : public td::ListNode { signature_ = std::move(signature); } void update_overlay(OverlayImpl *overlay); - + tl_object_ptr export_tl(); tl_object_ptr export_tl_short(); td::BufferSlice export_serialized(); @@ -310,14 +315,16 @@ class OverlayFecBroadcastPart : public td::ListNode { td::Status run() { TRY_STATUS(run_checks()); TRY_STATUS(apply()); - if(!untrusted_) { + if (!untrusted_ || bcast_->is_checked()) { TRY_STATUS(distribute()); } return td::Status::OK(); } - static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, tl_object_ptr broadcast); - static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, tl_object_ptr broadcast); + static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, + tl_object_ptr broadcast); + static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, + tl_object_ptr broadcast); static td::Status create_new(OverlayImpl *overlay, td::actor::ActorId overlay_actor_id, PublicKeyHash local_id, Overlay::BroadcastDataHash data_hash, td::uint32 size, td::uint32 flags, td::BufferSlice part, td::uint32 seqno, fec::FecType fec_type, diff --git a/recent_changelog.md b/recent_changelog.md index 25a93c189..930fa9c9f 100644 --- a/recent_changelog.md +++ b/recent_changelog.md @@ -1,11 +1,16 @@ -## 2024.04 Update - -1. Make Jemalloc default allocator -2. Add candidate broadcasting and caching -3. Limit per address speed for external messages broadcast by reasonably large number -4. Overlay improvements: fix dropping peers in small custom overlays, fix wrong certificate on missed keyblocks -5. Extended statistics and logs for celldb usage, session stats, persistent state serialization -6. Tonlib and explorer fixes -7. Flags for precize control of Celldb: `--celldb-cache-size`, `--celldb-direct-io` and `--celldb-preload-all` -8. Add valiator-console command to stop persistent state serialization -9. 
Use `@` path separator for defining include path in fift and create-state utilities on Windows only. +## 2024.08 Update + +1. Introduction of dispatch queues, message envelopes with transaction chain metadata, and explicitly stored msg_queue size, which will be activated by `Config8.version >= 8` and new `Config8.capabilities` bits: `capStoreOutMsgQueueSize`, `capMsgMetadata`, `capDeferMessages`. +2. A number of changes to transaction executor which will be activated for `Config8.version >= 8`: + - Check mode on invalid `action_send_msg`. Ignore action if `IGNORE_ERROR` (+2) bit is set, bounce if `BOUNCE_ON_FAIL` (+16) bit is set. + - Slightly change random seed generation to fix mix of `addr_rewrite` and `addr`. + - Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent. + - Allow unfreeze through external messages. + - Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages. +3. A few issues with broadcasts were fixed: stop on receiving last piece, response to AdnlMessageCreateChannel +4. A number of fixes and improvements for emulator and tonlib: correct work with config_addr, not accepted externals, bounces, debug ops gas consumption, added version and c5 dump, fixed tonlib crashes +5. Added new flags and commands to the node, in particular `--fast-state-serializer`, `getcollatoroptionsjson`, `setcollatoroptionsjson` + +Besides the work of the core team, this update is based on the efforts of @krigga (emulator), stonfi team, in particular @dbaranovstonfi and @hey-researcher (emulator), and @loeul, @xiaoxianBoy, @simlecode (typos in comments and docs). + + diff --git a/tddb/td/db/KeyValue.h b/tddb/td/db/KeyValue.h index 4f30a272b..980e367c6 100644 --- a/tddb/td/db/KeyValue.h +++ b/tddb/td/db/KeyValue.h @@ -18,6 +18,7 @@ */ #pragma once #include "td/utils/Status.h" +#include "td/utils/Time.h" #include "td/utils/logging.h" #include namespace td { diff --git a/tddb/td/db/RocksDb.cpp b/tddb/td/db/RocksDb.cpp index f8688c006..f1af0cf79 100644 --- a/tddb/td/db/RocksDb.cpp +++ b/tddb/td/db/RocksDb.cpp @@ -258,11 +258,17 @@ Status RocksDb::flush() { Status RocksDb::begin_snapshot() { snapshot_.reset(db_->GetSnapshot()); + if (options_.snapshot_statistics) { + options_.snapshot_statistics->begin_snapshot(snapshot_.get()); + } return td::Status::OK(); } Status RocksDb::end_snapshot() { if (snapshot_) { + if (options_.snapshot_statistics) { + options_.snapshot_statistics->end_snapshot(snapshot_.get()); + } db_->ReleaseSnapshot(snapshot_.release()); } return td::Status::OK(); } @@ -271,4 +277,42 @@ Status RocksDb::end_snapshot() { RocksDb::RocksDb(std::shared_ptr db, RocksDbOptions options) : db_(std::move(db)), options_(options) { } + +void RocksDbSnapshotStatistics::begin_snapshot(const rocksdb::Snapshot *snapshot) { + auto lock = std::unique_lock(mutex_); + auto id = reinterpret_cast(snapshot); + auto ts = td::Timestamp::now().at(); + CHECK(id_to_ts_.emplace(id, ts).second); + CHECK(by_ts_.emplace(ts, id).second); +} + +void RocksDbSnapshotStatistics::end_snapshot(const rocksdb::Snapshot *snapshot) { + auto lock = std::unique_lock(mutex_); + auto id = reinterpret_cast(snapshot); + auto it = id_to_ts_.find(id); + CHECK(it != id_to_ts_.end()); + auto ts = it->second; + CHECK(by_ts_.erase(std::make_pair(ts, id)) == 1u); + CHECK(id_to_ts_.erase(id) == 1u); +} + +td::Timestamp RocksDbSnapshotStatistics::oldest_snapshot_timestamp() const { + auto lock = std::unique_lock(mutex_); + if (by_ts_.empty()) { + return {}; + } + return 
td::Timestamp::at(by_ts_.begin()->first); +} + +std::string RocksDbSnapshotStatistics::to_string() const { + td::Timestamp oldest_snapshot = oldest_snapshot_timestamp(); + double value; + if (oldest_snapshot) { + value = td::Timestamp::now().at() - oldest_snapshot.at(); + } else { + value = -1; + } + return PSTRING() << "td.rocksdb.snapshot.oldest_snapshot_ago.seconds : " << value << "\n"; +} + } // namespace td diff --git a/tddb/td/db/RocksDb.h b/tddb/td/db/RocksDb.h index 5efcd0f48..32c53a529 100644 --- a/tddb/td/db/RocksDb.h +++ b/tddb/td/db/RocksDb.h @@ -26,6 +26,12 @@ #include "td/utils/Status.h" #include "td/utils/optional.h" +#include "td/utils/Time.h" + +#include +#include +#include + namespace rocksdb { class Cache; class OptimisticTransactionDB; @@ -36,10 +42,22 @@ class Statistics; } // namespace rocksdb namespace td { +struct RocksDbSnapshotStatistics { + void begin_snapshot(const rocksdb::Snapshot *snapshot); + void end_snapshot(const rocksdb::Snapshot *snapshot); + td::Timestamp oldest_snapshot_timestamp() const; + std::string to_string() const; + + private: + mutable std::mutex mutex_; + std::map id_to_ts_; + std::set> by_ts_; +}; struct RocksDbOptions { std::shared_ptr statistics = nullptr; std::shared_ptr block_cache; // Default - one 1G cache for all RocksDb + std::shared_ptr snapshot_statistics = nullptr; bool use_direct_reads = false; }; diff --git a/tddb/td/db/utils/StreamInterface.h b/tddb/td/db/utils/StreamInterface.h index 5262f3df5..cf8f1d519 100644 --- a/tddb/td/db/utils/StreamInterface.h +++ b/tddb/td/db/utils/StreamInterface.h @@ -27,7 +27,7 @@ namespace td { // Generic stream interface // Will to hide implementations details. // CyclicBuffer, ChainBuffer, Bounded ChainBuffer, some clever writers. They all should be interchangable -// Most implementaions will assume that reading and writing may happen concurrently +// Most implementations will assume that reading and writing may happen concurrently class StreamReaderInterface { public: diff --git a/tddb/td/db/utils/StreamToFileActor.cpp b/tddb/td/db/utils/StreamToFileActor.cpp index 24202da41..5a3427d38 100644 --- a/tddb/td/db/utils/StreamToFileActor.cpp +++ b/tddb/td/db/utils/StreamToFileActor.cpp @@ -73,7 +73,7 @@ Result StreamToFileActor::do_loop() { // Also it could be useful to check error and stop immediately. 
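The snapshot statistics above are opt-in: nothing is recorded unless RocksDbOptions::snapshot_statistics is set. A minimal usage sketch, mirroring the tddb test later in this patch:

// Sketch: wiring RocksDbSnapshotStatistics into a RocksDb instance.
td::RocksDbOptions options;
options.snapshot_statistics = std::make_shared<td::RocksDbSnapshotStatistics>();
auto kv = std::make_unique<td::RocksDb>(td::RocksDb::open("/path/to/db", options).move_as_ok());
auto snapshot = kv->snapshot();                        // registered via begin_snapshot()
LOG(INFO) << options.snapshot_statistics->to_string();
// -> "td.rocksdb.snapshot.oldest_snapshot_ago.seconds : <age>"  (-1 when no snapshot is live)
snapshot.reset();                                      // unregistered via end_snapshot()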
TRY_RESULT(is_closed, is_closed()); - // Flush all data that is awailable on the at the beginning of loop + // Flush all data that is available on the at the beginning of loop TRY_STATUS(do_flush_once()); if ((sync_at_ && sync_at_.is_in_past()) || is_closed) { diff --git a/tddb/test/key_value.cpp b/tddb/test/key_value.cpp index e04e7ee90..921ad059a 100644 --- a/tddb/test/key_value.cpp +++ b/tddb/test/key_value.cpp @@ -60,9 +60,20 @@ TEST(KeyValue, simple) { ensure_value(as_slice(x), as_slice(x)); kv.reset(); - kv = std::make_unique(td::RocksDb::open(db_name.str()).move_as_ok()); + td::RocksDbOptions options; + options.snapshot_statistics = std::make_shared(); + kv = std::make_unique(td::RocksDb::open(db_name.str(), options).move_as_ok()); ensure_value("A", "HELLO"); ensure_value(as_slice(x), as_slice(x)); + + CHECK(!options.snapshot_statistics->oldest_snapshot_timestamp()); + auto snapshot = kv->snapshot(); + CHECK(options.snapshot_statistics->oldest_snapshot_timestamp()); + auto snapshot2 = kv->snapshot(); + snapshot.reset(); + CHECK(options.snapshot_statistics->oldest_snapshot_timestamp()); + snapshot2.reset(); + CHECK(!options.snapshot_statistics->oldest_snapshot_timestamp()); }; TEST(KeyValue, async_simple) { diff --git a/tdutils/td/utils/filesystem.cpp b/tdutils/td/utils/filesystem.cpp index 562a42816..b84b6b3f8 100644 --- a/tdutils/td/utils/filesystem.cpp +++ b/tdutils/td/utils/filesystem.cpp @@ -68,9 +68,14 @@ Result read_file_impl(CSlice path, int64 size, int64 offset) { return Status::Error("Failed to read file: invalid size"); } auto content = create_empty(narrow_cast(size)); - TRY_RESULT(got_size, from_file.pread(as_mutable_slice(content), offset)); - if (got_size != static_cast(size)) { - return Status::Error("Failed to read file"); + MutableSlice slice = as_mutable_slice(content); + while (!slice.empty()) { + TRY_RESULT(got_size, from_file.pread(slice, offset)); + if (got_size == 0) { + return Status::Error("Failed to read file"); + } + offset += got_size; + slice.remove_prefix(got_size); } from_file.close(); return std::move(content); @@ -103,9 +108,15 @@ Status write_file(CSlice to, Slice data, WriteFileOptions options) { TRY_STATUS(to_file.lock(FileFd::LockFlags::Write, to.str(), 10)); TRY_STATUS(to_file.truncate_to_current_position(0)); } - TRY_RESULT(written, to_file.write(data)); - if (written != size) { - return Status::Error(PSLICE() << "Failed to write file: written " << written << " bytes instead of " << size); + size_t total_written = 0; + while (!data.empty()) { + TRY_RESULT(written, to_file.write(data)); + if (written == 0) { + return Status::Error(PSLICE() << "Failed to write file: written " << total_written << " bytes instead of " + << size); + } + total_written += written; + data.remove_prefix(written); } if (options.need_sync) { TRY_STATUS(to_file.sync()); diff --git a/tdutils/td/utils/optional.h b/tdutils/td/utils/optional.h index 44575948c..7723d2c31 100644 --- a/tdutils/td/utils/optional.h +++ b/tdutils/td/utils/optional.h @@ -66,6 +66,12 @@ class optional { DCHECK(*this); return impl_.ok_ref(); } + T &value_force() { + if (!*this) { + *this = T(); + } + return value(); + } T &operator*() { return value(); } @@ -88,6 +94,14 @@ class optional { impl_.emplace(std::forward(args)...); } + bool operator==(const optional& other) const { + return (bool)*this == (bool)other && (!(bool)*this || value() == other.value()); + } + + bool operator!=(const optional& other) const { + return !(*this == other); + } + private: Result impl_; }; diff --git 
a/tdutils/td/utils/port/Stat.cpp b/tdutils/td/utils/port/Stat.cpp index 00e634381..d7ec50f75 100644 --- a/tdutils/td/utils/port/Stat.cpp +++ b/tdutils/td/utils/port/Stat.cpp @@ -413,4 +413,52 @@ Result cpu_stat() { #endif } +Result get_total_ram() { +#if TD_LINUX + TRY_RESULT(fd, FileFd::open("/proc/meminfo", FileFd::Read)); + SCOPE_EXIT { + fd.close(); + }; + constexpr int TMEM_SIZE = 10000; + char mem[TMEM_SIZE]; + TRY_RESULT(size, fd.read(MutableSlice(mem, TMEM_SIZE - 1))); + if (size >= TMEM_SIZE - 1) { + return Status::Error("Failed for read /proc/meminfo"); + } + mem[size] = 0; + const char* s = mem; + while (*s) { + const char *name_begin = s; + while (*s != 0 && *s != '\n') { + s++; + } + auto name_end = name_begin; + while (is_alpha(*name_end)) { + name_end++; + } + Slice name(name_begin, name_end); + if (name == "MemTotal") { + Slice value(name_end, s); + if (!value.empty() && value[0] == ':') { + value.remove_prefix(1); + } + value = trim(value); + value = split(value).first; + TRY_RESULT_PREFIX(mem, to_integer_safe(value), "Invalid value of MemTotal"); + if (mem >= 1ULL << (64 - 10)) { + return Status::Error("Invalid value of MemTotal"); + } + return mem * 1024; + } + if (*s == 0) { + break; + } + s++; + } + return Status::Error("No MemTotal in /proc/meminfo"); +#else + return Status::Error("Not supported"); +#endif +} + } // namespace td diff --git a/tdutils/td/utils/port/Stat.h b/tdutils/td/utils/port/Stat.h index f446c39d6..cd16ebb36 100644 --- a/tdutils/td/utils/port/Stat.h +++ b/tdutils/td/utils/port/Stat.h @@ -64,4 +64,6 @@ Status update_atime(CSlice path) TD_WARN_UNUSED_RESULT; #endif +Result get_total_ram() TD_WARN_UNUSED_RESULT; + } // namespace td diff --git a/tl-utils/lite-utils.cpp b/tl-utils/lite-utils.cpp index 9ea7756a7..daa3dbaf0 100644 --- a/tl-utils/lite-utils.cpp +++ b/tl-utils/lite-utils.cpp @@ -159,6 +159,7 @@ std::string lite_query_name_by_id(int id) { {lite_api::liteServer_getLibrariesWithProof::ID, "getLibrariesWithProof"}, {lite_api::liteServer_getShardBlockProof::ID, "getShardBlockProof"}, {lite_api::liteServer_getOutMsgQueueSizes::ID, "getOutMsgQueueSizes"}, + {lite_api::liteServer_getBlockOutMsgQueueSize::ID, "getBlockOutMsgQueueSize"}, {lite_api::liteServer_nonfinal_getCandidate::ID, "nonfinal.getCandidate"}, {lite_api::liteServer_nonfinal_getValidatorGroups::ID, "nonfinal.getValidatorGroups"}}; auto it = names.find(id); diff --git a/tl/generate/scheme/lite_api.tl b/tl/generate/scheme/lite_api.tl index 6d91be8f7..879d7ff4a 100644 --- a/tl/generate/scheme/lite_api.tl +++ b/tl/generate/scheme/lite_api.tl @@ -41,7 +41,8 @@ liteServer.shardInfo id:tonNode.blockIdExt shardblk:tonNode.blockIdExt shard_pro liteServer.allShardsInfo id:tonNode.blockIdExt proof:bytes data:bytes = liteServer.AllShardsInfo; liteServer.transactionInfo id:tonNode.blockIdExt proof:bytes transaction:bytes = liteServer.TransactionInfo; liteServer.transactionList ids:(vector tonNode.blockIdExt) transactions:bytes = liteServer.TransactionList; -liteServer.transactionId mode:# account:mode.0?int256 lt:mode.1?long hash:mode.2?int256 = liteServer.TransactionId; +liteServer.transactionMetadata mode:# depth:int initiator:liteServer.accountId initiator_lt:long = liteServer.TransactionMetadata; +liteServer.transactionId#b12f65af mode:# account:mode.0?int256 lt:mode.1?long hash:mode.2?int256 metadata:mode.8?liteServer.transactionMetadata = liteServer.TransactionId; liteServer.transactionId3 account:int256 lt:long = liteServer.TransactionId3; liteServer.blockTransactions id:tonNode.blockIdExt 
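For context on the get_total_ram() helper above: /proc/meminfo reports MemTotal in kibibytes, which is why the parsed value is multiplied by 1024 and rejected if it is large enough to overflow after scaling. Illustrative numbers only:

// Sketch: the line get_total_ram() parses looks like
//   MemTotal:       32839168 kB
td::uint64 mem_kib   = 32839168;            // integer parsed after "MemTotal:"
td::uint64 mem_bytes = mem_kib * 1024;      // 33627308032 bytes, roughly 31.3 GiB
// values >= 1ULL << 54 (in kB) are rejected above, since * 1024 would overflow 64 bits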
req_count:# incomplete:Bool ids:(vector liteServer.transactionId) proof:bytes = liteServer.BlockTransactions; liteServer.blockTransactionsExt id:tonNode.blockIdExt req_count:# incomplete:Bool transactions:bytes proof:bytes = liteServer.BlockTransactionsExt; @@ -59,6 +60,7 @@ liteServer.shardBlockProof masterchain_id:tonNode.blockIdExt links:(vector liteS liteServer.lookupBlockResult id:tonNode.blockIdExt mode:# mc_block_id:tonNode.blockIdExt client_mc_state_proof:bytes mc_block_proof:bytes shard_links:(vector liteServer.shardBlockLink) header:bytes prev_header:bytes = liteServer.LookupBlockResult; liteServer.outMsgQueueSize id:tonNode.blockIdExt size:int = liteServer.OutMsgQueueSize; liteServer.outMsgQueueSizes shards:(vector liteServer.outMsgQueueSize) ext_msg_queue_size_limit:int = liteServer.OutMsgQueueSizes; +liteServer.blockOutMsgQueueSize mode:# id:tonNode.blockIdExt size:long proof:mode.0?bytes = liteServer.BlockOutMsgQueueSize; liteServer.debug.verbosity value:int = liteServer.debug.Verbosity; @@ -97,6 +99,7 @@ liteServer.getLibraries library_list:(vector int256) = liteServer.LibraryResult; liteServer.getLibrariesWithProof id:tonNode.blockIdExt mode:# library_list:(vector int256) = liteServer.LibraryResultWithProof; liteServer.getShardBlockProof id:tonNode.blockIdExt = liteServer.ShardBlockProof; liteServer.getOutMsgQueueSizes mode:# wc:mode.0?int shard:mode.0?long = liteServer.OutMsgQueueSizes; +liteServer.getBlockOutMsgQueueSize mode:# id:tonNode.blockIdExt want_proof:mode.0?true = liteServer.BlockOutMsgQueueSize; liteServer.nonfinal.getValidatorGroups mode:# wc:mode.0?int shard:mode.0?long = liteServer.nonfinal.ValidatorGroups; liteServer.nonfinal.getCandidate id:liteServer.nonfinal.candidateId = liteServer.nonfinal.Candidate; diff --git a/tl/generate/scheme/lite_api.tlo b/tl/generate/scheme/lite_api.tlo index d6e65b184..6ece1d20f 100644 Binary files a/tl/generate/scheme/lite_api.tlo and b/tl/generate/scheme/lite_api.tlo differ diff --git a/tl/generate/scheme/ton_api.tl b/tl/generate/scheme/ton_api.tl index 58691ffd7..b33ca5425 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -606,6 +606,11 @@ engine.validator.customOverlayNode adnl_id:int256 msg_sender:Bool msg_sender_pri engine.validator.customOverlay name:string nodes:(vector engine.validator.customOverlayNode) = engine.validator.CustomOverlay; engine.validator.customOverlaysConfig overlays:(vector engine.validator.customOverlay) = engine.validator.CustomOverlaysConfig; +engine.validator.collatorOptions + deferring_enabled:Bool defer_messages_after:int defer_out_queue_size_limit:long + dispatch_phase_2_max_total:int dispatch_phase_3_max_total:int + dispatch_phase_2_max_per_initiator:int dispatch_phase_3_max_per_initiator:int = engine.validator.CollatorOptions; + ---functions--- ---types--- @@ -653,7 +658,7 @@ engine.validator.onePerfTimerStat time:int min:double avg:double max:double = en engine.validator.perfTimerStatsByName name:string stats:(vector engine.validator.OnePerfTimerStat) = engine.validator.PerfTimerStatsByName; engine.validator.perfTimerStats stats:(vector engine.validator.PerfTimerStatsByName) = engine.validator.PerfTimerStats; -engine.validator.shardOutQueueSize size:int = engine.validator.ShardOutQueueSize; +engine.validator.shardOutQueueSize size:long = engine.validator.ShardOutQueueSize; ---functions--- @@ -715,6 +720,9 @@ engine.validator.showCustomOverlays = engine.validator.CustomOverlaysConfig; engine.validator.setStateSerializerEnabled enabled:Bool = 
engine.validator.Success; +engine.validator.setCollatorOptionsJson json:string = engine.validator.Success; +engine.validator.getCollatorOptionsJson = engine.validator.JsonConfig; + ---types--- storage.pong = storage.Pong; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index ad6bb0c99..da1aa331d 100644 Binary files a/tl/generate/scheme/ton_api.tlo and b/tl/generate/scheme/ton_api.tlo differ diff --git a/tl/generate/scheme/tonlib_api.tl b/tl/generate/scheme/tonlib_api.tl index a6172376a..6cf40d005 100644 --- a/tl/generate/scheme/tonlib_api.tl +++ b/tl/generate/scheme/tonlib_api.tl @@ -230,6 +230,8 @@ blocks.blockSignatures id:ton.blockIdExt signatures:(vector blocks.signature) = blocks.shardBlockLink id:ton.blockIdExt proof:bytes = blocks.ShardBlockLink; blocks.blockLinkBack to_key_block:Bool from:ton.blockIdExt to:ton.blockIdExt dest_proof:bytes proof:bytes state_proof:bytes = blocks.BlockLinkBack; blocks.shardBlockProof from:ton.blockIdExt mc_id:ton.blockIdExt links:(vector blocks.shardBlockLink) mc_proof:(vector blocks.blockLinkBack) = blocks.ShardBlockProof; +blocks.outMsgQueueSize id:ton.blockIdExt size:int32 = blocks.OutMsgQueueSize; +blocks.outMsgQueueSizes shards:(vector blocks.outMsgQueueSize) ext_msg_queue_size_limit:int32 = blocks.OutMsgQueueSizes; configInfo config:tvm.cell = ConfigInfo; @@ -309,6 +311,7 @@ smc.forget id:int53 = Ok; smc.getCode id:int53 = tvm.Cell; smc.getData id:int53 = tvm.Cell; smc.getState id:int53 = tvm.Cell; +smc.getRawFullAccountState id:int53 = raw.FullAccountState; smc.runGetMethod id:int53 method:smc.MethodId stack:vector = smc.RunResult; smc.getLibraries library_list:(vector int256) = smc.LibraryResult; @@ -331,6 +334,7 @@ blocks.getTransactionsExt id:ton.blockIdExt mode:# count:# after:blocks.accountT blocks.getBlockHeader id:ton.blockIdExt = blocks.Header; blocks.getMasterchainBlockSignatures seqno:int32 = blocks.BlockSignatures; blocks.getShardBlockProof id:ton.blockIdExt mode:# from:mode.0?ton.blockIdExt = blocks.ShardBlockProof; +blocks.getOutMsgQueueSizes mode:# wc:mode.0?int32 shard:mode.0?int64 = blocks.OutMsgQueueSizes; onLiteServerQueryResult id:int64 bytes:bytes = Ok; onLiteServerQueryError id:int64 error:error = Ok; diff --git a/tl/generate/scheme/tonlib_api.tlo b/tl/generate/scheme/tonlib_api.tlo index 7657852ea..686bd9181 100644 Binary files a/tl/generate/scheme/tonlib_api.tlo and b/tl/generate/scheme/tonlib_api.tlo differ diff --git a/ton/ton-types.h b/ton/ton-types.h index 3f40fb656..915682655 100644 --- a/ton/ton-types.h +++ b/ton/ton-types.h @@ -57,7 +57,10 @@ enum GlobalCapabilities { capBounceMsgBody = 4, capReportVersion = 8, capSplitMergeTransactions = 16, - capShortDequeue = 32 + capShortDequeue = 32, + capStoreOutMsgQueueSize = 64, + capMsgMetadata = 128, + capDeferMessages = 256 }; inline int shard_pfx_len(ShardId shard) { diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 39029ca4e..f62b9ae47 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -1971,7 +1971,7 @@ class RunEmulator : public TonlibQueryActor { check(r_config.move_as_error()); return; } - std::unique_ptr config = r_config.move_as_ok(); + std::shared_ptr config = r_config.move_as_ok(); auto r_shard_account = account_state_->to_shardAccountCellSlice(); if (r_shard_account.is_error()) { @@ -1995,7 +1995,7 @@ class RunEmulator : public TonlibQueryActor { return; } vm::Dictionary libraries = global_libraries_; - emulator::TransactionEmulator trans_emulator(std::move(*config)); 
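The three new GlobalCapabilities values above follow the existing power-of-two layout of Config8.capabilities, so a node or tool can test them with plain bit masks. Sketch:

// Sketch: checking the capability bits introduced in this update (values from ton-types.h above).
long long capabilities = /* Config8.capabilities from the masterchain config */ 0;
bool store_queue_size = (capabilities & ton::capStoreOutMsgQueueSize) != 0;  // 64
bool msg_metadata     = (capabilities & ton::capMsgMetadata) != 0;           // 128
bool defer_messages   = (capabilities & ton::capDeferMessages) != 0;         // 256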
+ emulator::TransactionEmulator trans_emulator(config); trans_emulator.set_prev_blocks_info(prev_blocks_info.move_as_ok()); trans_emulator.set_libs(std::move(libraries)); trans_emulator.set_rand_seed(block_id_.rand_seed); @@ -4360,6 +4360,17 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_getState& request, return td::Status::OK(); } +td::Status TonlibClient::do_request(const tonlib_api::smc_getRawFullAccountState& request, + td::Promise>&& promise) { + auto it = smcs_.find(request.id_); + if (it == smcs_.end()) { + return TonlibError::InvalidSmcId(); + } + auto& acc = it->second; + promise.set_result(acc->to_raw_fullAccountState()); + return td::Status::OK(); +} + bool is_list(vm::StackEntry entry) { while (true) { if (entry.type() == vm::StackEntry::Type::t_null) { @@ -5563,6 +5574,11 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_lookupBlock& reques client_.with_last_block( [self = this, blkid, lite_block = std::move(lite_block), mode = request.mode_, lt = (td::uint64)request.lt_, utime = (td::uint32)request.utime_, promise = std::move(promise)](td::Result r_last_block) mutable { + if (r_last_block.is_error()) { + promise.set_error(r_last_block.move_as_error_prefix(TonlibError::Internal("get last block failed "))); + return; + } + self->client_.send_query(ton::lite_api::liteServer_lookupBlockWithProof(mode, std::move(lite_block), ton::create_tl_lite_block_id(r_last_block.ok().last_block_id), lt, utime), promise.wrap([blkid, mode, utime, lt, last_block = r_last_block.ok().last_block_id](lite_api_ptr&& result) -> td::Result> { @@ -6031,6 +6047,24 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_getShardBlockProof& return td::Status::OK(); } +td::Status TonlibClient::do_request(const tonlib_api::blocks_getOutMsgQueueSizes& request, + td::Promise>&& promise) { + client_.send_query(ton::lite_api::liteServer_getOutMsgQueueSizes(request.mode_, request.wc_, request.shard_), + promise.wrap([](lite_api_ptr&& queue_sizes) { + tonlib_api::blocks_outMsgQueueSizes result; + result.ext_msg_queue_size_limit_ = queue_sizes->ext_msg_queue_size_limit_; + for (auto &x : queue_sizes->shards_) { + tonlib_api::blocks_outMsgQueueSize shard; + shard.id_ = to_tonlib_api(*x->id_); + shard.size_ = x->size_; + result.shards_.push_back(tonlib_api::make_object(std::move(shard))); + } + return tonlib_api::make_object(std::move(result)); + })); + + return td::Status::OK(); +} + void TonlibClient::load_libs_from_disk() { LOG(DEBUG) << "loading libraries from disk cache"; auto r_data = kv_->get("tonlib.libcache"); diff --git a/tonlib/tonlib/TonlibClient.h b/tonlib/tonlib/TonlibClient.h index 001df748c..7db443247 100644 --- a/tonlib/tonlib/TonlibClient.h +++ b/tonlib/tonlib/TonlibClient.h @@ -324,6 +324,8 @@ class TonlibClient : public td::actor::Actor { td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_getState& request, td::Promise>&& promise); + td::Status do_request(const tonlib_api::smc_getRawFullAccountState& request, + td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_runGetMethod& request, td::Promise>&& promise); @@ -390,6 +392,8 @@ class TonlibClient : public td::actor::Actor { td::Promise>&& promise); td::Status do_request(const tonlib_api::blocks_getShardBlockProof& request, td::Promise>&& promise); + td::Status do_request(const tonlib_api::blocks_getOutMsgQueueSizes& request, + td::Promise>&& promise); void get_config_param(int32_t param, int32_t mode, ton::BlockIdExt block, td::Promise>&& promise); diff --git 
a/tonlib/tonlib/tonlib-cli.cpp b/tonlib/tonlib/tonlib-cli.cpp index 5a32b50f3..f955adb5a 100644 --- a/tonlib/tonlib/tonlib-cli.cpp +++ b/tonlib/tonlib/tonlib-cli.cpp @@ -1361,10 +1361,10 @@ class TonlibCli : public td::actor::Actor { } if (l >= 3 && (str[0] == 'x' || str[0] == 'b') && str[1] == '{' && str.back() == '}') { unsigned char buff[128]; - int bits = - (str[0] == 'x') - ? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1) - : (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1); + int bits = (str[0] == 'x') ? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, + str.end() - 1) + : (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, + str.begin() + 2, str.end() - 1); if (bits < 0) { return td::Status::Error("Failed to parse slice"); } diff --git a/validator-engine-console/validator-engine-console-query.cpp b/validator-engine-console/validator-engine-console-query.cpp index 5385d2e6c..41721ab96 100644 --- a/validator-engine-console/validator-engine-console-query.cpp +++ b/validator-engine-console/validator-engine-console-query.cpp @@ -1203,3 +1203,63 @@ td::Status SetStateSerializerEnabledQuery::receive(td::BufferSlice data) { td::TerminalIO::out() << "success\n"; return td::Status::OK(); } + +td::Status SetCollatorOptionsJsonQuery::run() { + TRY_RESULT_ASSIGN(file_name_, tokenizer_.get_token()); + TRY_STATUS(tokenizer_.check_endl()); + return td::Status::OK(); +} + +td::Status SetCollatorOptionsJsonQuery::send() { + TRY_RESULT(data, td::read_file(file_name_)); + auto b = + ton::create_serialize_tl_object(data.as_slice().str()); + td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise()); + return td::Status::OK(); +} + +td::Status SetCollatorOptionsJsonQuery::receive(td::BufferSlice data) { + TRY_RESULT_PREFIX(f, ton::fetch_tl_object(data.as_slice(), true), + "received incorrect answer: "); + td::TerminalIO::out() << "success\n"; + return td::Status::OK(); +} + +td::Status ResetCollatorOptionsQuery::run() { + TRY_STATUS(tokenizer_.check_endl()); + return td::Status::OK(); +} + +td::Status ResetCollatorOptionsQuery::send() { + auto b = ton::create_serialize_tl_object("{}"); + td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise()); + return td::Status::OK(); +} + +td::Status ResetCollatorOptionsQuery::receive(td::BufferSlice data) { + TRY_RESULT_PREFIX(f, ton::fetch_tl_object(data.as_slice(), true), + "received incorrect answer: "); + td::TerminalIO::out() << "success\n"; + return td::Status::OK(); +} + +td::Status GetCollatorOptionsJsonQuery::run() { + TRY_RESULT_ASSIGN(file_name_, tokenizer_.get_token()); + TRY_STATUS(tokenizer_.check_endl()); + return td::Status::OK(); +} + +td::Status GetCollatorOptionsJsonQuery::send() { + auto b = + ton::create_serialize_tl_object(); + td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise()); + return td::Status::OK(); +} + +td::Status GetCollatorOptionsJsonQuery::receive(td::BufferSlice data) { + TRY_RESULT_PREFIX(f, ton::fetch_tl_object(data.as_slice(), true), + "received incorrect answer: "); + TRY_STATUS(td::write_file(file_name_, f->data_)); + td::TerminalIO::out() << "saved config to " << file_name_ << "\n"; + return td::Status::OK(); +} diff --git a/validator-engine-console/validator-engine-console-query.h 
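The tonlib-cli change above fixes a unit mismatch: judging by this diff, parse_bitstring_hex_literal takes the destination capacity in bytes while parse_bitstring_binary_literal takes it in bits, so passing sizeof(buff) to the binary parser capped b{...} literals at 128 bits instead of the 1024 bits the buffer can hold. A sketch of the corrected pair of calls (argument layout as used above):

// Sketch: capacity units for the two bitstring literal parsers.
unsigned char buff[128];
td::Slice hex = "ABCD", bin = "1010";
// x{...} hex literal: capacity passed in BYTES
int hex_bits = (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), hex.begin(), hex.end());
// b{...} binary literal: capacity passed in BITS, hence sizeof(buff) * 8
int bin_bits = (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, bin.begin(), bin.end());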
b/validator-engine-console/validator-engine-console-query.h index 3047350fe..08ac1572a 100644 --- a/validator-engine-console/validator-engine-console-query.h +++ b/validator-engine-console/validator-engine-console-query.h @@ -1229,3 +1229,66 @@ class SetStateSerializerEnabledQuery : public Query { private: bool enabled_; }; + +class SetCollatorOptionsJsonQuery : public Query { + public: + SetCollatorOptionsJsonQuery(td::actor::ActorId console, Tokenizer tokenizer) + : Query(console, std::move(tokenizer)) { + } + td::Status run() override; + td::Status send() override; + td::Status receive(td::BufferSlice data) override; + static std::string get_name() { + return "setcollatoroptionsjson"; + } + static std::string get_help() { + return "setcollatoroptionsjson \tset collator options from file "; + } + std::string name() const override { + return get_name(); + } + + private: + std::string file_name_; +}; + +class ResetCollatorOptionsQuery : public Query { + public: + ResetCollatorOptionsQuery(td::actor::ActorId console, Tokenizer tokenizer) + : Query(console, std::move(tokenizer)) { + } + td::Status run() override; + td::Status send() override; + td::Status receive(td::BufferSlice data) override; + static std::string get_name() { + return "resetcollatoroptions"; + } + static std::string get_help() { + return "resetcollatoroptions\tset collator options to default values"; + } + std::string name() const override { + return get_name(); + } +}; + +class GetCollatorOptionsJsonQuery : public Query { + public: + GetCollatorOptionsJsonQuery(td::actor::ActorId console, Tokenizer tokenizer) + : Query(console, std::move(tokenizer)) { + } + td::Status run() override; + td::Status send() override; + td::Status receive(td::BufferSlice data) override; + static std::string get_name() { + return "getcollatoroptionsjson"; + } + static std::string get_help() { + return "getcollatoroptionsjson \tsave current collator options to file "; + } + std::string name() const override { + return get_name(); + } + + private: + std::string file_name_; +}; diff --git a/validator-engine-console/validator-engine-console.cpp b/validator-engine-console/validator-engine-console.cpp index 4878a292f..d8a230801 100644 --- a/validator-engine-console/validator-engine-console.cpp +++ b/validator-engine-console/validator-engine-console.cpp @@ -147,6 +147,9 @@ void ValidatorEngineConsole::run() { add_query_runner(std::make_unique>()); add_query_runner(std::make_unique>()); add_query_runner(std::make_unique>()); + add_query_runner(std::make_unique>()); + add_query_runner(std::make_unique>()); + add_query_runner(std::make_unique>()); } bool ValidatorEngineConsole::envelope_send_query(td::BufferSlice query, td::Promise promise) { diff --git a/validator-engine/CMakeLists.txt b/validator-engine/CMakeLists.txt index 5df720fe6..73949d808 100644 --- a/validator-engine/CMakeLists.txt +++ b/validator-engine/CMakeLists.txt @@ -14,5 +14,9 @@ add_executable(validator-engine ${VALIDATOR_ENGINE_SOURCE}) target_link_libraries(validator-engine overlay tdutils tdactor adnl tl_api dht rldp rldp2 catchain validatorsession full-node validator ton_validator validator fift-lib memprof git ${JEMALLOC_LIBRARIES}) +if (JEMALLOC_FOUND) + target_include_directories(validator-engine PRIVATE ${JEMALLOC_INCLUDE_DIR}) + target_compile_definitions(validator-engine PRIVATE -DTON_USE_JEMALLOC=1) +endif() install(TARGETS validator-engine RUNTIME DESTINATION bin) diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp index 
897e3c53b..88cef8d49 100644 --- a/validator-engine/validator-engine.cpp +++ b/validator-engine/validator-engine.cpp @@ -75,6 +75,10 @@ #include "block/precompiled-smc/PrecompiledSmartContract.h" #include "interfaces/validator-manager.h" +#if TON_USE_JEMALLOC +#include +#endif + Config::Config() { out_port = 3278; full_node = ton::PublicKeyHash::zero(); @@ -1179,6 +1183,55 @@ class CheckDhtServerStatusQuery : public td::actor::Actor { td::Promise promise_; }; +#if TON_USE_JEMALLOC +class JemallocStatsWriter : public td::actor::Actor { + public: + void start_up() override { + alarm(); + } + + void alarm() override { + alarm_timestamp() = td::Timestamp::in(60.0); + auto r_stats = get_stats(); + if (r_stats.is_error()) { + LOG(WARNING) << "Jemalloc stats error : " << r_stats.move_as_error(); + } else { + auto s = r_stats.move_as_ok(); + LOG(WARNING) << "JEMALLOC_STATS : [ timestamp=" << (ton::UnixTime)td::Clocks::system() + << " allocated=" << s.allocated << " active=" << s.active << " metadata=" << s.metadata + << " resident=" << s.resident << " ]"; + } + } + + private: + struct JemallocStats { + size_t allocated, active, metadata, resident; + }; + + static td::Result get_stats() { + size_t sz = sizeof(size_t); + static size_t epoch = 1; + if (mallctl("epoch", &epoch, &sz, &epoch, sz)) { + return td::Status::Error("Failed to refresh stats"); + } + JemallocStats stats; + if (mallctl("stats.allocated", &stats.allocated, &sz, nullptr, 0)) { + return td::Status::Error("Cannot get stats.allocated"); + } + if (mallctl("stats.active", &stats.active, &sz, nullptr, 0)) { + return td::Status::Error("Cannot get stats.active"); + } + if (mallctl("stats.metadata", &stats.metadata, &sz, nullptr, 0)) { + return td::Status::Error("Cannot get stats.metadata"); + } + if (mallctl("stats.resident", &stats.resident, &sz, nullptr, 0)) { + return td::Status::Error("Cannot get stats.resident"); + } + return stats; + } +}; +#endif + void ValidatorEngine::set_local_config(std::string str) { local_config_ = str; } @@ -1202,6 +1255,9 @@ void ValidatorEngine::schedule_shutdown(double at) { } void ValidatorEngine::start_up() { alarm_timestamp() = td::Timestamp::in(1.0 + td::Random::fast(0, 100) * 0.01); +#if TON_USE_JEMALLOC + td::actor::create_actor("mem-stat").release(); +#endif } void ValidatorEngine::alarm() { @@ -1411,7 +1467,18 @@ td::Status ValidatorEngine::load_global_config() { h.push_back(b); } validator_options_.write().set_hardforks(std::move(h)); - validator_options_.write().set_state_serializer_enabled(config_.state_serializer_enabled); + + auto r_total_ram = td::get_total_ram(); + if (r_total_ram.is_error()) { + LOG(ERROR) << "Failed to get total RAM size: " << r_total_ram.move_as_error(); + } else { + td::uint64 total_ram = r_total_ram.move_as_ok(); + LOG(WARNING) << "Total RAM = " << td::format::as_size(total_ram); + if (total_ram >= (90ULL << 30)) { + fast_state_serializer_enabled_ = true; + } + } + validator_options_.write().set_fast_state_serializer_enabled(fast_state_serializer_enabled_); return td::Status::OK(); } @@ -1823,6 +1890,9 @@ void ValidatorEngine::started_overlays() { void ValidatorEngine::start_validator() { validator_options_.write().set_allow_blockchain_init(config_.validators.size() > 0); + validator_options_.write().set_state_serializer_enabled(config_.state_serializer_enabled); + load_collator_options(); + validator_manager_ = ton::validator::ValidatorManagerFactory::create( validator_options_, db_root_, keyring_.get(), adnl_.get(), rldp_.get(), overlay_manager_.get()); @@ -2413,6 
+2483,69 @@ void ValidatorEngine::del_custom_overlay_from_config(std::string name, td::Promi promise.set_error(td::Status::Error(PSTRING() << "no overlay \"" << name << "\" in config")); } +static td::Result> parse_collator_options(td::MutableSlice json_str) { + td::Ref ref{true}; + ton::validator::CollatorOptions& opts = ref.write(); + + // Set default values (from_json leaves missing fields as is) + ton::ton_api::engine_validator_collatorOptions f; + f.deferring_enabled_ = opts.deferring_enabled; + f.defer_out_queue_size_limit_ = opts.defer_out_queue_size_limit; + f.defer_messages_after_ = opts.defer_messages_after; + f.dispatch_phase_2_max_total_ = opts.dispatch_phase_2_max_total; + f.dispatch_phase_3_max_total_ = opts.dispatch_phase_3_max_total; + f.dispatch_phase_2_max_per_initiator_ = opts.dispatch_phase_2_max_per_initiator; + f.dispatch_phase_3_max_per_initiator_ = + opts.dispatch_phase_3_max_per_initiator ? opts.dispatch_phase_3_max_per_initiator.value() : -1; + + TRY_RESULT_PREFIX(json, td::json_decode(json_str), "failed to parse json: "); + TRY_STATUS_PREFIX(ton::ton_api::from_json(f, json.get_object()), "json does not fit TL scheme: "); + + if (f.defer_messages_after_ <= 0) { + return td::Status::Error("defer_messages_after should be positive"); + } + if (f.defer_out_queue_size_limit_ < 0) { + return td::Status::Error("defer_out_queue_size_limit should be non-negative"); + } + if (f.dispatch_phase_2_max_total_ < 0) { + return td::Status::Error("dispatch_phase_2_max_total should be non-negative"); + } + if (f.dispatch_phase_3_max_total_ < 0) { + return td::Status::Error("dispatch_phase_3_max_total should be non-negative"); + } + if (f.dispatch_phase_2_max_per_initiator_ < 0) { + return td::Status::Error("dispatch_phase_2_max_per_initiator should be non-negative"); + } + + opts.deferring_enabled = f.deferring_enabled_; + opts.defer_messages_after = f.defer_messages_after_; + opts.defer_out_queue_size_limit = f.defer_out_queue_size_limit_; + opts.dispatch_phase_2_max_total = f.dispatch_phase_2_max_total_; + opts.dispatch_phase_3_max_total = f.dispatch_phase_3_max_total_; + opts.dispatch_phase_2_max_per_initiator = f.dispatch_phase_2_max_per_initiator_; + if (f.dispatch_phase_3_max_per_initiator_ >= 0) { + opts.dispatch_phase_3_max_per_initiator = f.dispatch_phase_3_max_per_initiator_; + } else { + opts.dispatch_phase_3_max_per_initiator = {}; + } + + return ref; +} + +void ValidatorEngine::load_collator_options() { + auto r_data = td::read_file(collator_options_file()); + if (r_data.is_error()) { + return; + } + td::BufferSlice data = r_data.move_as_ok(); + auto r_collator_options = parse_collator_options(data.as_slice()); + if (r_collator_options.is_error()) { + LOG(ERROR) << "Failed to read collator options from file: " << r_collator_options.move_as_error(); + return; + } + validator_options_.write().set_collator_options(r_collator_options.move_as_ok()); +} + void ValidatorEngine::check_key(ton::PublicKeyHash id, td::Promise promise) { if (keys_.count(id) == 1) { promise.set_value(td::Unit()); @@ -3492,7 +3625,7 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getShardO if (!dest) { td::actor::send_closure( manager, &ton::validator::ValidatorManagerInterface::get_out_msg_queue_size, handle->id(), - [promise = std::move(promise)](td::Result R) mutable { + [promise = std::move(promise)](td::Result R) mutable { if (R.is_error()) { promise.set_value(create_control_query_error(R.move_as_error_prefix("failed to get queue size: "))); } else { @@ -3683,6 +3816,53 
@@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setStateS }); } +void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setCollatorOptionsJson &query, + td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm, + td::Promise promise) { + if (!(perm & ValidatorEnginePermissions::vep_modify)) { + promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::error, "not authorized"))); + return; + } + if (!started_) { + promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "not started"))); + return; + } + auto r_collator_options = parse_collator_options(query.json_); + if (r_collator_options.is_error()) { + promise.set_value(create_control_query_error(r_collator_options.move_as_error_prefix("failed to parse json: "))); + return; + } + auto S = td::write_file(collator_options_file(), query.json_); + if (S.is_error()) { + promise.set_value(create_control_query_error(S.move_as_error_prefix("failed to write file: "))); + return; + } + validator_options_.write().set_collator_options(r_collator_options.move_as_ok()); + td::actor::send_closure(validator_manager_, &ton::validator::ValidatorManagerInterface::update_options, + validator_options_); + promise.set_value(ton::create_serialize_tl_object()); +} + +void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getCollatorOptionsJson &query, + td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm, + td::Promise promise) { + if (!(perm & ValidatorEnginePermissions::vep_default)) { + promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::error, "not authorized"))); + return; + } + if (!started_) { + promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "not started"))); + return; + } + auto r_data = td::read_file(collator_options_file()); + if (r_data.is_error()) { + promise.set_value(ton::create_serialize_tl_object("{}")); + } else { + promise.set_value( + ton::create_serialize_tl_object(r_data.ok().as_slice().str())); + } +} + void ValidatorEngine::process_control_query(td::uint16 port, ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, td::Promise promise) { @@ -3782,7 +3962,7 @@ void need_scheduler_status(int sig) { need_scheduler_status_flag.store(true); } -void dump_memory_stats() { +void dump_memprof_stats() { if (!is_memprof_on()) { return; } @@ -3807,8 +3987,20 @@ LOG(WARNING) << td::tag("fast_backtrace_success_rate", get_fast_backtrace_success_rate()); } +void dump_jemalloc_prof() { +#if TON_USE_JEMALLOC + const char *filename = "/tmp/validator-jemalloc.dump"; + if (mallctl("prof.dump", nullptr, nullptr, &filename, sizeof(const char *)) == 0) { + LOG(ERROR) << "Written jemalloc dump to " << filename; + } else { + LOG(ERROR) << "Failed to write jemalloc dump to " << filename; + } +#endif +} + void dump_stats() { - dump_memory_stats(); + dump_memprof_stats(); + dump_jemalloc_prof(); LOG(WARNING) << td::NamedThreadSafeCounter::get_default(); } @@ -4046,6 +4238,13 @@ int main(int argc, char *argv[]) { acts.push_back([&x, v]() { td::actor::send_closure(x, &ValidatorEngine::set_catchain_max_block_delay, v); }); return td::Status::OK(); }); + p.add_option( + '\0', "fast-state-serializer", + "faster persistent state serializer, but requires more RAM (enabled automatically on machines with >= 90GB RAM)", + [&]() { + acts.push_back( + [&x]() { td::actor::send_closure(x, 
&ValidatorEngine::set_fast_state_serializer_enabled, true); }); + }); auto S = p.run(argc, argv); if (S.is_error()) { LOG(ERROR) << "failed to parse options: " << S.move_as_error(); diff --git a/validator-engine/validator-engine.hpp b/validator-engine/validator-engine.hpp index 8adc8d9a7..b00c97130 100644 --- a/validator-engine/validator-engine.hpp +++ b/validator-engine/validator-engine.hpp @@ -220,6 +220,7 @@ class ValidatorEngine : public td::actor::Actor { bool started_ = false; ton::BlockSeqno truncate_seqno_{0}; std::string session_logs_file_; + bool fast_state_serializer_enabled_ = false; std::set unsafe_catchains_; std::map> unsafe_catchain_rotations_; @@ -299,6 +300,9 @@ class ValidatorEngine : public td::actor::Actor { void set_catchain_max_block_delay(double value) { catchain_max_block_delay_ = value; } + void set_fast_state_serializer_enabled(bool value) { + fast_state_serializer_enabled_ = value; + } void start_up() override; ValidatorEngine() { } @@ -384,12 +388,16 @@ class ValidatorEngine : public td::actor::Actor { std::string custom_overlays_config_file() const { return db_root_ + "/custom-overlays.json"; } + std::string collator_options_file() const { + return db_root_ + "/collator-options.json"; + } void load_custom_overlays_config(); td::Status write_custom_overlays_config(); void add_custom_overlay_to_config( ton::tl_object_ptr overlay, td::Promise promise); void del_custom_overlay_from_config(std::string name, td::Promise promise); + void load_collator_options(); void check_key(ton::PublicKeyHash id, td::Promise promise); @@ -477,6 +485,10 @@ class ValidatorEngine : public td::actor::Actor { ton::PublicKeyHash src, td::uint32 perm, td::Promise promise); void run_control_query(ton::ton_api::engine_validator_setStateSerializerEnabled &query, td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm, td::Promise promise); + void run_control_query(ton::ton_api::engine_validator_setCollatorOptionsJson &query, td::BufferSlice data, + ton::PublicKeyHash src, td::uint32 perm, td::Promise promise); + void run_control_query(ton::ton_api::engine_validator_getCollatorOptionsJson &query, td::BufferSlice data, + ton::PublicKeyHash src, td::uint32 perm, td::Promise promise); template void run_control_query(T &query, td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm, td::Promise promise) { diff --git a/validator-session/validator-session.hpp b/validator-session/validator-session.hpp index b6a9ab0ca..580582824 100644 --- a/validator-session/validator-session.hpp +++ b/validator-session/validator-session.hpp @@ -90,7 +90,7 @@ class ValidatorSessionImpl : public ValidatorSession { td::actor::ActorOwn catchain_; std::unique_ptr description_; - double catchain_max_block_delay_ = 0.5; + double catchain_max_block_delay_ = 0.4; void on_new_round(td::uint32 round); void on_catchain_started(); diff --git a/validator/db/archive-manager.cpp b/validator/db/archive-manager.cpp index b87d04f78..14d3ec469 100644 --- a/validator/db/archive-manager.cpp +++ b/validator/db/archive-manager.cpp @@ -310,14 +310,17 @@ void ArchiveManager::get_file(ConstBlockHandle handle, FileReference ref_id, td: get_file_short_cont(std::move(ref_id), get_max_temp_file_desc_idx(), std::move(promise)); } -void ArchiveManager::written_perm_state(FileReferenceShort id) { - perm_states_.emplace(id.hash(), id); +void ArchiveManager::register_perm_state(FileReferenceShort id) { + BlockSeqno masterchain_seqno = 0; + id.ref().visit(td::overloaded( + [&](const fileref::PersistentStateShort &x) { masterchain_seqno = 
x.masterchain_seqno; }, [&](const auto &) {})); + perm_states_[{masterchain_seqno, id.hash()}] = id; } void ArchiveManager::add_zero_state(BlockIdExt block_id, td::BufferSlice data, td::Promise promise) { auto id = FileReference{fileref::ZeroState{block_id}}; auto hash = id.hash(); - if (perm_states_.find(hash) != perm_states_.end()) { + if (perm_states_.find({0, hash}) != perm_states_.end()) { promise.set_value(td::Unit()); return; } @@ -328,7 +331,7 @@ void ArchiveManager::add_zero_state(BlockIdExt block_id, td::BufferSlice data, t if (R.is_error()) { promise.set_error(R.move_as_error()); } else { - td::actor::send_closure(SelfId, &ArchiveManager::written_perm_state, id); + td::actor::send_closure(SelfId, &ArchiveManager::register_perm_state, id); promise.set_value(td::Unit()); } }); @@ -357,12 +360,13 @@ void ArchiveManager::add_persistent_state_gen(BlockIdExt block_id, BlockIdExt ma add_persistent_state_impl(block_id, masterchain_block_id, std::move(promise), std::move(create_writer)); } -void ArchiveManager::add_persistent_state_impl(BlockIdExt block_id, BlockIdExt masterchain_block_id, - td::Promise promise, - std::function)> create_writer) { +void ArchiveManager::add_persistent_state_impl( + BlockIdExt block_id, BlockIdExt masterchain_block_id, td::Promise promise, + std::function)> create_writer) { auto id = FileReference{fileref::PersistentState{block_id, masterchain_block_id}}; + BlockSeqno masterchain_seqno = masterchain_block_id.seqno(); auto hash = id.hash(); - if (perm_states_.find(hash) != perm_states_.end()) { + if (perm_states_.find({masterchain_seqno, hash}) != perm_states_.end()) { promise.set_value(td::Unit()); return; } @@ -373,7 +377,7 @@ void ArchiveManager::add_persistent_state_impl(BlockIdExt block_id, BlockIdExt m if (R.is_error()) { promise.set_error(R.move_as_error()); } else { - td::actor::send_closure(SelfId, &ArchiveManager::written_perm_state, id); + td::actor::send_closure(SelfId, &ArchiveManager::register_perm_state, id); promise.set_value(td::Unit()); } }); @@ -383,7 +387,7 @@ void ArchiveManager::add_persistent_state_impl(BlockIdExt block_id, BlockIdExt m void ArchiveManager::get_zero_state(BlockIdExt block_id, td::Promise promise) { auto id = FileReference{fileref::ZeroState{block_id}}; auto hash = id.hash(); - if (perm_states_.find(hash) == perm_states_.end()) { + if (perm_states_.find({0, hash}) == perm_states_.end()) { promise.set_error(td::Status::Error(ErrorCode::notready, "zerostate not in db")); return; } @@ -395,18 +399,38 @@ void ArchiveManager::get_zero_state(BlockIdExt block_id, td::Promise promise) { auto id = FileReference{fileref::ZeroState{block_id}}; auto hash = id.hash(); - if (perm_states_.find(hash) == perm_states_.end()) { + if (perm_states_.find({0, hash}) == perm_states_.end()) { promise.set_result(false); return; } promise.set_result(true); } +void ArchiveManager::get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) { + auto it = perm_states_.lower_bound({cur_mc_seqno, FileHash::zero()}); + if (it == perm_states_.begin()) { + promise.set_value({}); + return; + } + --it; + BlockSeqno mc_seqno = it->first.first; + std::vector> files; + while (it->first.first == mc_seqno) { + files.emplace_back(db_root_ + "/archive/states/" + it->second.filename_short(), it->second.shard()); + if (it == perm_states_.begin()) { + break; + } + --it; + } + promise.set_value(std::move(files)); +} + void ArchiveManager::get_persistent_state(BlockIdExt block_id, BlockIdExt masterchain_block_id, td::Promise promise) { auto 
id = FileReference{fileref::PersistentState{block_id, masterchain_block_id}}; auto hash = id.hash(); - if (perm_states_.find(hash) == perm_states_.end()) { + if (perm_states_.find({masterchain_block_id.seqno(), hash}) == perm_states_.end()) { promise.set_error(td::Status::Error(ErrorCode::notready, "state file not in db")); return; } @@ -419,7 +443,7 @@ void ArchiveManager::get_persistent_state_slice(BlockIdExt block_id, BlockIdExt td::int64 max_size, td::Promise promise) { auto id = FileReference{fileref::PersistentState{block_id, masterchain_block_id}}; auto hash = id.hash(); - if (perm_states_.find(hash) == perm_states_.end()) { + if (perm_states_.find({masterchain_block_id.seqno(), hash}) == perm_states_.end()) { promise.set_error(td::Status::Error(ErrorCode::notready, "state file not in db")); return; } @@ -432,7 +456,7 @@ void ArchiveManager::check_persistent_state(BlockIdExt block_id, BlockIdExt mast td::Promise promise) { auto id = FileReference{fileref::PersistentState{block_id, masterchain_block_id}}; auto hash = id.hash(); - if (perm_states_.find(hash) == perm_states_.end()) { + if (perm_states_.find({masterchain_block_id.seqno(), hash}) == perm_states_.end()) { promise.set_result(false); return; } @@ -884,13 +908,11 @@ void ArchiveManager::start_up() { R = FileReferenceShort::create(newfname); R.ensure(); } - auto f = R.move_as_ok(); - auto hash = f.hash(); - perm_states_[hash] = std::move(f); + register_perm_state(R.move_as_ok()); } }).ensure(); - persistent_state_gc(FileHash::zero()); + persistent_state_gc({0, FileHash::zero()}); double open_since = td::Clocks::system() - opts_->get_archive_preload_period(); for (auto it = files_.rbegin(); it != files_.rend(); ++it) { @@ -976,11 +998,12 @@ void ArchiveManager::run_gc(UnixTime mc_ts, UnixTime gc_ts, UnixTime archive_ttl } } -void ArchiveManager::persistent_state_gc(FileHash last) { - if (perm_states_.size() == 0) { +void ArchiveManager::persistent_state_gc(std::pair last) { + if (perm_states_.empty()) { delay_action( - [hash = FileHash::zero(), SelfId = actor_id(this)]() { - td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, hash); + [SelfId = actor_id(this)]() { + td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, + std::pair{0, FileHash::zero()}); }, td::Timestamp::in(1.0)); return; @@ -993,12 +1016,12 @@ void ArchiveManager::persistent_state_gc(FileHash last) { it = perm_states_.begin(); } + auto key = it->first; auto &F = it->second; - auto hash = F.hash(); int res = 0; BlockSeqno seqno = 0; - F.ref().visit(td::overloaded([&](const fileref::ZeroStateShort &x) { res = 1; }, + F.ref().visit(td::overloaded([&](const fileref::ZeroStateShort &) { res = 1; }, [&](const fileref::PersistentStateShort &x) { res = 0; seqno = x.masterchain_seqno; @@ -1010,24 +1033,41 @@ void ArchiveManager::persistent_state_gc(FileHash last) { perm_states_.erase(it); } if (res != 0) { - delay_action([hash, SelfId = actor_id( - this)]() { td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, hash); }, + delay_action([key, SelfId = actor_id( + this)]() { td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, key); }, + td::Timestamp::in(1.0)); + return; + } + CHECK(seqno == key.first); + + // Do not delete the most recent fully serialized state + bool allow_delete = false; + auto it2 = perm_states_.lower_bound({seqno + 1, FileHash::zero()}); + if (it2 != perm_states_.end()) { + it2 = perm_states_.lower_bound({it2->first.first + 1, FileHash::zero()}); + if (it2 != perm_states_.end()) { 
+ allow_delete = true; + } + } + if (!allow_delete) { + delay_action([key, SelfId = actor_id( + this)]() { td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, key); }, td::Timestamp::in(1.0)); return; } - auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), hash](td::Result R) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), key](td::Result R) { if (R.is_error()) { - td::actor::send_closure(SelfId, &ArchiveManager::got_gc_masterchain_handle, nullptr, hash); + td::actor::send_closure(SelfId, &ArchiveManager::got_gc_masterchain_handle, nullptr, key); } else { - td::actor::send_closure(SelfId, &ArchiveManager::got_gc_masterchain_handle, R.move_as_ok(), hash); + td::actor::send_closure(SelfId, &ArchiveManager::got_gc_masterchain_handle, R.move_as_ok(), key); } }); get_block_by_seqno(AccountIdPrefixFull{masterchainId, 0}, seqno, std::move(P)); } -void ArchiveManager::got_gc_masterchain_handle(ConstBlockHandle handle, FileHash hash) { +void ArchiveManager::got_gc_masterchain_handle(ConstBlockHandle handle, std::pair key) { bool to_del = false; if (!handle || !handle->inited_unix_time() || !handle->unix_time()) { to_del = true; @@ -1035,15 +1075,15 @@ void ArchiveManager::got_gc_masterchain_handle(ConstBlockHandle handle, FileHash auto ttl = ValidatorManager::persistent_state_ttl(handle->unix_time()); to_del = ttl < td::Clocks::system(); } - auto it = perm_states_.find(hash); + auto it = perm_states_.find(key); CHECK(it != perm_states_.end()); auto &F = it->second; if (to_del) { td::unlink(db_root_ + "/archive/states/" + F.filename_short()).ignore(); perm_states_.erase(it); } - delay_action([hash, SelfId = actor_id( - this)]() { td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, hash); }, + delay_action([key, SelfId = actor_id( + this)]() { td::actor::send_closure(SelfId, &ArchiveManager::persistent_state_gc, key); }, td::Timestamp::in(1.0)); } diff --git a/validator/db/archive-manager.hpp b/validator/db/archive-manager.hpp index a1ed97022..622969ec5 100644 --- a/validator/db/archive-manager.hpp +++ b/validator/db/archive-manager.hpp @@ -54,6 +54,8 @@ class ArchiveManager : public td::actor::Actor { td::int64 max_size, td::Promise promise); void check_persistent_state(BlockIdExt block_id, BlockIdExt masterchain_block_id, td::Promise promise); void check_zero_state(BlockIdExt block_id, td::Promise promise); + void get_previous_persistent_state_files(BlockSeqno cur_mc_seqno, + td::Promise>> promise); void truncate(BlockSeqno masterchain_seqno, ConstBlockHandle handle, td::Promise promise); //void truncate_continue(BlockSeqno masterchain_seqno, td::Promise promise); @@ -180,7 +182,7 @@ class ArchiveManager : public td::actor::Actor { return p.key ? key_files_ : p.temp ? 
temp_files_ : files_; } - std::map perm_states_; + std::map, FileReferenceShort> perm_states_; // Mc block seqno, hash -> state void load_package(PackageId seqno); void delete_package(PackageId seqno, td::Promise promise); @@ -207,10 +209,10 @@ class ArchiveManager : public td::actor::Actor { void add_persistent_state_impl(BlockIdExt block_id, BlockIdExt masterchain_block_id, td::Promise promise, std::function)> create_writer); - void written_perm_state(FileReferenceShort id); + void register_perm_state(FileReferenceShort id); - void persistent_state_gc(FileHash last); - void got_gc_masterchain_handle(ConstBlockHandle handle, FileHash hash); + void persistent_state_gc(std::pair last); + void got_gc_masterchain_handle(ConstBlockHandle handle, std::pair key); std::string db_root_; td::Ref opts_; diff --git a/validator/db/celldb.cpp b/validator/db/celldb.cpp index dfbee0a1a..463e6e34a 100644 --- a/validator/db/celldb.cpp +++ b/validator/db/celldb.cpp @@ -84,11 +84,13 @@ void CellDbIn::start_up() { }; CellDbBase::start_up(); + td::RocksDbOptions db_options; if (!opts_->get_disable_rocksdb_stats()) { statistics_ = td::RocksDb::create_statistics(); statistics_flush_at_ = td::Timestamp::in(60.0); + snapshot_statistics_ = std::make_shared(); + db_options.snapshot_statistics = snapshot_statistics_; } - td::RocksDbOptions db_options; db_options.statistics = statistics_; if (opts_->get_celldb_cache_size()) { db_options.block_cache = td::RocksDb::create_cache(opts_->get_celldb_cache_size().value()); @@ -193,7 +195,11 @@ void CellDbIn::get_cell_db_reader(td::Promise> } void CellDbIn::flush_db_stats() { - auto stats = td::RocksDb::statistics_to_string(statistics_) + cell_db_statistics_.to_string(); + if (opts_->get_disable_rocksdb_stats()) { + return; + } + auto stats = td::RocksDb::statistics_to_string(statistics_) + snapshot_statistics_->to_string() + + cell_db_statistics_.to_string(); auto to_file_r = td::FileFd::open(path_ + "/db_stats.txt", td::FileFd::Truncate | td::FileFd::Create | td::FileFd::Write, 0644); if (to_file_r.is_error()) { diff --git a/validator/db/celldb.hpp b/validator/db/celldb.hpp index 7dc1fa781..b3857971c 100644 --- a/validator/db/celldb.hpp +++ b/validator/db/celldb.hpp @@ -27,6 +27,7 @@ #include "auto/tl/ton_api.h" #include "validator.h" #include "db-utils.h" +#include "td/db/RocksDb.h" namespace rocksdb { class Statistics; @@ -139,6 +140,7 @@ class CellDbIn : public CellDbBase { }; std::shared_ptr statistics_; + std::shared_ptr snapshot_statistics_; CellDbStatistics cell_db_statistics_; td::Timestamp statistics_flush_at_ = td::Timestamp::never(); diff --git a/validator/db/rootdb.cpp b/validator/db/rootdb.cpp index ff9abae68..93dcfc91f 100644 --- a/validator/db/rootdb.cpp +++ b/validator/db/rootdb.cpp @@ -317,6 +317,12 @@ void RootDb::check_zero_state_file_exists(BlockIdExt block_id, td::Promise td::actor::send_closure(archive_db_, &ArchiveManager::check_zero_state, block_id, std::move(promise)); } +void RootDb::get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) { + td::actor::send_closure(archive_db_, &ArchiveManager::get_previous_persistent_state_files, cur_mc_seqno, + std::move(promise)); +} + void RootDb::store_block_handle(BlockHandle handle, td::Promise promise) { td::actor::send_closure(archive_db_, &ArchiveManager::update_handle, std::move(handle), std::move(promise)); } diff --git a/validator/db/rootdb.hpp b/validator/db/rootdb.hpp index 97b9550b8..45044e4f8 100644 --- a/validator/db/rootdb.hpp +++ b/validator/db/rootdb.hpp @@ -84,6 
+84,8 @@ class RootDb : public Db { void store_zero_state_file(BlockIdExt block_id, td::BufferSlice state, td::Promise promise) override; void get_zero_state_file(BlockIdExt block_id, td::Promise promise) override; void check_zero_state_file_exists(BlockIdExt block_id, td::Promise promise) override; + void get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) override; void try_get_static_file(FileHash file_hash, td::Promise promise) override; diff --git a/validator/fabric.h b/validator/fabric.h index 6bb668452..949a6c9ff 100644 --- a/validator/fabric.h +++ b/validator/fabric.h @@ -80,8 +80,8 @@ void run_validate_query(ShardIdFull shard, UnixTime min_ts, BlockIdExt min_maste td::Promise promise, bool is_fake = false); void run_collate_query(ShardIdFull shard, td::uint32 min_ts, const BlockIdExt& min_masterchain_block_id, std::vector prev, Ed25519_PublicKey local_id, td::Ref validator_set, - td::actor::ActorId manager, td::Timestamp timeout, - td::Promise promise); + td::Ref collator_opts, td::actor::ActorId manager, + td::Timestamp timeout, td::Promise promise); void run_collate_hardfork(ShardIdFull shard, const BlockIdExt& min_masterchain_block_id, std::vector prev, td::actor::ActorId manager, td::Timestamp timeout, td::Promise promise); diff --git a/validator/full-node-shard.cpp b/validator/full-node-shard.cpp index fa0c3e62a..fbdbbfd7a 100644 --- a/validator/full-node-shard.cpp +++ b/validator/full-node-shard.cpp @@ -832,8 +832,7 @@ void FullNodeShardImpl::download_persistent_state(BlockIdExt id, BlockIdExt mast auto &b = choose_neighbour(); td::actor::create_actor(PSTRING() << "downloadstatereq" << id.id.to_str(), id, masterchain_block_id, adnl_id_, overlay_id_, b.adnl_id, priority, timeout, validator_manager_, - b.use_rldp2() ? (td::actor::ActorId)rldp2_ : rldp_, - overlays_, adnl_, client_, std::move(promise)) + rldp2_, overlays_, adnl_, client_, std::move(promise)) .release(); } @@ -867,10 +866,9 @@ void FullNodeShardImpl::get_next_key_blocks(BlockIdExt block_id, td::Timestamp t void FullNodeShardImpl::download_archive(BlockSeqno masterchain_seqno, std::string tmp_dir, td::Timestamp timeout, td::Promise promise) { auto &b = choose_neighbour(); - td::actor::create_actor( - "archive", masterchain_seqno, std::move(tmp_dir), adnl_id_, overlay_id_, b.adnl_id, timeout, validator_manager_, - b.use_rldp2() ? 
(td::actor::ActorId)rldp2_ : rldp_, overlays_, adnl_, client_, - create_neighbour_promise(b, std::move(promise))) + td::actor::create_actor("archive", masterchain_seqno, std::move(tmp_dir), adnl_id_, overlay_id_, + b.adnl_id, timeout, validator_manager_, rldp2_, overlays_, adnl_, + client_, create_neighbour_promise(b, std::move(promise))) .release(); } diff --git a/validator/full-node-shard.hpp b/validator/full-node-shard.hpp index cec7c6494..a7cf89ac5 100644 --- a/validator/full-node-shard.hpp +++ b/validator/full-node-shard.hpp @@ -46,10 +46,6 @@ struct Neighbour { void query_failed(); void update_roundtrip(double t); - bool use_rldp2() const { - return std::make_pair(proto_version, capabilities) >= std::make_pair(2, 2); - } - static Neighbour zero; }; diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index 055c1aed2..913a0ed87 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -44,7 +44,8 @@ class Collator final : public td::actor::Actor { return SUPPORTED_VERSION; } static constexpr long long supported_capabilities() { - return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue; + return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue | + ton::capStoreOutMsgQueueSize | ton::capMsgMetadata | ton::capDeferMessages; } using LtCellRef = block::LtCellRef; using NewOutMsg = block::NewOutMsg; @@ -70,6 +71,7 @@ class Collator final : public td::actor::Actor { std::vector> prev_states; std::vector> prev_block_data; Ed25519_PublicKey created_by_; + Ref collator_opts_; Ref validator_set_; td::actor::ActorId manager; td::Timestamp timeout; @@ -89,7 +91,8 @@ class Collator final : public td::actor::Actor { public: Collator(ShardIdFull shard, bool is_hardfork, td::uint32 min_ts, BlockIdExt min_masterchain_block_id, std::vector prev, Ref validator_set, Ed25519_PublicKey collator_id, - td::actor::ActorId manager, td::Timestamp timeout, td::Promise promise); + Ref collator_opts, td::actor::ActorId manager, td::Timestamp timeout, + td::Promise promise); ~Collator() override = default; bool is_busy() const { return busy_; @@ -192,7 +195,10 @@ class Collator final : public td::actor::Actor { std::priority_queue, std::greater> new_msgs; std::pair last_proc_int_msg_, first_unproc_int_msg_; std::unique_ptr in_msg_dict, out_msg_dict, out_msg_queue_, sibling_out_msg_queue_; - td::uint32 out_msg_queue_size_ = 0; + std::map unprocessed_deferred_messages_; // number of messages from dispatch queue in new_msgs + td::uint64 out_msg_queue_size_ = 0; + td::uint64 old_out_msg_queue_size_ = 0; + bool have_out_msg_queue_size_in_state_ = false; std::unique_ptr ihr_pending; std::shared_ptr processed_upto_, sibling_processed_upto_; std::unique_ptr block_create_stats_; @@ -203,6 +209,19 @@ class Collator final : public td::actor::Actor { std::vector> collated_roots_; std::unique_ptr block_candidate; + std::unique_ptr dispatch_queue_; + std::map sender_generated_messages_count_; + unsigned dispatch_queue_ops_{0}; + std::map last_dispatch_queue_emitted_lt_; + bool have_unprocessed_account_dispatch_queue_ = false; + bool dispatch_queue_total_limit_reached_ = false; + td::uint64 defer_out_queue_size_limit_; + td::uint64 hard_defer_out_queue_size_limit_; + + bool msg_metadata_enabled_ = false; + bool deferring_messages_enabled_ = false; + bool store_out_msg_queue_size_ = false; + td::PerfWarningTimer perf_timer_; // block::Account* lookup_account(td::ConstBitPtr addr) const; 
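Note: the new Collator members introduced in the hunk above (dispatch_queue_, sender_generated_messages_count_, defer_out_queue_size_limit_, deferring_messages_enabled_) feed the message-deferral rule applied later in Collator::process_one_new_message. A minimal standalone sketch of that rule follows; SketchCollatorOptions, SketchCollatorState and should_defer_message are invented stand-ins, not the real TON classes, and only approximate the logic shown further below in this patch.

#include <cstdint>
#include <map>
#include <string>

// Simplified stand-in for the CollatorOptions fields used by the deferral rule.
struct SketchCollatorOptions {
  bool deferring_enabled = true;
  std::uint32_t defer_messages_after = 10;
  std::uint64_t defer_out_queue_size_limit = 100000;
};

// Simplified per-block collator state.
struct SketchCollatorState {
  std::map<std::string, std::uint32_t> sender_generated_messages_count;  // messages generated per sender in this block
  std::uint64_t out_msg_queue_size = 0;                                  // current outbound queue size
};

// Returns true if a newly generated internal message should go to the dispatch queue
// instead of being processed or enqueued immediately. The first message of a transaction
// and messages from special accounts are never deferred.
bool should_defer_message(SketchCollatorState& st, const SketchCollatorOptions& opts,
                          const std::string& src_addr, bool cap_defer_messages,
                          bool is_special, int msg_idx) {
  if (!cap_defer_messages || !opts.deferring_enabled || is_special || msg_idx == 0) {
    return false;
  }
  return ++st.sender_generated_messages_count[src_addr] >= opts.defer_messages_after ||
         st.out_msg_queue_size > opts.defer_out_queue_size_limit;
}

The actual implementation additionally defers any message whose sender already has entries in the dispatch queue or unprocessed deferred messages; see the process_one_new_message hunk in collator.cpp below.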
@@ -231,7 +250,7 @@ class Collator final : public td::actor::Actor { bool fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton::ShardIdFull& owner); bool fix_processed_upto(block::MsgProcessedUptoCollection& upto); void got_neighbor_out_queue(int i, td::Result> res); - void got_out_queue_size(size_t i, td::Result res); + void got_out_queue_size(size_t i, td::Result res); bool adjust_shard_config(); bool store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees, const block::CurrencyCollection& created); @@ -249,7 +268,8 @@ class Collator final : public td::actor::Actor { Ref& in_msg); bool create_ticktock_transactions(int mask); bool create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask); - Ref create_ordinary_transaction(Ref msg_root, bool is_special_tx = false); + Ref create_ordinary_transaction(Ref msg_root, td::optional msg_metadata, + LogicalTime after_lt, bool is_special_tx = false); bool check_cur_validator_set(); bool unpack_last_mc_state(); bool unpack_last_state(); @@ -278,7 +298,7 @@ class Collator final : public td::actor::Actor { int priority); // td::Result register_external_message(td::Slice ext_msg_boc); void register_new_msg(block::NewOutMsg msg); - void register_new_msgs(block::transaction::Transaction& trans); + void register_new_msgs(block::transaction::Transaction& trans, td::optional msg_metadata); bool process_new_messages(bool enqueue_only = false); int process_one_new_message(block::NewOutMsg msg, bool enqueue_only = false, Ref* is_special = nullptr); bool process_inbound_internal_messages(); @@ -286,10 +306,15 @@ class Collator final : public td::actor::Actor { const block::McShardDescr& src_nb); bool process_inbound_external_messages(); int process_external_message(Ref msg); - bool enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt); + bool process_dispatch_queue(); + bool process_deferred_message(Ref enq_msg, StdSmcAddress src_addr, LogicalTime lt, + td::optional& msg_metadata); + bool enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, StdSmcAddress src_addr, + bool defer = false); bool enqueue_transit_message(Ref msg, Ref old_msg_env, ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix, ton::AccountIdPrefixFull dest_prefix, - td::RefInt256 fwd_fee_remaining); + td::RefInt256 fwd_fee_remaining, td::optional msg_metadata, + td::optional emitted_lt = {}); bool delete_out_msg_queue_msg(td::ConstBitPtr key); bool insert_in_msg(Ref in_msg); bool insert_out_msg(Ref out_msg); diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index ba38ba423..c6dd7caf2 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -1,7 +1,7 @@ /* - This file is part of TON Blockchain Library. + This file is part of TON Blockchain Library. - TON Blockchain Library is free software: you can redistribute it and/or modify + TON Blockchain Library is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. @@ -71,20 +71,22 @@ static inline bool dbg(int c) { * @param prev A vector of BlockIdExt representing the previous blocks. * @param validator_set A reference to the ValidatorSet. * @param collator_id The public key of the block creator. + * @param collator_opts A reference to CollatorOptions. 
* @param manager The ActorId of the ValidatorManager. * @param timeout The timeout for the collator. * @param promise The promise to return the result. */ Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockIdExt min_masterchain_block_id, - std::vector prev, td::Ref validator_set, Ed25519_PublicKey collator_id, - td::actor::ActorId manager, td::Timestamp timeout, - td::Promise promise) + std::vector prev, Ref validator_set, Ed25519_PublicKey collator_id, + Ref collator_opts, td::actor::ActorId manager, + td::Timestamp timeout, td::Promise promise) : shard_(shard) , is_hardfork_(is_hardfork) , min_ts(min_ts) , min_mc_block_id{min_masterchain_block_id} , prev_blocks(std::move(prev)) , created_by_(collator_id) + , collator_opts_(collator_opts) , validator_set_(std::move(validator_set)) , manager(manager) , timeout(timeout) @@ -551,7 +553,7 @@ bool Collator::preprocess_prev_mc_state() { /** * Callback function called after retrieving the Masterchain state. * - * @param res The retreived masterchain state. + * @param res The retrieved masterchain state. */ void Collator::after_get_mc_state(td::Result, BlockIdExt>> res) { LOG(WARNING) << "in Collator::after_get_mc_state()"; @@ -619,7 +621,7 @@ void Collator::after_get_shard_state(int idx, td::Result> res) { * Callback function called after retrieving block data for a previous block. * * @param idx The index of the previous block (0 or 1). - * @param res The retreived block data. + * @param res The retreved block data. */ void Collator::after_get_block_data(int idx, td::Result> res) { LOG(DEBUG) << "in Collator::after_get_block_data(" << idx << ")"; @@ -694,6 +696,9 @@ bool Collator::unpack_last_mc_state() { create_stats_enabled_ = config_->create_stats_enabled(); report_version_ = config_->has_capability(ton::capReportVersion); short_dequeue_records_ = config_->has_capability(ton::capShortDequeue); + store_out_msg_queue_size_ = config_->has_capability(ton::capStoreOutMsgQueueSize); + msg_metadata_enabled_ = config_->has_capability(ton::capMsgMetadata); + deferring_messages_enabled_ = config_->has_capability(ton::capDeferMessages); shard_conf_ = std::make_unique(*config_); prev_key_block_exists_ = config_->get_last_key_block(prev_key_block_, prev_key_block_lt_); if (prev_key_block_exists_) { @@ -794,19 +799,20 @@ bool Collator::request_neighbor_msg_queues() { } /** - * Requests the size of the outbound message queue from the previous state(s). + * Requests the size of the outbound message queue from the previous state(s) if needed. * * @returns True if the request was successful, false otherwise. */ bool Collator::request_out_msg_queue_size() { - if (after_split_) { - // If block is after split, the size is calculated during split (see Collator::split_last_state) + if (have_out_msg_queue_size_in_state_) { + // if after_split then have_out_msg_queue_size_in_state_ is always true, since the size is calculated during split return true; } + out_msg_queue_size_ = 0; for (size_t i = 0; i < prev_blocks.size(); ++i) { ++pending; send_closure_later(manager, &ValidatorManager::get_out_msg_queue_size, prev_blocks[i], - [self = get_self(), i](td::Result res) { + [self = get_self(), i](td::Result res) { td::actor::send_closure(std::move(self), &Collator::got_out_queue_size, i, std::move(res)); }); } @@ -885,14 +891,14 @@ void Collator::got_neighbor_out_queue(int i, td::Result> res) * @param i The index of the previous block (0 or 1). * @param res The result object containing the size of the queue. 
*/ -void Collator::got_out_queue_size(size_t i, td::Result res) { +void Collator::got_out_queue_size(size_t i, td::Result res) { --pending; if (res.is_error()) { fatal_error( res.move_as_error_prefix(PSTRING() << "failed to get message queue size from prev block #" << i << ": ")); return; } - td::uint32 size = res.move_as_ok(); + td::uint64 size = res.move_as_ok(); LOG(WARNING) << "got outbound queue size from prev block #" << i << ": " << size; out_msg_queue_size_ += size; check_pending(); @@ -1016,7 +1022,7 @@ bool Collator::split_last_state(block::ShardState& ss) { return fatal_error(res2.move_as_error()); } sibling_processed_upto_ = res2.move_as_ok(); - auto res3 = ss.split(shard_, &out_msg_queue_size_); + auto res3 = ss.split(shard_); if (res3.is_error()) { return fatal_error(std::move(res3)); } @@ -1052,7 +1058,12 @@ bool Collator::import_shard_state_data(block::ShardState& ss) { out_msg_queue_ = std::move(ss.out_msg_queue_); processed_upto_ = std::move(ss.processed_upto_); ihr_pending = std::move(ss.ihr_pending_); + dispatch_queue_ = std::move(ss.dispatch_queue_); block_create_stats_ = std::move(ss.block_create_stats_); + if (ss.out_msg_queue_size_) { + have_out_msg_queue_size_in_state_ = true; + out_msg_queue_size_ = ss.out_msg_queue_size_.value(); + } return true; } @@ -1776,6 +1787,7 @@ bool Collator::try_collate() { last_proc_int_msg_.second.set_zero(); first_unproc_int_msg_.first = ~0ULL; first_unproc_int_msg_.second.set_ones(); + old_out_msg_queue_size_ = out_msg_queue_size_; if (is_masterchain()) { LOG(DEBUG) << "getting the list of special smart contracts"; auto res = config_->get_special_smartcontracts(); @@ -1960,6 +1972,10 @@ bool Collator::fetch_config_params() { return fatal_error(res.move_as_error()); } compute_phase_cfg_.libraries = std::make_unique(config_->get_libraries_root(), 256); + defer_out_queue_size_limit_ = std::max(collator_opts_->defer_out_queue_size_limit, + compute_phase_cfg_.size_limits.defer_out_queue_size_limit); + // This one is checked in validate-query + hard_defer_out_queue_size_limit_ = compute_phase_cfg_.size_limits.defer_out_queue_size_limit; return true; } @@ -2090,6 +2106,11 @@ bool Collator::do_collate() { if (!init_value_create()) { return fatal_error("cannot compute the value to be created / minted / recovered"); } + // 2-. take messages from dispatch queue + LOG(INFO) << "process dispatch queue"; + if (!process_dispatch_queue()) { + return fatal_error("cannot process dispatch queue"); + } // 2. tick transactions LOG(INFO) << "create tick transactions"; if (!create_ticktock_transactions(2)) { @@ -2597,7 +2618,7 @@ bool Collator::create_special_transaction(block::CurrencyCollection amount, Ref< } CHECK(block::gen::t_Message_Any.validate_ref(msg)); CHECK(block::tlb::t_Message.validate_ref(msg)); - if (process_one_new_message(block::NewOutMsg{lt, msg, Ref{}}, false, &in_msg) != 1) { + if (process_one_new_message(block::NewOutMsg{lt, msg, Ref{}, 0}, false, &in_msg) != 1) { return fatal_error("cannot generate special transaction for recovering "s + amount.to_str() + " to account " + addr.to_hex()); } @@ -2622,7 +2643,7 @@ bool Collator::create_special_transactions() { * * @param smc_addr The address of the smart contract. * @param req_start_lt The requested start logical time for the transaction. - * @param mask The value indicating wheter the thansaction is tick (mask == 2) or tock (mask == 1). + * @param mask The value indicating whether the thansaction is tick (mask == 2) or tock (mask == 1). 
* * @returns True if the transaction was created successfully, false otherwise. */ @@ -2639,13 +2660,18 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t return true; } req_start_lt = std::max(req_start_lt, start_lt + 1); + auto it = last_dispatch_queue_emitted_lt_.find(acc->addr); + if (it != last_dispatch_queue_emitted_lt_.end()) { + req_start_lt = std::max(req_start_lt, it->second + 1); + } if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) { return fatal_error(td::Status::Error(-666, PSTRING() << "last transaction time in the state of account " << workchain() << ":" << smc_addr.to_hex() << " is too large")); } std::unique_ptr trans = std::make_unique( - *acc, mask == 2 ? block::transaction::Transaction::tr_tick : block::transaction::Transaction::tr_tock, req_start_lt, now_); + *acc, mask == 2 ? block::transaction::Transaction::tr_tick : block::transaction::Transaction::tr_tock, + req_start_lt, now_); if (!trans->prepare_storage_phase(storage_phase_cfg_, true)) { return fatal_error(td::Status::Error( -666, std::string{"cannot create storage phase of a new transaction for smart contract "} + smc_addr.to_hex())); @@ -2675,7 +2701,8 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t td::Status::Error(-666, std::string{"cannot commit new transaction for smart contract "} + smc_addr.to_hex())); } update_max_lt(acc->last_trans_end_lt_); - register_new_msgs(*trans); + block::MsgMetadata new_msg_metadata{0, acc->workchain, acc->addr, trans->start_lt}; + register_new_msgs(*trans, std::move(new_msg_metadata)); return true; } @@ -2683,11 +2710,15 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t * Creates an ordinary transaction using a given message. * * @param msg_root The root of the message to be processed serialized using Message TLB-scheme. + * @param msg_metadata Metadata of the inbound message. + * @param after_lt Transaction lt will be greater than after_lt. Used for deferred messages. * @param is_special_tx True if creating a special transaction (mint/recover), false otherwise. * * @returns The root of the serialized transaction, or an empty reference if the transaction creation fails.
*/ -Ref Collator::create_ordinary_transaction(Ref msg_root, bool is_special_tx) { +Ref Collator::create_ordinary_transaction(Ref msg_root, + td::optional msg_metadata, LogicalTime after_lt, + bool is_special_tx) { ton::StdSmcAddress addr; auto cs = vm::load_cell_slice(msg_root); bool external; @@ -2731,8 +2762,15 @@ Ref Collator::create_ordinary_transaction(Ref msg_root, bool block::Account* acc = acc_res.move_as_ok(); assert(acc); + if (external) { + after_lt = std::max(after_lt, last_proc_int_msg_.first); + } + auto it = last_dispatch_queue_emitted_lt_.find(acc->addr); + if (it != last_dispatch_queue_emitted_lt_.end()) { + after_lt = std::max(after_lt, it->second); + } auto res = impl_create_ordinary_transaction(msg_root, acc, now_, start_lt, &storage_phase_cfg_, &compute_phase_cfg_, - &action_phase_cfg_, external, last_proc_int_msg_.first); + &action_phase_cfg_, external, after_lt); if (res.is_error()) { auto error = res.move_as_error(); if (error.code() == -701) { @@ -2756,7 +2794,14 @@ Ref Collator::create_ordinary_transaction(Ref msg_root, bool return {}; } - register_new_msgs(*trans); + td::optional new_msg_metadata; + if (external || is_special_tx) { + new_msg_metadata = block::MsgMetadata{0, acc->workchain, acc->addr, trans->start_lt}; + } else if (msg_metadata) { + new_msg_metadata = std::move(msg_metadata); + ++new_msg_metadata.value().depth; + } + register_new_msgs(*trans, std::move(new_msg_metadata)); update_max_lt(acc->last_trans_end_lt_); value_flow_.burned += trans->blackhole_burned; return trans_root; @@ -2791,13 +2836,12 @@ td::Result> Collator::impl_crea << ":" << acc->addr.to_hex() << " is too large"); } auto trans_min_lt = lt; - if (external) { - // transactions processing external messages must have lt larger than all processed internal messages - trans_min_lt = std::max(trans_min_lt, after_lt); - } + // transactions processing external messages must have lt larger than all processed internal messages + // if account has deferred message processed in this block, the next transaction should have lt > emitted_lt + trans_min_lt = std::max(trans_min_lt, after_lt); - std::unique_ptr trans = - std::make_unique(*acc, block::transaction::Transaction::tr_ord, trans_min_lt + 1, utime, msg_root); + std::unique_ptr trans = std::make_unique( + *acc, block::transaction::Transaction::tr_ord, trans_min_lt + 1, utime, msg_root); bool ihr_delivered = false; // FIXME if (!trans->unpack_input_msg(ihr_delivered, action_phase_cfg)) { if (external) { @@ -2896,7 +2940,7 @@ bool Collator::update_last_proc_int_msg(const std::pair* is_special) { + bool from_dispatch_queue = msg.msg_env_from_dispatch_queue.not_null(); Ref src, dest; bool enqueue, external; auto cs = load_cell_slice(msg.msg); @@ -2972,7 +3017,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R if (!tlb::unpack(cs, info)) { return -1; } - CHECK(info.created_lt == msg.lt && info.created_at == now_); + CHECK(info.created_lt == msg.lt && info.created_at == now_ && !from_dispatch_queue); src = std::move(info.src); enqueue = external = true; break; @@ -2982,7 +3027,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R if (!tlb::unpack(cs, info)) { return -1; } - CHECK(info.created_lt == msg.lt && info.created_at == now_); + CHECK(from_dispatch_queue || (info.created_lt == msg.lt && info.created_at == now_)); src = std::move(info.src); dest = std::move(info.dest); fwd_fees = block::tlb::t_Grams.as_integer(info.fwd_fee); @@ -2994,7 +3039,7 @@ int 
Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R default: return -1; } - CHECK(is_our_address(std::move(src))); + CHECK(is_our_address(src)); if (external) { // 1. construct a msg_export_ext OutMsg vm::CellBuilder cb; @@ -3006,9 +3051,46 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R // (if ever a structure in the block for listing all external outbound messages appears, insert this message there as well) return 0; } - if (enqueue) { - auto lt = msg.lt; - bool ok = enqueue_message(std::move(msg), std::move(fwd_fees), lt); + + WorkchainId src_wc; + StdSmcAddress src_addr; + CHECK(block::tlb::t_MsgAddressInt.extract_std_address(src, src_wc, src_addr)); + CHECK(src_wc == workchain()); + bool is_special_account = is_masterchain() && config_->is_special_smartcontract(src_addr); + bool defer = false; + if (!from_dispatch_queue) { + if (deferring_messages_enabled_ && collator_opts_->deferring_enabled && !is_special && !is_special_account && + msg.msg_idx != 0) { + if (++sender_generated_messages_count_[src_addr] >= collator_opts_->defer_messages_after || + out_msg_queue_size_ > defer_out_queue_size_limit_) { + defer = true; + } + } + if (dispatch_queue_->lookup(src_addr).not_null() || unprocessed_deferred_messages_.count(src_addr)) { + defer = true; + } + } else { + auto &x = unprocessed_deferred_messages_[src_addr]; + CHECK(x > 0); + if (--x == 0) { + unprocessed_deferred_messages_.erase(src_addr); + } + } + + if (enqueue || defer) { + bool ok; + if (from_dispatch_queue) { + auto msg_env = msg.msg_env_from_dispatch_queue; + block::tlb::MsgEnvelope::Record_std env; + CHECK(block::tlb::unpack_cell(msg_env, env)); + auto src_prefix = block::tlb::MsgAddressInt::get_prefix(src); + auto dest_prefix = block::tlb::MsgAddressInt::get_prefix(dest); + CHECK(env.emitted_lt && env.emitted_lt.value() == msg.lt); + ok = enqueue_transit_message(std::move(msg.msg), std::move(msg_env), src_prefix, src_prefix, dest_prefix, + std::move(env.fwd_fee_remaining), std::move(env.metadata), msg.lt); + } else { + ok = enqueue_message(std::move(msg), std::move(fwd_fees), src_addr, defer); + } return ok ? 0 : -1; } // process message by a transaction in this block: @@ -3019,26 +3101,36 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R return -1; } // 1. create a Transaction processing this Message - auto trans_root = create_ordinary_transaction(msg.msg, is_special != nullptr); + auto trans_root = create_ordinary_transaction(msg.msg, msg.metadata, msg.lt, is_special != nullptr); if (trans_root.is_null()) { fatal_error("cannot create transaction for re-processing output message"); return -1; } // 2. create a MsgEnvelope enveloping this Message - vm::CellBuilder cb; - CHECK(cb.store_long_bool(0x46060, 20) // msg_envelope#4 cur_addr:.. next_addr:.. - && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees) // fwd_fee_remaining:t_Grams - && cb.store_ref_bool(msg.msg)); // msg:^(Message Any) - Ref msg_env = cb.finalize(); + block::tlb::MsgEnvelope::Record_std msg_env_rec{0x60, 0x60, fwd_fees, msg.msg, {}, msg.metadata}; + Ref msg_env; + CHECK(block::tlb::pack_cell(msg_env, msg_env_rec)); if (verbosity > 2) { std::cerr << "new (processed outbound) message envelope: "; block::gen::t_MsgEnvelope.print_ref(std::cerr, msg_env); } // 3. 
create InMsg, referring to this MsgEnvelope and this Transaction - CHECK(cb.store_long_bool(3, 3) // msg_import_imm$011 - && cb.store_ref_bool(msg_env) // in_msg:^MsgEnvelope - && cb.store_ref_bool(trans_root) // transaction:^Transaction - && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees)); // fwd_fee:Grams + vm::CellBuilder cb; + if (from_dispatch_queue) { + auto msg_env = msg.msg_env_from_dispatch_queue; + block::tlb::MsgEnvelope::Record_std env; + CHECK(block::tlb::unpack_cell(msg_env, env)); + CHECK(env.emitted_lt && env.emitted_lt.value() == msg.lt); + CHECK(cb.store_long_bool(0b00100, 5) // msg_import_deferred_fin$00100 + && cb.store_ref_bool(msg_env) // in_msg:^MsgEnvelope + && cb.store_ref_bool(trans_root) // transaction:^Transaction + && block::tlb::t_Grams.store_integer_ref(cb, env.fwd_fee_remaining)); // fwd_fee:Grams + } else { + CHECK(cb.store_long_bool(3, 3) // msg_import_imm$011 + && cb.store_ref_bool(msg_env) // in_msg:^MsgEnvelope + && cb.store_ref_bool(trans_root) // transaction:^Transaction + && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees)); // fwd_fee:Grams + } // 4. insert InMsg into InMsgDescr Ref in_msg = cb.finalize(); if (!insert_in_msg(in_msg)) { @@ -3049,14 +3141,16 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R *is_special = in_msg; return 1; } - // 5. create OutMsg, referring to this MsgEnvelope and InMsg - CHECK(cb.store_long_bool(2, 3) // msg_export_imm$010 - && cb.store_ref_bool(msg_env) // out_msg:^MsgEnvelope - && cb.store_ref_bool(msg.trans) // transaction:^Transaction - && cb.store_ref_bool(in_msg)); // reimport:^InMsg - // 6. insert OutMsg into OutMsgDescr - if (!insert_out_msg(cb.finalize())) { - return -1; + if (!from_dispatch_queue) { + // 5. create OutMsg, referring to this MsgEnvelope and InMsg + CHECK(cb.store_long_bool(2, 3) // msg_export_imm$010 + && cb.store_ref_bool(msg_env) // out_msg:^MsgEnvelope + && cb.store_ref_bool(msg.trans) // transaction:^Transaction + && cb.store_ref_bool(in_msg)); // reimport:^InMsg + // 6. insert OutMsg into OutMsgDescr + if (!insert_out_msg(cb.finalize())) { + return -1; + } } // 7. check whether the block is full now if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { @@ -3081,41 +3175,61 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R * @param cur_prefix The account ID prefix for the next hop. * @param dest_prefix The prefix of the destination account ID. * @param fwd_fee_remaining The remaining forward fee. + * @param msg_metadata Metadata of the message. + * @param emitted_lt If present - the message was taken from DispatchQueue, and msg_env will have this emitted_lt. * * @returns True if the transit message is successfully enqueued, false otherwise. 
 */
 bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_env,
                                        ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix,
-                                       ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining) {
-  LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256);
-  bool requeue = is_our_address(prev_prefix);
+                                       ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining,
+                                       td::optional msg_metadata,
+                                       td::optional emitted_lt) {
+  bool from_dispatch_queue = (bool)emitted_lt;
+  if (from_dispatch_queue) {
+    LOG(DEBUG) << "enqueueing message from dispatch queue " << msg->get_hash().bits().to_hex(256)
+               << ", emitted_lt=" << emitted_lt.value();
+  } else {
+    LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256);
+  }
+  bool requeue = is_our_address(prev_prefix) && !from_dispatch_queue;
   // 1. perform hypercube routing
   auto route_info = block::perform_hypercube_routing(cur_prefix, dest_prefix, shard_);
   if ((unsigned)route_info.first > 96 || (unsigned)route_info.second > 96) {
     return fatal_error("cannot perform hypercube routing for a transit message");
   }
   // 2. compute our part of transit fees
-  td::RefInt256 transit_fee = action_phase_cfg_.fwd_std.get_next_part(fwd_fee_remaining);
+  td::RefInt256 transit_fee =
+      from_dispatch_queue ? td::zero_refint() : action_phase_cfg_.fwd_std.get_next_part(fwd_fee_remaining);
   fwd_fee_remaining -= transit_fee;
   CHECK(td::sgn(transit_fee) >= 0 && td::sgn(fwd_fee_remaining) >= 0);
   // 3. create a new MsgEnvelope
-  vm::CellBuilder cb;
-  CHECK(cb.store_long_bool(4, 4)                                         // msg_envelope#4 cur_addr:.. next_addr:..
-        && cb.store_long_bool(route_info.first, 8)                       // cur_addr:IntermediateAddress
-        && cb.store_long_bool(route_info.second, 8)                      // next_addr:IntermediateAddress
-        && block::tlb::t_Grams.store_integer_ref(cb, fwd_fee_remaining)  // fwd_fee_remaining:t_Grams
-        && cb.store_ref_bool(msg));                                      // msg:^(Message Any)
-  Ref msg_env = cb.finalize();
+  block::tlb::MsgEnvelope::Record_std msg_env_rec{route_info.first, route_info.second, fwd_fee_remaining, msg,
+                                                  emitted_lt, std::move(msg_metadata)};
+  Ref msg_env;
+  CHECK(block::tlb::t_MsgEnvelope.pack_cell(msg_env, msg_env_rec));
   // 4. create InMsg
-  CHECK(cb.store_long_bool(5, 3)                                       // msg_import_tr$101
-        && cb.store_ref_bool(old_msg_env)                              // in_msg:^MsgEnvelope
-        && cb.store_ref_bool(msg_env)                                  // out_msg:^MsgEnvelope
-        && block::tlb::t_Grams.store_integer_ref(cb, transit_fee));    // transit_fee:Grams
+  vm::CellBuilder cb;
+  if (from_dispatch_queue) {
+    CHECK(cb.store_long_bool(0b00101, 5)     // msg_import_deferred_tr$00101
+          && cb.store_ref_bool(old_msg_env)  // in_msg:^MsgEnvelope
+          && cb.store_ref_bool(msg_env));    // out_msg:^MsgEnvelope
+  } else {
+    CHECK(cb.store_long_bool(5, 3)                                     // msg_import_tr$101
+          && cb.store_ref_bool(old_msg_env)                            // in_msg:^MsgEnvelope
+          && cb.store_ref_bool(msg_env)                                // out_msg:^MsgEnvelope
+          && block::tlb::t_Grams.store_integer_ref(cb, transit_fee));  // transit_fee:Grams
+  }
   Ref in_msg = cb.finalize();
   // 5. create a new OutMsg
-  CHECK(cb.store_long_bool(requeue ? 7 : 3, 3)  // msg_export_tr$011 or msg_export_tr_req$111
-        && cb.store_ref_bool(msg_env)           // out_msg:^MsgEnvelope
-        && cb.store_ref_bool(in_msg));          // imported:^InMsg
+  // msg_export_tr$011 / msg_export_tr_req$111 / msg_export_deferred_tr$10101
+  if (from_dispatch_queue) {
+    CHECK(cb.store_long_bool(0b10101, 5));
+  } else {
+    CHECK(cb.store_long_bool(requeue ?
7 : 3, 3)); + } + CHECK(cb.store_ref_bool(msg_env) // out_msg:^MsgEnvelope + && cb.store_ref_bool(in_msg)); // imported:^InMsg Ref out_msg = cb.finalize(); // 4.1. insert OutMsg into OutMsgDescr if (verbosity > 2) { @@ -3134,8 +3248,8 @@ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_ return fatal_error("cannot insert a new InMsg into InMsgDescr"); } // 5. create EnqueuedMsg - CHECK(cb.store_long_bool(start_lt) // _ enqueued_lt:uint64 - && cb.store_ref_bool(msg_env)); // out_msg:^MsgEnvelope = EnqueuedMsg; + CHECK(cb.store_long_bool(from_dispatch_queue ? emitted_lt.value() : start_lt) // _ enqueued_lt:uint64 + && cb.store_ref_bool(msg_env)); // out_msg:^MsgEnvelope = EnqueuedMsg; // 6. insert EnqueuedMsg into OutMsgQueue // NB: we use here cur_prefix instead of src_prefix; should we check that route_info.first >= next_addr.use_dest_bits of the old envelope? auto next_hop = block::interpolate_addr(cur_prefix, dest_prefix, route_info.second); @@ -3237,9 +3351,14 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT LOG(ERROR) << "cannot unpack CommonMsgInfo of an inbound internal message"; return false; } - if (info.created_lt != lt) { + if (!env.emitted_lt && info.created_lt != lt) { LOG(ERROR) << "inbound internal message has an augmentation value in source OutMsgQueue distinct from the one in " - "its contents"; + "its contents (CommonMsgInfo)"; + return false; + } + if (env.emitted_lt && env.emitted_lt.value() != lt) { + LOG(ERROR) << "inbound internal message has an augmentation value in source OutMsgQueue distinct from the one in " + "its contents (deferred_it in MsgEnvelope)"; return false; } if (!block::tlb::validate_message_libs(env.msg)) { @@ -3302,7 +3421,8 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT bool our = ton::shard_contains(shard_, cur_prefix); bool to_us = ton::shard_contains(shard_, dest_prefix); - block::EnqueuedMsgDescr enq_msg_descr{cur_prefix, next_prefix, info.created_lt, enqueued_lt, + block::EnqueuedMsgDescr enq_msg_descr{cur_prefix, next_prefix, + env.emitted_lt ? env.emitted_lt.value() : info.created_lt, enqueued_lt, env.msg->get_hash().bits()}; if (processed_upto_->already_processed(enq_msg_descr)) { LOG(DEBUG) << "inbound internal message with lt=" << enq_msg_descr.lt_ << " hash=" << enq_msg_descr.hash_.to_hex() @@ -3319,7 +3439,7 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT // destination is outside our shard, relay transit message // (very similar to enqueue_message()) if (!enqueue_transit_message(std::move(env.msg), std::move(msg_env), cur_prefix, next_prefix, dest_prefix, - std::move(env.fwd_fee_remaining))) { + std::move(env.fwd_fee_remaining), std::move(env.metadata))) { return fatal_error("cannot enqueue transit internal message with key "s + key.to_hex(352)); } return !our || delete_out_msg_queue_msg(key); @@ -3328,7 +3448,7 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT // process the message by an ordinary transaction similarly to process_one_new_message() // // 8. create a Transaction processing this Message - auto trans_root = create_ordinary_transaction(env.msg); + auto trans_root = create_ordinary_transaction(env.msg, env.metadata, 0); if (trans_root.is_null()) { return fatal_error("cannot create transaction for processing inbound message"); } @@ -3368,6 +3488,9 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT * @returns True if the processing was successful, false otherwise. 
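+ *
+ * Note: if the first pass over the dispatch queue did not visit every account
+ * (have_unprocessed_account_dispatch_queue_ is set), no inbound internal messages are imported
+ * in this block; the function returns immediately.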
*/ bool Collator::process_inbound_internal_messages() { + if (have_unprocessed_account_dispatch_queue_) { + return true; + } while (!block_full_ && !nb_out_msgs_->is_eof()) { block_full_ = !block_limit_status_->fits(block::ParamLimits::cl_normal); if (block_full_) { @@ -3476,7 +3599,7 @@ int Collator::process_external_message(Ref msg) { } // process message by a transaction in this block: // 1. create a Transaction processing this Message - auto trans_root = create_ordinary_transaction(msg); + auto trans_root = create_ordinary_transaction(msg, /* metadata = */ {}, 0); if (trans_root.is_null()) { if (busy_) { // transaction rejected by account @@ -3500,6 +3623,230 @@ int Collator::process_external_message(Ref msg) { return 1; } +/** + * Processes messages from dispatch queue + * + * Messages from dispatch queue are taken in three steps: + * 1. Take one message from each account (in the order of lt) + * 2. Take up to 10 per account (including from p.1), up to 20 per initiator, up to 150 in total + * 3. Take up to X messages per initiator, up to 150 in total. X depends on out msg queue size + * + * @returns True if the processing was successful, false otherwise. + */ +bool Collator::process_dispatch_queue() { + if (out_msg_queue_size_ > defer_out_queue_size_limit_ && old_out_msg_queue_size_ > hard_defer_out_queue_size_limit_) { + return true; + } + have_unprocessed_account_dispatch_queue_ = true; + size_t max_total_count[3] = {1 << 30, collator_opts_->dispatch_phase_2_max_total, + collator_opts_->dispatch_phase_3_max_total}; + size_t max_per_initiator[3] = {1 << 30, collator_opts_->dispatch_phase_2_max_per_initiator, 0}; + if (collator_opts_->dispatch_phase_3_max_per_initiator) { + max_per_initiator[2] = collator_opts_->dispatch_phase_3_max_per_initiator.value(); + } else if (out_msg_queue_size_ <= 256) { + max_per_initiator[2] = 10; + } else if (out_msg_queue_size_ <= 512) { + max_per_initiator[2] = 2; + } else if (out_msg_queue_size_ <= 1500) { + max_per_initiator[2] = 1; + } + for (int iter = 0; iter < 3; ++iter) { + if (max_per_initiator[iter] == 0 || max_total_count[iter] == 0) { + continue; + } + vm::AugmentedDictionary cur_dispatch_queue{dispatch_queue_->get_root(), 256, block::tlb::aug_DispatchQueue}; + std::map, size_t> count_per_initiator; + size_t total_count = 0; + while (!cur_dispatch_queue.is_empty()) { + block_full_ = !block_limit_status_->fits(block::ParamLimits::cl_normal); + if (block_full_) { + LOG(INFO) << "BLOCK FULL, stop processing dispatch queue"; + return true; + } + if (soft_timeout_.is_in_past(td::Timestamp::now())) { + block_full_ = true; + LOG(WARNING) << "soft timeout reached, stop processing dispatch queue"; + return true; + } + StdSmcAddress src_addr; + auto account_dispatch_queue = block::get_dispatch_queue_min_lt_account(cur_dispatch_queue, src_addr); + if (account_dispatch_queue.is_null()) { + return fatal_error("invalid dispatch queue in shard state"); + } + vm::Dictionary dict{64}; + td::uint64 dict_size; + if (!block::unpack_account_dispatch_queue(account_dispatch_queue, dict, dict_size)) { + return fatal_error(PSTRING() << "invalid account dispatch queue for account " << src_addr.to_hex()); + } + td::BitArray<64> key; + Ref enqueued_msg = dict.extract_minmax_key(key.bits(), 64, false, false); + LogicalTime lt = key.to_ulong(); + + td::optional msg_metadata; + if (!process_deferred_message(std::move(enqueued_msg), src_addr, lt, msg_metadata)) { + return fatal_error(PSTRING() << "error processing internal message from dispatch queue: account=" + << 
src_addr.to_hex() << ", lt=" << lt); + } + + // Remove message from DispatchQueue + bool ok; + if (iter == 0 || + (iter == 1 && sender_generated_messages_count_[src_addr] >= collator_opts_->defer_messages_after)) { + ok = cur_dispatch_queue.lookup_delete(src_addr).not_null(); + } else { + dict.lookup_delete(key); + --dict_size; + account_dispatch_queue = block::pack_account_dispatch_queue(dict, dict_size); + ok = account_dispatch_queue.not_null() ? cur_dispatch_queue.set(src_addr, account_dispatch_queue) + : cur_dispatch_queue.lookup_delete(src_addr).not_null(); + } + if (!ok) { + return fatal_error(PSTRING() << "error processing internal message from dispatch queue: account=" + << src_addr.to_hex() << ", lt=" << lt); + } + if (msg_metadata) { + auto initiator = std::make_tuple(msg_metadata.value().initiator_wc, msg_metadata.value().initiator_addr, + msg_metadata.value().initiator_lt); + size_t initiator_count = ++count_per_initiator[initiator]; + if (initiator_count >= max_per_initiator[iter]) { + cur_dispatch_queue.lookup_delete(src_addr); + } + } + ++total_count; + if (total_count >= max_total_count[iter]) { + dispatch_queue_total_limit_reached_ = true; + break; + } + } + if (iter == 0) { + have_unprocessed_account_dispatch_queue_ = false; + } + } + return true; +} + +/** + * Processes an internal message from DispatchQueue. + * The message may create a transaction or be enqueued. + * + * Similar to Collator::process_inbound_message. + * + * @param enq_msg The internal message serialized using EnqueuedMsg TLB-scheme. + * @param src_addr 256-bit address of the sender. + * @param lt The logical time of the message. + * @param msg_metadata Reference to store msg_metadata + * + * @returns True if the message was processed successfully, false otherwise. + */ +bool Collator::process_deferred_message(Ref enq_msg, StdSmcAddress src_addr, LogicalTime lt, + td::optional& msg_metadata) { + if (!block::remove_dispatch_queue_entry(*dispatch_queue_, src_addr, lt)) { + return fatal_error(PSTRING() << "failed to delete message from DispatchQueue: address=" << src_addr.to_hex() + << ", lt=" << lt); + } + ++dispatch_queue_ops_; + if (!(dispatch_queue_ops_ & 63)) { + if (!block_limit_status_->add_proof(dispatch_queue_->get_root_cell())) { + return false; + } + } + ++sender_generated_messages_count_[src_addr]; + + LogicalTime enqueued_lt = 0; + if (enq_msg.is_null() || enq_msg->size_ext() != 0x10040 || (enqueued_lt = enq_msg->prefetch_ulong(64)) != lt) { + if (enq_msg.not_null()) { + block::gen::t_EnqueuedMsg.print(std::cerr, *enq_msg); + } + LOG(ERROR) << "internal message in DispatchQueue is not a valid EnqueuedMsg (created lt " << lt << ", enqueued " + << enqueued_lt << ")"; + return false; + } + auto msg_env = enq_msg->prefetch_ref(); + CHECK(msg_env.not_null()); + // 0. check MsgEnvelope + if (msg_env->get_level() != 0) { + LOG(ERROR) << "cannot import a message with non-zero level!"; + return false; + } + if (!block::gen::t_MsgEnvelope.validate_ref(msg_env)) { + LOG(ERROR) << "MsgEnvelope from DispatchQueue is invalid according to automated checks"; + return false; + } + if (!block::tlb::t_MsgEnvelope.validate_ref(msg_env)) { + LOG(ERROR) << "MsgEnvelope from DispatchQueue is invalid according to hand-written checks"; + return false; + } + // 1. unpack MsgEnvelope + block::tlb::MsgEnvelope::Record_std env; + if (!tlb::unpack_cell(msg_env, env)) { + LOG(ERROR) << "cannot unpack MsgEnvelope from DispatchQueue"; + return false; + } + // 2. 
unpack CommonMsgInfo of the message + vm::CellSlice cs{vm::NoVmOrd{}, env.msg}; + if (block::gen::t_CommonMsgInfo.get_tag(cs) != block::gen::CommonMsgInfo::int_msg_info) { + LOG(ERROR) << "internal message from DispatchQueue is not in fact internal!"; + return false; + } + block::gen::CommonMsgInfo::Record_int_msg_info info; + if (!tlb::unpack(cs, info)) { + LOG(ERROR) << "cannot unpack CommonMsgInfo of an internal message from DispatchQueue"; + return false; + } + if (info.created_lt != lt) { + LOG(ERROR) << "internal message has lt in DispatchQueue distinct from the one in " + "its contents"; + return false; + } + if (!block::tlb::validate_message_libs(env.msg)) { + LOG(ERROR) << "internal message in DispatchQueue has invalid StateInit"; + return false; + } + // 2.1. check fwd_fee and fwd_fee_remaining + td::RefInt256 orig_fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee); + if (env.fwd_fee_remaining > orig_fwd_fee) { + LOG(ERROR) << "internal message if DispatchQueue has fwd_fee_remaining=" << td::dec_string(env.fwd_fee_remaining) + << " larger than original fwd_fee=" << td::dec_string(orig_fwd_fee); + return false; + } + // 3. extract source and destination shards + auto src_prefix = block::tlb::t_MsgAddressInt.get_prefix(info.src); + auto dest_prefix = block::tlb::t_MsgAddressInt.get_prefix(info.dest); + if (!(src_prefix.is_valid() && dest_prefix.is_valid())) { + LOG(ERROR) << "internal message in DispatchQueue has invalid source or destination address"; + return false; + } + // 4. chech current and next hop shards + if (env.cur_addr != 0 || env.next_addr != 0) { + LOG(ERROR) << "internal message in DispatchQueue is expected to have zero cur_addr and next_addr"; + return false; + } + // 5. calculate emitted_lt + LogicalTime emitted_lt = std::max(start_lt, last_dispatch_queue_emitted_lt_[src_addr]) + 1; + auto it = accounts.find(src_addr); + if (it != accounts.end()) { + emitted_lt = std::max(emitted_lt, it->second->last_trans_end_lt_ + 1); + } + last_dispatch_queue_emitted_lt_[src_addr] = emitted_lt; + update_max_lt(emitted_lt + 1); + + env.emitted_lt = emitted_lt; + if (!block::tlb::pack_cell(msg_env, env)) { + return fatal_error("cannot pack msg envelope"); + } + + // 6. create NewOutMsg + block::NewOutMsg new_msg{emitted_lt, env.msg, {}, 0}; + new_msg.metadata = env.metadata; + new_msg.msg_env_from_dispatch_queue = msg_env; + ++unprocessed_deferred_messages_[src_addr]; + LOG(INFO) << "delivering deferred message from account " << src_addr.to_hex() << ", lt=" << lt + << ", emitted_lt=" << emitted_lt; + register_new_msg(std::move(new_msg)); + msg_metadata = std::move(env.metadata); + return true; +} + /** * Inserts an InMsg into the block's InMsgDescr. * @@ -3517,8 +3864,9 @@ bool Collator::insert_in_msg(Ref in_msg) { return false; } Ref msg = cs.prefetch_ref(); - int tag = (int)cs.prefetch_ulong(3); - if (!(tag == 0 || tag == 2)) { // msg_import_ext$000 or msg_import_ihr$010 contain (Message Any) directly + int tag = block::gen::t_InMsg.get_tag(cs); + // msg_import_ext$000 or msg_import_ihr$010 contain (Message Any) directly + if (!(tag == block::gen::InMsg::msg_import_ext || tag == block::gen::InMsg::msg_import_ihr)) { // extract Message Any from MsgEnvelope to compute correct key auto cs2 = load_cell_slice(std::move(msg)); if (!cs2.size_refs()) { @@ -3599,11 +3947,15 @@ bool Collator::insert_out_msg(Ref out_msg, td::ConstBitPtr msg_hash) { * * @param msg The new outbound message to enqueue. * @param fwd_fees_remaining The remaining forward fees for the message. 
- * @param enqueued_lt The logical time at which the message is enqueued. + * @param src_addr 256-bit address of the sender + * @param defer Put the message to DispatchQueue * * @returns True if the message was successfully enqueued, false otherwise. */ -bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt) { +bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, StdSmcAddress src_addr, + bool defer) { + LogicalTime enqueued_lt = msg.lt; + CHECK(msg.msg_env_from_dispatch_queue.is_null()); // 0. unpack src_addr and dest_addr block::gen::CommonMsgInfo::Record_int_msg_info info; if (!tlb::unpack_cell_inexact(msg.msg, info)) { @@ -3623,18 +3975,24 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema return fatal_error("cannot perform hypercube routing for a new outbound message"); } // 2. create a new MsgEnvelope - vm::CellBuilder cb; - CHECK(cb.store_long_bool(4, 4) // msg_envelope#4 cur_addr:.. next_addr:.. - && cb.store_long_bool(route_info.first, 8) // cur_addr:IntermediateAddress - && cb.store_long_bool(route_info.second, 8) // next_addr:IntermediateAddress - && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees_remaining) // fwd_fee_remaining:t_Grams - && cb.store_ref_bool(msg.msg)); // msg:^(Message Any) - Ref msg_env = cb.finalize(); + block::tlb::MsgEnvelope::Record_std msg_env_rec{ + defer ? 0 : route_info.first, defer ? 0 : route_info.second, fwd_fees_remaining, msg.msg, {}, msg.metadata}; + Ref msg_env; + CHECK(block::tlb::pack_cell(msg_env, msg_env_rec)); // 3. create a new OutMsg - CHECK(cb.store_long_bool(1, 3) // msg_export_new$001 - && cb.store_ref_bool(msg_env) // out_msg:^MsgEnvelope - && cb.store_ref_bool(msg.trans)); // transaction:^Transaction - Ref out_msg = cb.finalize(); + vm::CellBuilder cb; + Ref out_msg; + if (defer) { + CHECK(cb.store_long_bool(0b10100, 5) // msg_export_new_defer$10100 + && cb.store_ref_bool(msg_env) // out_msg:^MsgEnvelope + && cb.store_ref_bool(msg.trans)); // transaction:^Transaction + out_msg = cb.finalize(); + } else { + CHECK(cb.store_long_bool(1, 3) // msg_export_new$001 + && cb.store_ref_bool(msg_env) // out_msg:^MsgEnvelope + && cb.store_ref_bool(msg.trans)); // transaction:^Transaction + out_msg = cb.finalize(); + } // 4. insert OutMsg into OutMsgDescr if (verbosity > 2) { std::cerr << "OutMsg for a newly-generated message: "; @@ -3646,7 +4004,30 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema // 5. create EnqueuedMsg CHECK(cb.store_long_bool(enqueued_lt) // _ enqueued_lt:uint64 && cb.store_ref_bool(msg_env)); // out_msg:^MsgEnvelope = EnqueuedMsg; - // 6. insert EnqueuedMsg into OutMsgQueue + + // 6. 
insert EnqueuedMsg into OutMsgQueue (or DispatchQueue) + if (defer) { + LOG(INFO) << "deferring new message from account " << workchain() << ":" << src_addr.to_hex() << ", lt=" << msg.lt; + vm::Dictionary dispatch_dict{64}; + td::uint64 dispatch_dict_size; + if (!block::unpack_account_dispatch_queue(dispatch_queue_->lookup(src_addr), dispatch_dict, dispatch_dict_size)) { + return fatal_error(PSTRING() << "cannot unpack AccountDispatchQueue for account " << src_addr.to_hex()); + } + td::BitArray<64> key; + key.store_ulong(msg.lt); + if (!dispatch_dict.set_builder(key, cb, vm::Dictionary::SetMode::Add)) { + return fatal_error(PSTRING() << "cannot add message to AccountDispatchQueue for account " << src_addr.to_hex() + << ", lt=" << msg.lt); + } + ++dispatch_dict_size; + dispatch_queue_->set(src_addr, block::pack_account_dispatch_queue(dispatch_dict, dispatch_dict_size)); + ++dispatch_queue_ops_; + if (!(dispatch_queue_ops_ & 63)) { + return block_limit_status_->add_proof(dispatch_queue_->get_root_cell()); + } + return true; + } + auto next_hop = block::interpolate_addr(src_prefix, dest_prefix, route_info.second); td::BitArray<32 + 64 + 256> key; key.bits().store_int(next_hop.workchain, 32); @@ -3680,7 +4061,7 @@ bool Collator::process_new_messages(bool enqueue_only) { block::NewOutMsg msg = new_msgs.top(); new_msgs.pop(); block_limit_status_->extra_out_msgs--; - if (block_full_ && !enqueue_only) { + if ((block_full_ || have_unprocessed_account_dispatch_queue_) && !enqueue_only) { LOG(INFO) << "BLOCK FULL, enqueue all remaining new messages"; enqueue_only = true; } @@ -3713,11 +4094,17 @@ void Collator::register_new_msg(block::NewOutMsg new_msg) { * Registers new messages that were created in the transaction. * * @param trans The transaction containing the messages. + * @param msg_metadata Metadata of the new messages. 
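+ *
+ * When message metadata is enabled (capMsgMetadata), each outbound message of the transaction is
+ * registered with this same metadata value; otherwise the messages are registered without metadata.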
*/ -void Collator::register_new_msgs(block::transaction::Transaction& trans) { +void Collator::register_new_msgs(block::transaction::Transaction& trans, + td::optional msg_metadata) { CHECK(trans.root.not_null()); for (unsigned i = 0; i < trans.out_msgs.size(); i++) { - register_new_msg(trans.extract_out_msg_ext(i)); + block::NewOutMsg msg = trans.extract_out_msg_ext(i); + if (msg_metadata_enabled_) { + msg.metadata = msg_metadata; + } + register_new_msg(std::move(msg)); } } @@ -4274,7 +4661,21 @@ bool Collator::check_block_overload() { << " lt_delta=" << block_limit_status_->cur_lt - block_limit_status_->limits.start_lt << " size_estimate=" << block_size_estimate_; auto cl = block_limit_status_->classify(); - if (cl <= block::ParamLimits::cl_underload) { + if (cl >= block::ParamLimits::cl_soft || dispatch_queue_total_limit_reached_) { + std::string message = "block is overloaded "; + if (cl >= block::ParamLimits::cl_soft) { + message += PSTRING() << "(category " << cl << ")"; + } else { + message += "(long dispatch queue processing)"; + } + if (out_msg_queue_size_ > SPLIT_MAX_QUEUE_SIZE) { + LOG(INFO) << message << ", but don't set overload history because out_msg_queue size is too big to split (" + << out_msg_queue_size_ << " > " << SPLIT_MAX_QUEUE_SIZE << ")"; + } else { + overload_history_ |= 1; + LOG(INFO) << message; + } + } else if (cl <= block::ParamLimits::cl_underload) { if (out_msg_queue_size_ > MERGE_MAX_QUEUE_SIZE) { LOG(INFO) << "block is underloaded, but don't set underload history because out_msg_queue size is too big to merge (" @@ -4283,15 +4684,6 @@ bool Collator::check_block_overload() { underload_history_ |= 1; LOG(INFO) << "block is underloaded"; } - } else if (cl >= block::ParamLimits::cl_soft) { - if (out_msg_queue_size_ > SPLIT_MAX_QUEUE_SIZE) { - LOG(INFO) << "block is overloaded (category " << cl - << "), but don't set overload history because out_msg_queue size is too big to split (" - << out_msg_queue_size_ << " > " << SPLIT_MAX_QUEUE_SIZE << ")"; - } else { - overload_history_ |= 1; - LOG(INFO) << "block is overloaded (category " << cl << ")"; - } } else { LOG(INFO) << "block is loaded normally"; } @@ -4617,9 +5009,27 @@ bool Collator::compute_out_msg_queue_info(Ref& out_msg_queue_info) { rt->print_rec(std::cerr); } vm::CellBuilder cb; + // out_msg_queue_extra#0 dispatch_queue:DispatchQueue out_queue_size:(Maybe uint48) = OutMsgQueueExtra; + // ... 
extra:(Maybe OutMsgQueueExtra) + if (!dispatch_queue_->is_empty() || store_out_msg_queue_size_) { + if (!(cb.store_long_bool(1, 1) && cb.store_long_bool(0, 4) && dispatch_queue_->append_dict_to_bool(cb))) { + return false; + } + if (!(cb.store_bool_bool(store_out_msg_queue_size_) && + (!store_out_msg_queue_size_ || cb.store_long_bool(out_msg_queue_size_, 48)))) { + return false; + } + } else { + if (!cb.store_long_bool(0, 1)) { + return false; + } + } + vm::CellSlice maybe_extra = cb.as_cellslice(); + cb.reset(); + return register_out_msg_queue_op(true) && out_msg_queue_->append_dict_to_bool(cb) // _ out_queue:OutMsgQueue && processed_upto_->pack(cb) // proc_info:ProcessedInfo - && ihr_pending->append_dict_to_bool(cb) // ihr_pending:IhrPendingInfo + && cb.append_cellslice_bool(maybe_extra) // extra:(Maybe OutMsgQueueExtra) && cb.finalize_to(out_msg_queue_info); } diff --git a/validator/impl/fabric.cpp b/validator/impl/fabric.cpp index 997fa9a18..d69492393 100644 --- a/validator/impl/fabric.cpp +++ b/validator/impl/fabric.cpp @@ -213,8 +213,8 @@ void run_validate_query(ShardIdFull shard, UnixTime min_ts, BlockIdExt min_maste void run_collate_query(ShardIdFull shard, td::uint32 min_ts, const BlockIdExt& min_masterchain_block_id, std::vector prev, Ed25519_PublicKey collator_id, td::Ref validator_set, - td::actor::ActorId manager, td::Timestamp timeout, - td::Promise promise) { + td::Ref collator_opts, td::actor::ActorId manager, + td::Timestamp timeout, td::Promise promise) { BlockSeqno seqno = 0; for (auto& p : prev) { if (p.seqno() > seqno) { @@ -223,7 +223,8 @@ void run_collate_query(ShardIdFull shard, td::uint32 min_ts, const BlockIdExt& m } td::actor::create_actor(PSTRING() << "collate" << shard.to_str() << ":" << (seqno + 1), shard, false, min_ts, min_masterchain_block_id, std::move(prev), std::move(validator_set), - collator_id, std::move(manager), timeout, std::move(promise)) + collator_id, std::move(collator_opts), std::move(manager), timeout, + std::move(promise)) .release(); } @@ -238,7 +239,8 @@ void run_collate_hardfork(ShardIdFull shard, const BlockIdExt& min_masterchain_b } td::actor::create_actor(PSTRING() << "collate" << shard.to_str() << ":" << (seqno + 1), shard, true, 0, min_masterchain_block_id, std::move(prev), td::Ref{}, - Ed25519_PublicKey{Bits256::zero()}, std::move(manager), timeout, std::move(promise)) + Ed25519_PublicKey{Bits256::zero()}, td::Ref{true}, + std::move(manager), timeout, std::move(promise)) .release(); } diff --git a/validator/impl/liteserver.cpp b/validator/impl/liteserver.cpp index 7fa6e59e5..d6fad7ee2 100644 --- a/validator/impl/liteserver.cpp +++ b/validator/impl/liteserver.cpp @@ -287,6 +287,9 @@ void LiteQuery::perform() { [&](lite_api::liteServer_getOutMsgQueueSizes& q) { this->perform_getOutMsgQueueSizes(q.mode_ & 1 ? 
ShardIdFull(q.wc_, q.shard_) : td::optional()); }, + [&](lite_api::liteServer_getBlockOutMsgQueueSize& q) { + this->perform_getBlockOutMsgQueueSize(q.mode_, create_block_id(q.id_)); + }, [&](auto& obj) { this->abort_query(td::Status::Error(ErrorCode::protoviolation, "unknown query")); })); } @@ -2376,6 +2379,45 @@ void LiteQuery::perform_listBlockTransactions(BlockIdExt blkid, int mode, int co request_block_data(blkid); } +static td::Result> get_in_msg_metadata( + const Ref& in_msg_descr_root, const Ref& trans_root) { + vm::AugmentedDictionary in_msg_descr{vm::load_cell_slice_ref(in_msg_descr_root), 256, block::tlb::aug_InMsgDescr}; + block::gen::Transaction::Record transaction; + if (!block::tlb::unpack_cell(trans_root, transaction)) { + return td::Status::Error("invalid Transaction in block"); + } + Ref msg = transaction.r1.in_msg->prefetch_ref(); + if (msg.is_null()) { + return nullptr; + } + td::Bits256 in_msg_hash = msg->get_hash().bits(); + Ref in_msg = in_msg_descr.lookup(in_msg_hash); + if (in_msg.is_null()) { + return td::Status::Error(PSTRING() << "no InMsg in InMsgDescr for message with hash " << in_msg_hash.to_hex()); + } + int tag = block::gen::t_InMsg.get_tag(*in_msg); + if (tag != block::gen::InMsg::msg_import_imm && tag != block::gen::InMsg::msg_import_fin && + tag != block::gen::InMsg::msg_import_deferred_fin) { + return nullptr; + } + Ref msg_env = in_msg->prefetch_ref(); + if (msg_env.is_null()) { + return td::Status::Error(PSTRING() << "no MsgEnvelope in InMsg for message with hash " << in_msg_hash.to_hex()); + } + block::tlb::MsgEnvelope::Record_std env; + if (!block::tlb::unpack_cell(std::move(msg_env), env)) { + return td::Status::Error(PSTRING() << "failed to unpack MsgEnvelope for message with hash " << in_msg_hash.to_hex()); + } + if (!env.metadata) { + return nullptr; + } + block::MsgMetadata& metadata = env.metadata.value(); + return create_tl_object( + 0, metadata.depth, + create_tl_object(metadata.initiator_wc, metadata.initiator_addr), + metadata.initiator_lt); +} + void LiteQuery::finish_listBlockTransactions(int mode, int req_count) { LOG(INFO) << "completing a listBlockTransactions(" << base_blk_id_.to_str() << ", " << mode << ", " << req_count << ", " << acc_addr_.to_hex() << ", " << trans_lt_ << ") liteserver query"; @@ -2395,6 +2437,8 @@ void LiteQuery::finish_listBlockTransactions(int mode, int req_count) { acc_addr_.set_ones(); trans_lt_ = ~0ULL; } + bool with_metadata = mode & 256; + mode &= ~256; std::vector> result; bool eof = false; ton::LogicalTime reverse = (mode & 64) ? ~0ULL : 0; @@ -2448,8 +2492,18 @@ void LiteQuery::finish_listBlockTransactions(int mode, int req_count) { trans_lt_ = reverse; break; } - result.push_back(create_tl_object(mode, cur_addr, cur_trans.to_long(), - tvalue->get_hash().bits())); + tl_object_ptr metadata; + if (with_metadata) { + auto r_metadata = get_in_msg_metadata(extra.in_msg_descr, tvalue); + if (r_metadata.is_error()) { + fatal_error(r_metadata.move_as_error()); + return; + } + metadata = r_metadata.move_as_ok(); + } + result.push_back(create_tl_object( + mode | (metadata ? 
256 : 0), cur_addr, cur_trans.to_long(), tvalue->get_hash().bits(), + std::move(metadata))); ++count; } } @@ -2484,6 +2538,36 @@ void LiteQuery::perform_listBlockTransactionsExt(BlockIdExt blkid, int mode, int request_block_data(blkid); } +static td::Status process_all_in_msg_metadata(const Ref& in_msg_descr_root, + const std::vector>& trans_roots) { + vm::AugmentedDictionary in_msg_descr{vm::load_cell_slice_ref(in_msg_descr_root), 256, block::tlb::aug_InMsgDescr}; + for (const Ref& trans_root : trans_roots) { + block::gen::Transaction::Record transaction; + if (!block::tlb::unpack_cell(trans_root, transaction)) { + return td::Status::Error("invalid Transaction in block"); + } + Ref msg = transaction.r1.in_msg->prefetch_ref(); + if (msg.is_null()) { + continue; + } + td::Bits256 in_msg_hash = msg->get_hash().bits(); + Ref in_msg = in_msg_descr.lookup(in_msg_hash); + if (in_msg.is_null()) { + return td::Status::Error(PSTRING() << "no InMsg in InMsgDescr for message with hash " << in_msg_hash.to_hex()); + } + int tag = block::gen::t_InMsg.get_tag(*in_msg); + if (tag == block::gen::InMsg::msg_import_imm || tag == block::gen::InMsg::msg_import_fin || + tag == block::gen::InMsg::msg_import_deferred_fin) { + Ref msg_env = in_msg->prefetch_ref(); + if (msg_env.is_null()) { + return td::Status::Error(PSTRING() << "no MsgEnvelope in InMsg for message with hash " << in_msg_hash.to_hex()); + } + vm::load_cell_slice(msg_env); + } + } + return td::Status::OK(); +} + void LiteQuery::finish_listBlockTransactionsExt(int mode, int req_count) { LOG(INFO) << "completing a listBlockTransactionsExt(" << base_blk_id_.to_str() << ", " << mode << ", " << req_count << ", " << acc_addr_.to_hex() << ", " << trans_lt_ << ") liteserver query"; @@ -2495,6 +2579,10 @@ void LiteQuery::finish_listBlockTransactionsExt(int mode, int req_count) { CHECK(rhash == base_blk_id_.root_hash); vm::MerkleProofBuilder pb; auto virt_root = block_root; + if (mode & 256) { + // with msg metadata in proof + mode |= 32; + } if (mode & 32) { // proof requested virt_root = pb.init(std::move(virt_root)); @@ -2560,6 +2648,13 @@ void LiteQuery::finish_listBlockTransactionsExt(int mode, int req_count) { ++count; } } + if (mode & 256) { + td::Status S = process_all_in_msg_metadata(extra.in_msg_descr, trans_roots); + if (S.is_error()) { + fatal_error(S.move_as_error()); + return; + } + } } catch (vm::VmError err) { fatal_error("error while parsing AccountBlocks of block "s + base_blk_id_.to_str() + " : " + err.get_msg()); return; @@ -3252,7 +3347,7 @@ void LiteQuery::continue_getOutMsgQueueSizes(td::optional shard, Re auto ig = mp.init_guard(); for (size_t i = 0; i < blocks.size(); ++i) { td::actor::send_closure(manager_, &ValidatorManager::get_out_msg_queue_size, blocks[i], - [promise = ig.get_promise(), res, i, id = blocks[i]](td::Result R) mutable { + [promise = ig.get_promise(), res, i, id = blocks[i]](td::Result R) mutable { TRY_RESULT_PROMISE(promise, value, std::move(R)); res->at(i) = create_tl_object( create_tl_lite_block_id(id), value); @@ -3271,6 +3366,73 @@ void LiteQuery::continue_getOutMsgQueueSizes(td::optional shard, Re }); } +void LiteQuery::perform_getBlockOutMsgQueueSize(int mode, BlockIdExt blkid) { + LOG(INFO) << "started a getBlockOutMsgQueueSize(" << blkid.to_str() << ", " << mode << ") liteserver query"; + mode_ = mode; + if (!blkid.is_valid_full()) { + fatal_error("invalid BlockIdExt"); + return; + } + set_continuation([=]() -> void { finish_getBlockOutMsgQueueSize(); }); + request_block_data_state(blkid); +} + +void 
LiteQuery::finish_getBlockOutMsgQueueSize() {
+  LOG(INFO) << "completing getBlockOutMsgQueueSize() query";
+  bool with_proof = mode_ & 1;
+  Ref state_root = state_->root_cell();
+  vm::MerkleProofBuilder pb;
+  if (with_proof) {
+    pb = vm::MerkleProofBuilder{state_root};
+    state_root = pb.root();
+  }
+  block::gen::ShardStateUnsplit::Record sstate;
+  block::gen::OutMsgQueueInfo::Record out_msg_queue_info;
+  if (!tlb::unpack_cell(state_root, sstate) || !tlb::unpack_cell(sstate.out_msg_queue_info, out_msg_queue_info)) {
+    fatal_error("cannot unpack shard state");
+    return;
+  }
+  vm::CellSlice& extra_slice = out_msg_queue_info.extra.write();
+  if (extra_slice.fetch_long(1) == 0) {
+    fatal_error("no out_msg_queue_size in shard state");
+    return;
+  }
+  block::gen::OutMsgQueueExtra::Record out_msg_queue_extra;
+  if (!tlb::unpack(extra_slice, out_msg_queue_extra)) {
+    fatal_error("cannot unpack OutMsgQueueExtra");
+    return;
+  }
+  vm::CellSlice& size_slice = out_msg_queue_extra.out_queue_size.write();
+  if (size_slice.fetch_long(1) == 0) {
+    fatal_error("no out_msg_queue_size in shard state");
+    return;
+  }
+  td::uint64 size = size_slice.prefetch_ulong(48);
+
+  td::BufferSlice proof;
+  if (with_proof) {
+    Ref proof1, proof2;
+    if (!make_state_root_proof(proof1)) {
+      return;
+    }
+    if (!pb.extract_proof_to(proof2)) {
+      fatal_error("unknown error creating Merkle proof");
+      return;
+    }
+    auto r_proof = vm::std_boc_serialize_multi({std::move(proof1), std::move(proof2)});
+    if (r_proof.is_error()) {
+      fatal_error(r_proof.move_as_error());
+      return;
+    }
+    proof = r_proof.move_as_ok();
+  }
+  LOG(INFO) << "getBlockOutMsgQueueSize(" << blk_id_.to_str() << ", " << mode_ << ") query completed";
+  auto b = ton::create_serialize_tl_object(
+      mode_, ton::create_tl_lite_block_id(blk_id_), size, std::move(proof));
+  finish_query(std::move(b));
+}
+
+
 void LiteQuery::perform_nonfinal_getCandidate(td::Bits256 source, BlockIdExt blkid, td::Bits256 collated_data_hash) {
   LOG(INFO) << "started a nonfinal.getCandidate liteserver query";
   td::actor::send_closure_later(
diff --git a/validator/impl/liteserver.hpp b/validator/impl/liteserver.hpp
index 34e569c99..2d75dc61c 100644
--- a/validator/impl/liteserver.hpp
+++ b/validator/impl/liteserver.hpp
@@ -170,6 +170,8 @@ class LiteQuery : public td::actor::Actor {
                                        std::vector> result);
   void perform_getOutMsgQueueSizes(td::optional shard);
   void continue_getOutMsgQueueSizes(td::optional shard, Ref state);
+  void perform_getBlockOutMsgQueueSize(int mode, BlockIdExt blkid);
+  void finish_getBlockOutMsgQueueSize();
   void perform_nonfinal_getCandidate(td::Bits256 source, BlockIdExt blkid, td::Bits256 collated_data_hash);
   void perform_nonfinal_getValidatorGroups(int mode, ShardIdFull shard);
diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp
index 88bc61634..8c39a1ab4 100644
--- a/validator/impl/validate-query.cpp
+++ b/validator/impl/validate-query.cpp
@@ -895,6 +895,9 @@ bool ValidateQuery::try_unpack_mc_state() {
     if (!is_masterchain() && !check_this_shard_mc_info()) {
       return fatal_error("masterchain configuration does not admit creating block "s + id_.to_str());
     }
+    store_out_msg_queue_size_ = config_->has_capability(ton::capStoreOutMsgQueueSize);
+    msg_metadata_enabled_ = config_->has_capability(ton::capMsgMetadata);
+    deferring_messages_enabled_ = config_->has_capability(ton::capDeferMessages);
   } catch (vm::VmError& err) {
     return fatal_error(-666, err.get_msg());
   } catch (vm::VmVirtError& err) {
@@ -967,6 +970,7 @@ bool
ValidateQuery::fetch_config_params() { compute_phase_cfg_.suspended_addresses = config_->get_suspended_addresses(now_); compute_phase_cfg_.size_limits = size_limits; compute_phase_cfg_.precompiled_contracts = config_->get_precompiled_contracts_config(); + compute_phase_cfg_.allow_external_unfreeze = compute_phase_cfg_.global_version >= 8; } { // compute action_phase_cfg @@ -990,6 +994,8 @@ bool ValidateQuery::fetch_config_params() { action_phase_cfg_.size_limits = size_limits; action_phase_cfg_.action_fine_enabled = config_->get_global_version() >= 4; action_phase_cfg_.bounce_on_fail_enabled = config_->get_global_version() >= 4; + action_phase_cfg_.message_skip_enabled = config_->get_global_version() >= 8; + action_phase_cfg_.disable_custom_fess = config_->get_global_version() >= 8; action_phase_cfg_.mc_blackhole_addr = config_->get_burning_config().blackhole_addr; } { @@ -1702,7 +1708,7 @@ void ValidateQuery::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result< * @param wc_info The workchain information. * @param ccvc The Catchain validators configuration. * - * @returns True if the validation wasa successful, false othewise. + * @returns True if the validation wasa successful, false otherwise. */ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block::McShardHash* sibling, const block::WorkchainInfo* wc_info, const block::CatchainValidatorsConfig& ccvc) { @@ -2195,6 +2201,50 @@ bool ValidateQuery::check_utime_lt() { return true; } +/** + * Reads the size of the outbound message queue from the previous state(s), or requests it if needed. + * + * @returns True if the request was successful, false otherwise. + */ +bool ValidateQuery::prepare_out_msg_queue_size() { + if (ps_.out_msg_queue_size_) { + // if after_split then out_msg_queue_size is always present, since it is calculated during split + old_out_msg_queue_size_ = ps_.out_msg_queue_size_.value(); + return true; + } + old_out_msg_queue_size_ = 0; + for (size_t i = 0; i < prev_blocks.size(); ++i) { + ++pending; + send_closure_later(manager, &ValidatorManager::get_out_msg_queue_size, prev_blocks[i], + [self = get_self(), i](td::Result res) { + td::actor::send_closure(std::move(self), &ValidateQuery::got_out_queue_size, i, + std::move(res)); + }); + } + return true; +} + +/** + * Handles the result of obtaining the size of the outbound message queue. + * + * If the block is after merge then the two sizes are added. + * + * @param i The index of the previous block (0 or 1). + * @param res The result object containing the size of the queue. 
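+ *
+ * Example: after a merge of two previous blocks with queue sizes 1000 and 250,
+ * old_out_msg_queue_size_ ends up as 1250.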
+ */ +void ValidateQuery::got_out_queue_size(size_t i, td::Result res) { + --pending; + if (res.is_error()) { + fatal_error( + res.move_as_error_prefix(PSTRING() << "failed to get message queue size from prev block #" << i << ": ")); + return; + } + td::uint64 size = res.move_as_ok(); + LOG(DEBUG) << "got outbound queue size from prev block #" << i << ": " << size; + old_out_msg_queue_size_ += size; + try_validate(); +} + /* * * METHODS CALLED FROM try_validate() stage 1 @@ -3041,6 +3091,7 @@ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id return reject_query("new EnqueuedMsg with key "s + out_msg_id.to_hex(352) + " is invalid"); } if (new_value.not_null()) { + ++new_out_msg_queue_size_; if (!block::gen::t_EnqueuedMsg.validate_csr(new_value)) { return reject_query("new EnqueuedMsg with key "s + out_msg_id.to_hex(352) + " failed to pass automated validity checks"); @@ -3057,6 +3108,7 @@ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id } } if (old_value.not_null()) { + --new_out_msg_queue_size_; if (!block::gen::t_EnqueuedMsg.validate_csr(old_value)) { return reject_query("old EnqueuedMsg with key "s + out_msg_id.to_hex(352) + " failed to pass automated validity checks"); @@ -3083,11 +3135,18 @@ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id " has been changed in the OutMsgQueue, but the key did not change"); } auto q_msg_env = (old_value.not_null() ? old_value : new_value)->prefetch_ref(); - int tag = (int)out_msg_cs->prefetch_ulong(3); - // mode for msg_export_{ext,new,imm,tr,deq_imm,???,deq/deq_short,tr_req} - static const int tag_mode[8] = {0, 2, 0, 2, 1, 0, 1, 3}; - static const char* tag_str[8] = {"ext", "new", "imm", "tr", "deq_imm", "???", "deq", "tr_req"}; - if (tag < 0 || tag >= 8 || !(tag_mode[tag] & mode)) { + int tag = block::tlb::t_OutMsg.get_tag(*out_msg_cs); + if (tag == 12 || tag == 13) { + tag /= 2; + } else if (tag == 20) { + tag = 8; + } else if (tag == 21) { + tag = 9; + } + // mode for msg_export_{ext,new,imm,tr,deq_imm,???,deq/deq_short,tr_req,new_defer,deferred_tr} + static const int tag_mode[10] = {0, 2, 0, 2, 1, 0, 1, 3, 0, 2}; + static const char* tag_str[10] = {"ext", "new", "imm", "tr", "deq_imm", "???", "deq", "tr_req", "new_defer", "deferred_tr"}; + if (tag < 0 || tag >= 10 || !(tag_mode[tag] & mode)) { return reject_query(PSTRING() << "OutMsgDescr corresponding to " << m_str[mode] << "queued message with key " << out_msg_id.to_hex(352) << " has invalid tag " << tag << "(" << tag_str[tag & 7] << ")"); @@ -3202,6 +3261,7 @@ bool ValidateQuery::precheck_message_queue_update() { try { CHECK(ps_.out_msg_queue_ && ns_.out_msg_queue_); CHECK(out_msg_dict_); + new_out_msg_queue_size_ = old_out_msg_queue_size_; if (!ps_.out_msg_queue_->scan_diff( *ns_.out_msg_queue_, [this](td::ConstBitPtr key, int key_len, Ref old_val_extra, @@ -3216,6 +3276,186 @@ bool ValidateQuery::precheck_message_queue_update() { return reject_query("invalid OutMsgQueue dictionary difference between the old and the new state: "s + err.get_msg()); } + LOG(INFO) << "outbound message queue size: " << old_out_msg_queue_size_ << " -> " << new_out_msg_queue_size_; + if (store_out_msg_queue_size_) { + if (!ns_.out_msg_queue_size_) { + return reject_query(PSTRING() << "outbound message queue size in the new state is not correct (expected: " + << new_out_msg_queue_size_ << ", found: none)"); + } + if (ns_.out_msg_queue_size_.value() != new_out_msg_queue_size_) { + return reject_query(PSTRING() << "outbound 
message queue size in the new state is not correct (expected: " + << new_out_msg_queue_size_ << ", found: " << ns_.out_msg_queue_size_.value() + << ")"); + } + } else { + if (ns_.out_msg_queue_size_) { + return reject_query("outbound message queue size in the new state is present, but shouldn't"); + } + } + return true; +} + +/** + * Performs a check on the difference between the old and new dispatch queues for one account. + * + * @param addr The 256-bit address of the account. + * @param old_queue_csr The old value of the account dispatch queue. + * @param new_queue_csr The new value of the account dispatch queue. + * + * @returns True if the check is successful, false otherwise. + */ +bool ValidateQuery::check_account_dispatch_queue_update(td::Bits256 addr, Ref old_queue_csr, + Ref new_queue_csr) { + vm::Dictionary old_dict{64}; + td::uint64 old_dict_size = 0; + if (!block::unpack_account_dispatch_queue(old_queue_csr, old_dict, old_dict_size)) { + return reject_query(PSTRING() << "invalid AccountDispatchQueue for " << addr.to_hex() << " in the old state"); + } + vm::Dictionary new_dict{64}; + td::uint64 new_dict_size = 0; + if (!block::unpack_account_dispatch_queue(new_queue_csr, new_dict, new_dict_size)) { + return reject_query(PSTRING() << "invalid AccountDispatchQueue for " << addr.to_hex() << " in the new state"); + } + td::uint64 expected_dict_size = old_dict_size; + LogicalTime max_removed_lt = 0; + LogicalTime min_added_lt = (LogicalTime)-1; + bool res = old_dict.scan_diff( + new_dict, [&](td::ConstBitPtr key, int key_len, Ref old_val, Ref new_val) { + CHECK(key_len == 64); + CHECK(old_val.not_null() || new_val.not_null()); + if (old_val.not_null() && new_val.not_null()) { + return false; + } + td::uint64 lt = key.get_uint(64); + block::gen::EnqueuedMsg::Record rec; + if (old_val.not_null()) { + LOG(DEBUG) << "removed message from DispatchQueue: account=" << addr.to_hex() << ", lt=" << lt; + --expected_dict_size; + if (!block::tlb::csr_unpack(old_val, rec)) { + return reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex()); + } + } else { + LOG(DEBUG) << "added message to DispatchQueue: account=" << addr.to_hex() << ", lt=" << lt; + ++expected_dict_size; + if (!block::tlb::csr_unpack(new_val, rec)) { + return reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex()); + } + if (is_masterchain() && config_->is_special_smartcontract(addr)) { + return reject_query(PSTRING() << "cannot defer message from a special account -1:" << addr.to_hex()); + } + } + if (lt != rec.enqueued_lt) { + return reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex() + << ": lt mismatch (" << lt << " != " << rec.enqueued_lt << ")"); + } + block::tlb::MsgEnvelope::Record_std env; + if (!block::gen::t_MsgEnvelope.validate_ref(rec.out_msg) || !block::tlb::unpack_cell(rec.out_msg, env)) { + return reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex()); + } + if (env.emitted_lt) { + return reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex() + << ", lt=" << lt << ": unexpected emitted_lt"); + } + unsigned long long created_lt; + vm::CellSlice msg_cs = vm::load_cell_slice(env.msg); + if (!block::tlb::t_Message.get_created_lt(msg_cs, created_lt)) { + return reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex() + << ": cannot get created_lt"); + } + if (lt != created_lt) { + return 
reject_query(PSTRING() << "invalid EnqueuedMsg in AccountDispatchQueue for " << addr.to_hex() + << ": lt mismatch (" << lt << " != " << created_lt << ")"); + } + if (old_val.not_null()) { + removed_dispatch_queue_messages_[{addr, lt}] = rec.out_msg; + max_removed_lt = std::max(max_removed_lt, lt); + } else { + new_dispatch_queue_messages_[{addr, lt}] = rec.out_msg; + min_added_lt = std::min(min_added_lt, lt); + } + return true; + }); + if (!res) { + return reject_query(PSTRING() << "invalid AccountDispatchQueue diff for account " << addr.to_hex()); + } + if (expected_dict_size != new_dict_size) { + return reject_query(PSTRING() << "invalid count in AccountDispatchQuery for " << addr.to_hex() + << ": expected=" << expected_dict_size << ", found=" << new_dict_size); + } + if (!new_dict.is_empty()) { + td::BitArray<64> new_min_lt; + CHECK(new_dict.get_minmax_key(new_min_lt).not_null()); + if (new_min_lt.to_ulong() <= max_removed_lt) { + return reject_query(PSTRING() << "invalid AccountDispatchQuery update for " << addr.to_hex() + << ": max removed lt is " << max_removed_lt << ", but lt=" << new_min_lt.to_ulong() + << " is still in queue"); + } + } + if (!old_dict.is_empty()) { + td::BitArray<64> old_max_lt; + CHECK(old_dict.get_minmax_key(old_max_lt, true).not_null()); + if (old_max_lt.to_ulong() >= min_added_lt) { + return reject_query(PSTRING() << "invalid AccountDispatchQuery update for " << addr.to_hex() + << ": min added lt is " << min_added_lt << ", but lt=" << old_max_lt.to_ulong() + << " was present in the queue"); + } + if (max_removed_lt != old_max_lt.to_ulong()) { + // Some old messages are still in DispatchQueue, meaning that all new messages from this account must be deferred + account_expected_defer_all_messages_.insert(addr); + } + } + if (old_dict_size > 0 && max_removed_lt != 0) { + ++processed_account_dispatch_queues_; + } + return true; +} + +/** + * Pre-check the difference between the old and new dispatch queues and put the difference to + * new_dispatch_queue_messages_, old_dispatch_queue_messages_ + * + * @returns True if the pre-check and unpack is successful, false otherwise. 
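+ *
+ * Worked example of the per-account check (see check_account_dispatch_queue_update): if the old
+ * AccountDispatchQueue of an account held messages with lt {10, 20} and the new one holds {20, 30},
+ * then lt=10 was removed and lt=30 was added; this is accepted because the minimal remaining lt (20)
+ * exceeds the maximal removed lt (10) and the minimal added lt (30) exceeds the maximal old lt (20).
+ * Since lt=20 (the old maximum) was not removed, all further messages of this account in the block
+ * must be deferred as well (account_expected_defer_all_messages_).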
+ */ +bool ValidateQuery::unpack_dispatch_queue_update() { + LOG(INFO) << "checking the difference between the old and the new dispatch queues"; + try { + CHECK(ps_.dispatch_queue_ && ns_.dispatch_queue_); + CHECK(out_msg_dict_); + bool res = ps_.dispatch_queue_->scan_diff( + *ns_.dispatch_queue_, + [this](td::ConstBitPtr key, int key_len, Ref old_val_extra, Ref new_val_extra) { + CHECK(key_len == 256); + return check_account_dispatch_queue_update(key, ps_.dispatch_queue_->extract_value(std::move(old_val_extra)), + ns_.dispatch_queue_->extract_value(std::move(new_val_extra))); + }, + 3 /* check augmentation of changed nodes */); + if (!res) { + return reject_query("invalid DispatchQueue dictionary in the new state"); + } + + if (old_out_msg_queue_size_ <= compute_phase_cfg_.size_limits.defer_out_queue_size_limit) { + // Check that at least one message was taken from each AccountDispatchQueue + try { + have_unprocessed_account_dispatch_queue_ = false; + td::uint64 total_account_dispatch_queues = 0; + ps_.dispatch_queue_->check_for_each([&](Ref, td::ConstBitPtr, int n) -> bool { + ++total_account_dispatch_queues; + if (total_account_dispatch_queues > processed_account_dispatch_queues_) { + return false; + } + return true; + }); + have_unprocessed_account_dispatch_queue_ = + (total_account_dispatch_queues != processed_account_dispatch_queues_); + } catch (vm::VmVirtError&) { + // VmVirtError can happen if we have only a proof of ShardState + have_unprocessed_account_dispatch_queue_ = true; + } + } + } catch (vm::VmError& err) { + return reject_query("invalid DispatchQueue dictionary difference between the old and the new state: "s + + err.get_msg()); + } return true; } @@ -3343,8 +3583,8 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) CHECK(in_msg.not_null()); int tag = block::gen::t_InMsg.get_tag(*in_msg); CHECK(tag >= 0); // NB: the block has been already checked to be valid TL-B in try_validate() - ton::StdSmcAddress addr; - ton::WorkchainId wc; + ton::StdSmcAddress src_addr, dest_addr; + ton::WorkchainId src_wc, dest_wc; Ref src, dest; Ref transaction; Ref msg, msg_env, tr_msg_env; @@ -3357,6 +3597,7 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) block::gen::CommonMsgInfo::Record_int_msg_info info; ton::AccountIdPrefixFull src_prefix, dest_prefix, cur_prefix, next_prefix; td::RefInt256 fwd_fee, orig_fwd_fee; + bool from_dispatch_queue = false; // initial checks and unpack switch (tag) { case block::gen::InMsg::msg_import_ext: { @@ -3383,7 +3624,7 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) dest_prefix.to_str() + "... 
not in this shard");
     }
     dest = std::move(info_ext.dest);
-    if (!block::tlb::t_MsgAddressInt.extract_std_address(dest, wc, addr)) {
+    if (!block::tlb::t_MsgAddressInt.extract_std_address(dest, dest_wc, dest_addr)) {
       return reject_query("cannot unpack destination address of inbound external message with hash "s +
                           key.to_hex(256));
     }
@@ -3395,7 +3636,7 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg)
       block::gen::InMsg::Record_msg_import_imm inp;
       unsigned long long created_lt = 0;
       CHECK(tlb::csr_unpack(in_msg, inp) && tlb::unpack_cell(inp.in_msg, env) &&
-            block::tlb::t_MsgEnvelope.get_created_lt(vm::load_cell_slice(inp.in_msg), created_lt) &&
+            block::tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(inp.in_msg), created_lt) &&
            (fwd_fee = block::tlb::t_Grams.as_integer(std::move(inp.fwd_fee))).not_null());
       transaction = std::move(inp.transaction);
       msg_env = std::move(inp.in_msg);
@@ -3442,9 +3683,42 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg)
       // msg_discard_fin$110 in_msg:^MsgEnvelope transaction_id:uint64 fwd_fee:Grams
       return reject_query("InMsg with key "s + key.to_hex(256) +
                           " is a msg_discard_fin, but IHR messages are not enabled in this version");
+    case block::gen::InMsg::msg_import_deferred_fin: {
+      from_dispatch_queue = true;
+      // msg_import_deferred_fin$00100 in_msg:^MsgEnvelope transaction:^Transaction fwd_fee:Grams
+      // importing and processing an internal message from DispatchQueue with destination in this shard
+      block::gen::InMsg::Record_msg_import_deferred_fin inp;
+      CHECK(tlb::csr_unpack(in_msg, inp) && tlb::unpack_cell(inp.in_msg, env) &&
+            (fwd_fee = block::tlb::t_Grams.as_integer(std::move(inp.fwd_fee))).not_null());
+      transaction = std::move(inp.transaction);
+      msg_env = std::move(inp.in_msg);
+      msg = env.msg;
+      // ...
+      break;
+    }
+    case block::gen::InMsg::msg_import_deferred_tr: {
+      from_dispatch_queue = true;
+      // msg_import_deferred_tr$00101 in_msg:^MsgEnvelope out_msg:^MsgEnvelope
+      // importing and enqueueing internal message from DispatchQueue
+      block::gen::InMsg::Record_msg_import_deferred_tr inp;
+      CHECK(tlb::csr_unpack(in_msg, inp) && tlb::unpack_cell(inp.in_msg, env));
+      fwd_fee = td::zero_refint();
+      msg_env = std::move(inp.in_msg);
+      msg = env.msg;
+      tr_msg_env = std::move(inp.out_msg);
+      // ...
+      break;
+    }
     default:
       return reject_query(PSTRING() << "InMsg with key " << key.to_hex(256) << " has impossible tag " << tag);
   }
+  if (have_unprocessed_account_dispatch_queue_ && tag != block::gen::InMsg::msg_import_ext &&
+      tag != block::gen::InMsg::msg_import_deferred_tr && tag != block::gen::InMsg::msg_import_deferred_fin) {
+    // Collator is required to take at least one message from each AccountDispatchQueue
+    // (unless the block is full or unless out_msg_queue_size is big)
+    // If some AccountDispatchQueue is unprocessed then it's not allowed to import other messages except for externals
+    return reject_query("required DispatchQueue processing is not done, but some other internal messages are imported");
+  }
   // common checks for all (non-external) inbound messages
   CHECK(msg.not_null());
   if (msg->get_hash().as_bitslice() != key) {
@@ -3485,27 +3759,34 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg)
     return reject_query("next hop address "s + next_prefix.to_str() + "... 
of inbound internal message with hash " + key.to_hex(256) + " does not belong to the current block's shard " + shard_.to_str()); } - // next hop may coincide with current address only if destination is already reached - if (next_prefix == cur_prefix && cur_prefix != dest_prefix) { + // next hop may coincide with current address only if destination is already reached (or it is deferred message) + if (!from_dispatch_queue && next_prefix == cur_prefix && cur_prefix != dest_prefix) { return reject_query( "next hop address "s + next_prefix.to_str() + "... of inbound internal message with hash " + key.to_hex(256) + " coincides with its current address, but this message has not reached its final destination " + dest_prefix.to_str() + "... yet"); } + if (from_dispatch_queue && next_prefix != cur_prefix) { + return reject_query( + "next hop address "s + next_prefix.to_str() + "... of deferred internal message with hash " + key.to_hex(256) + + " must coincide with its current prefix "s + cur_prefix.to_str() + "..."s); + } // if a message is processed by a transaction, it must have destination inside the current shard if (transaction.not_null() && !ton::shard_contains(shard_, dest_prefix)) { return reject_query("inbound internal message with hash "s + key.to_hex(256) + " has destination address " + dest_prefix.to_str() + "... not in this shard, but it is processed nonetheless"); } - // if a message is not processed by a transaction, its final destination must be outside this shard - if (transaction.is_null() && ton::shard_contains(shard_, dest_prefix)) { + // if a message is not processed by a transaction, its final destination must be outside this shard, + // or it is a deferred message (dispatch queue -> out msg queue) + if (tag != block::gen::InMsg::msg_import_deferred_tr && transaction.is_null() && + ton::shard_contains(shard_, dest_prefix)) { return reject_query("inbound internal message with hash "s + key.to_hex(256) + " has destination address " + dest_prefix.to_str() + "... 
in this shard, but it is not processed by a transaction"); } src = std::move(info.src); dest = std::move(info.dest); // unpack complete destination address if it is inside this shard - if (transaction.not_null() && !block::tlb::t_MsgAddressInt.extract_std_address(dest, wc, addr)) { + if (transaction.not_null() && !block::tlb::t_MsgAddressInt.extract_std_address(dest, dest_wc, dest_addr)) { return reject_query("cannot unpack destination address of inbound internal message with hash "s + key.to_hex(256)); } @@ -3517,6 +3798,44 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) td::dec_string(env.fwd_fee_remaining) + " larger than the original (total) forwarding fee " + td::dec_string(orig_fwd_fee)); } + // Unpack src address + if (!block::tlb::t_MsgAddressInt.extract_std_address(src, src_wc, src_addr)) { + return reject_query("cannot unpack source address of inbound internal message with hash "s + key.to_hex(256)); + } + } + + if (from_dispatch_queue) { + // Check that the message was removed from DispatchQueue + LogicalTime lt = info.created_lt; + auto it = removed_dispatch_queue_messages_.find({src_addr, lt}); + if (it == removed_dispatch_queue_messages_.end()) { + return reject_query(PSTRING() << "deferred InMsg with src_addr=" << src_addr.to_hex() << ", lt=" << lt + << " was not removed from the dispatch queue"); + } + // InMsg msg_import_deferred_* has emitted_lt in MsgEnvelope, but this emitted_lt is not present in DispatchQueue + Ref dispatched_msg_env = it->second; + td::Ref expected_msg_env; + if (!env.emitted_lt) { + return reject_query(PSTRING() << "no dispatch_lt in deferred InMsg with src_addr=" << src_addr.to_hex() + << ", lt=" << lt); + } + auto emitted_lt = env.emitted_lt.value(); + if (emitted_lt < start_lt_ || emitted_lt > end_lt_) { + return reject_query(PSTRING() << "dispatch_lt in deferred InMsg with src_addr=" << src_addr.to_hex() + << ", lt=" << lt << " is not between start and end of the block"); + } + auto env2 = env; + env2.emitted_lt = {}; + CHECK(block::tlb::pack_cell(expected_msg_env, env2)); + if (dispatched_msg_env->get_hash() != expected_msg_env->get_hash()) { + return reject_query(PSTRING() << "deferred InMsg with src_addr=" << src_addr.to_hex() << ", lt=" << lt + << " msg envelope hash mismatch: " << dispatched_msg_env->get_hash().to_hex() + << " in DispatchQueue, " << expected_msg_env->get_hash().to_hex() << " expected"); + } + removed_dispatch_queue_messages_.erase(it); + if (tag == block::gen::InMsg::msg_import_deferred_fin) { + msg_emitted_lt_.emplace_back(src_addr, lt, env.emitted_lt.value()); + } } if (transaction.not_null()) { @@ -3533,10 +3852,10 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) ton::StdSmcAddress trans_addr; ton::LogicalTime trans_lt; CHECK(block::get_transaction_id(transaction, trans_addr, trans_lt)); - if (addr != trans_addr) { + if (dest_addr != trans_addr) { block::gen::t_InMsg.print(std::cerr, *in_msg); return reject_query(PSTRING() << "InMsg corresponding to inbound message with hash " << key.to_hex(256) - << " and destination address " << addr.to_hex() + << " and destination address " << dest_addr.to_hex() << " claims that the message is processed by transaction " << trans_lt << " of another account " << trans_addr.to_hex()); } @@ -3588,6 +3907,7 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) } case block::gen::InMsg::msg_import_fin: { // msg_import_fin$100 in_msg:^MsgEnvelope transaction:^Transaction fwd_fee:Grams + // msg_import_deferred_fin$00100 in_msg:^MsgEnvelope 
transaction:^Transaction fwd_fee:Grams // importing and processing an internal message with destination in this shard CHECK(transaction.not_null()); CHECK(shard_contains(shard_, next_prefix)); @@ -3620,22 +3940,39 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) // ... break; } + case block::gen::InMsg::msg_import_deferred_fin: { + // fwd_fee must be equal to the fwd_fee_remaining of this MsgEnvelope + if (*fwd_fee != *env.fwd_fee_remaining) { + return reject_query("msg_import_imm$011 InMsg with hash "s + key.to_hex(256) + + " is invalid because its collected fwd_fee=" + td::dec_string(fwd_fee) + + " is not equal to fwd_fee_remaining=" + td::dec_string(env.fwd_fee_remaining) + + " of this message (envelope)"); + } + // ... + break; + } + case block::gen::InMsg::msg_import_deferred_tr: case block::gen::InMsg::msg_import_tr: { // msg_import_tr$101 in_msg:^MsgEnvelope out_msg:^MsgEnvelope transit_fee:Grams + // msg_import_deferred_tr$00101 in_msg:^MsgEnvelope out_msg:^MsgEnvelope // importing and relaying a (transit) internal message with destination outside this shard - if (cur_prefix == dest_prefix) { + if (cur_prefix == dest_prefix && tag == block::gen::InMsg::msg_import_tr) { return reject_query("inbound internal message with hash "s + key.to_hex(256) + " is a msg_import_tr$101 (a transit message), but its current address " + cur_prefix.to_str() + " is already equal to its final destination"); } + if (cur_prefix != next_prefix && tag == block::gen::InMsg::msg_import_deferred_tr) { + return reject_query("internal message from DispatchQueue with hash "s + key.to_hex(256) + + " is a msg_import_deferred_tr$00101, but its current address " + + cur_prefix.to_str() + " is not equal to next address"); + } CHECK(transaction.is_null()); - CHECK(cur_prefix != next_prefix); auto out_msg_cs = out_msg_dict_->lookup(key, 256); if (out_msg_cs.is_null()) { return reject_query("inbound internal message with hash "s + key.to_hex(256) + " is a msg_import_tr$101 (transit message), but the corresponding OutMsg does not exist"); } - if (shard_contains(shard_, cur_prefix)) { + if (shard_contains(shard_, cur_prefix) && tag == block::gen::InMsg::msg_import_tr) { // we imported this message from our shard! // (very rare situation possible only after merge) tr_req = true; @@ -3648,7 +3985,7 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) } out_msg_env = std::move(out_msg.out_msg); reimport = std::move(out_msg.imported); - } else { + } else if (tag == block::gen::InMsg::msg_import_tr) { block::gen::OutMsg::Record_msg_export_tr out_msg; if (!tlb::csr_unpack_safe(out_msg_cs, out_msg)) { return reject_query( @@ -3662,6 +3999,16 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) if (!check_imported_message(msg_env)) { return false; } + } else { + block::gen::OutMsg::Record_msg_export_deferred_tr out_msg; + if (!tlb::csr_unpack_safe(out_msg_cs, out_msg)) { + return reject_query( + "inbound internal message with hash "s + key.to_hex(256) + + " is a msg_import_deferred_tr$00101 with current address " + cur_prefix.to_str() + + "... 
outside of our shard, but the corresponding OutMsg is not a valid msg_export_deferred_tr$10101"); + } + out_msg_env = std::move(out_msg.out_msg); + reimport = std::move(out_msg.imported); } // perform hypercube routing for this transit message auto route_info = block::perform_hypercube_routing(next_prefix, dest_prefix, shard_); @@ -3702,6 +4049,18 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) td::dec_string(env.fwd_fee_remaining) + " to " + td::dec_string(tr_env.fwd_fee_remaining) + " in transit"); } + if (tr_env.metadata != env.metadata) { + return reject_query( + PSTRING() << "InMsg for transit message with hash " << key.to_hex(256) << " contains invalid MsgMetadata: " + << (env.metadata ? env.metadata.value().to_str() : "") << " in in_msg, but " + << (tr_env.metadata ? tr_env.metadata.value().to_str() : "") << " in out_msg"); + } + if (tr_env.emitted_lt != env.emitted_lt) { + return reject_query( + PSTRING() << "InMsg for transit message with hash " << key.to_hex(256) << " contains invalid emitted_lt: " + << (env.emitted_lt ? td::to_string(env.emitted_lt.value()) : "") << " in in_msg, but " + << (tr_env.emitted_lt ? td::to_string(tr_env.emitted_lt.value()) : "") << " in out_msg"); + } if (tr_msg_env->get_hash() != out_msg_env->get_hash()) { return reject_query( "InMsg for transit message with hash "s + key.to_hex(256) + @@ -3709,7 +4068,8 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) (tr_req ? "requeued" : "usual") + "transit)"); } // check the amount of the transit fee - td::RefInt256 transit_fee = action_phase_cfg_.fwd_std.get_next_part(env.fwd_fee_remaining); + td::RefInt256 transit_fee = + from_dispatch_queue ? td::zero_refint() : action_phase_cfg_.fwd_std.get_next_part(env.fwd_fee_remaining); if (*transit_fee != *fwd_fee) { return reject_query("InMsg for transit message with hash "s + key.to_hex(256) + " declared collected transit fees to be " + td::dec_string(fwd_fee) + @@ -3735,7 +4095,8 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) " refers to a different reimport InMsg"); } // for transit messages, OutMsg refers to the newly-created outbound messages (not to the re-imported old outbound message) - if (tag != block::gen::InMsg::msg_import_tr && out_msg_env->get_hash() != msg_env->get_hash()) { + if (tag != block::gen::InMsg::msg_import_tr && tag != block::gen::InMsg::msg_import_deferred_tr && + out_msg_env->get_hash() != msg_env->get_hash()) { return reject_query( "InMsg with hash "s + key.to_hex(256) + " is a reimport record, but the corresponding OutMsg exports a MsgEnvelope with a different hash"); @@ -3781,8 +4142,8 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms CHECK(out_msg.not_null()); int tag = block::gen::t_OutMsg.get_tag(*out_msg); CHECK(tag >= 0); // NB: the block has been already checked to be valid TL-B in try_validate() - ton::StdSmcAddress addr; - ton::WorkchainId wc; + ton::StdSmcAddress src_addr; + ton::WorkchainId src_wc; Ref src, dest; Ref transaction; Ref msg, msg_env, tr_msg_env, reimport; @@ -3826,7 +4187,7 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms src_prefix.to_str() + "... 
not in this shard"); } src = std::move(info_ext.src); - if (!block::tlb::t_MsgAddressInt.extract_std_address(src, wc, addr)) { + if (!block::tlb::t_MsgAddressInt.extract_std_address(src, src_wc, src_addr)) { return reject_query("cannot unpack source address of outbound external message with hash "s + key.to_hex(256)); } break; @@ -3845,7 +4206,7 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms case block::gen::OutMsg::msg_export_new: { block::gen::OutMsg::Record_msg_export_new out; CHECK(tlb::csr_unpack(out_msg, out) && tlb::unpack_cell(out.out_msg, env) && - block::tlb::t_MsgEnvelope.get_created_lt(vm::load_cell_slice(out.out_msg), created_lt)); + block::tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(out.out_msg), created_lt)); transaction = std::move(out.transaction); msg_env = std::move(out.out_msg); msg = env.msg; @@ -3908,6 +4269,35 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms // ... break; } + case block::gen::OutMsg::msg_export_new_defer: { + block::gen::OutMsg::Record_msg_export_new_defer out; + CHECK(tlb::csr_unpack(out_msg, out) && tlb::unpack_cell(out.out_msg, env) && + block::tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(out.out_msg), created_lt)); + transaction = std::move(out.transaction); + msg_env = std::move(out.out_msg); + msg = env.msg; + // ... + break; + } + case block::gen::OutMsg::msg_export_deferred_tr: { + block::gen::OutMsg::Record_msg_export_deferred_tr out; + CHECK(tlb::csr_unpack(out_msg, out) && tlb::unpack_cell(out.out_msg, env)); + msg_env = std::move(out.out_msg); + msg = env.msg; + reimport = std::move(out.imported); + in_tag = block::gen::InMsg::msg_import_deferred_tr; + mode = 2; // added to OutMsgQueue + if (!env.emitted_lt) { + return reject_query(PSTRING() << "msg_export_deferred_tr for OutMsg with key " << key.to_hex(256) + << " does not have emitted_lt in MsgEnvelope"); + } + if (env.emitted_lt.value() < start_lt_ || env.emitted_lt.value() > end_lt_) { + return reject_query(PSTRING() << "emitted_lt for msg_export_deferred_tr with key " << key.to_hex(256) + << " is not between start and end lt of the block"); + } + // ... + break; + } default: return reject_query(PSTRING() << "OutMsg with key (message hash) " << key.to_hex(256) << " has an unknown tag " << tag); @@ -3942,30 +4332,36 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms return reject_query("destination of outbound internal message with hash "s + key.to_hex(256) + " is an invalid blockchain address"); } - cur_prefix = block::interpolate_addr(src_prefix, dest_prefix, env.cur_addr); - next_prefix = block::interpolate_addr(src_prefix, dest_prefix, env.next_addr); - if (!(cur_prefix.is_valid() && next_prefix.is_valid())) { - return reject_query("cannot compute current and next hop addresses of outbound internal message with hash "s + - key.to_hex(256)); - } - // check that next hop is nearer to the destination than the current address - if (count_matching_bits(dest_prefix, next_prefix) < count_matching_bits(dest_prefix, cur_prefix)) { - return reject_query("next hop address "s + next_prefix.to_str() + "... of outbound internal message with hash " + - key.to_hex(256) + " is further from its destination " + dest_prefix.to_str() + - "... than its current address " + cur_prefix.to_str() + "..."); - } - // current address must belong to this shard (otherwise we should never had exported this message) - if (!ton::shard_contains(shard_, cur_prefix)) { - return reject_query("current address "s + cur_prefix.to_str() + "... 
of outbound internal message with hash " + - key.to_hex(256) + " does not belong to the current block's shard " + shard_.to_str()); - } - // next hop may coincide with current address only if destination is already reached - if (next_prefix == cur_prefix && cur_prefix != dest_prefix) { - return reject_query( - "next hop address "s + next_prefix.to_str() + "... of outbound internal message with hash " + - key.to_hex(256) + - " coincides with its current address, but this message has not reached its final destination " + - dest_prefix.to_str() + "... yet"); + if (tag == block::gen::OutMsg::msg_export_new_defer) { + if (env.cur_addr != 0 || env.next_addr != 0) { + return reject_query("cur_addr and next_addr of the message in DispatchQueue must be zero"); + } + } else { + cur_prefix = block::interpolate_addr(src_prefix, dest_prefix, env.cur_addr); + next_prefix = block::interpolate_addr(src_prefix, dest_prefix, env.next_addr); + if (!(cur_prefix.is_valid() && next_prefix.is_valid())) { + return reject_query("cannot compute current and next hop addresses of outbound internal message with hash "s + + key.to_hex(256)); + } + // check that next hop is nearer to the destination than the current address + if (count_matching_bits(dest_prefix, next_prefix) < count_matching_bits(dest_prefix, cur_prefix)) { + return reject_query("next hop address "s + next_prefix.to_str() + "... of outbound internal message with hash " + + key.to_hex(256) + " is further from its destination " + dest_prefix.to_str() + + "... than its current address " + cur_prefix.to_str() + "..."); + } + // current address must belong to this shard (otherwise we should never had exported this message) + if (!ton::shard_contains(shard_, cur_prefix)) { + return reject_query("current address "s + cur_prefix.to_str() + "... of outbound internal message with hash " + + key.to_hex(256) + " does not belong to the current block's shard " + shard_.to_str()); + } + // next hop may coincide with current address only if destination is already reached + if (next_prefix == cur_prefix && cur_prefix != dest_prefix) { + return reject_query( + "next hop address "s + next_prefix.to_str() + "... of outbound internal message with hash " + + key.to_hex(256) + + " coincides with its current address, but this message has not reached its final destination " + + dest_prefix.to_str() + "... 
yet"); + } } // if a message is created by a transaction, it must have source inside the current shard if (transaction.not_null() && !ton::shard_contains(shard_, src_prefix)) { @@ -3976,7 +4372,7 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms src = std::move(info.src); dest = std::move(info.dest); // unpack complete source address if it is inside this shard - if (transaction.not_null() && !block::tlb::t_MsgAddressInt.extract_std_address(src, wc, addr)) { + if (!block::tlb::t_MsgAddressInt.extract_std_address(src, src_wc, src_addr)) { return reject_query("cannot unpack source address of outbound internal message with hash "s + key.to_hex(256) + " created in this shard"); } @@ -4004,10 +4400,10 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms ton::StdSmcAddress trans_addr; ton::LogicalTime trans_lt; CHECK(block::get_transaction_id(transaction, trans_addr, trans_lt)); - if (addr != trans_addr) { + if (src_addr != trans_addr) { block::gen::t_OutMsg.print(std::cerr, *out_msg); return reject_query(PSTRING() << "OutMsg corresponding to outbound message with hash " << key.to_hex(256) - << " and source address " << addr.to_hex() + << " and source address " << src_addr.to_hex() << " claims that the message was created by transaction " << trans_lt << " of another account " << trans_addr.to_hex()); } @@ -4026,43 +4422,64 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms (q_key.bits() + 96).copy_from(key, 256); auto q_entry = ns_.out_msg_queue_->lookup(q_key); auto old_q_entry = ps_.out_msg_queue_->lookup(q_key); - if (old_q_entry.not_null() && q_entry.not_null()) { - return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + - " should have removed or added OutMsgQueue entry with key " + q_key.to_hex() + - ", but it is present both in the old and in the new output queues"); - } - if (old_q_entry.is_null() && q_entry.is_null() && mode) { - return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + - " should have removed or added OutMsgQueue entry with key " + q_key.to_hex() + - ", but it is absent both from the old and from the new output queues"); - } - if (!mode && (old_q_entry.not_null() || q_entry.not_null())) { - return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + - " is a msg_export_imm$010, so the OutMsgQueue entry with key " + q_key.to_hex() + - " should never be created, but it is present in either the old or the new output queue"); - } - // NB: if mode!=0, the OutMsgQueue entry has been changed, so we have already checked some conditions in precheck_one_message_queue_update() - if (mode & 2) { - if (q_entry.is_null()) { - return reject_query("OutMsg with key "s + key.to_hex(256) + - " was expected to create OutMsgQueue entry with key " + q_key.to_hex() + " but it did not"); + + if (tag == block::gen::OutMsg::msg_export_new_defer) { + // check the DispatchQueue update + if (old_q_entry.not_null() || q_entry.not_null()) { + return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + + " shouldn't exist in the old and the new message queues"); } - if (msg_env_hash != q_entry->prefetch_ref()->get_hash().bits()) { - return reject_query("OutMsg with key "s + key.to_hex(256) + " has created OutMsgQueue entry with key " + - q_key.to_hex() + " containing a different MsgEnvelope"); + auto it = new_dispatch_queue_messages_.find({src_addr, created_lt}); + if (it == new_dispatch_queue_messages_.end()) { + return reject_query(PSTRING() << "new deferred OutMsg with src_addr=" 
<< src_addr.to_hex() + << ", lt=" << created_lt << " was not added to the dispatch queue"); } - // ... - } else if (mode & 1) { - if (old_q_entry.is_null()) { - return reject_query("OutMsg with key "s + key.to_hex(256) + - " was expected to remove OutMsgQueue entry with key " + q_key.to_hex() + - " but it did not exist in the old queue"); + Ref expected_msg_env = it->second; + if (expected_msg_env->get_hash() != msg_env->get_hash()) { + return reject_query(PSTRING() << "new deferred OutMsg with src_addr=" << src_addr.to_hex() << ", lt=" + << created_lt << " msg envelope hasg mismatch: " << msg_env->get_hash().to_hex() + << " in OutMsg, " << expected_msg_env->get_hash().to_hex() << " in DispatchQueue"); } - if (msg_env_hash != old_q_entry->prefetch_ref()->get_hash().bits()) { - return reject_query("OutMsg with key "s + key.to_hex(256) + " has dequeued OutMsgQueue entry with key " + - q_key.to_hex() + " containing a different MsgEnvelope"); + new_dispatch_queue_messages_.erase(it); + } else { + if (old_q_entry.not_null() && q_entry.not_null()) { + return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + + " should have removed or added OutMsgQueue entry with key " + q_key.to_hex() + + ", but it is present both in the old and in the new output queues"); + } + if (old_q_entry.is_null() && q_entry.is_null() && mode) { + return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + + " should have removed or added OutMsgQueue entry with key " + q_key.to_hex() + + ", but it is absent both from the old and from the new output queues"); + } + if (!mode && (old_q_entry.not_null() || q_entry.not_null())) { + return reject_query("OutMsg with key (message hash) "s + key.to_hex(256) + + " is a msg_export_imm$010, so the OutMsgQueue entry with key " + q_key.to_hex() + + " should never be created, but it is present in either the old or the new output queue"); + } + // NB: if mode!=0, the OutMsgQueue entry has been changed, so we have already checked some conditions in precheck_one_message_queue_update() + if (mode & 2) { + if (q_entry.is_null()) { + return reject_query("OutMsg with key "s + key.to_hex(256) + + " was expected to create OutMsgQueue entry with key " + q_key.to_hex() + " but it did not"); + } + if (msg_env_hash != q_entry->prefetch_ref()->get_hash().bits()) { + return reject_query("OutMsg with key "s + key.to_hex(256) + " has created OutMsgQueue entry with key " + + q_key.to_hex() + " containing a different MsgEnvelope"); + } + // ... + } else if (mode & 1) { + if (old_q_entry.is_null()) { + return reject_query("OutMsg with key "s + key.to_hex(256) + + " was expected to remove OutMsgQueue entry with key " + q_key.to_hex() + + " but it did not exist in the old queue"); + } + if (msg_env_hash != old_q_entry->prefetch_ref()->get_hash().bits()) { + return reject_query("OutMsg with key "s + key.to_hex(256) + " has dequeued OutMsgQueue entry with key " + + q_key.to_hex() + " containing a different MsgEnvelope"); + } + // ... } - // ... 
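+        // At this point either the DispatchQueue bookkeeping (for msg_export_new_defer) or the old/new OutMsgQueue entry (for other exports) has been matched against this OutMsg.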
} // check reimport:^InMsg @@ -4090,8 +4507,8 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms int i_tag = block::gen::t_InMsg.get_tag(*in); if (i_tag < 0 || i_tag != in_tag) { return reject_query("OutMsg with key "s + key.to_hex(256) + - " refers to a (re)import InMsg, which is not one of msg_import_imm, msg_import_fin or " - "msg_import_tr as expected"); + " refers to a (re)import InMsg, which is not one of msg_import_imm, msg_import_fin, " + "msg_import_tr or msg_import_deferred_tr as expected"); } } @@ -4151,6 +4568,9 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms // ... break; } + case block::gen::OutMsg::msg_export_new_defer: { + break; + } case block::gen::OutMsg::msg_export_tr: { block::gen::InMsg::Record_msg_import_tr in; block::tlb::MsgEnvelope::Record_std in_env; @@ -4175,6 +4595,24 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms // ... break; } + case block::gen::OutMsg::msg_export_deferred_tr: { + block::gen::InMsg::Record_msg_import_deferred_tr in; + block::tlb::MsgEnvelope::Record_std in_env; + if (!(tlb::unpack_cell(reimport, in) && tlb::unpack_cell(in.in_msg, in_env))) { + return reject_query( + "cannot unpack msg_import_deferred_tr InMsg record corresponding to msg_export_deferred_tr OutMsg record with key "s + + key.to_hex(256)); + } + CHECK(in_env.msg->get_hash() == msg->get_hash()); + auto in_cur_prefix = block::interpolate_addr(src_prefix, dest_prefix, in_env.cur_addr); + if (!shard_contains(shard_, in_cur_prefix)) { + return reject_query( + "msg_export_deferred_tr OutMsg record with key "s + key.to_hex(256) + + " corresponds to msg_import_deferred_tr InMsg record with current imported message address " + + in_cur_prefix.to_str() + " NOT inside the current shard"); + } + break; + } case block::gen::OutMsg::msg_export_deq: case block::gen::OutMsg::msg_export_deq_short: { // check that the message has been indeed processed by a neighbor @@ -4290,6 +4728,24 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms return fatal_error(PSTRING() << "unknown OutMsg tag " << tag); } + if (tag == block::gen::OutMsg::msg_export_imm || tag == block::gen::OutMsg::msg_export_deq_imm || + tag == block::gen::OutMsg::msg_export_new || tag == block::gen::OutMsg::msg_export_deferred_tr) { + if (src_wc != workchain()) { + return true; + } + if (tag == block::gen::OutMsg::msg_export_imm && is_special_in_msg(vm::load_cell_slice(reimport))) { + return true; + } + unsigned long long created_lt; + auto cs = vm::load_cell_slice(env.msg); + if (!block::tlb::t_Message.get_created_lt(cs, created_lt)) { + return reject_query(PSTRING() << "cannot get created_lt for OutMsg with key " << key.to_hex(256) + << ", tag=" << tag); + } + auto emitted_lt = env.emitted_lt ? env.emitted_lt.value() : created_lt; + msg_emitted_lt_.emplace_back(src_addr, created_lt, emitted_lt); + } + return true; } @@ -4377,6 +4833,25 @@ bool ValidateQuery::check_processed_upto() { return true; } +/** + * Check that the difference between the old and new dispatch queues is reflected in OutMsgs and InMsgs + * + * @returns True if the check is successful, false otherwise. 
+ */ +bool ValidateQuery::check_dispatch_queue_update() { + if (!new_dispatch_queue_messages_.empty()) { + auto it = new_dispatch_queue_messages_.begin(); + return reject_query(PSTRING() << "DispatchQueue has a new message with src_addr=" << it->first.first.to_hex() + << ", lt=" << it->first.second << ", but no corresponding OutMsg exists"); + } + if (!removed_dispatch_queue_messages_.empty()) { + auto it = removed_dispatch_queue_messages_.begin(); + return reject_query(PSTRING() << "message with src_addr=" << it->first.first.to_hex() << ", lt=" << it->first.second + << " was removed from DispatchQueue, but no corresponding InMsg exists"); + } + return true; +} + /** * Checks the validity of an outbound message in the neighbor's queue. * Similar to Collator::process_inbound_message. @@ -4691,6 +5166,10 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT // check input message block::CurrencyCollection money_imported(0), money_exported(0); bool is_special_tx = false; // recover/mint transaction + auto td_cs = vm::load_cell_slice(trans.description); + int tag = block::gen::t_TransactionDescr.get_tag(td_cs); + CHECK(tag >= 0); // we have already validated the serialization of all Transactions + td::optional in_msg_metadata; if (in_msg_root.not_null()) { auto in_descr_cs = in_msg_dict_->lookup(in_msg_root->get_hash().as_bitslice()); if (in_descr_cs.is_null()) { @@ -4698,20 +5177,21 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " of transaction " << lt << " of account " << addr.to_hex() << " does not have a corresponding InMsg record"); } - auto tag = block::gen::t_InMsg.get_tag(*in_descr_cs); - if (tag != block::gen::InMsg::msg_import_ext && tag != block::gen::InMsg::msg_import_fin && - tag != block::gen::InMsg::msg_import_imm && tag != block::gen::InMsg::msg_import_ihr) { + auto in_msg_tag = block::gen::t_InMsg.get_tag(*in_descr_cs); + if (in_msg_tag != block::gen::InMsg::msg_import_ext && in_msg_tag != block::gen::InMsg::msg_import_fin && + in_msg_tag != block::gen::InMsg::msg_import_imm && in_msg_tag != block::gen::InMsg::msg_import_ihr && + in_msg_tag != block::gen::InMsg::msg_import_deferred_fin) { return reject_query(PSTRING() << "inbound message with hash " << in_msg_root->get_hash().to_hex() << " of transaction " << lt << " of account " << addr.to_hex() << " has an invalid InMsg record (not one of msg_import_ext, msg_import_fin, " - "msg_import_imm or msg_import_ihr)"); + "msg_import_imm, msg_import_ihr or msg_import_deferred_fin)"); } is_special_tx = is_special_in_msg(*in_descr_cs); // once we know there is a InMsg with correct hash, we already know that it contains a message with this hash (by the verification of InMsg), so it is our message // have still to check its destination address and imported value // and that it refers to this transaction Ref dest; - if (tag == block::gen::InMsg::msg_import_ext) { + if (in_msg_tag == block::gen::InMsg::msg_import_ext) { block::gen::CommonMsgInfo::Record_ext_in_msg_info info; CHECK(tlb::unpack_cell_inexact(in_msg_root, info)); dest = std::move(info.dest); @@ -4724,12 +5204,26 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " processed inbound message created later at logical time " << info.created_lt); } + LogicalTime emitted_lt = info.created_lt; // See ValidateQuery::check_message_processing_order + if (in_msg_tag == block::gen::InMsg::msg_import_imm || in_msg_tag == block::gen::InMsg::msg_import_fin || + in_msg_tag == 
block::gen::InMsg::msg_import_deferred_fin) { + block::tlb::MsgEnvelope::Record_std msg_env; + if (!block::tlb::unpack_cell(in_descr_cs->prefetch_ref(), msg_env)) { + return reject_query(PSTRING() << "InMsg record for inbound message with hash " + << in_msg_root->get_hash().to_hex() << " of transaction " << lt + << " of account " << addr.to_hex() << " does not have a valid MsgEnvelope"); + } + in_msg_metadata = std::move(msg_env.metadata); + if (msg_env.emitted_lt) { + emitted_lt = msg_env.emitted_lt.value(); + } + } if (info.created_lt != start_lt_ || !is_special_tx) { - msg_proc_lt_.emplace_back(addr, lt, info.created_lt); + msg_proc_lt_.emplace_back(addr, lt, emitted_lt); } dest = std::move(info.dest); CHECK(money_imported.validate_unpack(info.value)); - ihr_delivered = (tag == block::gen::InMsg::msg_import_ihr); + ihr_delivered = (in_msg_tag == block::gen::InMsg::msg_import_ihr); if (!ihr_delivered) { money_imported += block::tlb::t_Grams.as_integer(info.ihr_fee); } @@ -4751,6 +5245,15 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT } } // check output messages + td::optional new_msg_metadata; + if (msg_metadata_enabled_) { + if (external || is_special_tx || tag != block::gen::TransactionDescr::trans_ord) { + new_msg_metadata = block::MsgMetadata{0, account.workchain, account.addr, (LogicalTime)trans.lt}; + } else if (in_msg_metadata) { + new_msg_metadata = std::move(in_msg_metadata); + ++new_msg_metadata.value().depth; + } + } vm::Dictionary out_dict{trans.r1.out_msgs, 15}; for (int i = 0; i < trans.outmsg_cnt; i++) { auto out_msg_root = out_dict.lookup_ref(td::BitArray<15>{i}); @@ -4763,33 +5266,45 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT } auto tag = block::gen::t_OutMsg.get_tag(*out_descr_cs); if (tag != block::gen::OutMsg::msg_export_ext && tag != block::gen::OutMsg::msg_export_new && - tag != block::gen::OutMsg::msg_export_imm) { - return reject_query( - PSTRING() << "outbound message #" << i + 1 << " with hash " << out_msg_root->get_hash().to_hex() - << " of transaction " << lt << " of account " << addr.to_hex() - << " has an invalid OutMsg record (not one of msg_export_ext, msg_export_new or msg_export_imm)"); + tag != block::gen::OutMsg::msg_export_imm && tag != block::gen::OutMsg::msg_export_new_defer) { + return reject_query(PSTRING() << "outbound message #" << i + 1 << " with hash " + << out_msg_root->get_hash().to_hex() << " of transaction " << lt << " of account " + << addr.to_hex() + << " has an invalid OutMsg record (not one of msg_export_ext, msg_export_new, " + "msg_export_imm or msg_export_new_defer)"); } - // once we know there is an OutMsg with correct hash, we already know that it contains a message with this hash (by the verification of OutMsg), so it is our message + // once we know there is an OutMsg with correct hash, we already know that it contains a message with this hash + // (by the verification of OutMsg), so it is our message // have still to check its source address, lt and imported value // and that it refers to this transaction as its origin Ref src; + LogicalTime message_lt; if (tag == block::gen::OutMsg::msg_export_ext) { block::gen::CommonMsgInfo::Record_ext_out_msg_info info; CHECK(tlb::unpack_cell_inexact(out_msg_root, info)); src = std::move(info.src); + message_lt = info.created_lt; } else { block::gen::CommonMsgInfo::Record_int_msg_info info; CHECK(tlb::unpack_cell_inexact(out_msg_root, info)); src = std::move(info.src); - block::gen::MsgEnvelope::Record msg_env; + 
message_lt = info.created_lt; + block::tlb::MsgEnvelope::Record_std msg_env; CHECK(tlb::unpack_cell(out_descr_cs->prefetch_ref(), msg_env)); // unpack exported message value (from this transaction) block::CurrencyCollection msg_export_value; CHECK(msg_export_value.unpack(info.value)); msg_export_value += block::tlb::t_Grams.as_integer(info.ihr_fee); - msg_export_value += block::tlb::t_Grams.as_integer(msg_env.fwd_fee_remaining); + msg_export_value += msg_env.fwd_fee_remaining; CHECK(msg_export_value.is_valid()); money_exported += msg_export_value; + if (msg_env.metadata != new_msg_metadata) { + return reject_query(PSTRING() << "outbound message #" << i + 1 << " with hash " + << out_msg_root->get_hash().to_hex() << " of transaction " << lt << " of account " + << addr.to_hex() << " has invalid metadata in an OutMsg record: expected " + << (new_msg_metadata ? new_msg_metadata.value().to_str() : "") << ", found " + << (msg_env.metadata ? msg_env.metadata.value().to_str() : "")); + } } WorkchainId s_wc; StdSmcAddress ss_addr; // s_addr is some macros in Windows @@ -4806,13 +5321,32 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << out_msg_root->get_hash().to_hex() << " of transaction " << lt << " of account " << addr.to_hex() << " refers to a different processing transaction"); } + if (tag != block::gen::OutMsg::msg_export_ext) { + bool is_deferred = tag == block::gen::OutMsg::msg_export_new_defer; + if (account_expected_defer_all_messages_.count(ss_addr) && !is_deferred) { + return reject_query( + PSTRING() << "outbound message #" << i + 1 << " on account " << workchain() << ":" << ss_addr.to_hex() + << " must be deferred because this account has earlier messages in DispatchQueue"); + } + if (is_deferred) { + LOG(INFO) << "message from account " << workchain() << ":" << ss_addr.to_hex() << " with lt " << message_lt + << " was deferred"; + if (!deferring_messages_enabled_ && !account_expected_defer_all_messages_.count(ss_addr)) { + return reject_query(PSTRING() << "outbound message #" << i + 1 << " on account " << workchain() << ":" + << ss_addr.to_hex() << " is deferred, but deferring messages is disabled"); + } + if (i == 0 && !account_expected_defer_all_messages_.count(ss_addr)) { + return reject_query(PSTRING() << "outbound message #1 on account " << workchain() << ":" << ss_addr.to_hex() + << " must not be deferred (the first message cannot be deferred unless some " + "prevoius messages are deferred)"); + } + account_expected_defer_all_messages_.insert(ss_addr); + } + } } CHECK(money_exported.is_valid()); // check general transaction data block::CurrencyCollection old_balance{account.get_balance()}; - auto td_cs = vm::load_cell_slice(trans.description); - int tag = block::gen::t_TransactionDescr.get_tag(td_cs); - CHECK(tag >= 0); // we have already validated the serialization of all Transactions if (tag == block::gen::TransactionDescr::trans_merge_prepare || tag == block::gen::TransactionDescr::trans_merge_install || tag == block::gen::TransactionDescr::trans_split_prepare || @@ -5271,6 +5805,10 @@ bool ValidateQuery::check_all_ticktock_processed() { * @returns True if the processing order of messages is valid, false otherwise. */ bool ValidateQuery::check_message_processing_order() { + // Old rule: if messages m1 and m2 with the same destination generate transactions t1 and t2, + // then (m1.created_lt < m2.created_lt) => (t1.lt < t2.lt). 
+ // New rule: + // If message was taken from dispatch queue, instead of created_lt use emitted_lt std::sort(msg_proc_lt_.begin(), msg_proc_lt_.end()); for (std::size_t i = 1; i < msg_proc_lt_.size(); i++) { auto &a = msg_proc_lt_[i - 1], &b = msg_proc_lt_[i]; @@ -5282,6 +5820,19 @@ bool ValidateQuery::check_message_processing_order() { << ") processes an earlier message created at logical time " << std::get<2>(b)); } } + + // Check that if messages m1 and m2 with the same source have m1.created_lt < m2.created_lt then + // m1.emitted_lt < m2.emitted_lt. + std::sort(msg_emitted_lt_.begin(), msg_emitted_lt_.end()); + for (std::size_t i = 1; i < msg_emitted_lt_.size(); i++) { + auto &a = msg_emitted_lt_[i - 1], &b = msg_emitted_lt_[i]; + if (std::get<0>(a) == std::get<0>(b) && std::get<2>(a) >= std::get<2>(b)) { + return reject_query(PSTRING() << "incorrect deferred message processing order for sender " + << std::get<0>(a).to_hex() << ": message with created_lt " << std::get<1>(a) + << " has emitted_lt" << std::get<2>(a) << ", but message with created_lt " + << std::get<1>(b) << " has emitted_lt" << std::get<2>(b)); + } + } return true; } @@ -6240,6 +6791,9 @@ bool ValidateQuery::try_validate() { if (!check_utime_lt()) { return reject_query("creation utime/lt of the new block is invalid"); } + if (!prepare_out_msg_queue_size()) { + return reject_query("cannot request out msg queue size"); + } stage_ = 1; if (pending) { return true; @@ -6268,12 +6822,18 @@ bool ValidateQuery::try_validate() { if (!precheck_message_queue_update()) { return reject_query("invalid OutMsgQueue update"); } + if (!unpack_dispatch_queue_update()) { + return reject_query("invalid DispatchQueue update"); + } if (!check_in_msg_descr()) { return reject_query("invalid InMsgDescr"); } if (!check_out_msg_descr()) { return reject_query("invalid OutMsgDescr"); } + if (!check_dispatch_queue_update()) { + return reject_query("invalid OutMsgDescr"); + } if (!check_processed_upto()) { return reject_query("invalid ProcessedInfo"); } diff --git a/validator/impl/validate-query.hpp b/validator/impl/validate-query.hpp index 8829ac61f..824afb49d 100644 --- a/validator/impl/validate-query.hpp +++ b/validator/impl/validate-query.hpp @@ -112,7 +112,8 @@ class ValidateQuery : public td::actor::Actor { return SUPPORTED_VERSION; } static constexpr long long supported_capabilities() { - return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue; + return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue | + ton::capStoreOutMsgQueueSize | ton::capMsgMetadata | ton::capDeferMessages; } public: @@ -227,9 +228,22 @@ class ValidateQuery : public td::actor::Actor { bool inbound_queues_empty_{false}; std::vector> msg_proc_lt_; + std::vector> msg_emitted_lt_; std::vector> lib_publishers_, lib_publishers2_; + std::map, Ref> removed_dispatch_queue_messages_; + std::map, Ref> new_dispatch_queue_messages_; + std::set account_expected_defer_all_messages_; + td::uint64 old_out_msg_queue_size_ = 0, new_out_msg_queue_size_ = 0; + + bool msg_metadata_enabled_ = false; + bool deferring_messages_enabled_ = false; + bool store_out_msg_queue_size_ = false; + + td::uint64 processed_account_dispatch_queues_ = 0; + bool have_unprocessed_account_dispatch_queue_ = false; + td::PerfWarningTimer perf_timer_; static constexpr td::uint32 priority() { @@ -309,6 +323,8 @@ class ValidateQuery : public td::actor::Actor { bool check_cur_validator_set(); bool 
check_mc_validator_info(bool update_mc_cc); bool check_utime_lt(); + bool prepare_out_msg_queue_size(); + void got_out_queue_size(size_t i, td::Result res); bool fix_one_processed_upto(block::MsgProcessedUpto& proc, ton::ShardIdFull owner, bool allow_cur = false); bool fix_processed_upto(block::MsgProcessedUptoCollection& upto, bool allow_cur = false); @@ -330,6 +346,9 @@ class ValidateQuery : public td::actor::Actor { bool precheck_one_message_queue_update(td::ConstBitPtr out_msg_id, Ref old_value, Ref new_value); bool precheck_message_queue_update(); + bool check_account_dispatch_queue_update(td::Bits256 addr, Ref old_queue_csr, + Ref new_queue_csr); + bool unpack_dispatch_queue_update(); bool update_max_processed_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash); bool update_min_enqueued_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash); bool check_imported_message(Ref msg_env); @@ -338,6 +357,7 @@ class ValidateQuery : public td::actor::Actor { bool check_in_msg_descr(); bool check_out_msg(td::ConstBitPtr key, Ref out_msg); bool check_out_msg_descr(); + bool check_dispatch_queue_update(); bool check_processed_upto(); bool check_neighbor_outbound_message(Ref enq_msg, ton::LogicalTime lt, td::ConstBitPtr key, const block::McShardDescr& src_nb, bool& unprocessed); diff --git a/validator/interfaces/db.h b/validator/interfaces/db.h index 84ea2b366..8bbf7f31f 100644 --- a/validator/interfaces/db.h +++ b/validator/interfaces/db.h @@ -66,6 +66,8 @@ class Db : public td::actor::Actor { virtual void store_zero_state_file(BlockIdExt block_id, td::BufferSlice state, td::Promise promise) = 0; virtual void get_zero_state_file(BlockIdExt block_id, td::Promise promise) = 0; virtual void check_zero_state_file_exists(BlockIdExt block_id, td::Promise promise) = 0; + virtual void get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) = 0; virtual void try_get_static_file(FileHash file_hash, td::Promise promise) = 0; diff --git a/validator/manager-disk.cpp b/validator/manager-disk.cpp index 17b793c7a..5678408c6 100644 --- a/validator/manager-disk.cpp +++ b/validator/manager-disk.cpp @@ -128,8 +128,8 @@ void ValidatorManagerImpl::sync_complete(td::Promise promise) { } Ed25519_PublicKey created_by{td::Bits256::zero()}; td::as(created_by.as_bits256().data() + 32 - 4) = ((unsigned)std::time(nullptr) >> 8); - run_collate_query(shard_id, 0, last_masterchain_block_id_, prev, created_by, val_set, actor_id(this), - td::Timestamp::in(10.0), std::move(P)); + run_collate_query(shard_id, 0, last_masterchain_block_id_, prev, created_by, val_set, td::Ref{true}, + actor_id(this), td::Timestamp::in(10.0), std::move(P)); } void ValidatorManagerImpl::validate_fake(BlockCandidate candidate, std::vector prev, BlockIdExt last, diff --git a/validator/manager-disk.hpp b/validator/manager-disk.hpp index 389c7c0de..a77be2725 100644 --- a/validator/manager-disk.hpp +++ b/validator/manager-disk.hpp @@ -116,6 +116,10 @@ class ValidatorManagerImpl : public ValidatorManager { td::int64 max_length, td::Promise promise) override { UNREACHABLE(); } + void get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) override { + UNREACHABLE(); + } void get_block_proof(BlockHandle handle, td::Promise promise) override; void get_block_proof_link(BlockHandle block_id, td::Promise promise) override { UNREACHABLE(); @@ -384,7 +388,7 @@ class ValidatorManagerImpl : public ValidatorManager { void log_new_validator_group_stats(validatorsession::NewValidatorGroupStats stats) 
override { UNREACHABLE(); } - void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { if (queue_size_counter_.empty()) { queue_size_counter_ = td::actor::create_actor("queuesizecounter", td::Ref{}, actor_id(this)); diff --git a/validator/manager-hardfork.hpp b/validator/manager-hardfork.hpp index 7bf95b3f7..e7175b77b 100644 --- a/validator/manager-hardfork.hpp +++ b/validator/manager-hardfork.hpp @@ -139,6 +139,10 @@ class ValidatorManagerImpl : public ValidatorManager { td::int64 max_length, td::Promise promise) override { UNREACHABLE(); } + void get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) override { + UNREACHABLE(); + } void get_block_proof(BlockHandle handle, td::Promise promise) override; void get_block_proof_link(BlockHandle block_id, td::Promise promise) override; void get_key_block_proof(BlockIdExt block_id, td::Promise promise) override; @@ -446,7 +450,7 @@ class ValidatorManagerImpl : public ValidatorManager { void log_new_validator_group_stats(validatorsession::NewValidatorGroupStats stats) override { UNREACHABLE(); } - void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { if (queue_size_counter_.empty()) { queue_size_counter_ = td::actor::create_actor("queuesizecounter", td::Ref{}, actor_id(this)); diff --git a/validator/manager.cpp b/validator/manager.cpp index 2af818e6f..eb082d91e 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -308,6 +308,11 @@ void ValidatorManagerImpl::get_persistent_state_slice(BlockIdExt block_id, Block std::move(promise)); } +void ValidatorManagerImpl::get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) { + td::actor::send_closure(db_, &Db::get_previous_persistent_state_files, cur_mc_seqno, std::move(promise)); +} + void ValidatorManagerImpl::get_block_proof(BlockHandle handle, td::Promise promise) { auto P = td::PromiseCreator::lambda([promise = std::move(promise)](td::Result> R) mutable { if (R.is_error()) { @@ -1714,6 +1719,8 @@ void ValidatorManagerImpl::read_gc_list(std::vector list) { serializer_ = td::actor::create_actor("serializer", last_key_block_handle_->id(), opts_, actor_id(this)); + td::actor::send_closure(serializer_, &AsyncStateSerializer::update_last_known_key_block_ts, + last_key_block_handle_->unix_time()); if (last_masterchain_block_handle_->inited_next_left()) { auto b = last_masterchain_block_handle_->one_next(true); @@ -1903,6 +1910,10 @@ void ValidatorManagerImpl::new_masterchain_block() { last_known_key_block_handle_ = last_key_block_handle_; callback_->new_key_block(last_key_block_handle_); } + if (!serializer_.empty()) { + td::actor::send_closure(serializer_, &AsyncStateSerializer::update_last_known_key_block_ts, + last_key_block_handle_->unix_time()); + } } update_shards(); @@ -3133,10 +3144,16 @@ void ValidatorManagerImpl::get_validator_groups_info_for_litequery( } void ValidatorManagerImpl::update_options(td::Ref opts) { - // Currently options can be updated only to change state_serializer_enabled flag + // Currently options can be updated only to change state_serializer_enabled flag and collator_options if (!serializer_.empty()) { td::actor::send_closure(serializer_, &AsyncStateSerializer::update_options, opts); } + for (auto &group : validator_groups_) { + td::actor::send_closure(group.second.actor, 
&ValidatorGroup::update_options, opts); + } + for (auto &group : next_validator_groups_) { + td::actor::send_closure(group.second.actor, &ValidatorGroup::update_options, opts); + } opts_ = std::move(opts); } diff --git a/validator/manager.hpp b/validator/manager.hpp index f76900a9e..12354c634 100644 --- a/validator/manager.hpp +++ b/validator/manager.hpp @@ -371,6 +371,8 @@ class ValidatorManagerImpl : public ValidatorManager { td::Promise promise) override; void get_persistent_state_slice(BlockIdExt block_id, BlockIdExt masterchain_block_id, td::int64 offset, td::int64 max_length, td::Promise promise) override; + void get_previous_persistent_state_files( + BlockSeqno cur_mc_seqno, td::Promise>> promise) override; void get_block_proof(BlockHandle handle, td::Promise promise) override; void get_block_proof_link(BlockHandle block_id, td::Promise promise) override; void get_key_block_proof(BlockIdExt block_id, td::Promise promise) override; @@ -590,7 +592,7 @@ class ValidatorManagerImpl : public ValidatorManager { void update_options(td::Ref opts) override; - void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { if (queue_size_counter_.empty()) { if (last_masterchain_state_.is_null()) { promise.set_error(td::Status::Error(ErrorCode::notready, "not ready")); diff --git a/validator/queue-size-counter.cpp b/validator/queue-size-counter.cpp index 4780f202c..eb8580894 100644 --- a/validator/queue-size-counter.cpp +++ b/validator/queue-size-counter.cpp @@ -23,8 +23,8 @@ namespace ton::validator { -static td::Result calc_queue_size(const td::Ref &state) { - td::uint32 size = 0; +static td::Result calc_queue_size(const td::Ref &state) { + td::uint64 size = 0; TRY_RESULT(outq_descr, state->message_queue()); block::gen::OutMsgQueueInfo::Record qinfo; if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) { @@ -41,8 +41,8 @@ static td::Result calc_queue_size(const td::Ref &state) return size; } -static td::Result recalc_queue_size(const td::Ref &state, const td::Ref &prev_state, - td::uint32 prev_size) { +static td::Result recalc_queue_size(const td::Ref &state, const td::Ref &prev_state, + td::uint64 prev_size) { TRY_RESULT(outq_descr, state->message_queue()); block::gen::OutMsgQueueInfo::Record qinfo; if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) { @@ -56,7 +56,7 @@ static td::Result recalc_queue_size(const td::Ref &state return td::Status::Error("invalid message queue"); } vm::AugmentedDictionary prev_queue{prev_qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; - td::uint32 add = 0, rem = 0; + td::uint64 add = 0, rem = 0; bool ok = prev_queue.scan_diff( queue, [&](td::ConstBitPtr, int, td::Ref prev_val, td::Ref new_val) -> bool { if (prev_val.not_null()) { @@ -88,11 +88,11 @@ void QueueSizeCounter::start_up() { alarm(); } -void QueueSizeCounter::get_queue_size(BlockIdExt block_id, td::Promise promise) { +void QueueSizeCounter::get_queue_size(BlockIdExt block_id, td::Promise promise) { get_queue_size_ex(block_id, simple_mode_ || is_block_too_old(block_id), std::move(promise)); } -void QueueSizeCounter::get_queue_size_ex(ton::BlockIdExt block_id, bool calc_whole, td::Promise promise) { +void QueueSizeCounter::get_queue_size_ex(ton::BlockIdExt block_id, bool calc_whole, td::Promise promise) { Entry &entry = results_[block_id]; if (entry.done_) { promise.set_result(entry.queue_size_); @@ -152,12 +152,12 @@ void QueueSizeCounter::get_queue_size_cont(BlockHandle handle, 
td::Refone_prev(true); - get_queue_size(prev_block_id, [=, SelfId = actor_id(this), manager = manager_](td::Result R) { + get_queue_size(prev_block_id, [=, SelfId = actor_id(this), manager = manager_](td::Result R) { if (R.is_error()) { td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, state->get_block_id(), R.move_as_error()); return; } - td::uint32 prev_size = R.move_as_ok(); + td::uint64 prev_size = R.move_as_ok(); td::actor::send_closure( manager, &ValidatorManager::wait_block_state_short, prev_block_id, 0, td::Timestamp::in(10.0), [=](td::Result> R) { @@ -171,7 +171,7 @@ void QueueSizeCounter::get_queue_size_cont(BlockHandle handle, td::Ref state, td::Ref prev_state, - td::uint32 prev_size) { + td::uint64 prev_size) { BlockIdExt block_id = state->get_block_id(); Entry &entry = results_[block_id]; CHECK(entry.started_); @@ -252,7 +252,7 @@ void QueueSizeCounter::process_top_shard_blocks_cont(td::Ref s void QueueSizeCounter::get_queue_size_ex_retry(BlockIdExt block_id, bool calc_whole, td::Promise promise) { get_queue_size_ex(block_id, calc_whole, - [=, promise = std::move(promise), SelfId = actor_id(this)](td::Result R) mutable { + [=, promise = std::move(promise), SelfId = actor_id(this)](td::Result R) mutable { if (R.is_error()) { LOG(WARNING) << "Failed to calculate queue size for block " << block_id.to_str() << ": " << R.move_as_error(); diff --git a/validator/queue-size-counter.hpp b/validator/queue-size-counter.hpp index fabb0cec3..4825a43c0 100644 --- a/validator/queue-size-counter.hpp +++ b/validator/queue-size-counter.hpp @@ -26,7 +26,7 @@ class QueueSizeCounter : public td::actor::Actor { } void start_up() override; - void get_queue_size(BlockIdExt block_id, td::Promise promise); + void get_queue_size(BlockIdExt block_id, td::Promise promise); void alarm() override; private: @@ -42,14 +42,14 @@ class QueueSizeCounter : public td::actor::Actor { bool started_ = false; bool done_ = false; bool calc_whole_ = false; - td::uint32 queue_size_ = 0; - std::vector> promises_; + td::uint64 queue_size_ = 0; + std::vector> promises_; }; std::map results_; - void get_queue_size_ex(BlockIdExt block_id, bool calc_whole, td::Promise promise); + void get_queue_size_ex(BlockIdExt block_id, bool calc_whole, td::Promise promise); void get_queue_size_cont(BlockHandle handle, td::Ref state); - void get_queue_size_cont2(td::Ref state, td::Ref prev_state, td::uint32 prev_size); + void get_queue_size_cont2(td::Ref state, td::Ref prev_state, td::uint64 prev_size); void on_error(BlockIdExt block_id, td::Status error); void process_top_shard_blocks(); diff --git a/validator/state-serializer.cpp b/validator/state-serializer.cpp index 4f10d959e..b27561b63 100644 --- a/validator/state-serializer.cpp +++ b/validator/state-serializer.cpp @@ -21,6 +21,7 @@ #include "adnl/utils.hpp" #include "ton/ton-io.hpp" #include "common/delay.h" +#include "td/utils/filesystem.h" namespace ton { @@ -84,6 +85,20 @@ void AsyncStateSerializer::alarm() { td::actor::send_closure(manager_, &ValidatorManager::get_top_masterchain_block, std::move(P)); } +void AsyncStateSerializer::request_previous_state_files() { + td::actor::send_closure( + manager_, &ValidatorManager::get_previous_persistent_state_files, masterchain_handle_->id().seqno(), + [SelfId = actor_id(this)](td::Result>> R) { + R.ensure(); + td::actor::send_closure(SelfId, &AsyncStateSerializer::got_previous_state_files, R.move_as_ok()); + }); +} + +void AsyncStateSerializer::got_previous_state_files(std::vector> files) { + previous_state_files_ = 
std::move(files); + request_masterchain_state(); +} + void AsyncStateSerializer::request_masterchain_state() { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), manager = manager_](td::Result> R) { if (R.is_error()) { @@ -133,37 +148,43 @@ void AsyncStateSerializer::next_iteration() { } CHECK(masterchain_handle_->id() == last_block_id_); if (attempt_ < max_attempt() && last_key_block_id_.id.seqno < last_block_id_.id.seqno && - need_serialize(masterchain_handle_) && opts_->get_state_serializer_enabled()) { - if (!have_masterchain_state_) { - LOG(ERROR) << "started serializing persistent state for " << masterchain_handle_->id().id.to_str(); - // block next attempts immediately, but send actual request later - running_ = true; - double delay = td::Random::fast(0, 3600); - LOG(WARNING) << "serializer delay = " << delay << "s"; - delay_action([SelfId = actor_id( - this)]() { td::actor::send_closure(SelfId, &AsyncStateSerializer::request_masterchain_state); }, - td::Timestamp::in(delay)); - return; - } - while (next_idx_ < shards_.size()) { - if (!need_monitor(shards_[next_idx_].shard_full())) { - next_idx_++; - } else { + need_serialize(masterchain_handle_)) { + if (!have_masterchain_state_ && !opts_->get_state_serializer_enabled()) { + LOG(ERROR) << "skipping serializing persistent state for " << masterchain_handle_->id().id.to_str() + << ": serializer is disabled"; + } else if (!have_masterchain_state_ && have_newer_persistent_state(masterchain_handle_->unix_time())) { + LOG(ERROR) << "skipping serializing persistent state for " << masterchain_handle_->id().id.to_str() + << ": newer key block with ts=" << last_known_key_block_ts_ << " exists"; + } else { + if (!have_masterchain_state_) { + LOG(ERROR) << "started serializing persistent state for " << masterchain_handle_->id().id.to_str(); // block next attempts immediately, but send actual request later running_ = true; - double delay = td::Random::fast(0, 1800); + double delay = td::Random::fast(0, 3600); LOG(WARNING) << "serializer delay = " << delay << "s"; delay_action( - [SelfId = actor_id(this), shard = shards_[next_idx_]]() { - td::actor::send_closure(SelfId, &AsyncStateSerializer::request_shard_state, shard); + [SelfId = actor_id(this)]() { + td::actor::send_closure(SelfId, &AsyncStateSerializer::request_previous_state_files); }, td::Timestamp::in(delay)); return; } + while (next_idx_ < shards_.size()) { + if (!need_monitor(shards_[next_idx_].shard_full())) { + next_idx_++; + } else { + running_ = true; + request_shard_state(shards_[next_idx_]); + return; + } + } + LOG(ERROR) << "finished serializing persistent state for " << masterchain_handle_->id().id.to_str(); } - LOG(ERROR) << "finished serializing persistent state for " << masterchain_handle_->id().id.to_str(); last_key_block_ts_ = masterchain_handle_->unix_time(); last_key_block_id_ = masterchain_handle_->id(); + previous_state_files_ = {}; + previous_state_cache_ = {}; + previous_state_cur_shards_ = {}; } if (!saved_to_db_) { running_ = true; @@ -177,9 +198,6 @@ void AsyncStateSerializer::next_iteration() { return; } if (masterchain_handle_->inited_next_left()) { - if (need_serialize(masterchain_handle_) && !opts_->get_state_serializer_enabled()) { - LOG(ERROR) << "skipping serializing persistent state for " << masterchain_handle_->id().id.to_str(); - } last_block_id_ = masterchain_handle_->one_next(true); have_masterchain_state_ = false; masterchain_handle_ = nullptr; @@ -204,6 +222,91 @@ void AsyncStateSerializer::got_masterchain_handle(BlockHandle handle) { 
   next_iteration();
 }
 
+class CachedCellDbReader : public vm::CellDbReader {
+ public:
+  CachedCellDbReader(std::shared_ptr<vm::CellDbReader> parent,
+                     std::shared_ptr<std::map<td::Bits256, td::Ref<vm::Cell>>> cache)
+      : parent_(std::move(parent)), cache_(std::move(cache)) {
+  }
+  td::Result<td::Ref<vm::DataCell>> load_cell(td::Slice hash) override {
+    ++total_reqs_;
+    DCHECK(hash.size() == 32);
+    if (cache_) {
+      auto it = cache_->find(td::Bits256{(const unsigned char*)hash.data()});
+      if (it != cache_->end()) {
+        ++cached_reqs_;
+        TRY_RESULT(loaded_cell, it->second->load_cell());
+        return loaded_cell.data_cell;
+      }
+    }
+    return parent_->load_cell(hash);
+  }
+  void print_stats() const {
+    LOG(WARNING) << "CachedCellDbReader stats : " << total_reqs_ << " reads, " << cached_reqs_ << " cached";
+  }
+ private:
+  std::shared_ptr<vm::CellDbReader> parent_;
+  std::shared_ptr<std::map<td::Bits256, td::Ref<vm::Cell>>> cache_;
+
+  td::uint64 total_reqs_ = 0;
+  td::uint64 cached_reqs_ = 0;
+};
+
+void AsyncStateSerializer::prepare_previous_state_cache(ShardIdFull shard) {
+  if (!opts_->get_fast_state_serializer_enabled()) {
+    return;
+  }
+  std::vector<ShardIdFull> prev_shards;
+  for (const auto& [_, prev_shard] : previous_state_files_) {
+    if (shard_intersects(shard, prev_shard)) {
+      prev_shards.push_back(prev_shard);
+    }
+  }
+  if (prev_shards == previous_state_cur_shards_) {
+    return;
+  }
+  previous_state_cur_shards_ = std::move(prev_shards);
+  previous_state_cache_ = {};
+  if (previous_state_cur_shards_.empty()) {
+    return;
+  }
+  td::Timer timer;
+  LOG(WARNING) << "Preloading previous persistent state for shard " << shard.to_str() << " ("
+               << previous_state_cur_shards_.size() << " files)";
+  std::map<td::Bits256, td::Ref<vm::Cell>> cells;
+  std::function<void(td::Ref<vm::Cell>)> dfs = [&](td::Ref<vm::Cell> cell) {
+    td::Bits256 hash = cell->get_hash().bits();
+    if (!cells.emplace(hash, cell).second) {
+      return;
+    }
+    bool is_special;
+    vm::CellSlice cs = vm::load_cell_slice_special(cell, is_special);
+    for (unsigned i = 0; i < cs.size_refs(); ++i) {
+      dfs(cs.prefetch_ref(i));
+    }
+  };
+  for (const auto& [file, prev_shard] : previous_state_files_) {
+    if (!shard_intersects(shard, prev_shard)) {
+      continue;
+    }
+    auto r_data = td::read_file(file);
+    if (r_data.is_error()) {
+      LOG(INFO) << "Reading " << file << " : " << r_data.move_as_error();
+      continue;
+    }
+    LOG(INFO) << "Reading " << file << " : " << td::format::as_size(r_data.ok().size());
+    auto r_root = vm::std_boc_deserialize(r_data.move_as_ok());
+    if (r_root.is_error()) {
+      LOG(WARNING) << "Deserialize error : " << r_root.move_as_error();
+      continue;
+    }
+    r_data = {};
+    dfs(r_root.move_as_ok());
+  }
+  LOG(WARNING) << "Preloaded previous state: " << cells.size() << " cells in " << timer.elapsed() << "s";
+  previous_state_cache_ = std::make_shared<std::map<td::Bits256, td::Ref<vm::Cell>>>(std::move(cells));
+}
+
 void AsyncStateSerializer::got_masterchain_state(td::Ref<MasterchainState> state,
                                                  std::shared_ptr<vm::CellDbReader> cell_db_reader) {
   if (!opts_->get_state_serializer_enabled()) {
@@ -211,6 +314,8 @@ void AsyncStateSerializer::got_masterchain_state(td::Ref<MasterchainState> state
     return;
   }
   LOG(ERROR) << "serializing masterchain state " << masterchain_handle_->id().id.to_str();
+  prepare_previous_state_cache(state->get_shard());
+  auto new_cell_db_reader = std::make_shared<CachedCellDbReader>(cell_db_reader, previous_state_cache_);
   have_masterchain_state_ = true;
   CHECK(next_idx_ == 0);
   CHECK(shards_.size() == 0);
@@ -220,9 +325,11 @@ void AsyncStateSerializer::got_masterchain_state(td::Ref<MasterchainState> state
     shards_.push_back(v->top_block_id());
   }
 
-  auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader,
+  auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader = new_cell_db_reader,
                      cancellation_token = cancellation_token_source_.get_cancellation_token()](td::FileFd& fd) mutable {
-    return vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31, std::move(cancellation_token));
+    auto res = vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31, std::move(cancellation_token));
+    cell_db_reader->print_stats();
+    return res;
   };
   auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result<td::Unit> R) {
     if (R.is_error() && R.error().code() == cancelled) {
@@ -273,9 +380,13 @@ void AsyncStateSerializer::got_shard_state(BlockHandle handle, td::Ref<ShardSta
     return;
   }
   LOG(ERROR) << "serializing shard state " << handle->id().id.to_str();
-  auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader,
+  prepare_previous_state_cache(state->get_shard());
+  auto new_cell_db_reader = std::make_shared<CachedCellDbReader>(cell_db_reader, previous_state_cache_);
+  auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader = new_cell_db_reader,
                      cancellation_token = cancellation_token_source_.get_cancellation_token()](td::FileFd& fd) mutable {
-    return vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31, std::move(cancellation_token));
+    auto res = vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31, std::move(cancellation_token));
+    cell_db_reader->print_stats();
+    return res;
   };
   auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result<td::Unit> R) {
     if (R.is_error() && R.error().code() == cancelled) {
@@ -329,6 +440,10 @@ bool AsyncStateSerializer::need_serialize(BlockHandle handle) {
          ValidatorManager::persistent_state_ttl(handle->unix_time()) > (UnixTime)td::Clocks::system();
 }
 
+bool AsyncStateSerializer::have_newer_persistent_state(UnixTime cur_ts) {
+  return cur_ts / (1 << 17) < last_known_key_block_ts_ / (1 << 17);
+}
+
 }  // namespace validator
 
 }  // namespace ton
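The two additions above implement a read-through cache for persistent-state serialization: consecutive persistent states share most of their cells, so prepare_previous_state_cache() walks the previous state's BOC once and indexes each distinct cell by hash, and CachedCellDbReader serves those hashes from memory, falling back to the cell database only for cells that are new. A self-contained sketch of the same indexing step; the function name index_boc_cells is illustrative and not part of the patch:

    #include <functional>
    #include <map>
    #include "td/utils/filesystem.h"
    #include "vm/boc.h"
    #include "vm/cellslice.h"

    // Deserialize a BOC file and index every distinct cell by its representation hash.
    std::map<td::Bits256, td::Ref<vm::Cell>> index_boc_cells(td::CSlice file_name) {
      std::map<td::Bits256, td::Ref<vm::Cell>> cells;
      auto r_data = td::read_file(file_name);
      if (r_data.is_error()) {
        return cells;
      }
      auto r_root = vm::std_boc_deserialize(r_data.move_as_ok());
      if (r_root.is_error()) {
        return cells;
      }
      std::function<void(td::Ref<vm::Cell>)> dfs = [&](td::Ref<vm::Cell> cell) {
        td::Bits256 hash = cell->get_hash().bits();
        if (!cells.emplace(hash, cell).second) {
          return;  // this cell and its whole subtree are already indexed
        }
        bool is_special;
        vm::CellSlice cs = vm::load_cell_slice_special(cell, is_special);
        for (unsigned i = 0; i < cs.size_refs(); ++i) {
          dfs(cs.prefetch_ref(i));
        }
      };
      dfs(r_root.move_as_ok());
      return cells;
    }

The patch hands exactly such a map to CachedCellDbReader, so vm::std_boc_serialize_to_file_large() resolves most load_cell() calls from memory instead of the cell database.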
diff --git a/validator/state-serializer.hpp b/validator/state-serializer.hpp
index 0bee70315..6d966f930 100644
--- a/validator/state-serializer.hpp
+++ b/validator/state-serializer.hpp
@@ -38,6 +38,7 @@ class AsyncStateSerializer : public td::actor::Actor {
   td::Ref<ValidatorManagerOptions> opts_;
 
   td::CancellationTokenSource cancellation_token_source_;
+  UnixTime last_known_key_block_ts_ = 0;
 
   td::actor::ActorId<ValidatorManager> manager_;
 
@@ -47,6 +48,11 @@ class AsyncStateSerializer : public td::actor::Actor {
   bool have_masterchain_state_ = false;
   std::vector<BlockIdExt> shards_;
 
+  std::vector<std::pair<std::string, ShardIdFull>> previous_state_files_;
+  std::shared_ptr<std::map<td::Bits256, td::Ref<vm::Cell>>> previous_state_cache_;
+  std::vector<ShardIdFull> previous_state_cur_shards_;
+
+  void prepare_previous_state_cache(ShardIdFull shard);
 
  public:
   AsyncStateSerializer(BlockIdExt block_id, td::Ref<ValidatorManagerOptions> opts,
@@ -60,12 +66,15 @@ class AsyncStateSerializer : public td::actor::Actor {
   bool need_serialize(BlockHandle handle);
   bool need_monitor(ShardIdFull shard);
+  bool have_newer_persistent_state(UnixTime cur_ts);
 
   void alarm() override;
   void start_up() override;
 
   void got_self_state(AsyncSerializerState state);
   void got_init_handle(BlockHandle handle);
 
+  void request_previous_state_files();
+  void got_previous_state_files(std::vector<std::pair<std::string, ShardIdFull>> files);
   void request_masterchain_state();
   void request_shard_state(BlockIdExt shard);
 
@@ -81,6 +90,10 @@ class AsyncStateSerializer : public td::actor::Actor {
     promise.set_result(last_block_id_.id.seqno);
   }
 
+  void update_last_known_key_block_ts(UnixTime ts) {
+    last_known_key_block_ts_ = std::max(last_known_key_block_ts_, ts);
+  }
+
   void saved_to_db() {
     saved_to_db_ = true;
     running_ = false;
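have_newer_persistent_state() leans on the same 2^17-second bucketing used elsewhere for persistent states (see the persistent_state_ttl() context line above): key blocks fall into ~36.4-hour periods of their unix time, and serialization is skipped when a newer known key block already falls into a later period. A small worked example (the timestamps are made up):

    constexpr ton::UnixTime period = 1 << 17;         // 131072 s, roughly 36.4 hours
    ton::UnixTime cur_ts = 1724112000;                // key block being considered (example value)
    ton::UnixTime newer_ts = cur_ts + 2 * period;     // later key block reported via update_last_known_key_block_ts()
    bool skip = cur_ts / period < newer_ts / period;  // true: next_iteration() logs "newer key block ... exists"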
diff --git a/validator/validator-group.cpp b/validator/validator-group.cpp
index 68e2b07ec..fc3ebe541 100644
--- a/validator/validator-group.cpp
+++ b/validator/validator-group.cpp
@@ -52,8 +52,8 @@ void ValidatorGroup::generate_block_candidate(
       }));
   run_collate_query(
       shard_, min_ts_, min_masterchain_block_id_, prev_block_ids_,
-      Ed25519_PublicKey{local_id_full_.ed25519_value().raw()}, validator_set_, manager_, td::Timestamp::in(10.0),
-      [SelfId = actor_id(this), cache = cached_collated_block_](td::Result<BlockCandidate> R) {
+      Ed25519_PublicKey{local_id_full_.ed25519_value().raw()}, validator_set_, opts_->get_collator_options(), manager_,
+      td::Timestamp::in(10.0), [SelfId = actor_id(this), cache = cached_collated_block_](td::Result<BlockCandidate> R) {
         td::actor::send_closure(SelfId, &ValidatorGroup::generated_block_candidate, std::move(cache), std::move(R));
       });
 }
diff --git a/validator/validator-group.hpp b/validator/validator-group.hpp
index f99402647..3499da9d7 100644
--- a/validator/validator-group.hpp
+++ b/validator/validator-group.hpp
@@ -64,6 +64,10 @@ class ValidatorGroup : public td::actor::Actor {
   void get_validator_group_info_for_litequery(
       td::Promise<tl_object_ptr<lite_api::liteServer_nonfinal_validatorGroupInfo>> promise);
 
+  void update_options(td::Ref<ValidatorManagerOptions> opts) {
+    opts_ = std::move(opts);
+  }
+
   ValidatorGroup(ShardIdFull shard, PublicKeyHash local_id, ValidatorSessionId session_id,
                  td::Ref<ValidatorSet> validator_set, validatorsession::ValidatorSessionOptions config,
                  td::actor::ActorId<keyring::Keyring> keyring, td::actor::ActorId<adnl::Adnl> adnl,
diff --git a/validator/validator-options.hpp b/validator/validator-options.hpp
index 37006bdad..04aeb69bb 100644
--- a/validator/validator-options.hpp
+++ b/validator/validator-options.hpp
@@ -144,6 +144,12 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions {
   bool get_state_serializer_enabled() const override {
     return state_serializer_enabled_;
   }
+  td::Ref<CollatorOptions> get_collator_options() const override {
+    return collator_options_;
+  }
+  bool get_fast_state_serializer_enabled() const override {
+    return fast_state_serializer_enabled_;
+  }
 
   void set_zero_block_id(BlockIdExt block_id) override {
     zero_block_id_ = block_id;
@@ -227,6 +233,12 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions {
   void set_state_serializer_enabled(bool value) override {
     state_serializer_enabled_ = value;
   }
+  void set_collator_options(td::Ref<CollatorOptions> value) override {
+    collator_options_ = std::move(value);
+  }
+  void set_fast_state_serializer_enabled(bool value) override {
+    fast_state_serializer_enabled_ = value;
+  }
 
   ValidatorManagerOptionsImpl *make_copy() const override {
     return new ValidatorManagerOptionsImpl(*this);
@@ -279,6 +291,8 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions {
   bool celldb_preload_all_ = false;
   td::optional<double> catchain_max_block_delay_;
   bool state_serializer_enabled_ = true;
+  td::Ref<CollatorOptions> collator_options_{true};
+  bool fast_state_serializer_enabled_ = false;
 };
 
 }  // namespace validator
diff --git a/validator/validator.h b/validator/validator.h
index 9082fd882..3bceec6fe 100644
--- a/validator/validator.h
+++ b/validator/validator.h
@@ -51,6 +51,21 @@ struct PerfTimerStats {
   std::deque<std::pair<double, double>> stats;  // <Time::now(), duration>
 };
 
+struct CollatorOptions : public td::CntObject {
+  bool deferring_enabled = true;
+
+  // Defer messages from account after Xth message in block (excluding first messages from transactions)
+  td::uint32 defer_messages_after = 10;
+  // Defer all messages if out msg queue size is greater than X (excluding first messages from transactions)
+  td::uint64 defer_out_queue_size_limit = 2048;
+
+  // See Collator::process_dispatch_queue
+  td::uint32 dispatch_phase_2_max_total = 150;
+  td::uint32 dispatch_phase_3_max_total = 150;
+  td::uint32 dispatch_phase_2_max_per_initiator = 20;
+  td::optional<td::uint32> dispatch_phase_3_max_per_initiator;  // Default - depends on out msg queue size
+};
+
 struct ValidatorManagerOptions : public td::CntObject {
  public:
   enum class ShardCheckMode { m_monitor, m_validate };
@@ -91,6 +106,8 @@ struct ValidatorManagerOptions : public td::CntObject {
   virtual bool get_celldb_preload_all() const = 0;
   virtual td::optional<double> get_catchain_max_block_delay() const = 0;
   virtual bool get_state_serializer_enabled() const = 0;
+  virtual td::Ref<CollatorOptions> get_collator_options() const = 0;
+  virtual bool get_fast_state_serializer_enabled() const = 0;
 
   virtual void set_zero_block_id(BlockIdExt block_id) = 0;
   virtual void set_init_block_id(BlockIdExt block_id) = 0;
@@ -120,6 +137,8 @@ struct ValidatorManagerOptions : public td::CntObject {
   virtual void set_celldb_preload_all(bool value) = 0;
   virtual void set_catchain_max_block_delay(double value) = 0;
   virtual void set_state_serializer_enabled(bool value) = 0;
+  virtual void set_collator_options(td::Ref<CollatorOptions> value) = 0;
+  virtual void set_fast_state_serializer_enabled(bool value) = 0;
 
   static td::Ref<ValidatorManagerOptions> create(
       BlockIdExt zero_block_id, BlockIdExt init_block_id,
@@ -200,6 +219,8 @@ class ValidatorManagerInterface : public td::actor::Actor {
                                     td::Promise<td::BufferSlice> promise) = 0;
   virtual void get_persistent_state_slice(BlockIdExt block_id, BlockIdExt masterchain_block_id, td::int64 offset,
                                           td::int64 max_length, td::Promise<td::BufferSlice> promise) = 0;
+  virtual void get_previous_persistent_state_files(
+      BlockSeqno cur_mc_seqno, td::Promise<std::vector<std::pair<std::string, ShardIdFull>>> promise) = 0;
   virtual void get_block_proof(BlockHandle handle, td::Promise<td::BufferSlice> promise) = 0;
   virtual void get_block_proof_link(BlockHandle handle, td::Promise<td::BufferSlice> promise) = 0;
   virtual void get_block_handle(BlockIdExt block_id, bool force, td::Promise<BlockHandle> promise) = 0;
@@ -249,7 +270,7 @@ class ValidatorManagerInterface : public td::actor::Actor {
   virtual void prepare_perf_timer_stats(td::Promise<std::vector<PerfTimerStats>> promise) = 0;
   virtual void add_perf_timer_stat(std::string name, double duration) = 0;
 
-  virtual void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint32> promise) = 0;
+  virtual void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise) = 0;
 
   virtual void update_options(td::Ref<ValidatorManagerOptions> opts) = 0;
 };
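CollatorOptions and the fast-serializer flag are plain td::CntObject options, so the intended way to change them is through the setters declared above on a writable copy of the options object. A minimal sketch, assuming an existing td::Ref<ton::validator::ValidatorManagerOptions> named opts (the values are illustrative, not recommended defaults):

    td::Ref<ton::validator::CollatorOptions> collator_opts{true};  // fresh CollatorOptions with default values
    collator_opts.write().defer_messages_after = 5;                // start deferring earlier than the default 10
    collator_opts.write().defer_out_queue_size_limit = 1024;       // defer everything on a smaller queue backlog

    opts.write().set_collator_options(std::move(collator_opts));
    opts.write().set_fast_state_serializer_enabled(true);
    // The updated options are then handed to ValidatorManagerInterface::update_options();
    // ValidatorGroup::update_options() above is the per-group counterpart.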