diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 764b85bacca..223da150705 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -19,3 +19,4 @@
 - [ ] Documentation comments have been added / updated.
 - [ ] Code has been formatted via `zk fmt` and `zk lint`.
 - [ ] Spellcheck has been run via `zk spellcheck`.
+- [ ] Linkcheck has been run via `zk linkcheck`.
diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index 75ff7a43b8a..639b81a230a 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,5 +1,5 @@
 {
   "sdk/zksync-rs": "0.4.0",
-  "core": "20.1.0",
-  "prover": "10.1.0"
+  "core": "20.5.1",
+  "prover": "11.0.0"
 }
diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml
index d4a7963aa52..7853867e426 100644
--- a/.github/workflows/build-core-template.yml
+++ b/.github/workflows/build-core-template.yml
@@ -30,7 +30,6 @@ jobs:
           - server-v2
           - external-node
           - contract-verifier
-          - cross-external-nodes-checker
           - snapshots-creator
         platforms:
           - linux/amd64
@@ -74,7 +73,8 @@ jobs:
           ci_run yarn zk build
           ci_run curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key

-      - name: login to Docker registries
+      - name: Login to Docker registries
+        if: ${{ inputs.action == 'push' }}
         run: |
           ci_run docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
           ci_run gcloud auth configure-docker us-docker.pkg.dev -q
@@ -108,8 +108,6 @@ jobs:
             platform: linux/amd64,linux/arm64
           - name: contract-verifier
             platform: linux/amd64
-          - name: cross-external-nodes-checker
-            platform: linux/amd64
           - name: snapshots-creator
             platform: linux/amd64
     env:
diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml
new file mode 100644
index 00000000000..ef0dbdc12f5
--- /dev/null
+++ b/.github/workflows/check-links.yml
@@ -0,0 +1,41 @@
+name: Check Links
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+  merge_group:
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  linkcheck:
+    runs-on: [matterlabs-ci-runner]
+    steps:
+      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
+        with:
+          submodules: "recursive"
+      - name: Use Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: 18
+
+      - name: Setup environment
+        run: |
+          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+          echo $(pwd)/bin >> $GITHUB_PATH
+          echo IN_DOCKER=1 >> .env
+
+      - name: Start services
+        run: |
+          docker compose up -d zk
+
+      - name: Build zk
+        run: |
+          ci_run zk
+
+      - name: Run zk linkcheck
+        run: |
+          ci_run zk linkcheck
diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml
new file mode 100644
index 00000000000..ce8884f00f6
--- /dev/null
+++ b/.github/workflows/ci-common-reusable.yml
@@ -0,0 +1,37 @@
+name: Workflow template for CI jobs to be run on both Prover and Core Components
+on:
+  workflow_call:
+
+jobs:
+  build:
+    runs-on: [matterlabs-ci-runner]
+    env:
+      RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml"
+
+    steps:
+      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
+        with:
+          submodules: "recursive"
+
+      - name: Setup environment
+        run: |
+          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+          echo $(pwd)/bin >> $GITHUB_PATH
+          echo IN_DOCKER=1 >> .env
+
+      - name: Start services
+        run: |
+          docker-compose -f ${RUNNER_COMPOSE_FILE} pull
+          mkdir -p ./volumes/postgres
+          docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres
+          ci_run sccache --start-server
+
+      - name: Init
+        run: |
+          ci_run zk
+          ci_run zk db setup
+
+      # This does both linting and "building"; using `zk lint prover` is common practice within our repo.
+      # `zk lint prover` runs cargo clippy, and clippy runs cargo check behind the scenes, which is a lightweight version of cargo build.
+      - name: Lints
+        run: ci_run zk lint prover
diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml
index 03ede7fa732..6a0fc7a0737 100644
--- a/.github/workflows/ci-core-reusable.yml
+++ b/.github/workflows/ci-core-reusable.yml
@@ -118,8 +118,13 @@ jobs:
           ci_run sccache --show-stats
           ci_run cat /tmp/sccache_log.txt

   integration:
-    runs-on: [matterlabs-ci-runner]
+    strategy:
+      matrix:
+        consensus: [false,true]
+    env:
+      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,basic_witness_input_producer${{ matrix.consensus && ',consensus' || '' }}"
+    runs-on: [matterlabs-ci-runner]
     steps:
       - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
         with:
@@ -179,7 +184,7 @@ jobs:
       # `sleep 5` because we need to wait until server started properly
       - name: Run server
         run: |
-          ci_run zk server &>server.log &
+          ci_run zk server --components=$SERVER_COMPONENTS &>server.log &
           ci_run sleep 5

       - name: Run contract verifier
@@ -197,7 +202,7 @@ jobs:
         run: |
           ci_run pkill zksync_server || true
           ci_run sleep 2
-          ci_run zk test i revert
+          ENABLE_CONSENSUS=${{ matrix.consensus }} PASSED_ENV_VARS=ENABLE_CONSENSUS ci_run zk test i revert

       # This test should be the last one as soon as it
       # finished bootloader will be different
@@ -230,7 +235,14 @@ jobs:
           ci_run cat /tmp/sccache_log.txt

   external-node:
+    strategy:
+      matrix:
+        consensus: [false,true]
     runs-on: [matterlabs-ci-runner]
+
+    env:
+      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,basic_witness_input_producer${{ matrix.consensus && ',consensus' || '' }}"
+      EXT_NODE_FLAGS: "${{ matrix.consensus && '--enable-consensus' || '' }}"
     steps:
       - name: Checkout code # Checks out the repository under $GITHUB_WORKSPACE, so the job can access it.
@@ -291,23 +303,20 @@ jobs:
       # `sleep 30` because we need to wait until server started properly
       - name: Run server
         run: |
-          ci_run zk server &>server.log &
+          ci_run zk server --components=$SERVER_COMPONENTS &>server.log &
           ci_run sleep 30
-
+
       - name: Run external node
         run: |
           ci_run zk env ext-node-docker
           ci_run zk db setup
-          ci_run zk external-node &>ext-node.log &
+          ci_run zk external-node $EXT_NODE_FLAGS &>ext-node.log &
           ci_run sleep 30

       # TODO(PLA-653): Restore bridge tests for EN.
       - name: Integration tests
         run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|custom-erc20-bridge|snapshots-creator'

-      - name: Run Cross EN Checker
-        run: ci_run zk run cross-en-checker
-
       - name: Run revert test
         run: |
           ci_run zk env
@@ -315,14 +324,14 @@ jobs:
           ci_run pkill zksync_server || true
           ci_run sleep 2
           ci_run zk env
-          ci_run zk test i revert
+          ENABLE_CONSENSUS=${{ matrix.consensus }} PASSED_ENV_VARS=ENABLE_CONSENSUS ci_run zk test i revert
           # Check that the rollback was performed on the EN
           ci_run sleep 20
           ci_run grep -q 'Rollback successfully completed' ext-node.log
           # Restart the EN
-          ci_run zk server &>>server.log &
+          ci_run zk server --components=$SERVER_COMPONENTS &>>server.log &
           ci_run sleep 30
-          ZKSYNC_ENV=ext-node-docker ci_run zk external-node &>>ext-node.log &
+          ZKSYNC_ENV=ext-node-docker ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log &
           ci_run sleep 30

       - name: Run upgrade test
diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml
index cc1246cbf9a..ee6d5da9984 100644
--- a/.github/workflows/ci-prover-reusable.yml
+++ b/.github/workflows/ci-prover-reusable.yml
@@ -34,9 +34,6 @@ jobs:
       - name: Formatting
         run: ci_run bash -c "cd prover && cargo fmt --check"

-      - name: Lints
-        run: ci_run zk lint prover
-
   unit-tests:
     runs-on: [matterlabs-ci-runner]
     env:
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f15efa8a6a3..cfa752171f5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -49,7 +49,6 @@ jobs:
           - 'core/**'
           - '!core/CHANGELOG.md'
           - 'docker/contract-verifier/**'
-          - 'docker/cross-external-nodes-checker/**'
           - 'docker/external-node/**'
           - 'docker/server/**'
           - '.github/workflows/build-core-template.yml'
@@ -96,6 +95,13 @@ jobs:
     name: CI for Docs
     uses: ./.github/workflows/ci-docs-reusable.yml

+  # What needs to be run for both core and prover
+  ci-for-common:
+    needs: changed_files
+    if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
+    name: CI for Common Components (prover or core)
+    uses: ./.github/workflows/ci-common-reusable.yml
+
   build-contracts:
     name: Build contracts
     needs: changed_files
diff --git a/.gitignore b/.gitignore
index 20c5973e8f4..c2878f7f734 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,6 +35,8 @@ Cargo.lock
 !/etc/env/docker.toml
 !/etc/env/ext-node.toml
 !/etc/env/ext-node-docker.toml
+!/etc/env/consensus_config.json
+!/etc/env/en_consensus_config.json
 /etc/tokens/localhost.json
 /etc/zksolc-bin/*
 /etc/zkvyper-bin/*
diff --git a/Cargo.lock b/Cargo.lock index 05bb752676f..f0b6b28314b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,11 +14,11 @@ dependencies = [ [[package]] name = "actix-codec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617a8268e3537fe1d8c9ead925fca49ef6400927ee7bc26750e90ecee14ce4b8" +checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "bytes", "futures-core", "futures-sink", @@ -31,9 +31,9 @@ [[package]] name = "actix-cors" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b340e9cfa5b08690aae90fb61beb44e9b06f44fe3d0f93781aaa58cfba86245e" +checksum = "0346d8c1f762b41b458ed3145eea914966bb9ad20b9be0d6d463b20d45586370" dependencies = [ "actix-utils",
"actix-web", @@ -46,17 +46,17 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.4.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92ef85799cba03f76e4f7c10f533e66d87c9a7e7055f3391f09000ad8351bc9" +checksum = "129d4c88e98860e1758c5de288d1632b07970a16d59bdf7b8d66053d582bb71f" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", "ahash 0.8.7", - "base64 0.21.5", - "bitflags 2.4.1", + "base64 0.21.7", + "bitflags 2.4.2", "brotli", "bytes", "bytestring", @@ -80,7 +80,7 @@ dependencies = [ "tokio", "tokio-util", "tracing", - "zstd 0.12.4", + "zstd 0.13.0", ] [[package]] @@ -89,15 +89,15 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ - "quote 1.0.33", - "syn 2.0.38", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "actix-router" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66ff4d247d2b160861fa2866457e85706833527840e4133f8f49aa423a38799" +checksum = "d22475596539443685426b6bdadb926ad0ecaefdfc5fb05e5e3441f15463c511" dependencies = [ "bytestring", "http", @@ -157,9 +157,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "4.4.0" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4a5b5e29603ca8c94a77c65cf874718ceb60292c5a5c3e5f4ace041af462b9" +checksum = "e43428f3bf11dee6d166b00ec2df4e3aa8cc1606aaa0b7433c146852e2f4e03b" dependencies = [ "actix-codec", "actix-http", @@ -202,9 +202,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb1f50ebbb30eca122b188319a4398b3f7bb4a8cdf50ecfb73bfc6a3c3ce54f5" dependencies = [ "actix-router", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -354,9 +354,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", @@ -368,43 +368,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "arr_macro" @@ -423,7 +423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.33", + "quote 1.0.35", "syn 1.0.109", ] @@ -471,9 +471,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-compression" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" +checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" dependencies = [ "brotli", "flate2", @@ -487,11 +487,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ - "event-listener 4.0.0", + "event-listener 4.0.3", "event-listener-strategy", "pin-project-lite", ] @@ -513,20 +513,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -535,7 +535,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ - "futures 0.3.28", + "futures 0.3.30", "pharos", "rustc_version", ] @@ -572,14 +572,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" +checksum = "823b8bb275161044e2ac7a25879cb3e2480cb403e3943022c7c769c599b756aa" dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 1.0.109", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -671,9 +670,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = 
"9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -708,7 +707,7 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.28", + "futures 0.3.30", "hex", "lazy_static", "num_cpus", @@ -731,7 +730,7 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.28", + "futures 0.3.30", "hex", "lazy_static", "num_cpus", @@ -776,12 +775,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -810,9 +809,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" dependencies = [ "serde", ] @@ -938,7 +937,7 @@ name = "block_reverter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.4.18", "serde_json", "tokio", "vlog", @@ -964,15 +963,15 @@ dependencies = [ [[package]] name = "boojum" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#84754b066959c8fdfb77edf730fc13ed87404907" +source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#93b5e0f0dbff0a9b606d9025e207c8405c141bd9" dependencies = [ "arrayvec 0.7.4", "bincode", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", "convert_case 0.6.0", - "crossbeam 0.8.2", - "crypto-bigint 0.5.3", + "crossbeam 0.8.4", + "crypto-bigint 0.5.5", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", "derivative", "ethereum-types", @@ -1009,10 +1008,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" dependencies = [ "once_cell", - "proc-macro-crate 2.0.1", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro-crate 2.0.2", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", "syn_derive", ] @@ -1029,9 +1028,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448" +checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1076,16 +1075,16 @@ version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] [[package]] name = "bytecount" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1a12477b7237a01c11a80a51278165f9ba0edd28fa6db00a65ab230320dc58c" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "byteorder" @@ -1104,9 +1103,9 @@ dependencies = [ [[package]] name = "bytestring" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"238e4886760d98c4f899360c834fa93e62cf7f721ac3c2da375cbdf4b8679aae" +checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" dependencies = [ "bytes", ] @@ -1143,9 +1142,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" dependencies = [ "serde", ] @@ -1246,9 +1245,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1256,14 +1255,14 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -1272,15 +1271,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -1300,10 +1299,11 @@ dependencies = [ [[package]] name = "circuit_definitions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#43aeb53d7d9c909508a98f9fc140edff0e9d2357" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#de2ecad62ac8c12777e576dca20311ad8ec770d1" dependencies = [ - "crossbeam 0.8.2", + "crossbeam 0.8.4", "derivative", + "seq-macro", "serde", "snark_wrapper", "zk_evm 1.4.0", @@ -1313,9 +1313,9 @@ dependencies = [ [[package]] name = "circuit_definitions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#ef77c44f919ba161df5976ec3899cf57a1585e7c" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#8af5e7b54df4cafbc27506709b8b2009541cd216" dependencies = [ - "crossbeam 0.8.2", + "crossbeam 0.8.4", "derivative", "seq-macro", "serde", @@ -1334,9 +1334,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -1372,9 +1372,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.6" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" dependencies = [ "clap_builder", "clap_derive", @@ -1382,26 +1382,26 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.6" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" dependencies = [ "anstream", "anstyle", - "clap_lex 0.5.1", + "clap_lex 0.6.0", "strsim 0.10.0", ] [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -1415,9 +1415,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "codegen" @@ -1454,7 +1454,7 @@ dependencies = [ "coins-core", "digest 0.10.7", "hmac", - "k256 0.13.2", + "k256 0.13.3", "serde", "sha2 0.10.8", "thiserror", @@ -1482,7 +1482,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bech32", "bs58", "digest 0.10.7", @@ -1523,19 +1523,19 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" dependencies = [ - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] name = "console" -version = "0.15.7" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", "lazy_static", "libc", - "windows-sys 0.45.0", + "windows-sys 0.52.0", ] [[package]] @@ -1553,9 +1553,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" @@ -1572,8 +1572,8 @@ version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "unicode-xid 0.2.4", ] @@ -1611,9 +1611,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" 
dependencies = [ "core-foundation-sys", "libc", @@ -1621,15 +1621,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbc60abd742b35f2492f808e1abbb83d45f72db402e14c55057edc9c7b1e9e4" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -1694,24 +1694,6 @@ dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "cross_external_nodes_checker" -version = "0.1.0" -dependencies = [ - "anyhow", - "ctrlc", - "envy", - "futures 0.3.28", - "serde", - "serde_json", - "tokio", - "tracing", - "vlog", - "zksync_types", - "zksync_utils", - "zksync_web3_decl", -] - [[package]] name = "crossbeam" version = "0.7.3" @@ -1728,16 +1710,15 @@ dependencies = [ [[package]] name = "crossbeam" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.15", - "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", + "crossbeam-channel 0.5.11", + "crossbeam-deque 0.8.5", + "crossbeam-epoch 0.9.18", + "crossbeam-queue 0.3.11", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1752,12 +1733,11 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1773,13 +1753,12 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", + "crossbeam-epoch 0.9.18", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1793,21 +1772,17 @@ dependencies = [ "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", - "memoffset 0.5.6", + "memoffset", "scopeguard", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", - "memoffset 0.9.0", - "scopeguard", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1823,12 +1798,11 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1844,12 +1818,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1871,9 +1842,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1905,11 +1876,11 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#84754b066959c8fdfb77edf730fc13ed87404907" +source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#93b5e0f0dbff0a9b606d9025e207c8405c141bd9" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -1919,8 +1890,8 @@ version = "0.1.0" source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#ed8ab8984cae05d00d9d62196753c8d40df47c7d" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "serde", "syn 1.0.109", ] @@ -1936,12 +1907,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.1" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" +checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b" dependencies = [ "nix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1961,9 +1932,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.70+curl-8.5.0" +version = "0.4.71+curl-8.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0333d8849afe78a4c8102a429a446bfdd055832af071945520e835ae2d841e" +checksum = "c7b12a7ab780395666cb576203dc3ed6e01513754939a600b85196ccf5356bc5" dependencies = [ "cc", "libc", @@ -1997,9 +1968,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -2020,8 +1991,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "strsim 0.10.0", "syn 1.0.109", ] @@ -2033,7 +2004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.33", + "quote 1.0.35", "syn 1.0.109", ] @@ -2044,7 +2015,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.14.2", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core", @@ -2063,7 +2034,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ "serde", - "uuid 1.5.0", + "uuid 1.7.0", ] [[package]] @@ -2089,9 +2060,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", "serde", @@ -2103,8 +2074,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -2115,8 +2086,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "rustc_version", "syn 1.0.109", ] @@ -2228,10 +2199,10 @@ checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der 0.7.8", "digest 0.10.7", - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", "rfc6979 0.4.0", "signature 2.2.0", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] @@ -2246,15 +2217,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.8", + "subtle", "zeroize", ] @@ -2289,12 +2261,12 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.13.7" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9775b22bc152ad86a0cf23f0f348b884b26add12bf741e7ffc4d4ab2ab4d205" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct 0.2.0", - "crypto-bigint 0.5.3", + "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.0", "generic-array", @@ -2308,9 +2280,9 @@ dependencies = [ [[package]] name = "elsa" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "714f766f3556b44e7e4776ad133fcc3445a489517c25c704ace411bb14790194" +checksum = "d98e71ae4df57d214182a2e5cb90230c0192c6ddfcaa05c36453d46a54713e10" dependencies = [ "stable_deref_trait", ] @@ -2345,10 +2317,10 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "hex", - "k256 0.13.2", + "k256 0.13.3", "log", "rand 0.8.5", "rlp", @@ -2357,6 +2329,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "env_filter" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +dependencies = [ + "log", + "regex", +] + [[package]] name = "env_logger" version = "0.9.3" @@ -2372,9 +2354,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", @@ -2383,6 +2365,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e7cf40684ae96ade6232ed84582f40ce0a66efcd43a5117aef610534f8e0b8" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + [[package]] name = "envy" version = "0.4.2" @@ -2400,12 +2395,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2500,9 +2495,9 @@ dependencies = [ [[package]] name = "ethers" -version = "2.0.11" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5344eea9b20effb5efeaad29418215c4d27017639fd1f908260f59cbbd226e" +checksum = "6c7cd562832e2ff584fa844cd2f6e5d4f35bbe11b28c7c9b8df957b2e1d0c701" dependencies = [ "ethers-addressbook", "ethers-contract", @@ -2516,9 +2511,9 @@ dependencies = [ [[package]] name = "ethers-addressbook" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bf35eb7d2e2092ad41f584951e08ec7c077b142dba29c4f1b8f52d2efddc49c" +checksum = "35dc9a249c066d17e8947ff52a4116406163cf92c7f0763cb8c001760b26403f" dependencies = [ "ethers-core", "once_cell", @@ -2528,9 +2523,9 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.11" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0111ead599d17a7bff6985fd5756f39ca7033edc79a31b23026a8d5d64fa95cd" +checksum = "43304317c7f776876e47f2f637859f6d0701c1ec7930a150f169d5fbe7d76f5a" dependencies = [ "const-hex", "ethers-contract-abigen", @@ -2547,9 +2542,9 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbdfb952aafd385b31d316ed80d7b76215ce09743c172966d840e96924427e0c" +checksum = "f9f96502317bf34f6d71a3e3d270defaa9485d754d789e15a8e04a84161c95eb" dependencies = [ "Inflector", "const-hex", @@ -2558,48 +2553,48 @@ dependencies = [ "ethers-etherscan", "eyre", "prettyplease", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "regex", "reqwest", "serde", "serde_json", - "syn 2.0.38", + "syn 2.0.48", "toml", "walkdir", ] [[package]] name = "ethers-contract-derive" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7465c814a2ecd0de0442160da13584205d1cdc08f4717a6511cad455bd5d7dc4" +checksum = "452ff6b0a64507ce8d67ffd48b1da3b42f03680dcf5382244e9c93822cbbf5de" dependencies = [ 
"Inflector", "const-hex", "ethers-contract-abigen", "ethers-core", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "serde_json", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] name = "ethers-core" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918b1a9ba585ea61022647def2f27c29ba19f6d2a4a4c8f68a9ae97fd5769737" +checksum = "aab3cef6cc1c9fd7f787043c81ad3052eff2b96a3878ef1526aa446311bdbfc9" dependencies = [ "arrayvec 0.7.4", "bytes", "cargo_metadata 0.18.1", "chrono", "const-hex", - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", "ethabi", "generic-array", - "k256 0.13.2", + "k256 0.13.3", "num_enum 0.7.2", "once_cell", "open-fastrlp", @@ -2608,7 +2603,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.38", + "syn 2.0.48", "tempfile", "thiserror", "tiny-keccak 2.0.2", @@ -2617,9 +2612,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "facabf8551b4d1a3c08cb935e7fca187804b6c2525cc0dafb8e5a6dd453a24de" +checksum = "16d45b981f5fa769e1d0343ebc2a44cfa88c9bc312eb681b676318b40cef6fb1" dependencies = [ "chrono", "ethers-core", @@ -2633,9 +2628,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "2.0.11" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681ece6eb1d10f7cf4f873059a77c04ff1de4f35c63dd7bccde8f438374fcb93" +checksum = "145211f34342487ef83a597c1e69f0d3e01512217a7c72cc8a25931854c7dca0" dependencies = [ "async-trait", "auto_impl", @@ -2660,13 +2655,13 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "2.0.11" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" +checksum = "fb6b15393996e3b8a78ef1332d6483c11d839042c17be58decc92fa8b1c3508a" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.5", + "base64 0.21.7", "bytes", "const-hex", "enr", @@ -2697,15 +2692,15 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "2.0.11" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb1b714e227bbd2d8c53528adb580b203009728b17d0d0e4119353aa9bc5532" +checksum = "b3b125a103b56aef008af5d5fb48191984aa326b50bfd2557d231dc499833de3" dependencies = [ "async-trait", "coins-bip32", "coins-bip39", "const-hex", - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", "eth-keystore", "ethers-core", "rand 0.8.5", @@ -2716,9 +2711,9 @@ dependencies = [ [[package]] name = "ethers-solc" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2e46e3ec8ef0c986145901fa9864205dc4dcee701f9846be2d56112d34bdea" +checksum = "d21df08582e0a43005018a858cc9b465c5fff9cf4056651be64f844e57d1f55f" dependencies = [ "cfg-if 1.0.0", "const-hex", @@ -2754,9 +2749,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.0" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" dependencies = [ "concurrent-queue", "parking", @@ -2769,15 +2764,15 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ - "event-listener 4.0.0", + "event-listener 4.0.3", "pin-project-lite", ] [[package]] name = "eyre" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" dependencies = [ "indenter", "once_cell", @@ -2831,17 +2826,17 @@ dependencies = [ "num-bigint 0.4.4", "num-integer", "num-traits", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "serde", "syn 1.0.109", ] [[package]] name = "fiat-crypto" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f69037fe1b785e84986b4f2cbcf647381876a00671d25ceef715d7812dd7e1dd" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "findshlibs" @@ -2929,9 +2924,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2969,7 +2964,7 @@ dependencies = [ [[package]] name = "franklin-crypto" version = "0.0.5" -source = "git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper#900332b8c2fe528b5008bb4e6bf2d3f206a9ae56" +source = "git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper#2546c63b91b59bdb0ad342d26f03fb57477550b2" dependencies = [ "arr_macro", "bellman_ce 0.3.2 (git+https://github.com/matter-labs/bellman?branch=snark-wrapper)", @@ -3028,9 +3023,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -3043,9 +3038,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -3053,15 +3048,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -3082,9 +3077,9 @@ dependencies = [ [[package]] name 
= "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-locks" @@ -3098,26 +3093,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" @@ -3131,9 +3126,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures 0.1.31", "futures-channel", @@ -3170,9 +3165,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if 1.0.0", "libc", @@ -3191,9 +3186,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -3254,7 +3249,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1087f1fbd2dd3f58c17c7574ddd99cd61cbbbc2c4dc81114b8687209b196cb" dependencies = [ "async-trait", - "base64 0.21.5", + "base64 0.21.7", "google-cloud-metadata", "google-cloud-token", "home", @@ -3288,7 +3283,7 @@ checksum = "ac04b29849ebdeb9fb008988cc1c4d1f0c9d121b4c7f1ddeb8061df124580e93" dependencies = [ "async-stream", "async-trait", - "base64 0.21.5", + "base64 0.21.7", "bytes", "futures-util", "google-cloud-auth", @@ -3313,9 +3308,9 @@ dependencies = [ [[package]] name = "google-cloud-token" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a" +checksum = "8f49c12ba8b21d128a2ce8585955246977fbce4415f680ebf9199b6f9d6d725f" dependencies = [ "async-trait", ] @@ -3327,7 +3322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ "dashmap", - "futures 0.3.28", + "futures 0.3.30", "futures-timer", "no-std-compat", "nonzero_ext", @@ -3371,7 +3366,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util", @@ -3380,15 +3375,19 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +dependencies = [ + "cfg-if 1.0.0", + "crunchy", +] [[package]] name = "handlebars" -version = "4.4.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c39b3bc2a8f715298032cf5087e58573809374b08160aa7d750582bdb82d2683" +checksum = "ab283476b99e66691dee3f1640fea91487a8d81f50fb5ecc75538f8f8879a1e4" dependencies = [ "log", "pest", @@ -3418,9 +3417,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash 0.8.7", "allocator-api2", @@ -3441,14 +3440,14 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.2", + "hashbrown 0.14.3", ] [[package]] name = "hdrhistogram" -version = "7.5.2" +version = "7.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ "byteorder", "num-traits", @@ -3460,7 +3459,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "headers-core", "http", @@ -3507,9 +3506,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "hex" @@ -3519,9 +3518,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac", ] @@ -3537,11 +3536,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3557,9 +3556,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = 
"0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -3568,9 +3567,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -3603,9 +3602,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -3618,7 +3617,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -3627,15 +3626,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", "hyper", "log", - "rustls 0.21.7", + "rustls 0.21.10", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", @@ -3662,9 +3661,9 @@ checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3699,6 +3698,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "im" version = "15.1.0" @@ -3746,8 +3755,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -3769,12 +3778,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", - "hashbrown 0.14.2", + "hashbrown 0.14.3", ] [[package]] @@ -3836,13 +3845,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = 
"0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.3.4", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3865,18 +3874,18 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" @@ -3889,9 +3898,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" dependencies = [ "wasm-bindgen", ] @@ -3902,7 +3911,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.28", + "futures 0.3.30", "futures-executor", "futures-util", "log", @@ -3950,7 +3959,7 @@ dependencies = [ "tokio-util", "tracing", "url", - "webpki-roots 0.26.0", + "webpki-roots 0.26.1", ] [[package]] @@ -4007,9 +4016,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d94b7505034e2737e688e1153bf81e6f93ad296695c43958d6da2e4321f0a990" dependencies = [ "heck 0.4.1", - "proc-macro-crate 2.0.1", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro-crate 2.0.2", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -4080,7 +4089,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "pem", "ring 0.16.20", "serde", @@ -4102,13 +4111,13 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if 1.0.0", "ecdsa 0.16.9", - "elliptic-curve 0.13.7", + "elliptic-curve 0.13.8", "once_cell", "sha2 0.10.8", "signature 2.2.0", @@ -4116,9 +4125,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -4180,18 +4189,18 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = 
"libloading" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if 1.0.0", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -4206,9 +4215,9 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", - "redox_syscall 0.4.1", + "redox_syscall", ] [[package]] @@ -4238,9 +4247,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" dependencies = [ "cc", "libc", @@ -4256,29 +4265,29 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linkme" -version = "0.3.17" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ed2ee9464ff9707af8e9ad834cffa4802f072caad90639c583dd3c62e6e608" +checksum = "8b53ad6a33de58864705954edb5ad5d571a010f9e296865ed43dc72a5621b430" dependencies = [ "linkme-impl", ] [[package]] name = "linkme-impl" -version = "0.3.17" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125974b109d512fccbc6c0244e7580143e460895dfd6ea7f8bbb692fd94396" +checksum = "04e542a18c94a9b6fcc7adb090fa3ba6b79ee220a16404f325672729f32a66ff" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "loadnext" @@ -4287,7 +4296,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.28", + "futures 0.3.30", "hex", "metrics", "num 0.3.1", @@ -4316,9 +4325,9 @@ dependencies = [ [[package]] name = "local-channel" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a493488de5f18c8ffcba89eebb8532ffc562dc400490eb65b84893fae0b178" +checksum = "b6cbc85e69b8df4b8bb8b89ec634e7189099cea8927a276b7384ce5488e53ec8" dependencies = [ "futures-core", "futures-sink", @@ -4327,9 +4336,9 @@ dependencies = [ [[package]] name = "local-waker" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e34f76eb3611940e0e7d53a9aaa4e6a3151f69541a282fd0dad5571420c53ff1" +checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" [[package]] name = "lock_api" @@ -4364,10 +4373,10 @@ checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" dependencies = [ "beef", "fnv", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "regex-syntax 0.6.29", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -4381,9 +4390,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.1" +version = "0.12.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" [[package]] name = "mach" @@ -4396,9 +4405,9 @@ dependencies = [ [[package]] name = "mach2" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" dependencies = [ "libc", ] @@ -4442,9 +4451,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -4455,21 +4464,12 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "merkle_tree_consistency_checker" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.4.18", "tracing", "vlog", "zksync_config", @@ -4492,11 +4492,11 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +checksum = "1d4fa7ce7c4862db464a37b0b31d89bca874562f034bd7993895572783d02950" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "hyper", "indexmap 1.9.3", "ipnet", @@ -4510,13 +4510,13 @@ dependencies = [ [[package]] name = "metrics-macros" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" +checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -4525,8 +4525,8 @@ version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" dependencies = [ - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", + "crossbeam-epoch 0.9.18", + "crossbeam-utils 0.8.19", "hashbrown 0.13.1", "metrics", "num_cpus", @@ -4552,9 +4552,9 @@ version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -4575,12 +4575,12 @@ dependencies = [ [[package]] name = "mini-moka" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" +checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" dependencies = [ - "crossbeam-channel 0.5.8", - "crossbeam-utils 0.8.16", + "crossbeam-channel 0.5.11", + "crossbeam-utils 0.8.19", "dashmap", "skeptic", "smallvec", @@ -4605,9 +4605,9 @@ 
dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "log", @@ -4680,7 +4680,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "cfg-if 1.0.0", "libc", ] @@ -4812,6 +4812,12 @@ dependencies = [ "serde", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" version = "0.2.5" @@ -4829,8 +4835,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -4907,7 +4913,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.3.4", "libc", ] @@ -4936,9 +4942,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -4947,26 +4953,26 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.1", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro-crate 2.0.2", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -5000,18 +5006,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -5026,9 +5032,9 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -5039,9 +5045,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" dependencies = [ "cc", "libc", @@ -5125,7 +5131,7 @@ dependencies = [ [[package]] name = "pairing_ce" version = "0.28.5" -source = "git+https://github.com/matter-labs/pairing.git#d06c2a112913b0abfb75996cc29a6b6075717e99" +source = "git+https://github.com/matter-labs/pairing.git#f55393fd366596eac792d78525d26e9c4d6ed1ca" dependencies = [ "byteorder", "cfg-if 1.0.0", @@ -5136,9 +5142,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -5150,13 +5156,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro-crate 2.0.2", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -5184,7 +5190,7 @@ checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.4.1", + "redox_syscall", "smallvec", "windows-targets 0.48.5", ] @@ -5260,15 +5266,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" +checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" dependencies = [ "memchr", "thiserror", @@ -5277,9 +5283,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" +checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" dependencies = [ "pest", "pest_generator", @@ -5287,22 +5293,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" +checksum = 
"2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "pest_meta" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" +checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" dependencies = [ "once_cell", "pest", @@ -5316,7 +5322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.1.0", + "indexmap 2.2.2", ] [[package]] @@ -5325,7 +5331,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ - "futures 0.3.28", + "futures 0.3.30", "rustc_version", ] @@ -5357,9 +5363,9 @@ checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" dependencies = [ "phf_generator", "phf_shared 0.11.2", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -5382,22 +5388,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -5420,7 +5426,7 @@ checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ "der 0.7.8", "pkcs8 0.10.2", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] @@ -5440,20 +5446,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der 0.7.8", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "platforms" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "plotters" @@ -5508,9 +5514,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b559898e0b4931ed2d3b959ab0c2da4d99cc644c4b0b1a35b4d344027f474023" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" 
[[package]] name = "powerfmt" @@ -5530,14 +5536,24 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "pretty_assertions" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ - "proc-macro2 1.0.69", - "syn 2.0.38", + "proc-macro2 1.0.78", + "syn 2.0.48", ] [[package]] @@ -5566,9 +5582,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" dependencies = [ "toml_datetime", "toml_edit 0.20.2", @@ -5581,8 +5597,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", "version_check", ] @@ -5593,8 +5609,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "version_check", ] @@ -5615,9 +5631,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -5640,9 +5656,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -5663,7 +5679,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "lazy_static", "num-traits", "rand 0.8.5", @@ -5675,9 +5691,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" +checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" dependencies = [ "bytes", "prost-derive", @@ -5685,13 +5701,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" +checksum = 
"c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" dependencies = [ "bytes", "heck 0.4.1", - "itertools 0.10.5", + "itertools 0.11.0", "log", "multimap", "once_cell", @@ -5700,22 +5716,22 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.38", + "syn 2.0.48", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" dependencies = [ "anyhow", - "itertools 0.10.5", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "itertools 0.11.0", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -5724,7 +5740,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "logos", "miette", "once_cell", @@ -5736,9 +5752,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" dependencies = [ "prost", ] @@ -5785,18 +5801,18 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] [[package]] name = "pulldown-cmark" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "memchr", "unicase", ] @@ -5807,7 +5823,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" dependencies = [ - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", "libc", "mach", "once_cell", @@ -5823,7 +5839,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", "libc", "mach2", "once_cell", @@ -5853,11 +5869,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ - "proc-macro2 1.0.69", + "proc-macro2 1.0.78", ] [[package]] @@ -5953,9 +5969,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" dependencies = [ "either", "rayon-core", @@ -5963,12 +5979,12 @@ dependencies = [ [[package]] name = "rayon-core" 
-version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.16", + "crossbeam-deque 0.8.5", + "crossbeam-utils 0.8.19", ] [[package]] @@ -5980,15 +5996,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.4.1" @@ -6011,13 +6018,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -6032,9 +6039,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -6079,11 +6086,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", @@ -6103,11 +6110,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.7", - "rustls-pemfile 1.0.3", + "rustls 0.21.10", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -6119,7 +6127,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.25.2", + "webpki-roots 0.25.4", "winreg", ] @@ -6239,7 +6247,7 @@ dependencies = [ "rkyv_derive", "seahash", "tinyvec", - "uuid 1.5.0", + "uuid 1.7.0", ] [[package]] @@ -6248,8 +6256,8 @@ version = "0.7.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -6270,8 +6278,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -6293,9 +6301,9 @@ checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" [[package]] name = "rsa" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474" +checksum = 
"5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ "const-oid", "digest 0.10.7", @@ -6306,16 +6314,16 @@ dependencies = [ "pkcs8 0.10.2", "rand_core 0.6.4", "signature 2.2.0", - "spki 0.7.2", + "spki 0.7.3", "subtle", "zeroize", ] [[package]] name = "rust_decimal" -version = "1.33.1" +version = "1.34.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" +checksum = "755392e1a2f77afd95580d3f0d0e94ac83eeeb7167552c9b5bca549e61a94d83" dependencies = [ "arrayvec 0.7.4", "borsh", @@ -6356,39 +6364,39 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.20" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.16.20", - "rustls-webpki 0.101.6", + "ring 0.17.7", + "rustls-webpki 0.101.7", "sct", ] [[package]] name = "rustls" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6b63262c9fcac8659abfaa96cac103d28166d3ff3eaf8f412e19f3ae9e5a48" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log", "ring 0.17.7", "rustls-pki-types", - "rustls-webpki 0.102.0", + "rustls-webpki 0.102.2", "subtle", "zeroize", ] @@ -6400,7 +6408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.3", + "rustls-pemfile 1.0.4", "schannel", "security-framework", ] @@ -6420,11 +6428,11 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] @@ -6433,31 +6441,31 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7673e0aa20ee4937c6aacfc12bb8341cfbf054cdd21df6bec5fd0629fe9339b" +checksum = "0a716eb65e3158e90e17cd93d855216e27bde02745ab842f2cab4a39dba1bacf" [[package]] name = "rustls-webpki" -version = "0.101.6" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring 0.17.7", + 
"untrusted 0.9.0", ] [[package]] name = "rustls-webpki" -version = "0.102.0" +version = "0.102.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" dependencies = [ "ring 0.17.7", "rustls-pki-types", @@ -6472,9 +6480,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "salsa20" @@ -6513,18 +6521,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6547,12 +6555,12 @@ dependencies = [ [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] @@ -6632,9 +6640,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" dependencies = [ "serde", ] @@ -6653,9 +6661,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "sentry" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0097a48cd1999d983909f07cb03b15241c5af29e5e679379efac1c06296abecc" +checksum = "6ce4b57f1b521f674df7a1d200be8ff5d74e3712020ee25b553146657b5377d5" dependencies = [ "httpdate", "native-tls", @@ -6672,9 +6680,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a7b80fa1dd6830a348d38a8d3a9761179047757b7dca29aef82db0118b9670" +checksum = "58cc8d4e04a73de8f718dc703943666d03f25d3e9e4d0fb271ca0b8c76dfa00e" dependencies = [ "backtrace", "once_cell", @@ -6684,9 +6692,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7615dc588930f1fd2e721774f25844ae93add2dbe2d3c2f995ce5049af898147" +checksum = "6436c1bad22cdeb02179ea8ef116ffc217797c028927def303bc593d9320c0d1" dependencies = [ "hostname", "libc", @@ -6698,9 +6706,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.7" 
+version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f51264e4013ed9b16558cce43917b983fa38170de2ca480349ceb57d71d6053" +checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ "once_cell", "rand 0.8.5", @@ -6711,9 +6719,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fe6180fa564d40bb942c9f0084ffb5de691c7357ead6a2b7a3154fae9e401dd" +checksum = "afdb263e73d22f39946f6022ed455b7561b22ff5553aca9be3c6a047fa39c328" dependencies = [ "findshlibs", "once_cell", @@ -6722,9 +6730,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323160213bba549f9737317b152af116af35c0410f4468772ee9b606d3d6e0fa" +checksum = "74fbf1c163f8b6a9d05912e1b272afa27c652e8b47ea60cb9a57ad5e481eea99" dependencies = [ "sentry-backtrace", "sentry-core", @@ -6732,9 +6740,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38033822128e73f7b6ca74c1631cef8868890c6cb4008a291cf73530f87b4eac" +checksum = "82eabcab0a047040befd44599a1da73d3adb228ff53b5ed9795ae04535577704" dependencies = [ "sentry-backtrace", "sentry-core", @@ -6744,9 +6752,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e663b3eb62ddfc023c9cf5432daf5f1a4f6acb1df4d78dd80b740b32dd1a740" +checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" dependencies = [ "debugid", "hex", @@ -6756,7 +6764,7 @@ dependencies = [ "thiserror", "time", "url", - "uuid 1.5.0", + "uuid 1.7.0", ] [[package]] @@ -6767,9 +6775,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -6786,20 +6794,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -6808,9 +6816,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" dependencies = [ "itoa", "serde", @@ -6856,8 
+6864,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -7002,9 +7010,9 @@ checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" [[package]] name = "similar" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" [[package]] name = "simple_asn1" @@ -7066,9 +7074,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" dependencies = [ "serde", ] @@ -7078,7 +7086,7 @@ name = "snapshots_creator" version = "0.1.0" dependencies = [ "anyhow", - "futures 0.3.28", + "futures 0.3.30", "prometheus_exporter", "rand 0.8.5", "tokio", @@ -7090,13 +7098,12 @@ dependencies = [ "zksync_env_config", "zksync_object_store", "zksync_types", - "zksync_utils", ] [[package]] name = "snark_wrapper" version = "0.1.0" -source = "git+https://github.com/matter-labs/snark-wrapper.git?branch=main#52f9ef98a7e6c86b405dd0ec42291dacf6e2bcb4" +source = "git+https://github.com/matter-labs/snark-wrapper.git?branch=main#42661a9ff9d00853441589679c101f71e3785f55" dependencies = [ "derivative", "rand 0.4.6", @@ -7105,9 +7112,9 @@ dependencies = [ [[package]] name = "snow" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ "aes-gcm", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -7147,7 +7154,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.28", + "futures 0.3.30", "http", "httparse", "log", @@ -7196,9 +7203,9 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der 0.7.8", @@ -7216,7 +7223,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" dependencies = [ - "itertools 0.12.0", + "itertools 0.12.1", "nom", "unicode_categories", ] @@ -7247,7 +7254,7 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-queue 0.3.8", + "crossbeam-queue 0.3.11", "dotenvy", "either", "event-listener 2.5.3", @@ -7258,7 +7265,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap 2.1.0", + "indexmap 2.2.2", "ipnetwork", "log", "memchr", @@ -7285,8 +7292,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 
1.0.78", + "quote 1.0.35", "sqlx-core", "sqlx-macros-core", "syn 1.0.109", @@ -7304,8 +7311,8 @@ dependencies = [ "heck 0.4.1", "hex", "once_cell", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "serde", "serde_json", "sha2 0.10.8", @@ -7326,9 +7333,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" dependencies = [ "atoi", - "base64 0.21.5", + "base64 0.21.7", "bigdecimal", - "bitflags 2.4.1", + "bitflags 2.4.2", "byteorder", "bytes", "chrono", @@ -7371,9 +7378,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" dependencies = [ "atoi", - "base64 0.21.5", + "base64 0.21.7", "bigdecimal", - "bitflags 2.4.1", + "bitflags 2.4.2", "byteorder", "chrono", "crc", @@ -7448,7 +7455,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" name = "storage_logs_dedup_migration" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.18", "tokio", "zksync_config", "zksync_dal", @@ -7511,8 +7518,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "syn 1.0.109", ] @@ -7541,8 +7548,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "rustversion", "syn 1.0.109", ] @@ -7554,10 +7561,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "rustversion", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -7568,9 +7575,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "svm-rs" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce290b5536ab2a42a61c9c6f22d6bfa8f26339c602aa62db4c978c95d1afc47" +checksum = "11297baafe5fa0c99d5722458eac6a5e25c01eb1b8e5cd137f54079093daa7a4" dependencies = [ "dirs", "fs2", @@ -7603,19 +7610,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.38" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", + "proc-macro2 1.0.78", + "quote 1.0.35", "unicode-ident", ] @@ -7626,9 +7633,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -7721,15 +7728,15 @@ dependencies 
= [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7745,9 +7752,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -7767,20 +7774,30 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2cfbe7811249c4c914b06141b8ac0f2cee2733fb883d05eb19668a45fc60c3d5" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "test-log" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f66edd6b6cd810743c0c71e1d085e92b01ce6a72782032e3f794c8284fe4bcdd" +checksum = "6159ab4116165c99fc88cce31f99fa2c9dbe08d3691cb38da02fc3b45f357d2b" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "env_logger 0.10.2", + "test-log-macros", +] + +[[package]] +name = "test-log-macros" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ba277e77219e9eea169e8508942db1bf5d8a41ff2db9b20aab5a5aadc9fa25d" +dependencies = [ + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -7800,22 +7817,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -7859,12 +7876,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "fe80ced77cbfb4cb91a94bf72b378b4b6791a0d9b7f09d0be747d1bdff4e68bd" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -7879,10 +7897,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" 
dependencies = [ + "num-conv", "time-core", ] @@ -7931,9 +7950,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.34.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -7954,9 +7973,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -7975,7 +7994,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.7", + "rustls 0.21.10", "tokio", ] @@ -7985,7 +8004,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.1", + "rustls 0.22.2", "rustls-pki-types", "tokio", ] @@ -8009,18 +8028,18 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls 0.21.7", + "rustls 0.21.10", "tokio", "tokio-rustls 0.24.1", "tungstenite", - "webpki-roots 0.25.2", + "webpki-roots 0.25.4", ] [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -8058,7 +8077,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "toml_datetime", "winnow", ] @@ -8069,7 +8088,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "serde", "serde_spanned", "toml_datetime", @@ -8104,8 +8123,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "base64 0.21.5", - "bitflags 2.4.1", + "base64 0.21.7", + "bitflags 2.4.2", "bytes", "futures-core", "futures-util", @@ -8124,7 +8143,7 @@ dependencies = [ "tower-layer", "tower-service", "tracing", - "uuid 1.5.0", + "uuid 1.7.0", ] [[package]] @@ -8157,9 +8176,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -8184,9 +8203,9 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.4" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +checksum = 
"ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", @@ -8205,9 +8224,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -8227,15 +8246,15 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" @@ -8250,7 +8269,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls 0.21.7", + "rustls 0.21.10", "sha1", "thiserror", "url", @@ -8307,9 +8326,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -8372,7 +8391,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ad948c1cb799b1a70f836077721a92a35ac177d4daddf4c20a633786d4cf618" dependencies = [ - "quote 1.0.33", + "quote 1.0.35", "syn 1.0.109", ] @@ -8390,11 +8409,11 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.8.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" +checksum = "f8cdd25c339e200129fe4de81451814e5228c9b771d57378817d6117cc2b3f97" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "log", "native-tls", "once_cell", @@ -8403,12 +8422,12 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", "serde", ] @@ -8443,9 +8462,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.5.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ "getrandom", "serde", @@ -8532,9 +8551,9 @@ name = "vise-macros" version = "0.1.0" source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -8622,9 +8641,9 @@ checksum 
= "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -8632,24 +8651,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -8659,38 +8678,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" dependencies = [ - "quote 1.0.33", + "quote 1.0.35", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -8701,9 +8720,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ -8716,16 +8735,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5388522c899d1e1c96a4c307e3797e0f697ba7c77dd8e0e625ecba9dd0342937" dependencies = [ "arrayvec 0.7.4", - "base64 0.21.5", + "base64 0.21.7", "bytes", "derive_more", "ethabi", "ethereum-types", - "futures 0.3.28", 
+ "futures 0.3.30", "futures-timer", "headers", "hex", - "idna", + "idna 0.4.0", "jsonrpc-core", "log", "once_cell", @@ -8742,15 +8761,15 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" dependencies = [ "rustls-pki-types", ] @@ -8806,20 +8825,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.42.2", + "windows-targets 0.52.0", ] [[package]] @@ -8832,18 +8842,12 @@ dependencies = [ ] [[package]] -name = "windows-targets" -version = "0.42.2" +name = "windows-sys" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.52.0", ] [[package]] @@ -8862,10 +8866,19 @@ dependencies = [ ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" +name = "windows-targets" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] [[package]] name = "windows_aarch64_gnullvm" @@ -8874,10 +8887,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" +name = "windows_aarch64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" @@ -8886,10 +8899,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] -name = "windows_i686_gnu" -version = "0.42.2" +name = "windows_aarch64_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" @@ -8898,10 +8911,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] -name = "windows_i686_msvc" -version = "0.42.2" +name = "windows_i686_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" @@ -8910,10 +8923,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" +name = "windows_i686_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" @@ -8922,10 +8935,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" +name = "windows_x86_64_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" @@ -8934,10 +8947,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" +name = "windows_x86_64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" @@ -8945,11 +8958,17 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" -version = "0.5.17" +version = "0.5.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "818ce546a11a9986bc24f93d0cdf38a8a1a400f1473ea8c82e59f6e0ffab9249" dependencies = [ "memchr", ] @@ -8971,7 +8990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" dependencies = [ "async_io_stream", - "futures 0.3.28", + "futures 
0.3.30", "js-sys", "log", "pharos", @@ -9009,29 +9028,29 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -9042,9 +9061,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", ] [[package]] @@ -9058,7 +9077,7 @@ dependencies = [ "bzip2", "constant_time_eq", "crc32fast", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", "flate2", "hmac", "pbkdf2 0.11.0", @@ -9209,7 +9228,7 @@ dependencies = [ [[package]] name = "zkevm_circuits" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=main#4fba537ccecc238e2da9c80844dc8c185e42466f" +source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=main#fb3e2574b5c890342518fc930c145443f039a105" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -9220,6 +9239,7 @@ dependencies = [ "itertools 0.10.5", "rand 0.4.6", "rand 0.8.5", + "seq-macro", "serde", "serde_json", "smallvec", @@ -9229,7 +9249,7 @@ dependencies = [ [[package]] name = "zkevm_circuits" version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.1#70234e99c2492740226b9f40091e7fccc7ef28e9" +source = "git+https://github.com/matter-labs/era-zkevm_circuits.git?branch=v1.4.1#873fe0fcf0bb8df6be0ae1938ce8469d6bf63ebd" dependencies = [ "arrayvec 0.7.4", "bincode", @@ -9263,7 +9283,7 @@ name = "zkevm_opcode_defs" version = "1.3.2" source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#dffacadeccdfdbff4bc124d44c595c4a6eae5013" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", "ethereum-types", "k256 0.11.6", @@ -9277,10 +9297,10 @@ name = "zkevm_opcode_defs" version = "1.4.1" source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.4.1#ba8228ff0582d21f64d6a319d50d0aec48e9e7b6" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types", - "k256 0.13.2", + "k256 0.13.3", "lazy_static", "sha2 0.10.8", "sha3 0.10.8", @@ -9294,9 +9314,9 @@ dependencies = [ "bincode", "circuit_testing", "codegen 0.2.0", - 
"crossbeam 0.8.2", + "crossbeam 0.8.4", "derivative", - "env_logger 0.10.0", + "env_logger 0.11.1", "hex", "num-bigint 0.4.4", "num-integer", @@ -9316,14 +9336,14 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#43aeb53d7d9c909508a98f9fc140edff0e9d2357" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#de2ecad62ac8c12777e576dca20311ad8ec770d1" dependencies = [ "bincode", "circuit_definitions 0.1.0 (git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0)", "codegen 0.2.0", - "crossbeam 0.8.2", + "crossbeam 0.8.4", "derivative", - "env_logger 0.10.0", + "env_logger 0.11.1", "hex", "rand 0.4.6", "rayon", @@ -9339,15 +9359,15 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#ef77c44f919ba161df5976ec3899cf57a1585e7c" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#8af5e7b54df4cafbc27506709b8b2009541cd216" dependencies = [ "bincode", "circuit_definitions 0.1.0 (git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1)", "codegen 0.2.0", - "crossbeam 0.8.2", + "crossbeam 0.8.4", "curl", "derivative", - "env_logger 0.10.0", + "env_logger 0.11.1", "hex", "lazy_static", "rand 0.4.6", @@ -9390,8 +9410,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15bc9b106393359ac013c2527db318ced4ca838d26ef03488233af557ebe5da8" dependencies = [ "async-trait", - "clap 4.4.6", - "env_logger 0.10.0", + "clap 4.4.18", + "env_logger 0.10.2", "ethers", "ethers-contract", "hex", @@ -9420,7 +9440,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "hex", "metrics", "serde_json", @@ -9438,7 +9458,10 @@ name = "zksync_commitment_utils" version = "0.1.0" dependencies = [ "multivm", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", + "zk_evm 1.4.1", "zkevm_test_harness 1.4.0", + "zkevm_test_harness 1.4.1", "zksync_types", "zksync_utils", ] @@ -9446,7 +9469,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "once_cell", @@ -9466,6 +9489,7 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", + "rand 0.8.5", "serde", "zksync_basic_types", ] @@ -9473,7 +9497,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "async-trait", @@ -9494,7 +9518,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "blst", @@ -9512,7 +9536,7 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "rand 0.8.5", @@ -9526,12 +9550,13 @@ dependencies = [ "zksync_consensus_storage", "zksync_consensus_sync_blocks", "zksync_consensus_utils", + "zksync_protobuf", ] [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "async-trait", @@ -9547,6 +9572,7 @@ dependencies = [ "zksync_concurrency", "zksync_consensus_crypto", "zksync_consensus_roles", + "zksync_consensus_storage", "zksync_consensus_utils", "zksync_protobuf", "zksync_protobuf_build", @@ -9555,7 +9581,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "bit-vec", @@ -9575,7 +9601,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "async-trait", @@ -9593,7 +9619,7 @@ dependencies = [ [[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "thiserror", @@ -9608,7 +9634,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "thiserror", "zksync_concurrency", @@ -9622,7 +9648,7 @@ dependencies = [ "chrono", "ctrlc", "ethabi", - "futures 0.3.28", + "futures 0.3.30", "hex", "lazy_static", "metrics", @@ -9673,7 +9699,7 @@ dependencies = [ "bitflags 1.3.2", "chrono", "ctrlc", - "futures 0.3.28", + "futures 0.3.30", "governor", "hex", "itertools 0.10.5", @@ -9711,14 +9737,18 @@ dependencies = [ "zksync_consensus_utils", 
"zksync_contracts", "zksync_dal", + "zksync_env_config", "zksync_eth_client", "zksync_eth_signer", "zksync_health_check", + "zksync_l1_contract_interface", "zksync_mempool", "zksync_merkle_tree", "zksync_mini_merkle_tree", "zksync_object_store", "zksync_protobuf", + "zksync_protobuf_build", + "zksync_prover_interface", "zksync_queued_job_processor", "zksync_state", "zksync_storage", @@ -9751,6 +9781,7 @@ dependencies = [ "assert_matches", "bigdecimal", "bincode", + "chrono", "hex", "itertools 0.10.5", "num 0.4.1", @@ -9813,7 +9844,7 @@ dependencies = [ "actix-rt", "actix-web", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "hex", "jsonrpc-core", "reqwest", @@ -9832,9 +9863,9 @@ name = "zksync_external_node" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.4.18", "envy", - "futures 0.3.28", + "futures 0.3.30", "prometheus_exporter", "semver", "serde", @@ -9845,7 +9876,9 @@ dependencies = [ "vise", "vlog", "zksync_basic_types", + "zksync_concurrency", "zksync_config", + "zksync_consensus_roles", "zksync_contracts", "zksync_core", "zksync_dal", @@ -9863,13 +9896,24 @@ version = "0.1.0" dependencies = [ "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "serde_json", "tokio", "tracing", ] +[[package]] +name = "zksync_l1_contract_interface" +version = "0.1.0" +dependencies = [ + "codegen 0.1.0", + "zkevm_test_harness 1.3.3", + "zksync_config", + "zksync_prover_interface", + "zksync_types", +] + [[package]] name = "zksync_mempool" version = "0.1.0" @@ -9883,7 +9927,7 @@ name = "zksync_merkle_tree" version = "0.1.0" dependencies = [ "assert_matches", - "clap 4.4.6", + "clap 4.4.18", "insta", "leb128", "once_cell", @@ -9899,6 +9943,7 @@ dependencies = [ "tracing-subscriber", "vise", "zksync_crypto", + "zksync_prover_interface", "zksync_storage", "zksync_system_constants", "zksync_types", @@ -9921,7 +9966,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "prometheus_exporter", "thiserror", "tokio", @@ -9962,7 +10007,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "bit-vec", @@ -9980,17 +10025,48 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "heck 0.4.1", "prettyplease", - "proc-macro2 1.0.69", + "proc-macro2 1.0.78", "prost-build", "prost-reflect", "protox", - "quote 1.0.33", - "syn 2.0.38", + "quote 1.0.35", + "syn 2.0.48", +] + +[[package]] +name = "zksync_protobuf_config" +version = "0.1.0" +dependencies = [ + "anyhow", + "pretty_assertions", + "prost", + "rand 0.8.5", + "serde_json", + "zksync_basic_types", + "zksync_config", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_types", +] + +[[package]] +name = "zksync_prover_interface" +version = "0.1.0" +dependencies = [ + "bincode", + "chrono", + "serde", + "serde_with", + "strum 0.24.1", + "tokio", + 
"zkevm_test_harness 1.3.3", + "zksync_object_store", + "zksync_types", ] [[package]] @@ -10010,13 +10086,18 @@ name = "zksync_server" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", - "futures 0.3.28", + "clap 4.4.18", + "futures 0.3.30", + "serde_json", "tikv-jemallocator", "tokio", "tracing", "vlog", + "zksync_concurrency", "zksync_config", + "zksync_consensus_crypto", + "zksync_consensus_executor", + "zksync_consensus_roles", "zksync_core", "zksync_env_config", "zksync_storage", @@ -10024,15 +10105,34 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_snapshots_applier" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "thiserror", + "tokio", + "tracing", + "vise", + "zksync_dal", + "zksync_object_store", + "zksync_types", + "zksync_utils", + "zksync_web3_decl", +] + [[package]] name = "zksync_state" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "itertools 0.10.5", "mini-moka", "rand 0.8.5", "tempfile", + "test-casing", "tokio", "tracing", "vise", @@ -10058,8 +10158,6 @@ dependencies = [ name = "zksync_system_constants" version = "0.1.0" dependencies = [ - "anyhow", - "num 0.3.1", "once_cell", "zksync_basic_types", "zksync_utils", @@ -10085,7 +10183,6 @@ dependencies = [ "anyhow", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono", - "codegen 0.1.0", "hex", "num 0.4.1", "num_enum 0.6.1", @@ -10099,10 +10196,6 @@ dependencies = [ "strum 0.24.1", "thiserror", "tokio", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zk_evm 1.4.0", - "zk_evm 1.4.1", - "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -10119,7 +10212,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bigdecimal", - "futures 0.3.28", + "futures 0.3.30", "hex", "itertools 0.10.5", "metrics", @@ -10159,15 +10252,6 @@ dependencies = [ "zstd-safe 5.0.2+zstd.1.5.2", ] -[[package]] -name = "zstd" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" -dependencies = [ - "zstd-safe 6.0.6", -] - [[package]] name = "zstd" version = "0.13.0" @@ -10187,16 +10271,6 @@ dependencies = [ "zstd-sys", ] -[[package]] -name = "zstd-safe" -version = "6.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" -dependencies = [ - "libc", - "zstd-sys", -] - [[package]] name = "zstd-safe" version = "7.0.0" diff --git a/Cargo.toml b/Cargo.toml index b781282d09a..ce9a65a80ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,25 +23,28 @@ members = [ "core/lib/env_config", "core/lib/eth_client", "core/lib/eth_signer", + "core/lib/l1_contract_interface", "core/lib/mempool", "core/lib/merkle_tree", "core/lib/mini_merkle_tree", "core/lib/node", "core/lib/object_store", "core/lib/prometheus_exporter", + "core/lib/prover_interface", "core/lib/queued_job_processor", "core/lib/state", "core/lib/storage", "core/lib/types", + "core/lib/protobuf_config", "core/lib/utils", "core/lib/vlog", "core/lib/multivm", "core/lib/vm_utils", "core/lib/web3_decl", + "core/lib/snapshots_applier", # Test infrastructure "core/tests/test_account", - "core/tests/cross_external_nodes_checker", "core/tests/loadnext", "core/tests/vm-benchmark", "core/tests/vm-benchmark/harness", diff --git a/bin/zk b/bin/zk index ca899fd5f46..f22ce90440f 100755 --- a/bin/zk +++ b/bin/zk @@ -41,7 +41,7 @@ check_subdirectory 
check_yarn_version if [ -z "$1" ]; then cd $ZKSYNC_HOME - yarn && yarn zk build + yarn install --frozen-lockfile && yarn zk build else # can't start this with yarn since it has quirks with `--` as an argument node -- $ZKSYNC_HOME/infrastructure/zk/build/index.js "$@" diff --git a/checks-config/era.dic b/checks-config/era.dic index fd14c003385..66480ebb51f 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -306,6 +306,7 @@ tokenomics validator validator's validator +validators Validators CHAINID PREVRANDAO @@ -804,6 +805,7 @@ hacky ceil Infura synth +proto AUTOGENERATED x19Ethereum diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 467e65237ed..ca00743af65 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,67 @@ # Changelog +## [20.5.1](https://github.com/matter-labs/zksync-era/compare/core-v20.5.0...core-v20.5.1) (2024-02-02) + + +### Bug Fixes + +* **db:** transaction index ([#998](https://github.com/matter-labs/zksync-era/issues/998)) ([2b03736](https://github.com/matter-labs/zksync-era/commit/2b037365543aa39a28601e63b30f9963e7d3e044)) + +## [20.5.0](https://github.com/matter-labs/zksync-era/compare/core-v20.4.0...core-v20.5.0) (2024-02-02) + + +### Features + +* **merkle-tree:** Do not wait for tree initialization when starting node ([#992](https://github.com/matter-labs/zksync-era/issues/992)) ([fdbfcb1](https://github.com/matter-labs/zksync-era/commit/fdbfcb1622ee1eccd380e1930ec5401c52b73567)) + + +### Bug Fixes + +* added consensus column back ([#986](https://github.com/matter-labs/zksync-era/issues/986)) ([b9b48d4](https://github.com/matter-labs/zksync-era/commit/b9b48d45fa5d854b21c3a3b9ff57665a788a53c5)) +* get_block_receipts test ([#989](https://github.com/matter-labs/zksync-era/issues/989)) ([c301359](https://github.com/matter-labs/zksync-era/commit/c30135902afa3c39d1ac0ce2ff3b70f5c1746373)) +* **vm:** Save empty bootloader memory for batches with ancient vms ([#991](https://github.com/matter-labs/zksync-era/issues/991)) ([af7f64f](https://github.com/matter-labs/zksync-era/commit/af7f64f37faa5300ae258874d74cdcfe009dfaae)) + +## [20.4.0](https://github.com/matter-labs/zksync-era/compare/core-v20.3.0...core-v20.4.0) (2024-01-31) + + +### Features + +* **en:** Revert "feat(en): Fix operator address assignment for ENs" ([#977](https://github.com/matter-labs/zksync-era/issues/977)) ([e051f7a](https://github.com/matter-labs/zksync-era/commit/e051f7a80bd1c1c5b76a6e74288fab9f820738b2)) + +## [20.3.0](https://github.com/matter-labs/zksync-era/compare/core-v20.2.0...core-v20.3.0) (2024-01-31) + + +### Features + +* add eth_getBlockReceipts ([#887](https://github.com/matter-labs/zksync-era/issues/887)) ([5dcbcfd](https://github.com/matter-labs/zksync-era/commit/5dcbcfdeb683b02d17b77031b0a2200fa69ac778)) +* **eth-sender:** metrics for finalized and safe L1 block numbers ([#972](https://github.com/matter-labs/zksync-era/issues/972)) ([32c1637](https://github.com/matter-labs/zksync-era/commit/32c163754d5e21b9996267728fe3f527ed8ec4da)) +* Optimized block tip seal criterion ([#968](https://github.com/matter-labs/zksync-era/issues/968)) ([8049eb3](https://github.com/matter-labs/zksync-era/commit/8049eb340eadcb2e9844465d8ea15ae8c08e0ef5)) +* Prover interface and L1 interface crates ([#959](https://github.com/matter-labs/zksync-era/issues/959)) ([4f7e107](https://github.com/matter-labs/zksync-era/commit/4f7e10783afdff67a24246f17f03b536f743352d)) + +## [20.2.0](https://github.com/matter-labs/zksync-era/compare/core-v20.1.0...core-v20.2.0) (2024-01-30) + + +### 
Features + +* added unauthenticated version of gcs object store ([#916](https://github.com/matter-labs/zksync-era/issues/916)) ([638a813](https://github.com/matter-labs/zksync-era/commit/638a813e1115c36d3d7fbed28f24658769b2b93e)) +* Adding EN snapshots applier ([#882](https://github.com/matter-labs/zksync-era/issues/882)) ([0d2ba09](https://github.com/matter-labs/zksync-era/commit/0d2ba09c5d4b607bd9da31fc4bf0ea8ca2b4d7b8)) +* consensus component config for main node and external node ([#881](https://github.com/matter-labs/zksync-era/issues/881)) ([1aed8de](https://github.com/matter-labs/zksync-era/commit/1aed8de0f1651686bf9e9f8aa7dc9ba15625cc42)) +* **en:** Make ENs detect reorgs earlier ([#964](https://github.com/matter-labs/zksync-era/issues/964)) ([b043cc8](https://github.com/matter-labs/zksync-era/commit/b043cc84cd9f5e9c6e80b810a019c713fb3076d3)) +* **en:** Restore state keeper storage from snapshot ([#885](https://github.com/matter-labs/zksync-era/issues/885)) ([a9553b5](https://github.com/matter-labs/zksync-era/commit/a9553b537a857a6f6a755cd700da4c096c1f80f0)) +* protobuf-generated json configs for the main node (BFT-371) ([#458](https://github.com/matter-labs/zksync-era/issues/458)) ([f938314](https://github.com/matter-labs/zksync-era/commit/f9383143b4f1f0c18af658980bae8ec93b6b588f)) +* Remove zkevm_test_harness public reexport from zksync_types ([#929](https://github.com/matter-labs/zksync-era/issues/929)) ([dd1a35e](https://github.com/matter-labs/zksync-era/commit/dd1a35eec006b40db66da73e6fa3d8963efb7d60)) +* **state-keeper:** track the time that individual transactions spend in mempool ([#941](https://github.com/matter-labs/zksync-era/issues/941)) ([fa45aa9](https://github.com/matter-labs/zksync-era/commit/fa45aa9bd87f284872c9831620b36f2f2339f75b)) +* **vm:** detailed circuit statistic ([#845](https://github.com/matter-labs/zksync-era/issues/845)) ([a20af60](https://github.com/matter-labs/zksync-era/commit/a20af609d6eda25e5530c30b360847f6eadb68d9)) +* **vm:** Support tracers for old vm ([#926](https://github.com/matter-labs/zksync-era/issues/926)) ([9fc2d95](https://github.com/matter-labs/zksync-era/commit/9fc2d95ebaa3670d573a2ed022603132be234a0e)) + + +### Bug Fixes + +* **api:** Order transaction traces in `debug_traceBlock*` methods ([#924](https://github.com/matter-labs/zksync-era/issues/924)) ([5918ef9](https://github.com/matter-labs/zksync-era/commit/5918ef925dae97aee428961c2dc61dd91bf2f07e)) +* **db:** Make `get_expected_l1_batch_timestamp()` more efficient ([#963](https://github.com/matter-labs/zksync-era/issues/963)) ([7334679](https://github.com/matter-labs/zksync-era/commit/73346792952c5538aafc42a2ee778f0069a98607)) +* **db:** Make `snapshot_recovery` migration backward-compatible ([#961](https://github.com/matter-labs/zksync-era/issues/961)) ([e756762](https://github.com/matter-labs/zksync-era/commit/e756762b934f4f2262ee02404b9d18f2f4431842)) +* **zksync_types:** Update SerializationTransactionError::OversizedData description ([#949](https://github.com/matter-labs/zksync-era/issues/949)) ([c95f3ee](https://github.com/matter-labs/zksync-era/commit/c95f3eeb03804ba2739b487288b20f6bf6997e47)) + ## [20.1.0](https://github.com/matter-labs/zksync-era/compare/core-v20.0.0...core-v20.1.0) (2024-01-23) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 272bcfc081c..725ad5a5699 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -20,6 +20,9 @@ zksync_state = { path = "../../lib/state" } 
zksync_basic_types = { path = "../../lib/basic_types" } zksync_contracts = { path = "../../lib/contracts" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } + prometheus_exporter = { path = "../../lib/prometheus_exporter" } zksync_health_check = { path = "../../lib/health_check" } zksync_web3_decl = { path = "../../lib/web3_decl" } diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 6bf06f04930..80caab713a7 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -4,9 +4,13 @@ use anyhow::Context; use serde::Deserialize; use url::Url; use zksync_basic_types::{Address, L1ChainId, L2ChainId}; -use zksync_core::api_server::{ - tx_sender::TxSenderConfig, - web3::{state::InternalApiConfig, Namespace}, +use zksync_consensus_roles::node; +use zksync_core::{ + api_server::{ + tx_sender::TxSenderConfig, + web3::{state::InternalApiConfig, Namespace}, + }, + consensus, }; use zksync_types::api::BridgeAddresses; use zksync_web3_decl::{ @@ -75,6 +79,12 @@ impl RemoteENConfig { } } +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub enum BlockFetcher { + ServerAPI, + Consensus, +} + /// This part of the external node config is completely optional to provide. /// It can tweak limits of the API, delay intervals of certain components, etc. /// If any of the fields are not provided, the default values will be used. @@ -406,14 +416,27 @@ impl PostgresConfig { } } +pub(crate) fn read_consensus_config() -> anyhow::Result<consensus::FetcherConfig> { + let path = std::env::var("EN_CONSENSUS_CONFIG_PATH") .context("EN_CONSENSUS_CONFIG_PATH env variable is not set")?; + let cfg = std::fs::read_to_string(&path).context(path)?; + let cfg: consensus::config::Config = + consensus::config::decode_json(&cfg).context("failed decoding JSON")?; + let node_key: node::SecretKey = consensus::config::read_secret("EN_CONSENSUS_NODE_KEY")?; + Ok(consensus::FetcherConfig { + executor: cfg.executor_config(node_key), + }) +} + /// External Node Config contains all the configuration required for the EN operation. /// It is split into three parts: required, optional and remote for easier navigation. -#[derive(Debug, Deserialize, Clone, PartialEq)] +#[derive(Debug, Clone)] pub struct ExternalNodeConfig { pub required: RequiredENConfig, pub postgres: PostgresConfig, pub optional: OptionalENConfig, pub remote: RemoteENConfig, + pub consensus: Option<consensus::FetcherConfig>, } impl ExternalNodeConfig { @@ -434,7 +457,6 @@ impl ExternalNodeConfig { let remote = RemoteENConfig::fetch(&client) .await .context("Unable to fetch required config values from the main node")?; - // We can query them from main node, but it's better to set them explicitly // as well to avoid connecting to wrong environment variables unintentionally. 
let eth_chain_id = HttpClientBuilder::default() @@ -479,6 +501,7 @@ impl ExternalNodeConfig { postgres, required, optional, + consensus: None, }) } } diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 3d5abde0a59..ee15d1a63ca 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -7,7 +7,8 @@ use metrics::EN_METRICS; use prometheus_exporter::PrometheusExporterConfig; use tokio::{sync::watch, task, time::sleep}; use zksync_basic_types::{Address, L2ChainId}; -use zksync_config::configs::database::MerkleTreeMode; +use zksync_concurrency::{ctx, scope}; +use zksync_config::configs::{chain::L1BatchCommitDataGeneratorMode, database::MerkleTreeMode}; use zksync_core::{ api_server::{ execution_sandbox::VmConcurrencyLimiter, @@ -16,6 +17,7 @@ use zksync_core::{ web3::{ApiBuilder, Namespace}, }, block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert}, + consensus, consistency_checker::ConsistencyChecker, l1_gas_price::MainNodeFeeParamsFetcher, metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig}, @@ -34,7 +36,6 @@ use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; use zksync_health_check::CheckHealth; use zksync_state::PostgresStorageCaches; use zksync_storage::RocksDB; -use zksync_types::l1_batch_commit_data_generator::RollupModeL1BatchCommitDataGenerator; use zksync_utils::wait_for_tasks::wait_for_tasks; mod config; @@ -170,22 +171,57 @@ async fn init_tasks( let main_node_client = <dyn MainNodeClient>::json_rpc(&main_node_url) .context("Failed creating JSON-RPC client for main node")?; let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); - let fetcher_cursor = { - let pool = singleton_pool_builder - .build() - .await - .context("failed to build a connection pool for `MainNodeFetcher`")?; - let mut storage = pool.access_storage_tagged("sync_layer").await?; - FetcherCursor::new(&mut storage) - .await - .context("failed to load `MainNodeFetcher` cursor from Postgres")? + + let fetcher_handle = match config.consensus.clone() { + None => { + let fetcher_cursor = { + let pool = singleton_pool_builder + .build() + .await + .context("failed to build a connection pool for `MainNodeFetcher`")?; + let mut storage = pool.access_storage_tagged("sync_layer").await?; + FetcherCursor::new(&mut storage) + .await + .context("failed to load `MainNodeFetcher` cursor from Postgres")? + }; + let fetcher = fetcher_cursor.into_fetcher( + Box::new(main_node_client), + action_queue_sender, + sync_state.clone(), + stop_receiver.clone(), + ); + tokio::spawn(fetcher.run()) + } + Some(cfg) => { + let pool = connection_pool.clone(); + let mut stop_receiver = stop_receiver.clone(); + let sync_state = sync_state.clone(); + #[allow(clippy::redundant_locals)] + tokio::spawn(async move { + let sync_state = sync_state; + let main_node_client = main_node_client; + scope::run!(&ctx::root(), |ctx, s| async { + s.spawn_bg(async { + let res = cfg.run(ctx, pool, action_queue_sender).await; + tracing::info!("Consensus actor stopped"); + res + }); + // TODO: information about the head block of the validators + // (currently just the main node) + // should also be provided over the gossip network. 
+ s.spawn_bg(async { + consensus::run_main_node_state_fetcher(ctx, &main_node_client, &sync_state) + .await?; + Ok(()) + }); + ctx.wait(stop_receiver.wait_for(|stop| *stop)).await??; + Ok(()) + }) + .await + .context("consensus actor") + }) + } }; - let fetcher = fetcher_cursor.into_fetcher( - Box::new(main_node_client), - action_queue_sender, - sync_state.clone(), - stop_receiver.clone(), - ); let metadata_calculator_config = MetadataCalculatorConfig { db_path: config.required.merkle_tree_path.clone(), @@ -197,7 +233,9 @@ async fn init_tasks( memtable_capacity: config.optional.merkle_tree_memtable_capacity(), stalled_writes_timeout: config.optional.merkle_tree_stalled_writes_timeout(), }; - let metadata_calculator = MetadataCalculator::new(metadata_calculator_config, None).await; + let metadata_calculator = MetadataCalculator::new(metadata_calculator_config, None) + .await + .context("failed initializing metadata calculator")?; healthchecks.push(Box::new(metadata_calculator.tree_health_check())); let consistency_checker = ConsistencyChecker::new( @@ -229,15 +267,16 @@ async fn init_tasks( .context("failed to build a tree_pool")?; let tree_handle = task::spawn(metadata_calculator.run(tree_pool, tree_stop_receiver)); - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let consistency_checker_handle = tokio::spawn( consistency_checker.run(stop_receiver.clone(), l1_batch_commit_data_generator), ); let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone())); + let fee_address_migration_handle = + task::spawn(state_keeper.run_fee_address_migration(connection_pool.clone())); let sk_handle = task::spawn(state_keeper.run()); - let fetcher_handle = tokio::spawn(fetcher.run()); let fee_params_fetcher_handle = tokio::spawn(fee_params_fetcher.clone().run(stop_receiver.clone())); @@ -322,12 +361,13 @@ async fn init_tasks( task_handles.extend(cache_update_handle); task_handles.extend([ sk_handle, + fee_address_migration_handle, fetcher_handle, updater_handle, tree_handle, + consistency_checker_handle, fee_params_fetcher_handle, ]); - task_handles.push(consistency_checker_handle); Ok((task_handles, stop_sender, healthcheck_handle, stop_receiver)) } @@ -350,6 +390,8 @@ async fn shutdown_components( struct Cli { #[arg(long)] revert_pending_l1_batch: bool, + #[arg(long)] + enable_consensus: bool, } #[tokio::main] @@ -380,9 +422,13 @@ async fn main() -> anyhow::Result<()> { tracing::info!("No sentry URL was provided"); } - let config = ExternalNodeConfig::collect() + let mut config = ExternalNodeConfig::collect() .await .context("Failed to load external node config")?; + if opt.enable_consensus { + config.consensus = + Some(config::read_consensus_config().context("read_consensus_config()")?); + } let main_node_url = config .required .main_node_url() @@ -476,7 +522,7 @@ async fn main() -> anyhow::Result<()> { let reorg_detector_last_correct_batch = reorg_detector_result.and_then(|result| match result { Ok(Ok(last_correct_batch)) => last_correct_batch, Ok(Err(err)) => { - tracing::error!("Reorg detector failed: {err}"); + tracing::error!("Reorg detector failed: {err:#}"); None } Err(err) => { diff --git a/core/bin/merkle_tree_consistency_checker/src/main.rs b/core/bin/merkle_tree_consistency_checker/src/main.rs index 60a4feb750e..6d64729f9b6 100644 --- a/core/bin/merkle_tree_consistency_checker/src/main.rs +++ b/core/bin/merkle_tree_consistency_checker/src/main.rs @@ -27,7 
+27,7 @@ impl Cli { let db_path = &config.merkle_tree.path; tracing::info!("Verifying consistency of Merkle tree at {db_path}"); let start = Instant::now(); - let db = RocksDB::new(Path::new(db_path)); + let db = RocksDB::new(Path::new(db_path)).unwrap(); let tree = ZkSyncTree::new_lightweight(db.into()); let l1_batch_number = if let Some(number) = self.l1_batch { diff --git a/core/bin/snapshots_creator/Cargo.toml b/core/bin/snapshots_creator/Cargo.toml index fe18233e7d9..f1882dd2bb7 100644 --- a/core/bin/snapshots_creator/Cargo.toml +++ b/core/bin/snapshots_creator/Cargo.toml @@ -16,7 +16,6 @@ prometheus_exporter = { path = "../../lib/prometheus_exporter" } zksync_config = { path = "../../lib/config" } zksync_dal = { path = "../../lib/dal" } zksync_env_config = { path = "../../lib/env_config" } -zksync_utils = { path = "../../lib/utils" } zksync_types = { path = "../../lib/types" } zksync_object_store = { path = "../../lib/object_store" } vlog = { path = "../../lib/vlog" } diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs deleted file mode 100644 index 047a6a23d24..00000000000 --- a/core/bin/snapshots_creator/src/chunking.rs +++ /dev/null @@ -1,69 +0,0 @@ -use std::ops; - -use zksync_types::{H256, U256}; -use zksync_utils::u256_to_h256; - -pub(crate) fn get_chunk_hashed_keys_range( - chunk_id: u64, - chunk_count: u64, -) -> ops::RangeInclusive<H256> { - assert!(chunk_count > 0); - let mut stride = U256::MAX / chunk_count; - let stride_minus_one = if stride < U256::MAX { - stride += U256::one(); - stride - 1 - } else { - stride // `stride` is really 1 << 256 == U256::MAX + 1 - }; - - let start = stride * chunk_id; - let (mut end, is_overflow) = stride_minus_one.overflowing_add(start); - if is_overflow { - end = U256::MAX; - } - u256_to_h256(start)..=u256_to_h256(end) -} - -#[cfg(test)] -mod tests { - use zksync_utils::h256_to_u256; - - use super::*; - - #[test] - fn chunking_is_correct() { - for chunks_count in (2..10).chain([42, 256, 500, 1_001, 12_345]) { - println!("Testing chunks_count={chunks_count}"); - let chunked_ranges: Vec<_> = (0..chunks_count) - .map(|chunk_id| get_chunk_hashed_keys_range(chunk_id, chunks_count)) - .collect(); - - assert_eq!(*chunked_ranges[0].start(), H256::zero()); - assert_eq!( - *chunked_ranges.last().unwrap().end(), - H256::repeat_byte(0xff) - ); - for window in chunked_ranges.windows(2) { - let [prev_chunk, next_chunk] = window else { - unreachable!(); - }; - assert_eq!( - h256_to_u256(*prev_chunk.end()) + 1, - h256_to_u256(*next_chunk.start()) - ); - } - - let chunk_sizes: Vec<_> = chunked_ranges - .iter() - .map(|chunk| h256_to_u256(*chunk.end()) - h256_to_u256(*chunk.start()) + 1) - .collect(); - - // Check that chunk sizes are roughly equal. Due to how chunks are constructed, the sizes - // of all chunks except for the last one are the same, and the last chunk size may be slightly smaller; - // the difference in sizes is lesser than the number of chunks. 
- let min_chunk_size = chunk_sizes.iter().copied().min().unwrap(); - let max_chunk_size = chunk_sizes.iter().copied().max().unwrap(); - assert!(max_chunk_size - min_chunk_size < U256::from(chunks_count)); - } - } -} diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index 51a14ce2cca..2d2ce2335b9 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -9,19 +9,15 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_object_store::ObjectStore; use zksync_types::{ snapshots::{ - SnapshotFactoryDependencies, SnapshotMetadata, SnapshotStorageLogsChunk, - SnapshotStorageLogsStorageKey, + uniform_hashed_keys_chunk, SnapshotFactoryDependencies, SnapshotFactoryDependency, + SnapshotMetadata, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, }, L1BatchNumber, MiniblockNumber, }; -use zksync_utils::ceil_div; +use crate::metrics::{FactoryDepsStage, StorageChunkStage, METRICS}; #[cfg(test)] use crate::tests::HandleEvent; -use crate::{ - chunking::get_chunk_hashed_keys_range, - metrics::{FactoryDepsStage, StorageChunkStage, METRICS}, -}; /// Encapsulates progress of creating a particular storage snapshot. #[derive(Debug)] @@ -91,7 +87,7 @@ impl SnapshotCreator { return Ok(()); } - let hashed_keys_range = get_chunk_hashed_keys_range(chunk_id, chunk_count); + let hashed_keys_range = uniform_hashed_keys_chunk(chunk_id, chunk_count); let mut conn = self.connect_to_replica().await?; let latency = @@ -166,6 +162,12 @@ impl SnapshotCreator { tracing::info!("Saving factory deps to GCS..."); let latency = METRICS.factory_deps_processing_duration[&FactoryDepsStage::SaveToGcs].start(); + let factory_deps = factory_deps + .into_iter() + .map(|(_, bytecode)| SnapshotFactoryDependency { + bytecode: bytecode.into(), + }) + .collect(); let factory_deps = SnapshotFactoryDependencies { factory_deps }; let filename = self .blob_store @@ -216,8 +218,9 @@ impl SnapshotCreator { .await?; let chunk_size = config.storage_logs_chunk_size; // We force the minimum number of chunks to avoid situations where only one chunk is created in tests. 
- let chunk_count = - ceil_div(distinct_storage_logs_keys_count, chunk_size).max(min_chunk_count); + let chunk_count = distinct_storage_logs_keys_count + .div_ceil(chunk_size) + .max(min_chunk_count); tracing::info!( "Selected storage logs chunking for L1 batch {l1_batch_number}: \ diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs index 0571500615b..c9a52fe0d74 100644 --- a/core/bin/snapshots_creator/src/main.rs +++ b/core/bin/snapshots_creator/src/main.rs @@ -19,7 +19,6 @@ use zksync_object_store::ObjectStoreFactory; use crate::creator::SnapshotCreator; -mod chunking; mod creator; mod metrics; #[cfg(test)] diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index d061b090670..240857843f5 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -13,7 +13,7 @@ use rand::{thread_rng, Rng}; use zksync_dal::StorageProcessor; use zksync_object_store::ObjectStore; use zksync_types::{ - block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, + block::{L1BatchHeader, MiniblockHeader}, snapshots::{ SnapshotFactoryDependencies, SnapshotFactoryDependency, SnapshotStorageLog, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, @@ -142,6 +142,7 @@ async fn create_miniblock( hash: H256::from_low_u64_be(u64::from(miniblock_number.0)), l1_tx_count: 0, l2_tx_count: 0, + fee_account_address: Address::repeat_byte(1), base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), @@ -164,16 +165,9 @@ async fn create_l1_batch( l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { - let mut header = L1BatchHeader::new( - l1_batch_number, - 0, - Address::default(), - Default::default(), - Default::default(), - ); - header.is_finished = true; + let header = L1BatchHeader::new(l1_batch_number, 0, Default::default(), Default::default()); conn.blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&header) .await .unwrap(); conn.blocks_dal() @@ -205,7 +199,8 @@ async fn prepare_postgres( let factory_deps = gen_factory_deps(rng, 10); conn.storage_dal() .insert_factory_deps(MiniblockNumber(block_number), &factory_deps) - .await; + .await + .unwrap(); // Since we generate `logs` randomly, all of them are written the first time. 
create_l1_batch(conn, L1BatchNumber(block_number), &logs).await; @@ -223,7 +218,8 @@ async fn prepare_postgres( let expected_l1_batches_and_indices = conn .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) - .await; + .await + .unwrap(); let logs = logs.into_iter().map(|log| { let (l1_batch_number_of_initial_write, enumeration_index) = diff --git a/core/bin/storage_logs_dedup_migration/src/main.rs b/core/bin/storage_logs_dedup_migration/src/main.rs index 179685c4002..fcaebafc0fe 100644 --- a/core/bin/storage_logs_dedup_migration/src/main.rs +++ b/core/bin/storage_logs_dedup_migration/src/main.rs @@ -108,7 +108,8 @@ async fn main() { let values_for_missing_keys: HashMap<_, _> = connection .storage_logs_dal() .get_storage_values(&missing_keys, miniblock_number - 1) - .await; + .await + .expect("failed getting storage values for missing keys"); in_memory_prev_values_iter .chain( diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 993a995e619..75b47b6aa05 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -10,7 +10,7 @@ use multivm::{ BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, Vm, VmTracer, ZkSyncVmState, }, - zk_evm_1_4_1::aux_structures::Timestamp, + zk_evm_latest::aux_structures::Timestamp, }; use once_cell::sync::Lazy; use zksync_contracts::{ diff --git a/core/bin/zksync_server/Cargo.toml b/core/bin/zksync_server/Cargo.toml index a1de2ef057a..33835ba0952 100644 --- a/core/bin/zksync_server/Cargo.toml +++ b/core/bin/zksync_server/Cargo.toml @@ -17,10 +17,18 @@ zksync_storage = { path = "../../lib/storage" } zksync_utils = { path = "../../lib/utils" } zksync_types = { path = "../../lib/types" } zksync_core = { path = "../../lib/zksync_core" } + +# Consensus dependencies +zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } + vlog = { path = "../../lib/vlog" } anyhow = "1.0" clap = { version = "4.2.4", features = ["derive"] } +serde_json = "1.0" tokio = { version = "1", features = ["full"] } tracing = "0.1" futures = "0.3" diff --git a/core/bin/zksync_server/src/config.rs b/core/bin/zksync_server/src/config.rs new file mode 100644 index 00000000000..bdd06a80eef --- /dev/null +++ b/core/bin/zksync_server/src/config.rs @@ -0,0 +1,17 @@ +use anyhow::Context as _; +use zksync_consensus_roles::{node, validator}; +use zksync_core::consensus; + +pub(crate) fn read_consensus_config() -> anyhow::Result<consensus::MainNodeConfig> { + let path = std::env::var("CONSENSUS_CONFIG_PATH").context("CONSENSUS_CONFIG_PATH")?; + let cfg = std::fs::read_to_string(&path).context(path)?; + let cfg: consensus::config::Config = + consensus::config::decode_json(&cfg).context("failed decoding JSON")?; + let validator_key: validator::SecretKey = + consensus::config::read_secret("CONSENSUS_VALIDATOR_KEY")?; + let node_key: node::SecretKey = 
consensus::config::read_secret("CONSENSUS_NODE_KEY")?; + Ok(consensus::MainNodeConfig { + executor: cfg.executor_config(node_key), + validator: cfg.validator_config(validator_key), + }) +} diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index ffaa08ea090..f9f18a11c05 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -25,6 +25,8 @@ use zksync_env_config::FromEnv; use zksync_storage::RocksDB; use zksync_utils::wait_for_tasks::wait_for_tasks; +mod config; + #[cfg(not(target_env = "msvc"))] #[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; @@ -93,7 +95,7 @@ async fn main() -> anyhow::Result<()> { // Right now, we are trying to deserialize all the configs that may be needed by `zksync_core`. // "May" is the key word here, since some configs are only used by certain component configuration, // hence we are using `Option`s. - let configs: TempConfigStore = TempConfigStore { + let mut configs: TempConfigStore = TempConfigStore { postgres_config: PostgresConfig::from_env().ok(), health_check_config: HealthCheckConfig::from_env().ok(), merkle_tree_api_config: MerkleTreeApiConfig::from_env().ok(), @@ -105,7 +107,7 @@ async fn main() -> anyhow::Result<()> { state_keeper_config: StateKeeperConfig::from_env().ok(), house_keeper_config: HouseKeeperConfig::from_env().ok(), fri_proof_compressor_config: FriProofCompressorConfig::from_env().ok(), - fri_prover_config: FriProverConfig::from_env().ok(), + fri_prover_config: Some(FriProverConfig::from_env().context("fri_prover_config")?), fri_prover_group_config: FriProverGroupConfig::from_env().ok(), fri_witness_generator_config: FriWitnessGeneratorConfig::from_env().ok(), prometheus_config: PrometheusConfig::from_env().ok(), @@ -119,8 +121,14 @@ async fn main() -> anyhow::Result<()> { eth_watch_config: ETHWatchConfig::from_env().ok(), gas_adjuster_config: GasAdjusterConfig::from_env().ok(), object_store_config: ObjectStoreConfig::from_env().ok(), + consensus_config: None, }; + if opt.components.0.contains(&Component::Consensus) { + configs.consensus_config = + Some(config::read_consensus_config().context("read_consensus_config()")?); + } + let postgres_config = configs.postgres_config.clone().context("PostgresConfig")?; if opt.genesis || is_genesis_needed(&postgres_config).await { diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 47444c6675f..5aaba2ece6e 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -1,8 +1,12 @@ //! Basic types for FRI prover. +// TODO (PLA-773): Should be moved to the prover workspace. + +use std::{convert::TryFrom, str::FromStr}; + use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Clone, Eq, Hash, PartialEq)] +#[derive(Debug, Deserialize, Serialize, Clone, Eq, Hash, PartialEq, PartialOrd, Ord)] pub struct CircuitIdRoundTuple { pub circuit_id: u8, pub aggregation_round: u8, @@ -16,3 +20,82 @@ impl CircuitIdRoundTuple { } } } + +/// Represents the sequential number of the proof aggregation round. 
+/// Mostly used as the value stored in the `aggregation_round` column of the `prover_jobs` table. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub enum AggregationRound { + BasicCircuits = 0, + LeafAggregation = 1, + NodeAggregation = 2, + Scheduler = 3, +} + +impl From<u8> for AggregationRound { + fn from(item: u8) -> Self { + match item { + 0 => AggregationRound::BasicCircuits, + 1 => AggregationRound::LeafAggregation, + 2 => AggregationRound::NodeAggregation, + 3 => AggregationRound::Scheduler, + _ => panic!("Invalid round"), + } + } +} + +impl AggregationRound { + pub fn next(&self) -> Option<AggregationRound> { + match self { + AggregationRound::BasicCircuits => Some(AggregationRound::LeafAggregation), + AggregationRound::LeafAggregation => Some(AggregationRound::NodeAggregation), + AggregationRound::NodeAggregation => Some(AggregationRound::Scheduler), + AggregationRound::Scheduler => None, + } + } +} + +impl std::fmt::Display for AggregationRound { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str(match self { + Self::BasicCircuits => "basic_circuits", + Self::LeafAggregation => "leaf_aggregation", + Self::NodeAggregation => "node_aggregation", + Self::Scheduler => "scheduler", + }) + } +} + +impl FromStr for AggregationRound { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "basic_circuits" => Ok(AggregationRound::BasicCircuits), + "leaf_aggregation" => Ok(AggregationRound::LeafAggregation), + "node_aggregation" => Ok(AggregationRound::NodeAggregation), + "scheduler" => Ok(AggregationRound::Scheduler), + other => Err(format!( + "{} is not a valid round name for witness generation", + other + )), + } + } +} + +impl TryFrom<i32> for AggregationRound { + type Error = (); + + fn try_from(v: i32) -> Result<Self, Self::Error> { + match v { + x if x == AggregationRound::BasicCircuits as i32 => Ok(AggregationRound::BasicCircuits), + x if x == AggregationRound::LeafAggregation as i32 => { + Ok(AggregationRound::LeafAggregation) + } + x if x == AggregationRound::NodeAggregation as i32 => { + Ok(AggregationRound::NodeAggregation) + } + x if x == AggregationRound::Scheduler as i32 => Ok(AggregationRound::Scheduler), + _ => Err(()), + } + } +} diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index 4c84f857a29..db602ca9656 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -62,7 +62,7 @@ impl CircuitBreakerChecker { return circuit_breaker_sender .send(error) .ok() - .context("failed to send circuit breaker messsage"); + .context("failed to send circuit breaker message"); } tokio::time::sleep(self.sync_interval).await; } diff --git a/core/lib/commitment_utils/Cargo.toml b/core/lib/commitment_utils/Cargo.toml index bb286d6216a..d74ff3c9572 100644 --- a/core/lib/commitment_utils/Cargo.toml +++ b/core/lib/commitment_utils/Cargo.toml @@ -12,5 +12,8 @@ categories = ["cryptography"] [dependencies] zksync_types = { path = "../../lib/types" } zksync_utils = { path = "../../lib/utils" } -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } +zkevm_test_harness_1_4_0 = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", package = "zkevm_test_harness" } +zkevm_test_harness_1_4_1 = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.1", package = "zkevm_test_harness" } +zk_evm_1_4_1 = { package = "zk_evm", git =
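The round ladder above is strictly linear; a small sketch of walking it with `next()` and of the Display/FromStr round-trip, relying only on the impls just shown:

    // basic_circuits -> leaf_aggregation -> node_aggregation -> scheduler
    let mut round = AggregationRound::BasicCircuits;
    while let Some(next) = round.next() {
        round = next;
    }
    assert_eq!(round, AggregationRound::Scheduler);
    // String names round-trip between Display and FromStr.
    assert_eq!(
        AggregationRound::Scheduler.to_string().parse::<AggregationRound>(),
        Ok(AggregationRound::Scheduler)
    );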
"https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } +zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } multivm = { path = "../../lib/multivm" } diff --git a/core/lib/commitment_utils/src/lib.rs b/core/lib/commitment_utils/src/lib.rs index 3c2db1471cc..cfdfa23e81d 100644 --- a/core/lib/commitment_utils/src/lib.rs +++ b/core/lib/commitment_utils/src/lib.rs @@ -1,16 +1,39 @@ //! Utils for commitment calculation. use multivm::utils::get_used_bootloader_memory_bytes; -use zkevm_test_harness::witness::utils::{ - events_queue_commitment_fixed, initial_heap_content_commitment_fixed, +use zk_evm_1_3_3::{ + aux_structures::Timestamp as Timestamp_1_3_3, + zk_evm_abstractions::queries::LogQuery as LogQuery_1_3_3, }; -use zksync_types::{LogQuery, ProtocolVersionId, H256, U256}; +use zk_evm_1_4_1::{ + aux_structures::Timestamp as Timestamp_1_4_1, + zk_evm_abstractions::queries::LogQuery as LogQuery_1_4_1, +}; +use zksync_types::{zk_evm_types::LogQuery, ProtocolVersionId, VmVersion, H256, U256}; use zksync_utils::expand_memory_contents; pub fn events_queue_commitment( - events_queue: &Vec, + events_queue: &[LogQuery], protocol_version: ProtocolVersionId, ) -> Option { - (!protocol_version.is_pre_boojum()).then(|| H256(events_queue_commitment_fixed(events_queue))) + match VmVersion::from(protocol_version) { + VmVersion::VmBoojumIntegration => Some(H256( + zkevm_test_harness_1_4_0::witness::utils::events_queue_commitment_fixed( + &events_queue + .iter() + .map(|x| to_log_query_1_3_3(*x)) + .collect(), + ), + )), + VmVersion::Vm1_4_1 => Some(H256( + zkevm_test_harness_1_4_1::witness::utils::events_queue_commitment_fixed( + &events_queue + .iter() + .map(|x| to_log_query_1_4_1(*x)) + .collect(), + ), + )), + _ => None, + } } pub fn bootloader_initial_content_commitment( @@ -25,9 +48,50 @@ pub fn bootloader_initial_content_commitment( let full_bootloader_memory = expand_memory_contents(initial_bootloader_contents, expanded_memory_size); - let commitment = H256(initial_heap_content_commitment_fixed( - &full_bootloader_memory, - )); - Some(commitment) + match VmVersion::from(protocol_version) { + VmVersion::VmBoojumIntegration => Some(H256( + zkevm_test_harness_1_4_0::witness::utils::initial_heap_content_commitment_fixed( + &full_bootloader_memory, + ), + )), + VmVersion::Vm1_4_1 => Some(H256( + zkevm_test_harness_1_4_1::witness::utils::initial_heap_content_commitment_fixed( + &full_bootloader_memory, + ), + )), + _ => unreachable!(), + } +} + +fn to_log_query_1_3_3(log_query: LogQuery) -> LogQuery_1_3_3 { + LogQuery_1_3_3 { + timestamp: Timestamp_1_3_3(log_query.timestamp.0), + tx_number_in_block: log_query.tx_number_in_block, + aux_byte: log_query.aux_byte, + shard_id: log_query.shard_id, + address: log_query.address, + key: log_query.key, + read_value: log_query.read_value, + written_value: log_query.written_value, + rw_flag: log_query.rw_flag, + rollback: log_query.rollback, + is_service: log_query.is_service, + } +} + +fn to_log_query_1_4_1(log_query: LogQuery) -> LogQuery_1_4_1 { + LogQuery_1_4_1 { + timestamp: Timestamp_1_4_1(log_query.timestamp.0), + tx_number_in_block: log_query.tx_number_in_block, + aux_byte: log_query.aux_byte, + shard_id: log_query.shard_id, + address: log_query.address, + key: log_query.key, + read_value: log_query.read_value, + written_value: log_query.written_value, + rw_flag: log_query.rw_flag, + rollback: log_query.rollback, + is_service: log_query.is_service, + } } diff --git 
a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 2532e474ca2..ecbc781df65 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -13,4 +13,5 @@ categories = ["cryptography"] zksync_basic_types = { path = "../../lib/basic_types" } anyhow = "1.0" +rand = "0.8" serde = { version = "1.0", features = ["derive"] } diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index 578cd6be46a..acca253d402 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -123,7 +123,7 @@ impl DBConfig { /// Collection of different database URLs and general PostgreSQL options. /// All the entries are optional, since some components may only require a subset of them, /// and any component may have overrides. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct PostgresConfig { /// URL for the main (sequencer) database. pub master_url: Option<String>, diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index cd44daed17f..f5b43af7481 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -45,6 +45,7 @@ impl ETHSenderConfig { internal_enforced_l1_gas_price: None, poll_period: 5, max_l1_gas_price: None, + l1_gas_per_pubdata_byte: 17, }, } } @@ -135,6 +136,7 @@ pub struct GasAdjusterConfig { pub poll_period: u64, /// Max L1 gas price that is allowed to be used by the state keeper. pub max_l1_gas_price: Option<u64>, + pub l1_gas_per_pubdata_byte: u64, } impl GasAdjusterConfig { diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index b773efbd7df..e81e55b8a53 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -15,6 +15,7 @@ pub struct ProofDataHandlerConfig { pub protocol_version_loading_mode: ProtocolVersionLoadingMode, pub fri_protocol_version_id: u16, } + impl ProofDataHandlerConfig { pub fn proof_generation_timeout(&self) -> Duration { Duration::from_secs(self.proof_generation_timeout_in_secs as u64) diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index d139596b80b..cde1582a1a2 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -6,3 +6,4 @@ pub use crate::configs::{ }; pub mod configs; +pub mod testonly; diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs new file mode 100644 index 00000000000..b05b7c1b2bb --- /dev/null +++ b/core/lib/config/src/testonly.rs @@ -0,0 +1,719 @@ +use std::collections::HashSet; + +use rand::{distributions::Alphanumeric, Rng}; +use zksync_basic_types::{ + basic_fri_types::CircuitIdRoundTuple, network::Network, Address, L2ChainId, H256, +}; + +use crate::configs; + +/// Generator of random configs. +pub struct Gen<'a, R: Rng> { + /// Underlying RNG. + pub rng: &'a mut R, + /// Generate configs with only required fields. + pub required_only: bool, + /// Generate decimal fractions for f64 + /// to avoid rounding errors of decimal encodings.
+ pub decimal_fractions: bool, +} + +impl<'a, R: Rng> Gen<'a, R> { + pub fn gen<C: RandomConfig>(&mut self) -> C { + C::sample(self) + } +} + +pub trait RandomConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self; +} + +impl RandomConfig for String { + fn sample(g: &mut Gen<impl Rng>) -> Self { + let n = g.rng.gen_range(5..10); + g.rng + .sample_iter(&Alphanumeric) + .take(n) + .map(char::from) + .collect() + } +} + +impl<T: RandomConfig> RandomConfig for Option<T> { + fn sample(g: &mut Gen<impl Rng>) -> Self { + if g.required_only { + return None; + } + Some(g.gen()) + } +} + +impl<T: RandomConfig> RandomConfig for Vec<T> { + fn sample(g: &mut Gen<impl Rng>) -> Self { + if g.required_only { + return vec![]; + } + (0..g.rng.gen_range(5..10)).map(|_| g.gen()).collect() + } +} + +impl<T: RandomConfig + Eq + std::hash::Hash> RandomConfig for HashSet<T> { + fn sample(g: &mut Gen<impl Rng>) -> Self { + if g.required_only { + return HashSet::new(); + } + (0..g.rng.gen_range(5..10)).map(|_| g.gen()).collect() + } +} + +impl RandomConfig for bool { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for u8 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for u16 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for u32 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for u64 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for f64 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + if g.decimal_fractions { + const PRECISION: usize = 1000000; + return g.rng.gen_range(0..PRECISION) as f64 / PRECISION as f64; + } + g.rng.gen() + } +} + +impl RandomConfig for usize { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for std::num::NonZeroU32 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for Address { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for H256 { + fn sample(g: &mut Gen<impl Rng>) -> Self { + g.rng.gen() + } +} + +impl RandomConfig for Network { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..8) { + 0 => Self::Mainnet, + 1 => Self::Rinkeby, + 2 => Self::Ropsten, + 3 => Self::Goerli, + 4 => Self::Sepolia, + 5 => Self::Localhost, + 6 => Self::Unknown, + _ => Self::Test, + } + } +} + +impl RandomConfig for configs::chain::FeeModelVersion { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::V1, + _ => Self::V2, + } + } +} + +impl RandomConfig for configs::AlertsConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + sporadic_crypto_errors_substrs: g.gen(), + } + } +} + +impl RandomConfig for configs::ApiConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + web3_json_rpc: g.gen(), + contract_verification: g.gen(), + prometheus: g.gen(), + healthcheck: g.gen(), + merkle_tree: g.gen(), + } + } +} + +impl RandomConfig for configs::api::Web3JsonRpcConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + http_port: g.gen(), + http_url: g.gen(), + ws_port: g.gen(), + ws_url: g.gen(), + req_entities_limit: g.gen(), + filters_limit: g.gen(), + subscriptions_limit: g.gen(), + pubsub_polling_interval: g.gen(), + max_nonce_ahead: g.gen(), + gas_price_scale_factor: g.gen(), + request_timeout: g.gen(), + account_pks: g.gen(), + estimate_gas_scale_factor: g.gen(), + estimate_gas_acceptable_overestimation: g.gen(), + l1_to_l2_transactions_compatibility_mode: g.gen(), + max_tx_size: g.gen(), + vm_execution_cache_misses_limit: g.gen(), + vm_concurrency_limit: g.gen(), + factory_deps_cache_size_mb: g.gen(), + initial_writes_cache_size_mb: g.gen(), + latest_values_cache_size_mb: g.gen(), + fee_history_limit: g.gen(),
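Driving the generator above takes two lines; a sketch that relies on the `rand = "0.8"` dependency added earlier in this patch (`AlertsConfig` is just an example target type):

    let mut rng = rand::thread_rng();
    let mut g = Gen { rng: &mut rng, required_only: false, decimal_fractions: true };
    let alerts: configs::AlertsConfig = g.gen(); // every field sampled at random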
+ max_batch_request_size: g.gen(), + max_response_body_size_mb: g.gen(), + websocket_requests_per_minute_limit: g.gen(), + tree_api_url: g.gen(), + } + } +} + +impl RandomConfig for configs::api::HealthCheckConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { port: g.gen() } + } +} + +impl RandomConfig for configs::api::ContractVerificationApiConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + port: g.gen(), + url: g.gen(), + } + } +} + +impl RandomConfig for configs::api::MerkleTreeApiConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { port: g.gen() } + } +} + +impl RandomConfig for configs::PrometheusConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + listener_port: g.gen(), + pushgateway_url: g.gen(), + push_interval_ms: g.gen(), + } + } +} + +impl RandomConfig for configs::chain::NetworkConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + network: g.gen(), + zksync_network: g.gen(), + zksync_network_id: L2ChainId::max(), + } + } +} + +impl RandomConfig for configs::chain::L1BatchCommitDataGeneratorMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::Rollup, + _ => Self::Validium, + } + } +} + +impl RandomConfig for configs::chain::StateKeeperConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + transaction_slots: g.gen(), + block_commit_deadline_ms: g.gen(), + miniblock_commit_deadline_ms: g.gen(), + miniblock_seal_queue_capacity: g.gen(), + max_single_tx_gas: g.gen(), + max_allowed_l2_tx_gas_limit: g.gen(), + reject_tx_at_geometry_percentage: g.gen(), + reject_tx_at_eth_params_percentage: g.gen(), + reject_tx_at_gas_percentage: g.gen(), + close_block_at_geometry_percentage: g.gen(), + close_block_at_eth_params_percentage: g.gen(), + close_block_at_gas_percentage: g.gen(), + fee_account_addr: g.gen(), + minimal_l2_gas_price: g.gen(), + compute_overhead_part: g.gen(), + pubdata_overhead_part: g.gen(), + batch_overhead_l1_gas: g.gen(), + max_gas_per_batch: g.gen(), + max_pubdata_per_batch: g.gen(), + fee_model_version: g.gen(), + validation_computational_gas_limit: g.gen(), + save_call_traces: g.gen(), + virtual_blocks_interval: g.gen(), + virtual_blocks_per_miniblock: g.gen(), + upload_witness_inputs_to_gcs: g.gen(), + enum_index_migration_chunk_size: g.gen(), + l1_batch_commit_data_generator_mode: g.gen(), + } + } +} + +impl RandomConfig for configs::chain::OperationsManagerConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + delay_interval: g.gen(), + } + } +} + +impl RandomConfig for configs::chain::CircuitBreakerConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + sync_interval_ms: g.gen(), + http_req_max_retry_number: g.gen(), + http_req_retry_interval_sec: g.gen(), + replication_lag_limit_sec: g.gen(), + } + } +} + +impl RandomConfig for configs::chain::MempoolConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + sync_interval_ms: g.gen(), + sync_batch_size: g.gen(), + capacity: g.gen(), + stuck_tx_timeout: g.gen(), + remove_stuck_txs: g.gen(), + delay_interval: g.gen(), + } + } +} + +impl RandomConfig for configs::ContractVerifierConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + compilation_timeout: g.gen(), + polling_interval: g.gen(), + prometheus_port: g.gen(), + } + } +} + +impl RandomConfig for configs::contracts::ProverAtGenesis { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::Fri, + _ => Self::Old, + } + } +} + +impl RandomConfig for configs::ContractsConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + governance_addr: g.gen(), + mailbox_facet_addr: g.gen(), + executor_facet_addr:
g.gen(), + admin_facet_addr: g.gen(), + getters_facet_addr: g.gen(), + verifier_addr: g.gen(), + diamond_init_addr: g.gen(), + diamond_upgrade_init_addr: g.gen(), + diamond_proxy_addr: g.gen(), + validator_timelock_addr: g.gen(), + genesis_tx_hash: g.gen(), + l1_erc20_bridge_proxy_addr: g.gen(), + l1_erc20_bridge_impl_addr: g.gen(), + l2_erc20_bridge_addr: g.gen(), + l1_weth_bridge_proxy_addr: g.gen(), + l2_weth_bridge_addr: g.gen(), + l1_allow_list_addr: g.gen(), + l2_testnet_paymaster_addr: g.gen(), + recursion_scheduler_level_vk_hash: g.gen(), + recursion_node_level_vk_hash: g.gen(), + recursion_leaf_level_vk_hash: g.gen(), + recursion_circuits_set_vks_hash: g.gen(), + l1_multicall3_addr: g.gen(), + fri_recursion_scheduler_level_vk_hash: g.gen(), + fri_recursion_node_level_vk_hash: g.gen(), + fri_recursion_leaf_level_vk_hash: g.gen(), + prover_at_genesis: g.gen(), + snark_wrapper_vk_hash: g.gen(), + } + } +} + +impl RandomConfig for configs::database::MerkleTreeMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::Full, + _ => Self::Lightweight, + } + } +} + +impl RandomConfig for configs::database::MerkleTreeConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + path: g.gen(), + mode: g.gen(), + multi_get_chunk_size: g.gen(), + block_cache_size_mb: g.gen(), + memtable_capacity_mb: g.gen(), + stalled_writes_timeout_sec: g.gen(), + max_l1_batches_per_iter: g.gen(), + } + } +} + +impl RandomConfig for configs::database::DBConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + state_keeper_db_path: g.gen(), + merkle_tree: g.gen(), + } + } +} + +impl RandomConfig for configs::database::PostgresConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + master_url: g.gen(), + replica_url: g.gen(), + prover_url: g.gen(), + max_connections: g.gen(), + statement_timeout_sec: g.gen(), + } + } +} + +impl RandomConfig for configs::ETHClientConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + chain_id: g.gen(), + web3_url: g.gen(), + } + } +} + +impl RandomConfig for configs::ETHSenderConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + sender: g.gen(), + gas_adjuster: g.gen(), + } + } +} + +impl RandomConfig for configs::eth_sender::ProofSendingMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..3) { + 0 => Self::OnlyRealProofs, + 1 => Self::OnlySampledProofs, + _ => Self::SkipEveryProof, + } + } +} + +impl RandomConfig for configs::eth_sender::ProofLoadingMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::OldProofFromDb, + _ => Self::FriProofFromGcs, + } + } +} + +impl RandomConfig for configs::eth_sender::SenderConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + aggregated_proof_sizes: g.gen(), + wait_confirmations: g.gen(), + tx_poll_period: g.gen(), + aggregate_tx_poll_period: g.gen(), + max_txs_in_flight: g.gen(), + proof_sending_mode: g.gen(), + max_aggregated_tx_gas: g.gen(), + max_eth_tx_data_size: g.gen(), + max_aggregated_blocks_to_commit: g.gen(), + max_aggregated_blocks_to_execute: g.gen(), + aggregated_block_commit_deadline: g.gen(), + aggregated_block_prove_deadline: g.gen(), + aggregated_block_execute_deadline: g.gen(), + timestamp_criteria_max_allowed_lag: g.gen(), + l1_batch_min_age_before_execute_seconds: g.gen(), + max_acceptable_priority_fee_in_gwei: g.gen(), + proof_loading_mode: g.gen(), + } + } +} + +impl RandomConfig for configs::eth_sender::GasAdjusterConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + default_priority_fee_per_gas: g.gen(), + max_base_fee_samples: g.gen(), +
pricing_formula_parameter_a: g.gen(), + pricing_formula_parameter_b: g.gen(), + internal_l1_pricing_multiplier: g.gen(), + internal_enforced_l1_gas_price: g.gen(), + poll_period: g.gen(), + max_l1_gas_price: g.gen(), + l1_gas_per_pubdata_byte: g.gen(), + } + } +} + +impl RandomConfig for configs::ETHWatchConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + confirmations_for_eth_event: g.gen(), + eth_node_poll_interval: g.gen(), + } + } +} + +impl RandomConfig for configs::FriProofCompressorConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + compression_mode: g.gen(), + prometheus_listener_port: g.gen(), + prometheus_pushgateway_url: g.gen(), + prometheus_push_interval_ms: g.gen(), + generation_timeout_in_secs: g.gen(), + max_attempts: g.gen(), + universal_setup_path: g.gen(), + universal_setup_download_url: g.gen(), + verify_wrapper_proof: g.gen(), + } + } +} + +impl RandomConfig for configs::fri_prover::SetupLoadMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::FromDisk, + _ => Self::FromMemory, + } + } +} + +impl RandomConfig for configs::FriProverConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + setup_data_path: g.gen(), + prometheus_port: g.gen(), + max_attempts: g.gen(), + generation_timeout_in_secs: g.gen(), + base_layer_circuit_ids_to_be_verified: g.gen(), + recursive_layer_circuit_ids_to_be_verified: g.gen(), + setup_load_mode: g.gen(), + specialized_group_id: g.gen(), + witness_vector_generator_thread_count: g.gen(), + queue_capacity: g.gen(), + witness_vector_receiver_port: g.gen(), + zone_read_url: g.gen(), + shall_save_to_public_bucket: g.gen(), + } + } +} + +impl RandomConfig for configs::FriProverGatewayConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + api_url: g.gen(), + api_poll_duration_secs: g.gen(), + prometheus_listener_port: g.gen(), + prometheus_pushgateway_url: g.gen(), + prometheus_push_interval_ms: g.gen(), + } + } +} + +impl RandomConfig for CircuitIdRoundTuple { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + circuit_id: g.gen(), + aggregation_round: g.gen(), + } + } +} + +impl RandomConfig for configs::fri_prover_group::FriProverGroupConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + group_0: g.gen(), + group_1: g.gen(), + group_2: g.gen(), + group_3: g.gen(), + group_4: g.gen(), + group_5: g.gen(), + group_6: g.gen(), + group_7: g.gen(), + group_8: g.gen(), + group_9: g.gen(), + group_10: g.gen(), + group_11: g.gen(), + group_12: g.gen(), + } + } +} + +impl RandomConfig for configs::FriWitnessGeneratorConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + generation_timeout_in_secs: g.gen(), + max_attempts: g.gen(), + blocks_proving_percentage: g.gen(), + dump_arguments_for_blocks: g.gen(), + last_l1_batch_to_process: g.gen(), + force_process_block: g.gen(), + shall_save_to_public_bucket: g.gen(), + } + } +} + +impl RandomConfig for configs::FriWitnessVectorGeneratorConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + max_prover_reservation_duration_in_secs: g.gen(), + prover_instance_wait_timeout_in_secs: g.gen(), + prover_instance_poll_time_in_milli_secs: g.gen(), + prometheus_listener_port: g.gen(), + prometheus_pushgateway_url: g.gen(), + prometheus_push_interval_ms: g.gen(), + specialized_group_id: g.gen(), + } + } +} + +impl RandomConfig for configs::house_keeper::HouseKeeperConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + l1_batch_metrics_reporting_interval_ms: g.gen(), + gpu_prover_queue_reporting_interval_ms: g.gen(), + prover_job_retrying_interval_ms: g.gen(), +
prover_stats_reporting_interval_ms: g.gen(), + witness_job_moving_interval_ms: g.gen(), + witness_generator_stats_reporting_interval_ms: g.gen(), + fri_witness_job_moving_interval_ms: g.gen(), + fri_prover_job_retrying_interval_ms: g.gen(), + fri_witness_generator_job_retrying_interval_ms: g.gen(), + prover_db_pool_size: g.gen(), + fri_prover_stats_reporting_interval_ms: g.gen(), + fri_proof_compressor_job_retrying_interval_ms: g.gen(), + fri_proof_compressor_stats_reporting_interval_ms: g.gen(), + } + } +} + +impl RandomConfig for configs::object_store::ObjectStoreMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..4) { + 0 => Self::GCS, + 1 => Self::GCSWithCredentialFile, + 2 => Self::FileBacked, + _ => Self::GCSAnonymousReadOnly, + } + } +} + +impl RandomConfig for configs::ObjectStoreConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + bucket_base_url: g.gen(), + mode: g.gen(), + file_backed_base_path: g.gen(), + gcs_credential_file_path: g.gen(), + max_retries: g.gen(), + } + } +} + +impl RandomConfig for configs::proof_data_handler::ProtocolVersionLoadingMode { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..2) { + 0 => Self::FromDb, + _ => Self::FromEnvVar, + } + } +} + +impl RandomConfig for configs::ProofDataHandlerConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + http_port: g.gen(), + proof_generation_timeout_in_secs: g.gen(), + protocol_version_loading_mode: g.gen(), + fri_protocol_version_id: g.gen(), + } + } +} + +impl RandomConfig for configs::SnapshotsCreatorConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + storage_logs_chunk_size: g.gen(), + concurrent_queries_count: g.gen(), + } + } +} + +impl RandomConfig for configs::witness_generator::BasicWitnessGeneratorDataSource { + fn sample(g: &mut Gen<impl Rng>) -> Self { + match g.rng.gen_range(0..3) { + 0 => Self::FromPostgres, + 1 => Self::FromPostgresShadowBlob, + _ => Self::FromBlob, + } + } +} + +impl RandomConfig for configs::WitnessGeneratorConfig { + fn sample(g: &mut Gen<impl Rng>) -> Self { + Self { + generation_timeout_in_secs: g.gen(), + initial_setup_key_path: g.gen(), + key_download_url: g.gen(), + max_attempts: g.gen(), + blocks_proving_percentage: g.gen(), + dump_arguments_for_blocks: g.gen(), + last_l1_batch_to_process: g.gen(), + data_source: g.gen(), + } + } +} diff --git a/core/lib/constants/Cargo.toml b/core/lib/constants/Cargo.toml index 268b14ad19b..36483d95bee 100644 --- a/core/lib/constants/Cargo.toml +++ b/core/lib/constants/Cargo.toml @@ -13,6 +13,4 @@ categories = ["cryptography"] zksync_basic_types = { path = "../../lib/basic_types" } zksync_utils = { path = "../../lib/utils" } -anyhow = "1.0" -num = "0.3.1" once_cell = "1.13.0" diff --git a/core/lib/constants/src/crypto.rs b/core/lib/constants/src/crypto.rs index 8b97af9d237..ead2cc31837 100644 --- a/core/lib/constants/src/crypto.rs +++ b/core/lib/constants/src/crypto.rs @@ -1,28 +1,10 @@ -use num::BigUint; -use once_cell::sync::Lazy; - pub const ZKPORTER_IS_AVAILABLE: bool = false; /// Depth of the account tree. pub const ROOT_TREE_DEPTH: usize = 256; -/// Cost of 1 byte of calldata in bytes. -// TODO (SMA-1609): Double check this value. -// TODO: possibly remove this value. -pub const GAS_PER_PUBDATA_BYTE: u32 = 16; - -/// Maximum amount of bytes in one packed write storage slot. -/// Calculated as `(len(hash) + 1) + len(u256)` -// TODO (SMA-1609): Double check this value. -pub const MAX_BYTES_PER_PACKED_SLOT: u64 = 65; - -/// Amount of gas required to publish one slot in pubdata.
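For the record, the lazily computed `GAS_PER_SLOT` deleted just below was a fixed product of the two constants removed above, 65 bytes per packed slot times 16 gas per pubdata byte, i.e. 1040 gas; an equivalent plain-const sketch (illustrative only, since all three constants leave the crate in this patch):

    const GAS_PER_SLOT: u64 = MAX_BYTES_PER_PACKED_SLOT * GAS_PER_PUBDATA_BYTE as u64; // 65 * 16 = 1040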
-pub static GAS_PER_SLOT: Lazy<BigUint> = - Lazy::new(|| BigUint::from(MAX_BYTES_PER_PACKED_SLOT) * BigUint::from(GAS_PER_PUBDATA_BYTE)); pub const MAX_NEW_FACTORY_DEPS: usize = 32; -pub const PAD_MSG_BEFORE_HASH_BITS_LEN: usize = 736; - /// To avoid DDoS we limit the size of transactions. /// TODO(X): remove this as a constant and introduce a config. pub const MAX_ENCODED_TX_SIZE: usize = 1 << 24; diff --git a/core/lib/constants/src/ethereum.rs b/core/lib/constants/src/ethereum.rs index d9a137c7c22..cd44b0486c5 100644 --- a/core/lib/constants/src/ethereum.rs +++ b/core/lib/constants/src/ethereum.rs @@ -3,7 +3,7 @@ use zksync_basic_types::Address; /// Priority op should be executed for this number of eth blocks. pub const PRIORITY_EXPIRATION: u64 = 50000; pub const MAX_L1_TRANSACTION_GAS_LIMIT: u64 = 300000; -pub static ETHEREUM_ADDRESS: Address = Address::zero(); +pub const ETHEREUM_ADDRESS: Address = Address::zero(); /// The maximum number of pubdata per L1 batch. This limit is due to the fact that the Ethereum /// nodes do not accept transactions that have more than 128kb of pubdata. diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 3ed725baa7d..f6bfd9c1e4a 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -33,10 +33,6 @@ const MULTICALL3_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/cache/solpp-generated-contracts/dev-contracts/Multicall3.sol/Multicall3.json"; const VERIFIER_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/cache/solpp-generated-contracts/zksync/Verifier.sol/Verifier.json"; -const IERC20_CONTRACT_FILE: &str = - "contracts/l1-contracts/artifacts/cache/solpp-generated-contracts/common/interfaces/IERC20.sol/IERC20.json"; -const FAIL_ON_RECEIVE_CONTRACT_FILE: &str = - "contracts/l1-contracts/artifacts/cache/solpp-generated-contracts/zksync/dev-contracts/FailOnReceive.sol/FailOnReceive.json"; const L2_BRIDGE_CONTRACT_FILE: &str = "contracts/l2-contracts/artifacts-zk/contracts-preprocessed/bridge/interfaces/IL2Bridge.sol/IL2Bridge.json"; const LOADNEXT_CONTRACT_FILE: &str = @@ -53,7 +49,7 @@ fn read_file_to_json_value(path: impl AsRef<Path>) -> serde_json::Value { .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e)) } -pub fn load_contract_if_present<P: AsRef<Path> + std::fmt::Debug>(path: P) -> Option<Contract> { +fn load_contract_if_present<P: AsRef<Path> + std::fmt::Debug>(path: P) -> Option<Contract> { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); let path = Path::new(&zksync_home).join(path); path.exists().then(|| { @@ -75,13 +71,6 @@ pub fn load_sys_contract(contract_name: &str) -> Contract { )) } -pub fn read_contract_abi(path: impl AsRef<Path>) -> String { - read_file_to_json_value(path)["abi"] - .as_str() - .expect("Failed to parse abi") - .to_string() -} - pub fn governance_contract() -> Contract { load_contract_if_present(GOVERNANCE_CONTRACT_FILE).expect("Governance contract not found") } @@ -94,10 +83,6 @@ pub fn multicall_contract() -> Contract { load_contract(MULTICALL3_CONTRACT_FILE) } -pub fn erc20_contract() -> Contract { - load_contract(IERC20_CONTRACT_FILE) -} - pub fn l2_bridge_contract() -> Contract { load_contract(L2_BRIDGE_CONTRACT_FILE) } @@ -129,20 +114,10 @@ pub fn get_loadnext_contract() -> TestContract { } // Returns loadnext contract and its factory dependencies -pub fn loadnext_contract() -> Contract { +fn loadnext_contract() -> Contract { load_contract("etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/LoadnextContract.json") } -pub fn
loadnext_simple_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/Foo.json", - ) -} - -pub fn fail_on_receive_contract() -> Contract { - load_contract(FAIL_ON_RECEIVE_CONTRACT_FILE) -} - pub fn deployer_contract() -> Contract { load_sys_contract("ContractDeployer") } @@ -151,22 +126,15 @@ pub fn l1_messenger_contract() -> Contract { load_sys_contract("L1Messenger") } -pub fn eth_contract() -> Contract { - load_sys_contract("L2EthToken") -} - -pub fn known_codes_contract() -> Contract { - load_sys_contract("KnownCodesStorage") -} - /// Reads bytecode from the path RELATIVE to the ZKSYNC_HOME environment variable. pub fn read_bytecode(relative_path: impl AsRef<Path>) -> Vec<u8> { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); let artifact_path = Path::new(&zksync_home).join(relative_path); read_bytecode_from_path(artifact_path) } + /// Reads bytecode from a given path. -pub fn read_bytecode_from_path(artifact_path: PathBuf) -> Vec<u8> { +fn read_bytecode_from_path(artifact_path: PathBuf) -> Vec<u8> { let artifact = read_file_to_json_value(artifact_path.clone()); let bytecode = artifact["bytecode"] @@ -179,15 +147,11 @@ pub fn read_bytecode_from_path(artifact_path: PathBuf) -> Vec<u8> { .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err)) } -pub fn default_erc20_bytecode() -> Vec<u8> { - read_bytecode("etc/ERC20/artifacts-zk/contracts/ZkSyncERC20.sol/ZkSyncERC20.json") -} - pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLanguage) -> Vec<u8> { DEFAULT_SYSTEM_CONTRACTS_REPO.read_sys_contract_bytecode(directory, name, lang) } -pub static DEFAULT_SYSTEM_CONTRACTS_REPO: Lazy<SystemContractsRepo> = +static DEFAULT_SYSTEM_CONTRACTS_REPO: Lazy<SystemContractsRepo> = Lazy::new(SystemContractsRepo::from_env); /// Structure representing a system contract repository - that allows @@ -207,6 +171,7 @@ impl SystemContractsRepo { root: zksync_home.join("contracts/system-contracts"), } } + pub fn read_sys_contract_bytecode( &self, directory: &str, @@ -233,28 +198,14 @@ pub fn read_bootloader_code(bootloader_type: &str) -> Vec<u8> { )) } -pub fn read_proved_batch_bootloader_bytecode() -> Vec<u8> { +fn read_proved_batch_bootloader_bytecode() -> Vec<u8> { read_bootloader_code("proved_batch") } -pub fn read_playground_batch_bootloader_bytecode() -> Vec<u8> { +fn read_playground_batch_bootloader_bytecode() -> Vec<u8> { read_bootloader_code("playground_batch") } -pub fn get_loadnext_test_contract_path(file_name: &str, contract_name: &str) -> String { - format!( - "core/tests/loadnext/test-contracts/loadnext_contract/artifacts/loadnext_contract.sol/{}.sol:{}.abi", - file_name, contract_name - ) -} - -pub fn get_loadnext_test_contract_bytecode(file_name: &str, contract_name: &str) -> String { - format!( - "core/tests/loadnext/test-contracts/loadnext_contract/artifacts/loadnext_contract.sol/{}.sol:{}.zbin", - file_name, contract_name - ) -} - /// Reads zbin bytecode from a given path, relative to ZKSYNC_HOME. pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef<Path>) -> Vec<u8> { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); @@ -263,7 +214,7 @@ pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef<Path>) -> Vec<u8> { } /// Reads zbin bytecode from a given path.
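With the helpers above made private, external callers keep going through the surviving public entry points of this crate; a sketch (arguments illustrative):

    let bootloader = read_bootloader_code("proved_batch");
    let deployer = deployer_contract();
    let bytecode = read_bytecode("etc/contracts-test-data/artifacts-zk/...");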
-pub fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec<u8> { +fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec<u8> { fs::read(&bytecode_path) .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) } @@ -293,26 +244,6 @@ impl PartialEq for BaseSystemContracts { } } -pub static PLAYGROUND_BLOCK_BOOTLOADER_CODE: Lazy<SystemContractCode> = Lazy::new(|| { - let bytecode = read_playground_batch_bootloader_bytecode(); - let hash = hash_bytecode(&bytecode); - - SystemContractCode { - code: bytes_to_be_words(bytecode), - hash, - } -}); - -pub static ESTIMATE_FEE_BLOCK_CODE: Lazy<SystemContractCode> = Lazy::new(|| { - let bytecode = read_bootloader_code("fee_estimate"); - let hash = hash_bytecode(&bytecode); - - SystemContractCode { - code: bytes_to_be_words(bytecode), - hash, - } -}); - impl BaseSystemContracts { fn load_with_bootloader(bootloader_bytecode: Vec<u8>) -> Self { let hash = hash_bytecode(&bootloader_bytecode); @@ -381,12 +312,6 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } - /// BaseSystemContracts with playground bootloader - used for handling eth_calls. - pub fn estimate_gas() -> Self { - let bootloader_bytecode = read_bootloader_code("fee_estimate"); - BaseSystemContracts::load_with_bootloader(bootloader_bytecode) - } - pub fn estimate_gas_pre_virtual_blocks() -> Self { let bootloader_bytecode = read_zbin_bytecode( "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", diff --git a/core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json b/core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json new file mode 100644 index 00000000000..cf78a74dfb3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE l1_batches\n SET\n fee_account_address = $1::bytea\n WHERE\n number = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c" +} diff --git a/core/lib/dal/.sqlx/query-02285b8d0bc76c8cfd259872ac24f3670813e5a5356ddcb7ac482a0201d045f7.json b/core/lib/dal/.sqlx/query-02285b8d0bc76c8cfd259872ac24f3670813e5a5356ddcb7ac482a0201d045f7.json deleted file mode 100644 index 41a37726f48..00000000000 --- a/core/lib/dal/.sqlx/query-02285b8d0bc76c8cfd259872ac24f3670813e5a5356ddcb7ac482a0201d045f7.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n sl AS (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.address = $1\n AND storage_logs.tx_hash = $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n sl.key AS
\"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN sl ON sl.value != $3\n WHERE\n transactions.hash = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "tx_hash", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - }, - { - "ordinal": 2, - "name": "l1_batch_tx_index", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "block_number!", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "error", - "type_info": "Varchar" - }, - { - "ordinal": 5, - "name": "effective_gas_price", - "type_info": "Numeric" - }, - { - "ordinal": 6, - "name": "initiator_address", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "transfer_to?", - "type_info": "Jsonb" - }, - { - "ordinal": 8, - "name": "execute_contract_address?", - "type_info": "Jsonb" - }, - { - "ordinal": 9, - "name": "tx_format?", - "type_info": "Int4" - }, - { - "ordinal": 10, - "name": "refunded_gas", - "type_info": "Int8" - }, - { - "ordinal": 11, - "name": "gas_limit", - "type_info": "Numeric" - }, - { - "ordinal": 12, - "name": "block_hash", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "l1_batch_number?", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "contract_address?", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Bytea" - ] - }, - "nullable": [ - false, - true, - true, - true, - true, - true, - false, - null, - null, - true, - false, - true, - false, - true, - false - ] - }, - "hash": "02285b8d0bc76c8cfd259872ac24f3670813e5a5356ddcb7ac482a0201d045f7" -} diff --git a/core/lib/dal/.sqlx/query-84c804db9d60a4c1ebbce5e3dcdf03c0aad3ac30d85176e0a4e35f72bbb21b12.json b/core/lib/dal/.sqlx/query-04e407cc7675c0787847209d378242a0eb9cad22a120a957a699c8752933b8a7.json similarity index 65% rename from core/lib/dal/.sqlx/query-84c804db9d60a4c1ebbce5e3dcdf03c0aad3ac30d85176e0a4e35f72bbb21b12.json rename to core/lib/dal/.sqlx/query-04e407cc7675c0787847209d378242a0eb9cad22a120a957a699c8752933b8a7.json index a0a3cb3d63b..8720a96ca13 100644 --- a/core/lib/dal/.sqlx/query-84c804db9d60a4c1ebbce5e3dcdf03c0aad3ac30d85176e0a4e35f72bbb21b12.json +++ b/core/lib/dal/.sqlx/query-04e407cc7675c0787847209d378242a0eb9cad22a120a957a699c8752933b8a7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n 
compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -15,191 +15,166 @@ }, { "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, "name": "l1_tx_count", "type_info": "Int4" }, { - "ordinal": 4, + "ordinal": 3, "name": "l2_tx_count", "type_info": "Int4" }, { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, + "ordinal": 4, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 5, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 6, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 7, "name": "parent_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 8, "name": "commitment", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 9, "name": "compressed_write_logs", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 10, "name": "compressed_contracts", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "eth_prove_tx_id", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 12, "name": "eth_commit_tx_id", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 13, "name": "eth_execute_tx_id", "type_info": "Int4" }, { - "ordinal": 16, + "ordinal": 14, "name": "merkle_root_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 15, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 18, + "ordinal": 16, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 19, + "ordinal": 17, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 20, + "ordinal": 18, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 19, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 20, "name": "l2_l1_compressed_messages", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 21, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, + "ordinal": 22, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 27, + "ordinal": 23, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 28, + "ordinal": 24, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 25, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, + "ordinal": 26, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 32, + "ordinal": 27, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 33, + "ordinal": 28, 
"name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 34, + "ordinal": 29, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 35, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 36, + "ordinal": 30, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 37, + "ordinal": 31, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 32, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 38, + "ordinal": 33, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 39, + "ordinal": 34, "name": "pubdata_input", "type_info": "Bytea" } @@ -216,8 +191,6 @@ false, false, false, - false, - false, true, true, true, @@ -234,23 +207,20 @@ true, true, true, - false, - false, true, true, true, true, - false, true, true, true, true, - false, true, + false, true, true, true ] }, - "hash": "84c804db9d60a4c1ebbce5e3dcdf03c0aad3ac30d85176e0a4e35f72bbb21b12" + "hash": "04e407cc7675c0787847209d378242a0eb9cad22a120a957a699c8752933b8a7" } diff --git a/core/lib/dal/.sqlx/query-05267e9774056bb0f984918ab861a2ee78eb59628d0429e89b27d185f83512be.json b/core/lib/dal/.sqlx/query-05267e9774056bb0f984918ab861a2ee78eb59628d0429e89b27d185f83512be.json deleted file mode 100644 index 81b6ad9687b..00000000000 --- a/core/lib/dal/.sqlx/query-05267e9774056bb0f984918ab861a2ee78eb59628d0429e89b27d185f83512be.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n call_traces\n WHERE\n tx_hash IN (\n SELECT\n hash\n FROM\n transactions\n WHERE\n miniblock_number = $1\n )\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "tx_hash", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "call_trace", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false - ] - }, - "hash": "05267e9774056bb0f984918ab861a2ee78eb59628d0429e89b27d185f83512be" -} diff --git a/core/lib/dal/.sqlx/query-53c04fd528752c0e0ef7ffa1f68a7ea81d8d10c76bbae540013667e13230e2ea.json b/core/lib/dal/.sqlx/query-0535c87d0ae694d5f10e529742ba2803cd147dec7450d1f81a41aea8dcf3be93.json similarity index 73% rename from core/lib/dal/.sqlx/query-53c04fd528752c0e0ef7ffa1f68a7ea81d8d10c76bbae540013667e13230e2ea.json rename to core/lib/dal/.sqlx/query-0535c87d0ae694d5f10e529742ba2803cd147dec7450d1f81a41aea8dcf3be93.json index e07b9192b5f..8c16bcb25c8 100644 --- a/core/lib/dal/.sqlx/query-53c04fd528752c0e0ef7ffa1f68a7ea81d8d10c76bbae540013667e13230e2ea.json +++ b/core/lib/dal/.sqlx/query-0535c87d0ae694d5f10e529742ba2803cd147dec7450d1f81a41aea8dcf3be93.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n fee_account_address\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n fee_account_address\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "53c04fd528752c0e0ef7ffa1f68a7ea81d8d10c76bbae540013667e13230e2ea" + "hash": "0535c87d0ae694d5f10e529742ba2803cd147dec7450d1f81a41aea8dcf3be93" } diff --git a/core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json b/core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json new file mode 100644 index 00000000000..2e83c0036b9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE 
miniblocks\n SET\n fee_account_address = l1_batches.fee_account_address\n FROM\n l1_batches\n WHERE\n l1_batches.number = miniblocks.l1_batch_number\n AND miniblocks.number BETWEEN $1 AND $2\n AND miniblocks.fee_account_address = '\\x0000000000000000000000000000000000000000'::bytea\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf" +} diff --git a/core/lib/dal/.sqlx/query-0aaefa9d5518ed1a2d8f735435e8048558243ff878b59586eb3a8b22794395d8.json b/core/lib/dal/.sqlx/query-0aaefa9d5518ed1a2d8f735435e8048558243ff878b59586eb3a8b22794395d8.json deleted file mode 100644 index 688a7373d05..00000000000 --- a/core/lib/dal/.sqlx/query-0aaefa9d5518ed1a2d8f735435e8048558243ff878b59586eb3a8b22794395d8.json +++ /dev/null @@ -1,259 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 8, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 9, - "name": "parent_hash", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_write_logs", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "compressed_contracts", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "eth_prove_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "eth_commit_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 15, - "name": "eth_execute_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 16, - "name": 
"merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 18, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 19, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 20, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "l2_l1_compressed_messages", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 27, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 28, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 29, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 32, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 33, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 34, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 35, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 36, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 37, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 38, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 39, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int4", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - true, - true, - true, - false, - false, - true, - true, - true, - true, - false, - true, - true, - true, - true, - true, - false, - true, - true, - true - ] - }, - "hash": "0aaefa9d5518ed1a2d8f735435e8048558243ff878b59586eb3a8b22794395d8" -} diff --git a/core/lib/dal/.sqlx/query-16e62660fd14f6d3731e69fa696a36408510bb05c15285dfa7708bc0b044d0c5.json b/core/lib/dal/.sqlx/query-16e62660fd14f6d3731e69fa696a36408510bb05c15285dfa7708bc0b044d0c5.json deleted file mode 100644 index 3ba2e9b5448..00000000000 --- a/core/lib/dal/.sqlx/query-16e62660fd14f6d3731e69fa696a36408510bb05c15285dfa7708bc0b044d0c5.json +++ /dev/null @@ -1,259 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n 
l1_batches.default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 8, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 9, - "name": "parent_hash", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_write_logs", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "compressed_contracts", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "eth_prove_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "eth_commit_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 15, - "name": "eth_execute_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 16, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 18, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 19, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 20, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "l2_l1_compressed_messages", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 27, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 28, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 29, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 32, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 33, - "name": "meta_parameters_hash", - 
"type_info": "Bytea" - }, - { - "ordinal": 34, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 35, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 36, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 37, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 38, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 39, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int4", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - true, - true, - true, - false, - false, - true, - true, - true, - true, - false, - true, - true, - true, - true, - true, - false, - true, - true, - true - ] - }, - "hash": "16e62660fd14f6d3731e69fa696a36408510bb05c15285dfa7708bc0b044d0c5" -} diff --git a/core/lib/dal/.sqlx/query-1f75f2d88c1d2496e48b02f374e492cf2545944291dd0d42b937c0d0c7eefd47.json b/core/lib/dal/.sqlx/query-1f75f2d88c1d2496e48b02f374e492cf2545944291dd0d42b937c0d0c7eefd47.json deleted file mode 100644 index 362c775ea5a..00000000000 --- a/core/lib/dal/.sqlx/query-1f75f2d88c1d2496e48b02f374e492cf2545944291dd0d42b937c0d0c7eefd47.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n l1_batches.l1_gas_price,\n l1_batches.l2_fair_gas_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "root_hash?", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "commit_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 6, - "name": "committed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 7, - "name": "prove_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 8, - "name": "proven_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 9, - "name": "execute_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 10, - "name": "executed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 11, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 12, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": 
"bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 14, - "name": "default_aa_code_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - true, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true - ] - }, - "hash": "1f75f2d88c1d2496e48b02f374e492cf2545944291dd0d42b937c0d0c7eefd47" -} diff --git a/core/lib/dal/.sqlx/query-2028ba507f3ccd474f0261e571eb19a3a7feec950cb3e503588cf55d954a493a.json b/core/lib/dal/.sqlx/query-2028ba507f3ccd474f0261e571eb19a3a7feec950cb3e503588cf55d954a493a.json deleted file mode 100644 index 8aaefe3c6ba..00000000000 --- a/core/lib/dal/.sqlx/query-2028ba507f3ccd474f0261e571eb19a3a7feec950cb3e503588cf55d954a493a.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n factory_deps\n WHERE\n miniblock_number <= $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2028ba507f3ccd474f0261e571eb19a3a7feec950cb3e503588cf55d954a493a" -} diff --git a/core/lib/dal/.sqlx/query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json b/core/lib/dal/.sqlx/query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json new file mode 100644 index 00000000000..c4f8057011d --- /dev/null +++ b/core/lib/dal/.sqlx/query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n hashed_key,\n l1_batch_number,\n INDEX\n FROM\n initial_writes\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "index", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a" +} diff --git a/core/lib/dal/.sqlx/query-25aad4298d2459ef5aea7c4ea82eda1da000848ed4abf309b68989da33e1ce5a.json b/core/lib/dal/.sqlx/query-25aad4298d2459ef5aea7c4ea82eda1da000848ed4abf309b68989da33e1ce5a.json deleted file mode 100644 index d966ff14c99..00000000000 --- a/core/lib/dal/.sqlx/query-25aad4298d2459ef5aea7c4ea82eda1da000848ed4abf309b68989da33e1ce5a.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n l1_batches.fee_account_address AS \"fee_account_address?\"\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT 
NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "l1_batch_number!", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "root_hash?", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "commit_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 7, - "name": "committed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 8, - "name": "prove_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 9, - "name": "proven_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 10, - "name": "execute_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 11, - "name": "executed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 12, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 15, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 16, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 17, - "name": "fee_account_address?", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - null, - false, - false, - false, - false, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true, - true, - false - ] - }, - "hash": "25aad4298d2459ef5aea7c4ea82eda1da000848ed4abf309b68989da33e1ce5a" -} diff --git a/core/lib/dal/.sqlx/query-66554ab87e5fe4776786217d1f71a525c87d390df21250ab4dce08e09be72591.json b/core/lib/dal/.sqlx/query-2979b6c9ce76a4e6eaaa3f9bad5cf831d63d692111d87282aed8e85df6b0411f.json similarity index 70% rename from core/lib/dal/.sqlx/query-66554ab87e5fe4776786217d1f71a525c87d390df21250ab4dce08e09be72591.json rename to core/lib/dal/.sqlx/query-2979b6c9ce76a4e6eaaa3f9bad5cf831d63d692111d87282aed8e85df6b0411f.json index eb2ee1d31bc..84d906823e1 100644 --- a/core/lib/dal/.sqlx/query-66554ab87e5fe4776786217d1f71a525c87d390df21250ab4dce08e09be72591.json +++ b/core/lib/dal/.sqlx/query-2979b6c9ce76a4e6eaaa3f9bad5cf831d63d692111d87282aed8e85df6b0411f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -30,46 +30,51 @@ }, { "ordinal": 
5, + "name": "fee_account_address!", + "type_info": "Bytea" + }, + { + "ordinal": 6, "name": "base_fee_per_gas", "type_info": "Numeric" }, { - "ordinal": 6, + "ordinal": 7, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 8, + "ordinal": 9, "name": "gas_per_pubdata_limit", "type_info": "Int8" }, { - "ordinal": 9, + "ordinal": 10, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 11, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 12, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 13, + "ordinal": 14, "name": "fair_pubdata_price", "type_info": "Int8" } @@ -87,6 +92,7 @@ false, false, false, + false, true, true, true, @@ -94,5 +100,5 @@ true ] }, - "hash": "66554ab87e5fe4776786217d1f71a525c87d390df21250ab4dce08e09be72591" + "hash": "2979b6c9ce76a4e6eaaa3f9bad5cf831d63d692111d87282aed8e85df6b0411f" } diff --git a/core/lib/dal/.sqlx/query-2c71a819c6ed22a3ab79675840e00f7b1176d59a83520288f5428b67ebd52130.json b/core/lib/dal/.sqlx/query-2c71a819c6ed22a3ab79675840e00f7b1176d59a83520288f5428b67ebd52130.json new file mode 100644 index 00000000000..e8e6d8e760b --- /dev/null +++ b/core/lib/dal/.sqlx/query-2c71a819c6ed22a3ab79675840e00f7b1176d59a83520288f5428b67ebd52130.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM initial_writes\n WHERE\n l1_batch_number > $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "2c71a819c6ed22a3ab79675840e00f7b1176d59a83520288f5428b67ebd52130" +} diff --git a/core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json b/core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json deleted file mode 100644 index 4290ba1f1b3..00000000000 --- a/core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n miniblocks\n WHERE\n (\n $1::BIGINT IS NULL\n AND l1_batch_number IS NULL\n )\n OR (l1_batch_number = $1::BIGINT)\n ORDER BY\n number\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "timestamp", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9" -} diff --git a/core/lib/dal/.sqlx/query-481d3cdb6c9a90843b240dba84377cb8f1340b483faedbbc2b71055aa5451cae.json b/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json similarity index 72% rename from core/lib/dal/.sqlx/query-481d3cdb6c9a90843b240dba84377cb8f1340b483faedbbc2b71055aa5451cae.json rename to core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json index 3a9c7616c9c..9b989a9ba25 100644 --- a/core/lib/dal/.sqlx/query-481d3cdb6c9a90843b240dba84377cb8f1340b483faedbbc2b71055aa5451cae.json +++ b/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_finished = TRUE\n ", + "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n ", 
"describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "481d3cdb6c9a90843b240dba84377cb8f1340b483faedbbc2b71055aa5451cae" + "hash": "38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c" } diff --git a/core/lib/dal/.sqlx/query-75a3cf6f502ebb1a0e92b672dc6ce56b53cc4ca0a8c6ee7cac1b9a5863000be3.json b/core/lib/dal/.sqlx/query-3ad766526138a1cf9ca3acd9025bab23414ee459daa4734772ece7bcc6e5fd7f.json similarity index 64% rename from core/lib/dal/.sqlx/query-75a3cf6f502ebb1a0e92b672dc6ce56b53cc4ca0a8c6ee7cac1b9a5863000be3.json rename to core/lib/dal/.sqlx/query-3ad766526138a1cf9ca3acd9025bab23414ee459daa4734772ece7bcc6e5fd7f.json index 13f45b32225..197832e8fce 100644 --- a/core/lib/dal/.sqlx/query-75a3cf6f502ebb1a0e92b672dc6ce56b53cc4ca0a8c6ee7cac1b9a5863000be3.json +++ b/core/lib/dal/.sqlx/query-3ad766526138a1cf9ca3acd9025bab23414ee459daa4734772ece7bcc6e5fd7f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -15,197 +15,175 @@ }, { "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, "name": "l1_tx_count", "type_info": "Int4" }, { - "ordinal": 4, + "ordinal": 3, "name": "l2_tx_count", "type_info": "Int4" }, { - "ordinal": 
5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, + "ordinal": 4, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 5, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 6, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 7, "name": "parent_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 8, "name": "commitment", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 9, "name": "compressed_write_logs", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 10, "name": "compressed_contracts", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "eth_prove_tx_id", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 12, "name": "eth_commit_tx_id", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 13, "name": "eth_execute_tx_id", "type_info": "Int4" }, { - "ordinal": 16, + "ordinal": 14, "name": "merkle_root_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 15, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 18, + "ordinal": 16, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 19, + "ordinal": 17, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 20, + "ordinal": 18, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 19, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 20, "name": "l2_l1_compressed_messages", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 21, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, + "ordinal": 22, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 27, + "ordinal": 23, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 28, + "ordinal": 24, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 25, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, + "ordinal": 26, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 32, + "ordinal": 27, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 33, + "ordinal": 28, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 34, + "ordinal": 29, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 35, + "ordinal": 30, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 36, + "ordinal": 31, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 37, + "ordinal": 32, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 38, + "ordinal": 33, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 39, + "ordinal": 34, "name": "pubdata_input", "type_info": "Bytea" } ], "parameters": { "Left": [ + "Bytea", + "Bytea", + "Int4", "Int8" ] }, @@ -216,8 +194,6 @@ false, false, false, - false, - false, true, true, true, @@ -234,13 +210,10 @@ true, true, true, - false, - false, true, true, true, true, - false, true, true, true, @@ -252,5 +225,5 @@ true ] }, - "hash": "75a3cf6f502ebb1a0e92b672dc6ce56b53cc4ca0a8c6ee7cac1b9a5863000be3" + "hash": "3ad766526138a1cf9ca3acd9025bab23414ee459daa4734772ece7bcc6e5fd7f" } diff --git 
a/core/lib/dal/.sqlx/query-3c2280a0a07f2916baba64bc9511c711c5531fcee28048b5da988a4e748c0c3a.json b/core/lib/dal/.sqlx/query-3c2280a0a07f2916baba64bc9511c711c5531fcee28048b5da988a4e748c0c3a.json new file mode 100644 index 00000000000..521af97a582 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3c2280a0a07f2916baba64bc9511c711c5531fcee28048b5da988a4e748c0c3a.json @@ -0,0 +1,226 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "parent_hash", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 9, + "name": "compressed_write_logs", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "compressed_contracts", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "eth_prove_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "eth_commit_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "eth_execute_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "merkle_root_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "l2_to_l1_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 16, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 17, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 18, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "l2_l1_compressed_messages", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 22, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 23, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 24, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 
27, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 30, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 32, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 33, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 34, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true + ] + }, + "hash": "3c2280a0a07f2916baba64bc9511c711c5531fcee28048b5da988a4e748c0c3a" +} diff --git a/core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json b/core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json new file mode 100644 index 00000000000..d8f17c7f772 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batches.fee_account_address\n FROM\n l1_batches\n INNER JOIN miniblocks ON miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "fee_account_address", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2" +} diff --git a/core/lib/dal/.sqlx/query-44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca.json b/core/lib/dal/.sqlx/query-44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca.json new file mode 100644 index 00000000000..cb2d1b149ec --- /dev/null +++ b/core/lib/dal/.sqlx/query-44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca.json @@ -0,0 +1,106 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n 
l1_batches.number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "root_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "commit_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "committed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 7, + "name": "prove_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "proven_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 9, + "name": "execute_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "executed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 11, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 12, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "default_aa_code_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true + ] + }, + "hash": "44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca" +} diff --git a/core/lib/dal/.sqlx/query-3671f23665664b8d6acf97e4f697e5afa28d855d87ea2f8c93e79c436749068a.json b/core/lib/dal/.sqlx/query-4a70d73b85e9b96125d8b9385ed12823e1699490087a060455af49b637d82665.json similarity index 63% rename from core/lib/dal/.sqlx/query-3671f23665664b8d6acf97e4f697e5afa28d855d87ea2f8c93e79c436749068a.json rename to core/lib/dal/.sqlx/query-4a70d73b85e9b96125d8b9385ed12823e1699490087a060455af49b637d82665.json index 1e8d851ab07..d009cf0e962 100644 --- a/core/lib/dal/.sqlx/query-3671f23665664b8d6acf97e4f697e5afa28d855d87ea2f8c93e79c436749068a.json +++ b/core/lib/dal/.sqlx/query-4a70d73b85e9b96125d8b9385ed12823e1699490087a060455af49b637d82665.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n 
used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -15,191 +15,166 @@ }, { "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, "name": "l1_tx_count", "type_info": "Int4" }, { - "ordinal": 4, + "ordinal": 3, "name": "l2_tx_count", "type_info": "Int4" }, { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, + "ordinal": 4, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 5, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 6, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 7, "name": "parent_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 8, "name": "commitment", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 9, "name": "compressed_write_logs", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 10, "name": "compressed_contracts", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "eth_prove_tx_id", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 12, "name": "eth_commit_tx_id", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 13, "name": "eth_execute_tx_id", "type_info": "Int4" }, { - "ordinal": 16, + "ordinal": 14, "name": "merkle_root_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 15, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 18, + "ordinal": 16, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 19, + "ordinal": 17, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 20, + "ordinal": 18, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 19, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 20, "name": "l2_l1_compressed_messages", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 21, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, + "ordinal": 22, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 27, + "ordinal": 23, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 28, + "ordinal": 24, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 25, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, + "ordinal": 26, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 32, + "ordinal": 27, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 33, + "ordinal": 28, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 34, + "ordinal": 29, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 35, + "ordinal": 
30, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 36, + "ordinal": 31, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 37, + "ordinal": 32, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 38, + "ordinal": 33, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 39, + "ordinal": 34, "name": "pubdata_input", "type_info": "Bytea" } @@ -218,8 +193,6 @@ false, false, false, - false, - false, true, true, true, @@ -236,13 +209,10 @@ true, true, true, - false, - false, true, true, true, true, - false, true, true, true, @@ -254,5 +224,5 @@ true ] }, - "hash": "3671f23665664b8d6acf97e4f697e5afa28d855d87ea2f8c93e79c436749068a" + "hash": "4a70d73b85e9b96125d8b9385ed12823e1699490087a060455af49b637d82665" } diff --git a/core/lib/dal/.sqlx/query-51891ab674b8f5cf2d7c12420d1c026c3181f42a49f1f4b6e227c95641931a54.json b/core/lib/dal/.sqlx/query-51891ab674b8f5cf2d7c12420d1c026c3181f42a49f1f4b6e227c95641931a54.json new file mode 100644 index 00000000000..ed13428a569 --- /dev/null +++ b/core/lib/dal/.sqlx/query-51891ab674b8f5cf2d7c12420d1c026c3181f42a49f1f4b6e227c95641931a54.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n timestamp\n FROM\n miniblocks\n WHERE\n number = COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n ),\n (\n SELECT\n MAX(miniblock_number) + 1\n FROM\n snapshot_recovery\n WHERE\n l1_batch_number = $1\n )\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "timestamp", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "51891ab674b8f5cf2d7c12420d1c026c3181f42a49f1f4b6e227c95641931a54" +} diff --git a/core/lib/dal/.sqlx/query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json b/core/lib/dal/.sqlx/query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json new file mode 100644 index 00000000000..1cb61dc4460 --- /dev/null +++ b/core/lib/dal/.sqlx/query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n hashed_key,\n address,\n key,\n value,\n operation_number,\n tx_hash,\n miniblock_number\n FROM\n storage_logs\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "address", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "key", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "value", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "operation_number", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": "miniblock_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af" +} diff --git a/core/lib/dal/.sqlx/query-5c7b6b58261faa0a164181987eec4055c22895316ce68d9d41619db7fcfb7563.json b/core/lib/dal/.sqlx/query-57fe009542ca9dc763e3823ee73662bf3bfdda11bb57f65db4980982a4200bed.json similarity index 71% rename from core/lib/dal/.sqlx/query-5c7b6b58261faa0a164181987eec4055c22895316ce68d9d41619db7fcfb7563.json rename to core/lib/dal/.sqlx/query-57fe009542ca9dc763e3823ee73662bf3bfdda11bb57f65db4980982a4200bed.json index cd76205be81..909686f9d0e 100644 --- 
a/core/lib/dal/.sqlx/query-5c7b6b58261faa0a164181987eec4055c22895316ce68d9d41619db7fcfb7563.json +++ b/core/lib/dal/.sqlx/query-57fe009542ca9dc763e3823ee73662bf3bfdda11bb57f65db4980982a4200bed.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -30,46 +30,51 @@ }, { "ordinal": 5, + "name": "fee_account_address!", + "type_info": "Bytea" + }, + { + "ordinal": 6, "name": "base_fee_per_gas", "type_info": "Numeric" }, { - "ordinal": 6, + "ordinal": 7, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 8, + "ordinal": 9, "name": "gas_per_pubdata_limit", "type_info": "Int8" }, { - "ordinal": 9, + "ordinal": 10, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 11, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 12, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 13, + "ordinal": 14, "name": "fair_pubdata_price", "type_info": "Int8" } @@ -89,6 +94,7 @@ false, false, false, + false, true, true, true, @@ -96,5 +102,5 @@ true ] }, - "hash": "5c7b6b58261faa0a164181987eec4055c22895316ce68d9d41619db7fcfb7563" + "hash": "57fe009542ca9dc763e3823ee73662bf3bfdda11bb57f65db4980982a4200bed" } diff --git a/core/lib/dal/.sqlx/query-5aaed2a975042cc9b7b9d88e5fd5db07667280abef27cc73159d2fd9c95b209b.json b/core/lib/dal/.sqlx/query-5aaed2a975042cc9b7b9d88e5fd5db07667280abef27cc73159d2fd9c95b209b.json deleted file mode 100644 index 069cd195639..00000000000 --- a/core/lib/dal/.sqlx/query-5aaed2a975042cc9b7b9d88e5fd5db07667280abef27cc73159d2fd9c95b209b.json +++ /dev/null @@ -1,256 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n 
LIMIT\n $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 8, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 9, - "name": "parent_hash", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_write_logs", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "compressed_contracts", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "eth_prove_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "eth_commit_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 15, - "name": "eth_execute_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 16, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 18, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 19, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 20, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "l2_l1_compressed_messages", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 27, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 28, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 29, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 32, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 33, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 34, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 35, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 36, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 37, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 38, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 39, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - true, - true, - true, - false, - false, - true, - true, - true, 
- true, - false, - true, - true, - true, - true, - true, - false, - true, - true, - true - ] - }, - "hash": "5aaed2a975042cc9b7b9d88e5fd5db07667280abef27cc73159d2fd9c95b209b" -} diff --git a/core/lib/dal/.sqlx/query-65cc4517c3693c8bdb66b332151d4cb46ca093129707ee14f2fa42dc1800cc9e.json b/core/lib/dal/.sqlx/query-65cc4517c3693c8bdb66b332151d4cb46ca093129707ee14f2fa42dc1800cc9e.json deleted file mode 100644 index 5f967c6d265..00000000000 --- a/core/lib/dal/.sqlx/query-65cc4517c3693c8bdb66b332151d4cb46ca093129707ee14f2fa42dc1800cc9e.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "65cc4517c3693c8bdb66b332151d4cb46ca093129707ee14f2fa42dc1800cc9e" -} diff --git a/core/lib/dal/.sqlx/query-6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163.json b/core/lib/dal/.sqlx/query-6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163.json new file mode 100644 index 00000000000..5ccda40f56f --- /dev/null +++ b/core/lib/dal/.sqlx/query-6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163.json @@ -0,0 +1,124 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_number!", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "root_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": 
"commit_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "committed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 8, + "name": "prove_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "proven_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 10, + "name": "execute_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "executed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 12, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "fee_account_address", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + null, + false, + false, + false, + false, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + true, + false + ] + }, + "hash": "6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163" +} diff --git a/core/lib/dal/.sqlx/query-bb1904a01a3860b5440ae23763d6d5ee4341edadb8a86b459a07427b7e265e98.json b/core/lib/dal/.sqlx/query-6ed5cc84e8097c4febf6c935193f45ef713ef7f9909ce26653faceddb549a383.json similarity index 60% rename from core/lib/dal/.sqlx/query-bb1904a01a3860b5440ae23763d6d5ee4341edadb8a86b459a07427b7e265e98.json rename to core/lib/dal/.sqlx/query-6ed5cc84e8097c4febf6c935193f45ef713ef7f9909ce26653faceddb549a383.json index ddc5d583900..9d3050eaa83 100644 --- a/core/lib/dal/.sqlx/query-bb1904a01a3860b5440ae23763d6d5ee4341edadb8a86b459a07427b7e265e98.json +++ b/core/lib/dal/.sqlx/query-6ed5cc84e8097c4febf6c935193f45ef713ef7f9909ce26653faceddb549a383.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n is_finished,\n fee_account_address,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -25,81 +25,56 @@ }, { "ordinal": 4, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 7, + "ordinal": 5, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 6, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 7, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 8, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 12, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": 
"l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 14, + "ordinal": 9, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 15, + "ordinal": 10, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 17, + "ordinal": 12, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 13, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 19, + "ordinal": 14, "name": "pubdata_input", "type_info": "Bytea" } @@ -119,11 +94,6 @@ false, false, false, - false, - false, - false, - false, - false, true, true, true, @@ -132,5 +102,5 @@ true ] }, - "hash": "bb1904a01a3860b5440ae23763d6d5ee4341edadb8a86b459a07427b7e265e98" + "hash": "6ed5cc84e8097c4febf6c935193f45ef713ef7f9909ce26653faceddb549a383" } diff --git a/core/lib/dal/.sqlx/query-70979db81f473950b2fae7816dbad7fe3464f2619cee2d583accaa829aa12b94.json b/core/lib/dal/.sqlx/query-70979db81f473950b2fae7816dbad7fe3464f2619cee2d583accaa829aa12b94.json deleted file mode 100644 index 45338f8e64c..00000000000 --- a/core/lib/dal/.sqlx/query-70979db81f473950b2fae7816dbad7fe3464f2619cee2d583accaa829aa12b94.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n is_finished,\n fee_account_address,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_input,\n predicted_circuits,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n $22,\n $23,\n $24,\n $25,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "Bool", - "Bytea", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Numeric", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "70979db81f473950b2fae7816dbad7fe3464f2619cee2d583accaa829aa12b94" -} diff --git a/core/lib/dal/.sqlx/query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json b/core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json similarity index 68% rename from core/lib/dal/.sqlx/query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json rename to core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json index fe8a346d1e2..7c366776a5a 100644 --- a/core/lib/dal/.sqlx/query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json +++ b/core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_root_hash,\n last_finished_chunk_id,\n total_chunk_count\n FROM\n snapshot_recovery\n ", + "query": "\n SELECT\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n 
miniblock_root_hash,\n storage_logs_chunks_processed\n FROM\n snapshot_recovery\n ", "describe": { "columns": [ { @@ -25,13 +25,8 @@ }, { "ordinal": 4, - "name": "last_finished_chunk_id", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "total_chunk_count", - "type_info": "Int4" + "name": "storage_logs_chunks_processed", + "type_info": "BoolArray" } ], "parameters": { @@ -42,9 +37,8 @@ false, false, false, - true, false ] }, - "hash": "47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060" + "hash": "73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8" } diff --git a/core/lib/dal/.sqlx/query-78978c19282961c5b3dc06352b41caa4cca66d6ad74b2cd1a34ea5f7bc1e6909.json b/core/lib/dal/.sqlx/query-78978c19282961c5b3dc06352b41caa4cca66d6ad74b2cd1a34ea5f7bc1e6909.json deleted file mode 100644 index f746bd5703b..00000000000 --- a/core/lib/dal/.sqlx/query-78978c19282961c5b3dc06352b41caa4cca66d6ad74b2cd1a34ea5f7bc1e6909.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n call_traces\n WHERE\n tx_hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "tx_hash", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "call_trace", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - false, - false - ] - }, - "hash": "78978c19282961c5b3dc06352b41caa4cca66d6ad74b2cd1a34ea5f7bc1e6909" -} diff --git a/core/lib/dal/.sqlx/query-87f27295de500591f01ed76731df2aed7049c3f44a6d25556967ea867e0caf25.json b/core/lib/dal/.sqlx/query-87f27295de500591f01ed76731df2aed7049c3f44a6d25556967ea867e0caf25.json new file mode 100644 index 00000000000..dbeaede9ecd --- /dev/null +++ b/core/lib/dal/.sqlx/query-87f27295de500591f01ed76731df2aed7049c3f44a6d25556967ea867e0caf25.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n call_trace\n FROM\n call_traces\n WHERE\n tx_hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "call_trace", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + false + ] + }, + "hash": "87f27295de500591f01ed76731df2aed7049c3f44a6d25556967ea867e0caf25" +} diff --git a/core/lib/dal/.sqlx/query-883ab3d601e2dfef03ad36e5987577821fc8ce2f81cb029d0f64801d5f743388.json b/core/lib/dal/.sqlx/query-883ab3d601e2dfef03ad36e5987577821fc8ce2f81cb029d0f64801d5f743388.json new file mode 100644 index 00000000000..bca333c5704 --- /dev/null +++ b/core/lib/dal/.sqlx/query-883ab3d601e2dfef03ad36e5987577821fc8ce2f81cb029d0f64801d5f743388.json @@ -0,0 +1,227 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n 
AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "parent_hash", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 9, + "name": "compressed_write_logs", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "compressed_contracts", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "eth_prove_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "eth_commit_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "eth_execute_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "merkle_root_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "l2_to_l1_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 16, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 17, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 18, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "l2_l1_compressed_messages", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 22, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 23, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 24, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 30, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 32, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 33, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 34, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + true, + true + ] + }, + "hash": "883ab3d601e2dfef03ad36e5987577821fc8ce2f81cb029d0f64801d5f743388" +} diff --git 
a/core/lib/dal/.sqlx/query-8bf8613d5b5365b85bc0e363fbaf92d22215fa81c39f7ff91b247260efa4406b.json b/core/lib/dal/.sqlx/query-8bf8613d5b5365b85bc0e363fbaf92d22215fa81c39f7ff91b247260efa4406b.json new file mode 100644 index 00000000000..b99201a8055 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8bf8613d5b5365b85bc0e363fbaf92d22215fa81c39f7ff91b247260efa4406b.json @@ -0,0 +1,229 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "parent_hash", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 9, + "name": "compressed_write_logs", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "compressed_contracts", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "eth_prove_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "eth_commit_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "eth_execute_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "merkle_root_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "l2_to_l1_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 16, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 17, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 18, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "l2_l1_compressed_messages", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + 
"ordinal": 22, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 23, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 24, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 30, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 32, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 33, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 34, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int4", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true + ] + }, + "hash": "8bf8613d5b5365b85bc0e363fbaf92d22215fa81c39f7ff91b247260efa4406b" +} diff --git a/core/lib/dal/.sqlx/query-c038cecd8184e5e8d9f498116bff995b654adfe328cb825a44ad36b4bf9ec8f2.json b/core/lib/dal/.sqlx/query-8cd11172fc47ff8d37c22ba4163cd2d08a708c3af75fee57379a709baa3c4bed.json similarity index 60% rename from core/lib/dal/.sqlx/query-c038cecd8184e5e8d9f498116bff995b654adfe328cb825a44ad36b4bf9ec8f2.json rename to core/lib/dal/.sqlx/query-8cd11172fc47ff8d37c22ba4163cd2d08a708c3af75fee57379a709baa3c4bed.json index 8161e8c1fc8..ba3ee8a51d7 100644 --- a/core/lib/dal/.sqlx/query-c038cecd8184e5e8d9f498116bff995b654adfe328cb825a44ad36b4bf9ec8f2.json +++ b/core/lib/dal/.sqlx/query-8cd11172fc47ff8d37c22ba4163cd2d08a708c3af75fee57379a709baa3c4bed.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n tx_hash = $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n miniblock_number ASC,\n tx_index_in_block ASC,\n event_index_in_block ASC\n ", "describe": { "columns": [ { @@ -71,7 +71,7 @@ ], "parameters": { "Left": [ - "Bytea" + "ByteaArray" ] }, "nullable": [ @@ -90,5 +90,5 @@ false ] }, - "hash": "c038cecd8184e5e8d9f498116bff995b654adfe328cb825a44ad36b4bf9ec8f2" + "hash": "8cd11172fc47ff8d37c22ba4163cd2d08a708c3af75fee57379a709baa3c4bed" } diff --git a/core/lib/dal/.sqlx/query-9ee07a22405279e1e44d47ec5226a834aeac9156b974ff225d734683c1905469.json b/core/lib/dal/.sqlx/query-9ee07a22405279e1e44d47ec5226a834aeac9156b974ff225d734683c1905469.json new file mode 100644 
index 00000000000..3269ceb7987 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9ee07a22405279e1e44d47ec5226a834aeac9156b974ff225d734683c1905469.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Bytea", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Int4", + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "9ee07a22405279e1e44d47ec5226a834aeac9156b974ff225d734683c1905469" +} diff --git a/core/lib/dal/.sqlx/query-9f637f37dc3a29ce7412ab4347071bd180729779a0e98ae7a6bb4386aca99716.json b/core/lib/dal/.sqlx/query-9f637f37dc3a29ce7412ab4347071bd180729779a0e98ae7a6bb4386aca99716.json new file mode 100644 index 00000000000..fc18e9f6cb5 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9f637f37dc3a29ce7412ab4347071bd180729779a0e98ae7a6bb4386aca99716.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n factory_deps\n WHERE\n miniblock_number <= $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "9f637f37dc3a29ce7412ab4347071bd180729779a0e98ae7a6bb4386aca99716" +} diff --git a/core/lib/dal/.sqlx/query-be16d820c124dba9f4a272f54f0b742349e78e6e4ce3e7c9a0dcf6447eedc6d8.json b/core/lib/dal/.sqlx/query-b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79.json similarity index 88% rename from core/lib/dal/.sqlx/query-be16d820c124dba9f4a272f54f0b742349e78e6e4ce3e7c9a0dcf6447eedc6d8.json rename to core/lib/dal/.sqlx/query-b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79.json index 695be9f2b8c..90c940c3977 100644 --- a/core/lib/dal/.sqlx/query-be16d820c124dba9f4a272f54f0b742349e78e6e4ce3e7c9a0dcf6447eedc6d8.json +++ b/core/lib/dal/.sqlx/query-b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n WHERE\n tx_hash = $1\n ORDER BY\n log_index_in_tx ASC\n ", + "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n tx_index_in_l1_batch ASC,\n log_index_in_tx ASC\n ", "describe": { "columns": [ { @@ -71,7 +71,7 @@ ], "parameters": { "Left": [ - "Bytea" + "ByteaArray" ] }, "nullable": [ @@ -90,5 +90,5 @@ false ] }, - "hash": "be16d820c124dba9f4a272f54f0b742349e78e6e4ce3e7c9a0dcf6447eedc6d8" + "hash": 
"b259e6bacd98fa68003e0c87bb28cc77bd2dcee4a04d1afc9779714854623a79" } diff --git a/core/lib/dal/.sqlx/query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json b/core/lib/dal/.sqlx/query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json new file mode 100644 index 00000000000..acd2d51f6ea --- /dev/null +++ b/core/lib/dal/.sqlx/query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json @@ -0,0 +1,108 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n sl AS (\n SELECT DISTINCT\n ON (storage_logs.tx_hash) *\n FROM\n storage_logs\n WHERE\n storage_logs.address = $1\n AND storage_logs.tx_hash = ANY ($3)\n ORDER BY\n storage_logs.tx_hash,\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n sl.key AS \"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN sl ON sl.value != $2\n AND sl.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "l1_batch_tx_index", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "block_number!", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "error", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "effective_gas_price", + "type_info": "Numeric" + }, + { + "ordinal": 6, + "name": "initiator_address", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "transfer_to?", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "execute_contract_address?", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "tx_format?", + "type_info": "Int4" + }, + { + "ordinal": 10, + "name": "refunded_gas", + "type_info": "Int8" + }, + { + "ordinal": 11, + "name": "gas_limit", + "type_info": "Numeric" + }, + { + "ordinal": 12, + "name": "block_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, + "name": "l1_batch_number?", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "contract_address?", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "ByteaArray" + ] + }, + "nullable": [ + false, + true, + true, + true, + true, + true, + false, + null, + null, + true, + false, + true, + false, + true, + true + ] + }, + "hash": "b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619" +} diff --git a/core/lib/dal/.sqlx/query-d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541.json b/core/lib/dal/.sqlx/query-c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621.json similarity index 58% rename from core/lib/dal/.sqlx/query-d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541.json rename to 
core/lib/dal/.sqlx/query-c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621.json index f0ea745821f..0b8a91d7bc8 100644 --- a/core/lib/dal/.sqlx/query-d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541.json +++ b/core/lib/dal/.sqlx/query-c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n is_finished,\n fee_account_address,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id = $1\n OR eth_prove_tx_id = $1\n OR eth_execute_tx_id = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id = $1\n OR eth_prove_tx_id = $1\n OR eth_execute_tx_id = $1\n ", "describe": { "columns": [ { @@ -25,81 +25,56 @@ }, { "ordinal": 4, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 7, + "ordinal": 5, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 6, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 7, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 10, + "ordinal": 8, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 11, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 12, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 14, + "ordinal": 9, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 15, + "ordinal": 10, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 17, + "ordinal": 12, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 18, + "ordinal": 13, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 14, "name": "pubdata_input", "type_info": "Bytea" } @@ -119,11 +94,6 @@ false, false, false, - false, - false, - false, - false, - false, true, true, true, @@ -132,5 +102,5 @@ true ] }, - "hash": "d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541" + "hash": "c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621" } diff --git a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json b/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json new file mode 100644 index 00000000000..906cd108140 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n 
transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "call_trace", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5" +} diff --git a/core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json b/core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json new file mode 100644 index 00000000000..4b69afd7e9c --- /dev/null +++ b/core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE miniblocks\n SET\n fee_account_address = (\n SELECT\n l1_batches.fee_account_address\n FROM\n l1_batches\n ORDER BY\n l1_batches.number DESC\n LIMIT\n 1\n )\n WHERE\n l1_batch_number IS NULL\n AND fee_account_address = '\\x0000000000000000000000000000000000000000'::bytea\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df" +} diff --git a/core/lib/dal/.sqlx/query-c8155c4e4701fc771918ac1bb4d16f8cc32e365e2ffbd17dc99885de427f2777.json b/core/lib/dal/.sqlx/query-c8155c4e4701fc771918ac1bb4d16f8cc32e365e2ffbd17dc99885de427f2777.json new file mode 100644 index 00000000000..78582f6242b --- /dev/null +++ b/core/lib/dal/.sqlx/query-c8155c4e4701fc771918ac1bb4d16f8cc32e365e2ffbd17dc99885de427f2777.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n timestamp\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n ORDER BY\n number\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "timestamp", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "c8155c4e4701fc771918ac1bb4d16f8cc32e365e2ffbd17dc99885de427f2777" +} diff --git a/core/lib/dal/.sqlx/query-cddf48514aa2aa249d0530d44c741368993009bb4bd90c2ad177ce56317aa04c.json b/core/lib/dal/.sqlx/query-cddf48514aa2aa249d0530d44c741368993009bb4bd90c2ad177ce56317aa04c.json deleted file mode 100644 index d2087e0a32b..00000000000 --- a/core/lib/dal/.sqlx/query-cddf48514aa2aa249d0530d44c741368993009bb4bd90c2ad177ce56317aa04c.json +++ /dev/null @@ -1,257 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n 
ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 8, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 9, - "name": "parent_hash", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_write_logs", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "compressed_contracts", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "eth_prove_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "eth_commit_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 15, - "name": "eth_execute_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 16, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 18, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 19, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 20, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "l2_l1_compressed_messages", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 27, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 28, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 29, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 30, - "name": "base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 32, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 33, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 34, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 35, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 36, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 37, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 38, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 39, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, 
- true, - true, - true, - true, - true, - false, - false, - false, - true, - true, - true, - true, - false, - false, - true, - true, - true, - true, - false, - true, - true, - true, - false, - true, - true, - true, - true, - true - ] - }, - "hash": "cddf48514aa2aa249d0530d44c741368993009bb4bd90c2ad177ce56317aa04c" -} diff --git a/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json b/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json new file mode 100644 index 00000000000..da21c126347 --- /dev/null +++ b/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "Int8", + "ByteaArray", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57" +} diff --git a/core/lib/dal/.sqlx/query-d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283.json b/core/lib/dal/.sqlx/query-dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c.json similarity index 64% rename from core/lib/dal/.sqlx/query-d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283.json rename to core/lib/dal/.sqlx/query-dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c.json index fd6ed893c23..a98e8e0004c 100644 --- a/core/lib/dal/.sqlx/query-d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283.json +++ b/core/lib/dal/.sqlx/query-dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND 
commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -15,191 +15,166 @@ }, { "ordinal": 2, - "name": "is_finished", - "type_info": "Bool" - }, - { - "ordinal": 3, "name": "l1_tx_count", "type_info": "Int4" }, { - "ordinal": 4, + "ordinal": 3, "name": "l2_tx_count", "type_info": "Int4" }, { - "ordinal": 5, - "name": "fee_account_address", - "type_info": "Bytea" - }, - { - "ordinal": 6, + "ordinal": 4, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 5, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 6, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 7, "name": "parent_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 8, "name": "commitment", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 9, "name": "compressed_write_logs", "type_info": "Bytea" }, { - "ordinal": 12, + "ordinal": 10, "name": "compressed_contracts", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "eth_prove_tx_id", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 12, "name": "eth_commit_tx_id", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 13, "name": "eth_execute_tx_id", "type_info": "Int4" }, { - "ordinal": 16, + "ordinal": 14, "name": "merkle_root_hash", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 15, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 18, + "ordinal": 16, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 19, + "ordinal": 17, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 20, + "ordinal": 18, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 19, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 20, "name": "l2_l1_compressed_messages", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 21, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 24, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 25, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 26, + "ordinal": 22, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 27, + "ordinal": 23, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 28, + "ordinal": 24, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 25, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 30, - "name": 
"base_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 31, + "ordinal": 26, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 32, + "ordinal": 27, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 33, + "ordinal": 28, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 34, + "ordinal": 29, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 35, + "ordinal": 30, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 36, + "ordinal": 31, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 37, + "ordinal": 32, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 38, + "ordinal": 33, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 39, + "ordinal": 34, "name": "pubdata_input", "type_info": "Bytea" } @@ -214,8 +189,6 @@ false, false, false, - false, - false, true, true, true, @@ -232,13 +205,10 @@ true, true, true, - false, - false, true, true, true, true, - false, true, true, true, @@ -250,5 +220,5 @@ true ] }, - "hash": "d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283" + "hash": "dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c" } diff --git a/core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json b/core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json deleted file mode 100644 index a04523bc07b..00000000000 --- a/core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_root_hash,\n last_finished_chunk_id,\n total_chunk_count,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = excluded.l1_batch_number,\n l1_batch_root_hash = excluded.l1_batch_root_hash,\n miniblock_number = excluded.miniblock_number,\n miniblock_root_hash = excluded.miniblock_root_hash,\n last_finished_chunk_id = excluded.last_finished_chunk_id,\n total_chunk_count = excluded.total_chunk_count,\n updated_at = excluded.updated_at\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Int8", - "Bytea", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386" -} diff --git a/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json b/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json new file mode 100644 index 00000000000..5cd05036f98 --- /dev/null +++ b/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT COUNT(*)\n FROM information_schema.columns\n WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address'\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f" +} diff --git a/core/lib/dal/.sqlx/query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json 
b/core/lib/dal/.sqlx/query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json similarity index 86% rename from core/lib/dal/.sqlx/query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json rename to core/lib/dal/.sqlx/query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json index 31ce6f31993..aa4751e12b7 100644 --- a/core/lib/dal/.sqlx/query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json +++ b/core/lib/dal/.sqlx/query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n l1_batches.fee_account_address AS \"fee_account_address?\"\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -65,7 +65,7 @@ }, { "ordinal": 12, - "name": "fee_account_address?", + "name": "fee_account_address!", "type_info": "Bytea" } ], @@ -90,5 +90,5 @@ false ] }, - "hash": "5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1" + "hash": "eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4" } diff --git a/core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json b/core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json new file mode 100644 index 00000000000..250e5beb89a --- /dev/null +++ b/core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_root_hash,\n storage_logs_chunks_processed,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Int8", + "Bytea", + "BoolArray" + ] + }, + "nullable": [] + }, + "hash": "eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b" +} diff --git a/core/lib/dal/.sqlx/query-f2f852a340c45ff69cbca42d7c592dfb0d28a797bee69872634f3105d2d51996.json 
b/core/lib/dal/.sqlx/query-f2f852a340c45ff69cbca42d7c592dfb0d28a797bee69872634f3105d2d51996.json new file mode 100644 index 00000000000..11c409c2180 --- /dev/null +++ b/core/lib/dal/.sqlx/query-f2f852a340c45ff69cbca42d7c592dfb0d28a797bee69872634f3105d2d51996.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE snapshot_recovery\n SET\n storage_logs_chunks_processed[$1] = TRUE,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [] + }, + "hash": "f2f852a340c45ff69cbca42d7c592dfb0d28a797bee69872634f3105d2d51996" +} diff --git a/core/lib/dal/.sqlx/query-f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5.json b/core/lib/dal/.sqlx/query-f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5.json new file mode 100644 index 00000000000..392d44ef63d --- /dev/null +++ b/core/lib/dal/.sqlx/query-f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5.json @@ -0,0 +1,226 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "parent_hash", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 9, + "name": "compressed_write_logs", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "compressed_contracts", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "eth_prove_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "eth_commit_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "eth_execute_tx_id", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "merkle_root_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "l2_to_l1_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 16, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 17, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 18, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": 
"l2_l1_compressed_messages", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 22, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 23, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 24, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 30, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 31, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 33, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 34, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + true + ] + }, + "hash": "f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5" +} diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 6af26113360..7222b4f0ce8 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -18,9 +18,9 @@ zksync_system_constants = { path = "../constants" } zksync_contracts = { path = "../contracts" } zksync_types = { path = "../types" } zksync_health_check = { path = "../health_check" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } itertools = "0.10.1" thiserror = "1.0" @@ -50,9 +50,10 @@ hex = "0.4" once_cell = "1.7" strum = { version = "0.24", features = ["derive"] } tracing = "0.1" +chrono = { version = "0.4", features = ["serde"] } [dev-dependencies] assert_matches = "1.5.0" [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } diff --git 
a/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.down.sql b/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.down.sql
new file mode 100644
index 00000000000..8a0717ff839
--- /dev/null
+++ b/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.down.sql
@@ -0,0 +1,5 @@
+ALTER TABLE l1_batches
+    ALTER COLUMN fee_account_address DROP DEFAULT,
+    ALTER COLUMN is_finished DROP DEFAULT;
+ALTER TABLE miniblocks
+    DROP COLUMN fee_account_address;
diff --git a/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.up.sql b/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.up.sql
new file mode 100644
index 00000000000..e6ec1f49278
--- /dev/null
+++ b/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.up.sql
@@ -0,0 +1,9 @@
+ALTER TABLE miniblocks
+    ADD COLUMN fee_account_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea;
+-- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed.
+
+-- Set default values for columns in `l1_batches` that will be removed, so that INSERTs can work
+-- without setting these columns.
+ALTER TABLE l1_batches
+    ALTER COLUMN fee_account_address SET DEFAULT '\x0000000000000000000000000000000000000000'::bytea,
+    ALTER COLUMN is_finished SET DEFAULT true;
diff --git a/core/lib/dal/migrations/20240110085604_add-l1-batch-circuit-statistic.down.sql b/core/lib/dal/migrations/20240110085604_add-l1-batch-circuit-statistic.down.sql
new file mode 100644
index 00000000000..b2f0caf45ee
--- /dev/null
+++ b/core/lib/dal/migrations/20240110085604_add-l1-batch-circuit-statistic.down.sql
@@ -0,0 +1,2 @@
+ALTER TABLE l1_batches
+    DROP COLUMN IF EXISTS predicted_circuits_by_type;
diff --git a/core/lib/dal/migrations/20240110085604_add-l1-batch-circuit-statistic.up.sql b/core/lib/dal/migrations/20240110085604_add-l1-batch-circuit-statistic.up.sql
new file mode 100644
index 00000000000..836b8535d4e
--- /dev/null
+++ b/core/lib/dal/migrations/20240110085604_add-l1-batch-circuit-statistic.up.sql
@@ -0,0 +1,2 @@
+ALTER TABLE l1_batches
+    ADD COLUMN IF NOT EXISTS predicted_circuits_by_type JSONB;
diff --git a/core/lib/dal/migrations/20240112194527_snapshots_applier_processed_chunks_format_change.down.sql b/core/lib/dal/migrations/20240112194527_snapshots_applier_processed_chunks_format_change.down.sql
new file mode 100644
index 00000000000..598af2037d6
--- /dev/null
+++ b/core/lib/dal/migrations/20240112194527_snapshots_applier_processed_chunks_format_change.down.sql
@@ -0,0 +1,11 @@
+ALTER TABLE snapshot_recovery ADD COLUMN last_finished_chunk_id INT;
+ALTER TABLE snapshot_recovery ADD COLUMN total_chunk_count INT NOT NULL DEFAULT 0;
+
+ALTER TABLE snapshot_recovery DROP COLUMN storage_logs_chunks_processed;
+
+ALTER TABLE factory_deps ADD CONSTRAINT factory_deps_miniblock_number_fkey
+    FOREIGN KEY (miniblock_number) REFERENCES miniblocks (number);
+ALTER TABLE initial_writes ADD CONSTRAINT initial_writes_l1_batch_number_fkey
+    FOREIGN KEY (l1_batch_number) REFERENCES l1_batches (number);
+ALTER TABLE storage_logs ADD CONSTRAINT storage_logs_miniblock_number_fkey
+    FOREIGN KEY (miniblock_number) REFERENCES miniblocks (number);
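
The hunk above also changes how snapshot-recovery progress is tracked: the scalar `(last_finished_chunk_id, total_chunk_count)` cursor is replaced by a `storage_logs_chunks_processed BOOL[]` bitmap (see the up migration in the next hunk), so chunks no longer have to finish strictly in order; the `UPDATE snapshot_recovery SET storage_logs_chunks_processed[$1] = TRUE` query earlier in this diff flips one flag per completed chunk. A minimal sketch of reading progress back under the new representation (an illustrative query, not one shipped in this PR; it assumes the applier initializes the array with one flag per chunk, as the new INSERT query suggests):

```sql
-- Recovery progress under the BOOL[] representation: CARDINALITY gives the
-- total number of chunks, and the positions holding TRUE are the finished ones.
SELECT
    CARDINALITY(ARRAY_POSITIONS(storage_logs_chunks_processed, TRUE)) AS processed_chunks,
    CARDINALITY(storage_logs_chunks_processed) AS total_chunks
FROM snapshot_recovery;
```

diff --git a/core/lib/dal/migrations/20240112194527_snapshots_applier_processed_chunks_format_change.up.sql b/core/lib/dal/migrations/20240112194527_snapshots_applier_processed_chunks_format_change.up.sql
new file mode 100644
index 00000000000..3f484c38638
--- /dev/null
+++ b/core/lib/dal/migrations/20240112194527_snapshots_applier_processed_chunks_format_change.up.sql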
@@ -0,0 +1,8 @@
+ALTER TABLE snapshot_recovery DROP COLUMN last_finished_chunk_id;
+ALTER TABLE snapshot_recovery DROP COLUMN total_chunk_count;
+
+ALTER TABLE snapshot_recovery ADD COLUMN storage_logs_chunks_processed BOOL[] NOT NULL;
+
+ALTER TABLE factory_deps DROP CONSTRAINT factory_deps_miniblock_number_fkey;
+ALTER TABLE initial_writes DROP CONSTRAINT initial_writes_l1_batch_number_fkey;
+ALTER TABLE storage_logs DROP CONSTRAINT storage_logs_miniblock_number_fkey;
diff --git a/core/lib/dal/migrations/20240129133815_revert_removing_snapshot_recovery_columns.down.sql b/core/lib/dal/migrations/20240129133815_revert_removing_snapshot_recovery_columns.down.sql
new file mode 100644
index 00000000000..5acb2419084
--- /dev/null
+++ b/core/lib/dal/migrations/20240129133815_revert_removing_snapshot_recovery_columns.down.sql
@@ -0,0 +1,3 @@
+ALTER TABLE snapshot_recovery
+    DROP COLUMN IF EXISTS last_finished_chunk_id,
+    DROP COLUMN IF EXISTS total_chunk_count;
diff --git a/core/lib/dal/migrations/20240129133815_revert_removing_snapshot_recovery_columns.up.sql b/core/lib/dal/migrations/20240129133815_revert_removing_snapshot_recovery_columns.up.sql
new file mode 100644
index 00000000000..d64008321e5
--- /dev/null
+++ b/core/lib/dal/migrations/20240129133815_revert_removing_snapshot_recovery_columns.up.sql
@@ -0,0 +1,5 @@
+-- Temporarily restore the columns dropped by the previous `snapshot_recovery` migration
+-- so that it remains backward-compatible. Do not use these columns in code.
+ALTER TABLE snapshot_recovery
+    ADD COLUMN IF NOT EXISTS last_finished_chunk_id INT,
+    ADD COLUMN IF NOT EXISTS total_chunk_count INT NOT NULL DEFAULT 0;
diff --git a/core/lib/dal/migrations/20240131123456_add_consensus_fields_for_miniblocks.down.sql b/core/lib/dal/migrations/20240131123456_add_consensus_fields_for_miniblocks.down.sql
new file mode 100644
index 00000000000..701c5e60854
--- /dev/null
+++ b/core/lib/dal/migrations/20240131123456_add_consensus_fields_for_miniblocks.down.sql
@@ -0,0 +1,2 @@
+ALTER TABLE miniblocks
+    DROP COLUMN IF EXISTS consensus;
diff --git a/core/lib/dal/migrations/20240131123456_add_consensus_fields_for_miniblocks.up.sql b/core/lib/dal/migrations/20240131123456_add_consensus_fields_for_miniblocks.up.sql
new file mode 100644
index 00000000000..cdfd74990ea
--- /dev/null
+++ b/core/lib/dal/migrations/20240131123456_add_consensus_fields_for_miniblocks.up.sql
@@ -0,0 +1,2 @@
+ALTER TABLE miniblocks
+    ADD COLUMN consensus JSONB NULL;
diff --git a/core/lib/dal/migrations/20240202150009_transaction_batch_index.down.sql b/core/lib/dal/migrations/20240202150009_transaction_batch_index.down.sql
new file mode 100644
index 00000000000..56795bbe282
--- /dev/null
+++ b/core/lib/dal/migrations/20240202150009_transaction_batch_index.down.sql
@@ -0,0 +1,3 @@
+DROP INDEX IF EXISTS transactions_l1_batch_number_idx;
+DROP INDEX IF EXISTS aggregated_proof_from_l1_batch_index;
+DROP INDEX IF EXISTS aggregated_proof_to_l1_batch_index;
\ No newline at end of file
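
The matching up migration in the next hunk creates these as plain B-tree indexes on `transactions` and `aggregated_proof`. A quick, hypothetical sanity check (not part of this PR) that the planner actually uses the new transaction index once the migration has run:

```sql
-- With transactions_l1_batch_number_idx in place, looking up transactions by
-- batch should produce an Index Scan or Bitmap Index Scan rather than a Seq Scan.
EXPLAIN
SELECT hash
FROM transactions
WHERE l1_batch_number = 100;
```

diff --git a/core/lib/dal/migrations/20240202150009_transaction_batch_index.up.sql b/core/lib/dal/migrations/20240202150009_transaction_batch_index.up.sql
new file mode 100644
index 00000000000..7362bcd006c
--- /dev/null
+++ b/core/lib/dal/migrations/20240202150009_transaction_batch_index.up.sql
@@ -0,0 +1,3 @@
+CREATE INDEX IF NOT EXISTS transactions_l1_batch_number_idx ON transactions (l1_batch_number);
+CREATE INDEX IF NOT EXISTS aggregated_proof_from_l1_batch_index ON 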
aggregated_proof (from_block_number); +CREATE INDEX IF NOT EXISTS aggregated_proof_to_l1_batch_index ON aggregated_proof (to_block_number); diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 87195d965ad..ee7598d6ed3 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -10,8 +10,10 @@ use sqlx::Row; use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, + circuit::CircuitStatistic, commitment::{L1BatchMetadata, L1BatchWithMetadata}, - Address, L1BatchNumber, LogQuery, MiniblockNumber, ProtocolVersionId, H256, U256, + zk_evm_types::LogQuery, + Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, U256, }; use crate::{ @@ -48,8 +50,6 @@ impl BlocksDal<'_, '_> { MAX(number) AS "number" FROM l1_batches - WHERE - is_finished = TRUE "# ) .instrument("get_sealed_block_number") @@ -151,16 +151,11 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, - is_finished, - fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, - base_fee_per_gas, - l1_gas_price, - l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, @@ -194,10 +189,8 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -216,13 +209,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -258,16 +248,11 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, - is_finished, - fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, - base_fee_per_gas, - l1_gas_price, - l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, @@ -447,7 +432,7 @@ impl BlocksDal<'_, '_> { predicted_block_gas: BlockGasCount, events_queue: &[LogQuery], storage_refunds: &[u32], - predicted_circuits: u32, + predicted_circuits_by_type: CircuitStatistic, // predicted number of circuits for each circuit type ) -> anyhow::Result<()> { let priority_onchain_data: Vec> = header .priority_ops_onchain_data @@ -474,8 +459,6 @@ impl BlocksDal<'_, '_> { // Serialization should always succeed. 
let used_contract_hashes = serde_json::to_value(&header.used_contract_hashes) .expect("failed to serialize used_contract_hashes to JSON value"); - let base_fee_per_gas = BigDecimal::from_u64(header.base_fee_per_gas) - .context("block.base_fee_per_gas should fit in u64")?; let storage_refunds: Vec<_> = storage_refunds.iter().map(|n| *n as i64).collect(); let mut transaction = self.storage.start_transaction().await?; @@ -487,8 +470,6 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, - is_finished, - fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, @@ -498,16 +479,13 @@ impl BlocksDal<'_, '_> { predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, - base_fee_per_gas, - l1_gas_price, - l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, system_logs, storage_refunds, pubdata_input, - predicted_circuits, + predicted_circuits_by_type, created_at, updated_at ) @@ -533,11 +511,6 @@ impl BlocksDal<'_, '_> { $18, $19, $20, - $21, - $22, - $23, - $24, - $25, NOW(), NOW() ) @@ -546,8 +519,6 @@ impl BlocksDal<'_, '_> { header.l1_tx_count as i32, header.l2_tx_count as i32, header.timestamp as i64, - header.is_finished, - header.fee_account_address.as_bytes(), &l2_to_l1_logs, &header.l2_to_l1_messages, header.bloom.as_bytes(), @@ -557,16 +528,13 @@ impl BlocksDal<'_, '_> { predicted_block_gas.execute as i64, initial_bootloader_contents, used_contract_hashes, - base_fee_per_gas, - header.l1_gas_price as i64, - header.l2_fair_gas_price as i64, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, pubdata_input, - predicted_circuits as i32, + serde_json::to_value(predicted_circuits_by_type).unwrap(), ) .execute(transaction.conn()) .await?; @@ -604,6 +572,7 @@ impl BlocksDal<'_, '_> { hash, l1_tx_count, l2_tx_count, + fee_account_address, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, @@ -617,13 +586,14 @@ impl BlocksDal<'_, '_> { updated_at ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, NOW(), NOW()) + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, NOW(), NOW()) "#, miniblock_header.number.0 as i64, miniblock_header.timestamp as i64, miniblock_header.hash.as_bytes(), miniblock_header.l1_tx_count as i32, miniblock_header.l2_tx_count as i32, + miniblock_header.fee_account_address.as_bytes(), base_fee_per_gas, miniblock_header.batch_fee_input.l1_gas_price() as i64, miniblock_header.batch_fee_input.fair_l2_gas_price() as i64, @@ -648,7 +618,7 @@ impl BlocksDal<'_, '_> { pub async fn get_last_sealed_miniblock_header( &mut self, ) -> sqlx::Result> { - Ok(sqlx::query_as!( + let header = sqlx::query_as!( StorageMiniblockHeader, r#" SELECT @@ -657,6 +627,7 @@ impl BlocksDal<'_, '_> { hash, l1_tx_count, l2_tx_count, + fee_account_address AS "fee_account_address!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, @@ -675,15 +646,25 @@ impl BlocksDal<'_, '_> { "#, ) .fetch_optional(self.storage.conn()) - .await? 
- .map(Into::into)) + .await?; + + let Some(header) = header else { + return Ok(None); + }; + let mut header = MiniblockHeader::from(header); + // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration + #[allow(deprecated)] + self.maybe_load_fee_address(&mut header.fee_account_address, header.number) + .await?; + + Ok(Some(header)) } pub async fn get_miniblock_header( &mut self, miniblock_number: MiniblockNumber, ) -> sqlx::Result> { - Ok(sqlx::query_as!( + let header = sqlx::query_as!( StorageMiniblockHeader, r#" SELECT @@ -692,6 +673,7 @@ impl BlocksDal<'_, '_> { hash, l1_tx_count, l2_tx_count, + fee_account_address AS "fee_account_address!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, @@ -709,8 +691,18 @@ impl BlocksDal<'_, '_> { miniblock_number.0 as i64, ) .fetch_optional(self.storage.conn()) - .await? - .map(Into::into)) + .await?; + + let Some(header) = header else { + return Ok(None); + }; + let mut header = MiniblockHeader::from(header); + // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration + #[allow(deprecated)] + self.maybe_load_fee_address(&mut header.fee_account_address, header.number) + .await?; + + Ok(Some(header)) } pub async fn mark_miniblocks_as_executed_in_l1_batch( @@ -935,10 +927,8 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -957,13 +947,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1121,10 +1108,8 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1143,13 +1128,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1234,10 +1216,8 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1256,13 +1236,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1321,10 +1298,8 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1343,13 +1318,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1460,10 +1432,8 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1482,13 +1452,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, 
l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1538,10 +1505,8 @@ impl BlocksDal<'_, '_> { SELECT number, l1_batches.timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1560,13 +1525,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1626,10 +1588,8 @@ impl BlocksDal<'_, '_> { SELECT number, l1_batches.timestamp, - is_finished, l1_tx_count, l2_tx_count, - fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1648,13 +1608,10 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, - l1_gas_price, - l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, - base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1811,6 +1768,31 @@ impl BlocksDal<'_, '_> { .collect()) } + pub async fn delete_initial_writes( + &mut self, + last_batch_to_keep: L1BatchNumber, + ) -> sqlx::Result<()> { + self.delete_initial_writes_inner(Some(last_batch_to_keep)) + .await + } + + pub async fn delete_initial_writes_inner( + &mut self, + last_batch_to_keep: Option, + ) -> sqlx::Result<()> { + let block_number = last_batch_to_keep.map_or(-1, |number| number.0 as i64); + sqlx::query!( + r#" + DELETE FROM initial_writes + WHERE + l1_batch_number > $1 + "#, + block_number + ) + .execute(self.storage.conn()) + .await?; + Ok(()) + } /// Deletes all L1 batches from the storage so that the specified batch number is the last one left. pub async fn delete_l1_batches( &mut self, @@ -2107,24 +2089,44 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn get_fee_address_for_l1_batch( + pub async fn get_fee_address_for_miniblock( &mut self, - l1_batch_number: L1BatchNumber, + number: MiniblockNumber, ) -> sqlx::Result> { - Ok(sqlx::query!( + let Some(mut fee_account_address) = self.raw_fee_address_for_miniblock(number).await? + else { + return Ok(None); + }; + + // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration + #[allow(deprecated)] + self.maybe_load_fee_address(&mut fee_account_address, number) + .await?; + Ok(Some(fee_account_address)) + } + + async fn raw_fee_address_for_miniblock( + &mut self, + number: MiniblockNumber, + ) -> sqlx::Result> { + let Some(row) = sqlx::query!( r#" SELECT fee_account_address FROM - l1_batches + miniblocks WHERE number = $1 "#, - l1_batch_number.0 as i32 + number.0 as i32 ) .fetch_optional(self.storage.conn()) .await? - .map(|row| Address::from_slice(&row.fee_account_address))) + else { + return Ok(None); + }; + + Ok(Some(Address::from_slice(&row.fee_account_address))) } pub async fn get_virtual_blocks_for_miniblock( @@ -2148,7 +2150,149 @@ impl BlocksDal<'_, '_> { } } -/// These functions should only be used for tests. +/// Temporary methods for migrating `fee_account_address`. 
+#[deprecated(note = "will be removed after the fee address migration is complete")] +impl BlocksDal<'_, '_> { + pub(crate) async fn maybe_load_fee_address( + &mut self, + fee_address: &mut Address, + miniblock_number: MiniblockNumber, + ) -> sqlx::Result<()> { + if *fee_address != Address::default() { + return Ok(()); + } + + // This clause should be triggered only for non-migrated miniblock rows. After `fee_account_address` + // is filled for all miniblocks, it won't be called; thus, `fee_account_address` column could be removed + // from `l1_batches` even with this code present. + let Some(row) = sqlx::query!( + r#" + SELECT + l1_batches.fee_account_address + FROM + l1_batches + INNER JOIN miniblocks ON miniblocks.l1_batch_number = l1_batches.number + WHERE + miniblocks.number = $1 + "#, + miniblock_number.0 as i32 + ) + .fetch_optional(self.storage.conn()) + .await? + else { + return Ok(()); + }; + + *fee_address = Address::from_slice(&row.fee_account_address); + Ok(()) + } + + /// Checks whether `fee_account_address` is migrated for the specified miniblock. Returns + /// `Ok(None)` if the miniblock doesn't exist. + pub async fn is_fee_address_migrated( + &mut self, + number: MiniblockNumber, + ) -> sqlx::Result> { + Ok(self + .raw_fee_address_for_miniblock(number) + .await? + .map(|address| address != Address::default())) + } + + /// Copies `fee_account_address` for pending miniblocks (ones without an associated L1 batch) + /// from the last L1 batch. Returns the number of affected rows. + pub async fn copy_fee_account_address_for_pending_miniblocks(&mut self) -> sqlx::Result { + let execution_result = sqlx::query!( + r#" + UPDATE miniblocks + SET + fee_account_address = ( + SELECT + l1_batches.fee_account_address + FROM + l1_batches + ORDER BY + l1_batches.number DESC + LIMIT + 1 + ) + WHERE + l1_batch_number IS NULL + AND fee_account_address = '\x0000000000000000000000000000000000000000'::bytea + "# + ) + .execute(self.storage.conn()) + .await?; + + Ok(execution_result.rows_affected()) + } + + pub async fn check_l1_batches_have_fee_account_address(&mut self) -> sqlx::Result { + let count = sqlx::query_scalar!( + r#" + SELECT COUNT(*) + FROM information_schema.columns + WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address' + "# + ) + .fetch_one(self.storage.conn()) + .await? + .unwrap_or(0); + + Ok(count > 0) + } + + /// Copies `fee_account_address` for miniblocks in the given range from the L1 batch they belong to. + /// Returns the number of affected rows. + pub async fn copy_fee_account_address_for_miniblocks( + &mut self, + numbers: ops::RangeInclusive, + ) -> sqlx::Result { + let execution_result = sqlx::query!( + r#" + UPDATE miniblocks + SET + fee_account_address = l1_batches.fee_account_address + FROM + l1_batches + WHERE + l1_batches.number = miniblocks.l1_batch_number + AND miniblocks.number BETWEEN $1 AND $2 + AND miniblocks.fee_account_address = '\x0000000000000000000000000000000000000000'::bytea + "#, + numbers.start().0 as i64, + numbers.end().0 as i64 + ) + .execute(self.storage.conn()) + .await?; + + Ok(execution_result.rows_affected()) + } + + /// Sets `fee_account_address` for an L1 batch. Should only be used in tests. 
+ pub async fn set_l1_batch_fee_address( + &mut self, + l1_batch: L1BatchNumber, + fee_account_address: Address, + ) -> sqlx::Result<()> { + sqlx::query!( + r#" + UPDATE l1_batches + SET + fee_account_address = $1::bytea + WHERE + number = $2 + "#, + fee_account_address.as_bytes(), + l1_batch.0 as i64 + ) + .execute(self.storage.conn()) + .await?; + Ok(()) + } +} + +/// These methods should only be used for tests. impl BlocksDal<'_, '_> { // The actual l1 batch hash is only set by the metadata calculator. pub async fn set_l1_batch_hash( @@ -2172,6 +2316,18 @@ impl BlocksDal<'_, '_> { Ok(()) } + pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { + self.insert_l1_batch( + header, + &[], + Default::default(), + &[], + &[], + Default::default(), + ) + .await + } + /// Deletes all miniblocks and L1 batches, including the genesis ones. Should only be used in tests. pub async fn delete_genesis(&mut self) -> anyhow::Result<()> { self.delete_miniblocks_inner(None) @@ -2180,6 +2336,9 @@ impl BlocksDal<'_, '_> { self.delete_l1_batches_inner(None) .await .context("delete_l1_batches_inner()")?; + self.delete_initial_writes_inner(None) + .await + .context("delete_initial_writes_inner()")?; Ok(()) } } @@ -2193,16 +2352,12 @@ mod tests { }; use super::*; - use crate::ConnectionPool; + use crate::{tests::create_miniblock_header, ConnectionPool}; #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); - conn.blocks_dal() - .delete_l1_batches(L1BatchNumber(0)) - .await - .unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; @@ -2210,7 +2365,6 @@ mod tests { let mut header = L1BatchHeader::new( L1BatchNumber(1), 100, - Address::default(), BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), @@ -2231,7 +2385,7 @@ mod tests { header.l2_to_l1_messages.push(vec![33; 33]); conn.blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&header) .await .unwrap(); @@ -2260,17 +2414,12 @@ mod tests { async fn getting_predicted_gas() { let pool = ConnectionPool::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); - conn.blocks_dal() - .delete_l1_batches(L1BatchNumber(0)) - .await - .unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let mut header = L1BatchHeader::new( L1BatchNumber(1), 100, - Address::default(), BaseSystemContractsHashes::default(), ProtocolVersionId::default(), ); @@ -2280,7 +2429,7 @@ mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], 0) + .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); @@ -2288,7 +2437,7 @@ mod tests { header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], 0) + .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); @@ -2320,4 +2469,144 @@ mod tests { assert_eq!(gas, 3 * expected_gas); } } + + #[allow(deprecated)] // that's the whole point + #[tokio::test] + async fn checking_fee_account_address_in_l1_batches() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + assert!(conn + .blocks_dal() + .check_l1_batches_have_fee_account_address() + .await + .unwrap()); + } 
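// [Editor's note] A minimal sketch of how the first phase of the `fee_account_address`
// migration above might be driven. The chunked loop, `CHUNK_SIZE`, and the `last` bound
// are illustrative assumptions, not part of this PR; only the two
// `copy_fee_account_address_*` DAL methods come from this diff.
//
//     const CHUNK_SIZE: u32 = 1_000;
//     let mut from = MiniblockNumber(0);
//     while from <= last {
//         let to = last.min(MiniblockNumber(from.0 + CHUNK_SIZE - 1));
//         conn.blocks_dal()
//             .copy_fee_account_address_for_miniblocks(from..=to)
//             .await?;
//         from = MiniblockNumber(to.0 + 1);
//     }
//     // Pending miniblocks have no L1 batch yet and are backfilled separately:
//     conn.blocks_dal()
//         .copy_fee_account_address_for_pending_miniblocks()
//         .await?;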
+
+    #[allow(deprecated)] // that's the whole point
+    #[tokio::test]
+    async fn ensuring_fee_account_address_for_miniblocks() {
+        let pool = ConnectionPool::test_pool().await;
+        let mut conn = pool.access_storage().await.unwrap();
+        conn.protocol_versions_dal()
+            .save_protocol_version_with_tx(ProtocolVersion::default())
+            .await;
+
+        for number in [1, 2] {
+            let l1_batch = L1BatchHeader::new(
+                L1BatchNumber(number),
+                100,
+                BaseSystemContractsHashes {
+                    bootloader: H256::repeat_byte(1),
+                    default_aa: H256::repeat_byte(42),
+                },
+                ProtocolVersionId::latest(),
+            );
+            let miniblock = MiniblockHeader {
+                fee_account_address: Address::default(),
+                ..create_miniblock_header(number)
+            };
+            conn.blocks_dal()
+                .insert_miniblock(&miniblock)
+                .await
+                .unwrap();
+            conn.blocks_dal()
+                .insert_mock_l1_batch(&l1_batch)
+                .await
+                .unwrap();
+            conn.blocks_dal()
+                .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(number))
+                .await
+                .unwrap();
+
+            assert_eq!(
+                conn.blocks_dal()
+                    .is_fee_address_migrated(miniblock.number)
+                    .await
+                    .unwrap(),
+                Some(false)
+            );
+        }
+
+        // Manually set `fee_account_address` for the inserted L1 batches.
+        conn.blocks_dal()
+            .set_l1_batch_fee_address(L1BatchNumber(1), Address::repeat_byte(0x23))
+            .await
+            .unwrap();
+        conn.blocks_dal()
+            .set_l1_batch_fee_address(L1BatchNumber(2), Address::repeat_byte(0x42))
+            .await
+            .unwrap();
+
+        // Add a pending miniblock.
+        let miniblock = MiniblockHeader {
+            fee_account_address: Address::default(),
+            ..create_miniblock_header(3)
+        };
+        conn.blocks_dal()
+            .insert_miniblock(&miniblock)
+            .await
+            .unwrap();
+
+        let rows_affected = conn
+            .blocks_dal()
+            .copy_fee_account_address_for_miniblocks(MiniblockNumber(0)..=MiniblockNumber(100))
+            .await
+            .unwrap();
+
+        assert_eq!(rows_affected, 2);
+        let first_miniblock_addr = conn
+            .blocks_dal()
+            .raw_fee_address_for_miniblock(MiniblockNumber(1))
+            .await
+            .unwrap()
+            .expect("No fee address for block #1");
+        assert_eq!(first_miniblock_addr, Address::repeat_byte(0x23));
+        let second_miniblock_addr = conn
+            .blocks_dal()
+            .raw_fee_address_for_miniblock(MiniblockNumber(2))
+            .await
+            .unwrap()
+            .expect("No fee address for block #2");
+        assert_eq!(second_miniblock_addr, Address::repeat_byte(0x42));
+        // The pending miniblock should not be affected.
+ let pending_miniblock_addr = conn + .blocks_dal() + .raw_fee_address_for_miniblock(MiniblockNumber(3)) + .await + .unwrap() + .expect("No fee address for block #3"); + assert_eq!(pending_miniblock_addr, Address::default()); + assert_eq!( + conn.blocks_dal() + .is_fee_address_migrated(MiniblockNumber(3)) + .await + .unwrap(), + Some(false) + ); + + let rows_affected = conn + .blocks_dal() + .copy_fee_account_address_for_pending_miniblocks() + .await + .unwrap(); + assert_eq!(rows_affected, 1); + + let pending_miniblock_addr = conn + .blocks_dal() + .raw_fee_address_for_miniblock(MiniblockNumber(3)) + .await + .unwrap() + .expect("No fee address for block #3"); + assert_eq!(pending_miniblock_addr, Address::repeat_byte(0x42)); + + for number in 1..=3 { + assert_eq!( + conn.blocks_dal() + .is_fee_address_migrated(MiniblockNumber(number)) + .await + .unwrap(), + Some(true) + ); + } + } } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 3406da9bb09..c03352937d2 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -3,7 +3,6 @@ use sqlx::Row; use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, - ethabi::Address, l2_to_l1_log::L2ToL1Log, vm_trace::Call, web3::types::{BlockHeader, U64}, @@ -62,7 +61,8 @@ impl BlocksWeb3Dal<'_, '_> { ON l1_batches.number = miniblocks.l1_batch_number LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number - WHERE {}", + WHERE {} + ORDER BY transactions.index_in_block ASC", transactions_sql, web3_block_where_sql(block_id, 1) ); @@ -258,31 +258,67 @@ impl BlocksWeb3Dal<'_, '_> { &mut self, l1_batch_number: &ResolvedL1BatchForMiniblock, ) -> sqlx::Result> { - let timestamp = sqlx::query!( - r#" - SELECT - timestamp - FROM - miniblocks - WHERE - ( - $1::BIGINT IS NULL - AND l1_batch_number IS NULL - ) - OR (l1_batch_number = $1::BIGINT) - ORDER BY - number - LIMIT - 1 - "#, - l1_batch_number - .miniblock_l1_batch - .map(|number| i64::from(number.0)) - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.timestamp as u64); - Ok(timestamp) + if let Some(miniblock_l1_batch) = l1_batch_number.miniblock_l1_batch { + Ok(sqlx::query!( + r#" + SELECT + timestamp + FROM + miniblocks + WHERE + l1_batch_number = $1 + ORDER BY + number + LIMIT + 1 + "#, + i64::from(miniblock_l1_batch.0) + ) + .fetch_optional(self.storage.conn()) + .await? + .map(|row| row.timestamp as u64)) + } else { + // Got a pending miniblock. Searching the timestamp of the first pending miniblock using + // `WHERE l1_batch_number IS NULL` is slow since it potentially locks the `miniblocks` table. + // Instead, we determine its number using the previous L1 batch, taking into the account that + // it may be stored in the `snapshot_recovery` table. + let prev_l1_batch_number = if l1_batch_number.pending_l1_batch == L1BatchNumber(0) { + return Ok(None); // We haven't created the genesis miniblock yet + } else { + l1_batch_number.pending_l1_batch - 1 + }; + Ok(sqlx::query!( + r#" + SELECT + timestamp + FROM + miniblocks + WHERE + number = COALESCE( + ( + SELECT + MAX(number) + 1 + FROM + miniblocks + WHERE + l1_batch_number = $1 + ), + ( + SELECT + MAX(miniblock_number) + 1 + FROM + snapshot_recovery + WHERE + l1_batch_number = $1 + ) + ) + "#, + i64::from(prev_l1_batch_number.0) + ) + .fetch_optional(self.storage.conn()) + .await? 
+ .map(|row| row.timestamp as u64)) + } } pub async fn get_miniblock_hash( @@ -411,7 +447,8 @@ impl BlocksWeb3Dal<'_, '_> { Ok(result) } - pub async fn get_trace_for_miniblock( + /// Returns call traces for all transactions in the specified miniblock in the order of their execution. + pub async fn get_traces_for_miniblock( &mut self, block_number: MiniblockNumber, ) -> sqlx::Result> { @@ -419,18 +456,14 @@ impl BlocksWeb3Dal<'_, '_> { CallTrace, r#" SELECT - * + call_trace FROM call_traces + INNER JOIN transactions ON tx_hash = transactions.hash WHERE - tx_hash IN ( - SELECT - hash - FROM - transactions - WHERE - miniblock_number = $1 - ) + transactions.miniblock_number = $1 + ORDER BY + transactions.index_in_block "#, block_number.0 as i64 ) @@ -476,122 +509,139 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_block_details( &mut self, block_number: MiniblockNumber, - current_operator_address: Address, ) -> sqlx::Result> { - { - let storage_block_details = sqlx::query_as!( - StorageBlockDetails, - r#" - SELECT - miniblocks.number, - COALESCE( - miniblocks.l1_batch_number, - ( - SELECT - (MAX(number) + 1) - FROM - l1_batches - ) - ) AS "l1_batch_number!", - miniblocks.timestamp, - miniblocks.l1_tx_count, - miniblocks.l2_tx_count, - miniblocks.hash AS "root_hash?", - commit_tx.tx_hash AS "commit_tx_hash?", - commit_tx.confirmed_at AS "committed_at?", - prove_tx.tx_hash AS "prove_tx_hash?", - prove_tx.confirmed_at AS "proven_at?", - execute_tx.tx_hash AS "execute_tx_hash?", - execute_tx.confirmed_at AS "executed_at?", - miniblocks.l1_gas_price, - miniblocks.l2_fair_gas_price, - miniblocks.bootloader_code_hash, - miniblocks.default_aa_code_hash, - miniblocks.protocol_version, - l1_batches.fee_account_address AS "fee_account_address?" - FROM - miniblocks - LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - LEFT JOIN eth_txs_history AS commit_tx ON ( - l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id - AND commit_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS prove_tx ON ( - l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id - AND prove_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS execute_tx ON ( - l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id - AND execute_tx.confirmed_at IS NOT NULL + let storage_block_details = sqlx::query_as!( + StorageBlockDetails, + r#" + SELECT + miniblocks.number, + COALESCE( + miniblocks.l1_batch_number, + ( + SELECT + (MAX(number) + 1) + FROM + l1_batches ) - WHERE - miniblocks.number = $1 - "#, - block_number.0 as i64 - ) - .instrument("get_block_details") - .with_arg("block_number", &block_number) - .report_latency() - .fetch_optional(self.storage.conn()) - .await?; + ) AS "l1_batch_number!", + miniblocks.timestamp, + miniblocks.l1_tx_count, + miniblocks.l2_tx_count, + miniblocks.hash AS "root_hash?", + commit_tx.tx_hash AS "commit_tx_hash?", + commit_tx.confirmed_at AS "committed_at?", + prove_tx.tx_hash AS "prove_tx_hash?", + prove_tx.confirmed_at AS "proven_at?", + execute_tx.tx_hash AS "execute_tx_hash?", + execute_tx.confirmed_at AS "executed_at?", + miniblocks.l1_gas_price, + miniblocks.l2_fair_gas_price, + miniblocks.bootloader_code_hash, + miniblocks.default_aa_code_hash, + miniblocks.protocol_version, + miniblocks.fee_account_address + FROM + miniblocks + LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number + LEFT JOIN eth_txs_history AS commit_tx ON ( + l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id + AND commit_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS 
prove_tx ON ( + l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id + AND prove_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS execute_tx ON ( + l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id + AND execute_tx.confirmed_at IS NOT NULL + ) + WHERE + miniblocks.number = $1 + "#, + block_number.0 as i64 + ) + .instrument("get_block_details") + .with_arg("block_number", &block_number) + .report_latency() + .fetch_optional(self.storage.conn()) + .await?; - Ok(storage_block_details.map(|storage_block_details| { - storage_block_details.into_block_details(current_operator_address) - })) - } + let Some(storage_block_details) = storage_block_details else { + return Ok(None); + }; + let mut details = api::BlockDetails::from(storage_block_details); + + // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration + #[allow(deprecated)] + self.storage + .blocks_dal() + .maybe_load_fee_address(&mut details.operator_address, details.number) + .await?; + Ok(Some(details)) } pub async fn get_l1_batch_details( &mut self, l1_batch_number: L1BatchNumber, ) -> sqlx::Result> { - { - let l1_batch_details: Option = sqlx::query_as!( - StorageL1BatchDetails, - r#" - SELECT - l1_batches.number, - l1_batches.timestamp, - l1_batches.l1_tx_count, - l1_batches.l2_tx_count, - l1_batches.hash AS "root_hash?", - commit_tx.tx_hash AS "commit_tx_hash?", - commit_tx.confirmed_at AS "committed_at?", - prove_tx.tx_hash AS "prove_tx_hash?", - prove_tx.confirmed_at AS "proven_at?", - execute_tx.tx_hash AS "execute_tx_hash?", - execute_tx.confirmed_at AS "executed_at?", - l1_batches.l1_gas_price, - l1_batches.l2_fair_gas_price, - l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash - FROM - l1_batches - LEFT JOIN eth_txs_history AS commit_tx ON ( - l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id - AND commit_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS prove_tx ON ( - l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id - AND prove_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS execute_tx ON ( - l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id - AND execute_tx.confirmed_at IS NOT NULL - ) - WHERE - l1_batches.number = $1 - "#, - l1_batch_number.0 as i64 - ) - .instrument("get_l1_batch_details") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage.conn()) - .await?; + let l1_batch_details: Option = sqlx::query_as!( + StorageL1BatchDetails, + r#" + WITH + mb AS ( + SELECT + l1_gas_price, + l2_fair_gas_price + FROM + miniblocks + WHERE + l1_batch_number = $1 + LIMIT + 1 + ) + SELECT + l1_batches.number, + l1_batches.timestamp, + l1_batches.l1_tx_count, + l1_batches.l2_tx_count, + l1_batches.hash AS "root_hash?", + commit_tx.tx_hash AS "commit_tx_hash?", + commit_tx.confirmed_at AS "committed_at?", + prove_tx.tx_hash AS "prove_tx_hash?", + prove_tx.confirmed_at AS "proven_at?", + execute_tx.tx_hash AS "execute_tx_hash?", + execute_tx.confirmed_at AS "executed_at?", + mb.l1_gas_price, + mb.l2_fair_gas_price, + l1_batches.bootloader_code_hash, + l1_batches.default_aa_code_hash + FROM + l1_batches + INNER JOIN mb ON TRUE + LEFT JOIN eth_txs_history AS commit_tx ON ( + l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id + AND commit_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS prove_tx ON ( + l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id + AND prove_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS execute_tx ON ( + l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id + AND 
execute_tx.confirmed_at IS NOT NULL + ) + WHERE + l1_batches.number = $1 + "#, + l1_batch_number.0 as i64 + ) + .instrument("get_l1_batch_details") + .with_arg("l1_batch_number", &l1_batch_number) + .report_latency() + .fetch_optional(self.storage.conn()) + .await?; - Ok(l1_batch_details.map(api::L1BatchDetails::from)) - } + Ok(l1_batch_details.map(Into::into)) } } @@ -599,12 +649,16 @@ impl BlocksWeb3Dal<'_, '_> { mod tests { use zksync_types::{ block::{MiniblockHasher, MiniblockHeader}, + fee::TransactionExecutionMetrics, snapshots::SnapshotRecoveryStatus, - MiniblockNumber, ProtocolVersion, ProtocolVersionId, + Address, MiniblockNumber, ProtocolVersion, ProtocolVersionId, }; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{ + tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + ConnectionPool, + }; #[tokio::test] async fn getting_web3_block_and_tx_count() { @@ -766,11 +820,10 @@ mod tests { l1_batch_root_hash: H256::zero(), miniblock_number: MiniblockNumber(42), miniblock_root_hash: H256::zero(), - last_finished_chunk_id: None, - total_chunk_count: 100, + storage_logs_chunks_processed: vec![true; 100], }; conn.snapshot_recovery_dal() - .set_applied_snapshot_status(&snapshot_recovery) + .insert_initial_recovery_status(&snapshot_recovery) .await .unwrap(); @@ -786,10 +839,6 @@ mod tests { async fn resolving_block_by_hash() { let connection_pool = ConnectionPool::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); - conn.blocks_dal() - .delete_miniblocks(MiniblockNumber(0)) - .await - .unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; @@ -814,4 +863,47 @@ mod tests { .await; assert_eq!(miniblock_number.unwrap(), None); } + + #[tokio::test] + async fn getting_traces_for_block() { + let connection_pool = ConnectionPool::test_pool().await; + let mut conn = connection_pool.access_storage().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(1)) + .await + .unwrap(); + + let transactions = [mock_l2_transaction(), mock_l2_transaction()]; + let mut tx_results = vec![]; + for (i, tx) in transactions.into_iter().enumerate() { + conn.transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + let mut tx_result = mock_execution_result(tx); + tx_result.call_traces.push(Call { + from: Address::from_low_u64_be(i as u64), + to: Address::from_low_u64_be(i as u64 + 1), + value: i.into(), + ..Call::default() + }); + tx_results.push(tx_result); + } + conn.transactions_dal() + .mark_txs_as_executed_in_miniblock(MiniblockNumber(1), &tx_results, 1.into()) + .await; + + let traces = conn + .blocks_web3_dal() + .get_traces_for_miniblock(MiniblockNumber(1)) + .await + .unwrap(); + assert_eq!(traces.len(), 2); + for (trace, tx_result) in traces.iter().zip(&tx_results) { + let expected_trace = tx_result.call_trace().unwrap(); + assert_eq!(*trace, expected_trace); + } + } } diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index c53541166d3..854e0e0871a 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,7 +1,7 @@ use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; -use zksync_types::{Address, MiniblockNumber}; +use zksync_types::MiniblockNumber; pub use 
crate::models::storage_sync::Payload; use crate::StorageProcessor; @@ -134,7 +134,6 @@ impl ConsensusDal<'_, '_> { pub async fn block_payload( &mut self, block_number: validator::BlockNumber, - operator_address: Address, ) -> anyhow::Result> { let block_number = MiniblockNumber(block_number.0.try_into()?); let Some(block) = self @@ -150,7 +149,7 @@ impl ConsensusDal<'_, '_> { .transactions_web3_dal() .get_raw_miniblock_transactions(block_number) .await?; - Ok(Some(block.into_payload(operator_address, transactions))) + Ok(Some(block.into_payload(transactions))) } /// Inserts a certificate for the miniblock `cert.header().number`. @@ -162,11 +161,7 @@ impl ConsensusDal<'_, '_> { /// which will help us to detect bugs in the consensus implementation /// while it is "fresh". If it turns out to take too long, /// we can remove the verification checks later. - pub async fn insert_certificate( - &mut self, - cert: &validator::CommitQC, - operator_address: Address, - ) -> anyhow::Result<()> { + pub async fn insert_certificate(&mut self, cert: &validator::CommitQC) -> anyhow::Result<()> { let header = &cert.message.proposal; let mut txn = self.storage.start_transaction().await?; if let Some(last) = txn.consensus_dal().last_certificate().await? { @@ -184,7 +179,7 @@ impl ConsensusDal<'_, '_> { } let want_payload = txn .consensus_dal() - .block_payload(cert.message.proposal.number, operator_address) + .block_payload(cert.message.proposal.number) .await? .context("corresponding miniblock is missing")?; anyhow::ensure!( diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index b7087985f52..9fedee44457 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,13 +1,17 @@ -use std::fmt; +use std::{collections::HashMap, fmt}; use sqlx::types::chrono::Utc; use zksync_types::{ + api, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, tx::IncludedTxLocation, MiniblockNumber, VmEvent, H256, }; -use crate::{models::storage_event::StorageL2ToL1Log, SqlxError, StorageProcessor}; +use crate::{ + models::storage_event::{StorageL2ToL1Log, StorageWeb3Log}, + SqlxError, StorageProcessor, +}; /// Wrapper around an optional event topic allowing to hex-format it for `COPY` instructions. 
#[derive(Debug)] @@ -182,11 +186,65 @@ impl EventsDal<'_, '_> { .unwrap(); } - pub(crate) async fn l2_to_l1_logs( + pub(crate) async fn get_logs_by_tx_hashes( &mut self, - tx_hash: H256, - ) -> Result, SqlxError> { - sqlx::query_as!( + hashes: &[H256], + ) -> Result>, SqlxError> { + let hashes = hashes + .iter() + .map(|hash| hash.as_bytes().to_vec()) + .collect::>(); + let logs: Vec<_> = sqlx::query_as!( + StorageWeb3Log, + r#" + SELECT + address, + topic1, + topic2, + topic3, + topic4, + value, + NULL::bytea AS "block_hash", + NULL::BIGINT AS "l1_batch_number?", + miniblock_number, + tx_hash, + tx_index_in_block, + event_index_in_block, + event_index_in_tx + FROM + events + WHERE + tx_hash = ANY ($1) + ORDER BY + miniblock_number ASC, + tx_index_in_block ASC, + event_index_in_block ASC + "#, + &hashes[..], + ) + .fetch_all(self.storage.conn()) + .await?; + + let mut result = HashMap::>::new(); + + for storage_log in logs { + let current_log = api::Log::from(storage_log); + let tx_hash = current_log.transaction_hash.unwrap(); + result.entry(tx_hash).or_default().push(current_log); + } + + Ok(result) + } + + pub(crate) async fn get_l2_to_l1_logs_by_hashes( + &mut self, + hashes: &[H256], + ) -> Result>, SqlxError> { + let hashes = &hashes + .iter() + .map(|hash| hash.as_bytes().to_vec()) + .collect::>(); + let logs: Vec<_> = sqlx::query_as!( StorageL2ToL1Log, r#" SELECT @@ -206,14 +264,27 @@ impl EventsDal<'_, '_> { FROM l2_to_l1_logs WHERE - tx_hash = $1 + tx_hash = ANY ($1) ORDER BY + tx_index_in_l1_batch ASC, log_index_in_tx ASC "#, - tx_hash.as_bytes() + &hashes[..] ) .fetch_all(self.storage.conn()) - .await + .await?; + + let mut result = HashMap::>::new(); + + for storage_log in logs { + let current_log = api::L2ToL1Log::from(storage_log); + result + .entry(current_log.transaction_hash) + .or_default() + .push(current_log); + } + + Ok(result) } } @@ -355,34 +426,41 @@ mod tests { let logs = conn .events_dal() - .l2_to_l1_logs(H256([1; 32])) + .get_l2_to_l1_logs_by_hashes(&[H256([1; 32])]) .await .unwrap(); + + let logs = logs.get(&H256([1; 32])).unwrap().clone(); + assert_eq!(logs.len(), first_logs.len()); for (i, log) in logs.iter().enumerate() { - assert_eq!(log.log_index_in_miniblock as usize, i); - assert_eq!(log.log_index_in_tx as usize, i); + assert_eq!(log.log_index.as_usize(), i); + assert_eq!(log.transaction_log_index.as_usize(), i); } for (log, expected_log) in logs.iter().zip(&first_logs) { - assert_eq!(log.key, expected_log.0.key.as_bytes()); - assert_eq!(log.value, expected_log.0.value.as_bytes()); - assert_eq!(log.sender, expected_log.0.sender.as_bytes()); + assert_eq!(log.key.as_bytes(), expected_log.0.key.as_bytes()); + assert_eq!(log.value.as_bytes(), expected_log.0.value.as_bytes()); + assert_eq!(log.sender.as_bytes(), expected_log.0.sender.as_bytes()); } let logs = conn .events_dal() - .l2_to_l1_logs(H256([2; 32])) + .get_l2_to_l1_logs_by_hashes(&[H256([2; 32])]) .await - .unwrap(); + .unwrap() + .get(&H256([2; 32])) + .unwrap() + .clone(); + assert_eq!(logs.len(), second_logs.len()); for (i, log) in logs.iter().enumerate() { - assert_eq!(log.log_index_in_miniblock as usize, i + first_logs.len()); - assert_eq!(log.log_index_in_tx as usize, i); + assert_eq!(log.log_index.as_usize(), i + first_logs.len()); + assert_eq!(log.transaction_log_index.as_usize(), i); } for (log, expected_log) in logs.iter().zip(&second_logs) { - assert_eq!(log.key, expected_log.0.key.as_bytes()); - assert_eq!(log.value, expected_log.0.value.as_bytes()); - assert_eq!(log.sender, 
expected_log.0.sender.as_bytes()); + assert_eq!(log.key.as_bytes(), expected_log.0.key.as_bytes()); + assert_eq!(log.value.as_bytes(), expected_log.0.value.as_bytes()); + assert_eq!(log.sender.as_bytes(), expected_log.0.sender.as_bytes()); } } } diff --git a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs index a2929894444..56baa32ba9c 100644 --- a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs @@ -1,8 +1,10 @@ use std::time::Duration; -use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; - -use crate::{time_utils::pg_interval_from_duration, StorageProcessor}; +use crate::{ + fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, + time_utils::pg_interval_from_duration, + StorageProcessor, +}; #[derive(Debug)] pub struct FriGpuProverQueueDal<'a, 'c> { diff --git a/core/lib/dal/src/fri_proof_compressor_dal.rs b/core/lib/dal/src/fri_proof_compressor_dal.rs index ee331204ec4..959e4304b76 100644 --- a/core/lib/dal/src/fri_proof_compressor_dal.rs +++ b/core/lib/dal/src/fri_proof_compressor_dal.rs @@ -2,12 +2,10 @@ use std::{collections::HashMap, str::FromStr, time::Duration}; use sqlx::Row; use strum::{Display, EnumString}; -use zksync_types::{ - proofs::{JobCountStatistics, StuckJobs}, - L1BatchNumber, -}; +use zksync_types::L1BatchNumber; use crate::{ + fri_prover_dal::types::{JobCountStatistics, StuckJobs}, time_utils::{duration_to_naive_time, pg_interval_from_duration}, StorageProcessor, }; diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index d9446182b7f..f3970f08092 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -1,12 +1,12 @@ use std::{collections::HashMap, convert::TryFrom, time::Duration}; use zksync_types::{ - basic_fri_types::CircuitIdRoundTuple, - proofs::{AggregationRound, FriProverJobMetadata, JobCountStatistics, StuckJobs}, + basic_fri_types::{AggregationRound, CircuitIdRoundTuple}, protocol_version::FriProtocolVersionId, L1BatchNumber, }; +use self::types::{FriProverJobMetadata, JobCountStatistics, StuckJobs}; use crate::{ instrument::InstrumentExt, metrics::MethodLatency, @@ -14,6 +14,223 @@ use crate::{ StorageProcessor, }; +// TODO (PLA-775): Should not be an embedded submodule in a concrete DAL file. +pub mod types { + //! Types exposed by the prover DAL for general-purpose use. + + use std::{net::IpAddr, ops::Add}; + + use sqlx::types::chrono::{DateTime, Utc}; + use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + + #[derive(Debug, Clone)] + pub struct FriProverJobMetadata { + pub id: u32, + pub block_number: L1BatchNumber, + pub circuit_id: u8, + pub aggregation_round: AggregationRound, + pub sequence_number: usize, + pub depth: u16, + pub is_node_final_proof: bool, + } + + #[derive(Debug, Clone, Copy, Default)] + pub struct JobCountStatistics { + pub queued: usize, + pub in_progress: usize, + pub failed: usize, + pub successful: usize, + } + + impl Add for JobCountStatistics { + type Output = JobCountStatistics; + + fn add(self, rhs: Self) -> Self::Output { + Self { + queued: self.queued + rhs.queued, + in_progress: self.in_progress + rhs.in_progress, + failed: self.failed + rhs.failed, + successful: self.successful + rhs.successful, + } + } + } + + #[derive(Debug)] + pub struct StuckJobs { + pub id: u64, + pub status: String, + pub attempts: u64, + } + + // TODO (PLA-774): Redundant structure, should be replaced with `std::net::SocketAddr`. 
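// [Editor's note] The `From` conversions on the struct defined just below make the
// eventual switch to `std::net::SocketAddr` (per the TODO above) mechanical. A small
// usage sketch; the address and port values are arbitrary examples:
//
//     let addr = SocketAddress { host: "10.0.0.1".parse().unwrap(), port: 3316 };
//     let std_addr: std::net::SocketAddr = addr.clone().into();
//     assert_eq!(SocketAddress::from(std_addr).port, addr.port);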
+    #[derive(Debug, Clone)]
+    pub struct SocketAddress {
+        pub host: IpAddr,
+        pub port: u16,
+    }
+
+    impl From<SocketAddress> for std::net::SocketAddr {
+        fn from(socket_address: SocketAddress) -> Self {
+            Self::new(socket_address.host, socket_address.port)
+        }
+    }
+
+    impl From<std::net::SocketAddr> for SocketAddress {
+        fn from(socket_address: std::net::SocketAddr) -> Self {
+            Self {
+                host: socket_address.ip(),
+                port: socket_address.port(),
+            }
+        }
+    }
+
+    #[derive(Debug, Clone)]
+    pub struct LeafAggregationJobMetadata {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_id: u8,
+        pub prover_job_ids_for_proofs: Vec<u32>,
+    }
+
+    #[derive(Debug, Clone)]
+    pub struct NodeAggregationJobMetadata {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_id: u8,
+        pub depth: u16,
+        pub prover_job_ids_for_proofs: Vec<u32>,
+    }
+
+    #[derive(Debug)]
+    pub struct JobPosition {
+        pub aggregation_round: AggregationRound,
+        pub sequence_number: usize,
+    }
+
+    #[derive(Debug, Default)]
+    pub struct ProverJobStatusFailed {
+        pub started_at: DateTime<Utc>,
+        pub error: String,
+    }
+
+    #[derive(Debug)]
+    pub struct ProverJobStatusSuccessful {
+        pub started_at: DateTime<Utc>,
+        pub time_taken: chrono::Duration,
+    }
+
+    impl Default for ProverJobStatusSuccessful {
+        fn default() -> Self {
+            ProverJobStatusSuccessful {
+                started_at: DateTime::default(),
+                time_taken: chrono::Duration::zero(),
+            }
+        }
+    }
+
+    #[derive(Debug, Default)]
+    pub struct ProverJobStatusInProgress {
+        pub started_at: DateTime<Utc>,
+    }
+
+    #[derive(Debug)]
+    pub struct WitnessJobStatusSuccessful {
+        pub started_at: DateTime<Utc>,
+        pub time_taken: chrono::Duration,
+    }
+
+    impl Default for WitnessJobStatusSuccessful {
+        fn default() -> Self {
+            WitnessJobStatusSuccessful {
+                started_at: DateTime::default(),
+                time_taken: chrono::Duration::zero(),
+            }
+        }
+    }
+
+    #[derive(Debug, Default)]
+    pub struct WitnessJobStatusFailed {
+        pub started_at: DateTime<Utc>,
+        pub error: String,
+    }
+
+    #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)]
+    pub enum ProverJobStatus {
+        #[strum(serialize = "queued")]
+        Queued,
+        #[strum(serialize = "in_progress")]
+        InProgress(ProverJobStatusInProgress),
+        #[strum(serialize = "successful")]
+        Successful(ProverJobStatusSuccessful),
+        #[strum(serialize = "failed")]
+        Failed(ProverJobStatusFailed),
+        #[strum(serialize = "skipped")]
+        Skipped,
+        #[strum(serialize = "ignored")]
+        Ignored,
+    }
+
+    #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)]
+    pub enum WitnessJobStatus {
+        #[strum(serialize = "failed")]
+        Failed(WitnessJobStatusFailed),
+        #[strum(serialize = "skipped")]
+        Skipped,
+        #[strum(serialize = "successful")]
+        Successful(WitnessJobStatusSuccessful),
+        #[strum(serialize = "waiting_for_artifacts")]
+        WaitingForArtifacts,
+        #[strum(serialize = "waiting_for_proofs")]
+        WaitingForProofs,
+        #[strum(serialize = "in_progress")]
+        InProgress,
+        #[strum(serialize = "queued")]
+        Queued,
+    }
+
+    #[derive(Debug)]
+    pub struct WitnessJobInfo {
+        pub block_number: L1BatchNumber,
+        pub created_at: DateTime<Utc>,
+        pub updated_at: DateTime<Utc>,
+        pub status: WitnessJobStatus,
+        pub position: JobPosition,
+    }
+
+    #[derive(Debug)]
+    pub struct ProverJobInfo {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_type: String,
+        pub position: JobPosition,
+        pub input_length: u64,
+        pub status: ProverJobStatus,
+        pub attempts: u32,
+        pub created_at: DateTime<Utc>,
+        pub updated_at: DateTime<Utc>,
+    }
+
+    #[derive(Debug)]
+    pub struct JobExtendedStatistics {
+        pub successful_padding: L1BatchNumber,
+        pub queued_padding: L1BatchNumber,
+        pub queued_padding_len: u32,
+        pub active_area: Vec<ProverJobInfo>,
+    }
+
+    #[derive(Debug, Copy, Clone)]
+    pub enum GpuProverInstanceStatus {
+        // The instance is available for processing.
+        Available,
+        // The instance is running at full capacity.
+        Full,
+        // The instance is reserved by a synthesizer.
+        Reserved,
+        // The instance is not alive anymore.
+        Dead,
+    }
+}
+
 #[derive(Debug)]
 pub struct FriProverDal<'a, 'c> {
     pub(crate) storage: &'a mut StorageProcessor<'c>,
diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/core/lib/dal/src/fri_witness_generator_dal.rs
index 874ad8d0368..57b45253dd7 100644
--- a/core/lib/dal/src/fri_witness_generator_dal.rs
+++ b/core/lib/dal/src/fri_witness_generator_dal.rs
@@ -2,15 +2,13 @@ use std::{collections::HashMap, convert::TryFrom, time::Duration};
 
 use sqlx::Row;
 use zksync_types::{
-    proofs::{
-        AggregationRound, JobCountStatistics, LeafAggregationJobMetadata,
-        NodeAggregationJobMetadata, StuckJobs,
-    },
-    protocol_version::FriProtocolVersionId,
-    L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 
 use crate::{
+    fri_prover_dal::types::{
+        JobCountStatistics, LeafAggregationJobMetadata, NodeAggregationJobMetadata, StuckJobs,
+    },
     metrics::MethodLatency,
     time_utils::{duration_to_naive_time, pg_interval_from_duration},
     StorageProcessor,
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs
index 2aa5e4d30ef..7cd96e7f676 100644
--- a/core/lib/dal/src/models/storage_block.rs
+++ b/core/lib/dal/src/models/storage_block.rs
@@ -28,18 +28,13 @@ pub enum StorageL1BatchConvertError {
 pub struct StorageL1BatchHeader {
     pub number: i64,
     pub timestamp: i64,
-    pub is_finished: bool,
     pub l1_tx_count: i32,
     pub l2_tx_count: i32,
-    pub fee_account_address: Vec<u8>,
     pub l2_to_l1_logs: Vec<Vec<u8>>,
     pub l2_to_l1_messages: Vec<Vec<u8>>,
     pub bloom: Vec<u8>,
     pub priority_ops_onchain_data: Vec<Vec<u8>>,
     pub used_contract_hashes: serde_json::Value,
-    pub base_fee_per_gas: BigDecimal,
-    pub l1_gas_price: i64,
-    pub l2_fair_gas_price: i64,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
     pub protocol_version: Option<i32>,
@@ -67,9 +62,7 @@ impl From<StorageL1BatchHeader> for L1BatchHeader {
         L1BatchHeader {
             number: L1BatchNumber(l1_batch.number as u32),
-            is_finished: l1_batch.is_finished,
             timestamp: l1_batch.timestamp as u64,
-            fee_account_address: Address::from_slice(&l1_batch.fee_account_address),
             priority_ops_onchain_data,
             l1_tx_count: l1_batch.l1_tx_count as u16,
             l2_tx_count: l1_batch.l2_tx_count as u16,
@@ -79,16 +72,10 @@ impl From<StorageL1BatchHeader> for L1BatchHeader {
             bloom: H2048::from_slice(&l1_batch.bloom),
             used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes)
                 .expect("invalid value for used_contract_hashes in the DB"),
-            base_fee_per_gas: l1_batch
-                .base_fee_per_gas
-                .to_u64()
-                .expect("base_fee_per_gas should fit in u64"),
             base_system_contracts_hashes: convert_base_system_contracts_hashes(
                 l1_batch.bootloader_code_hash,
                 l1_batch.default_aa_code_hash,
             ),
-            l1_gas_price: l1_batch.l1_gas_price as u64,
-            l2_fair_gas_price: l1_batch.l2_fair_gas_price as u64,
             system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(),
             protocol_version: l1_batch
                 .protocol_version
@@ -126,10 +113,8 @@ fn convert_base_system_contracts_hashes(
 pub struct StorageL1Batch {
     pub number: i64,
     pub timestamp: i64,
-    pub is_finished: bool,
     pub l1_tx_count: i32,
     pub l2_tx_count: i32,
-    pub fee_account_address: Vec<u8>,
     pub bloom: Vec<u8>,
     pub l2_to_l1_logs: Vec<Vec<u8>>,
     pub priority_ops_onchain_data: Vec<Vec<u8>>,
@@ -161,10 +146,6 @@ pub struct
StorageL1Batch { pub used_contract_hashes: serde_json::Value, - pub base_fee_per_gas: BigDecimal, - pub l1_gas_price: i64, - pub l2_fair_gas_price: i64, - pub system_logs: Vec>, pub compressed_state_diffs: Option>, @@ -188,9 +169,7 @@ impl From for L1BatchHeader { L1BatchHeader { number: L1BatchNumber(l1_batch.number as u32), - is_finished: l1_batch.is_finished, timestamp: l1_batch.timestamp as u64, - fee_account_address: Address::from_slice(&l1_batch.fee_account_address), priority_ops_onchain_data, l1_tx_count: l1_batch.l1_tx_count as u16, l2_tx_count: l1_batch.l2_tx_count as u16, @@ -200,16 +179,10 @@ impl From for L1BatchHeader { bloom: H2048::from_slice(&l1_batch.bloom), used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), - base_fee_per_gas: l1_batch - .base_fee_per_gas - .to_u64() - .expect("base_fee_per_gas should fit in u64"), base_system_contracts_hashes: convert_base_system_contracts_hashes( l1_batch.bootloader_code_hash, l1_batch.default_aa_code_hash, ), - l1_gas_price: l1_batch.l1_gas_price as u64, - l2_fair_gas_price: l1_batch.l2_fair_gas_price as u64, system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: l1_batch .protocol_version @@ -375,61 +348,58 @@ pub struct StorageBlockDetails { pub l2_fair_gas_price: i64, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, - pub fee_account_address: Option>, // May be None if the block is not yet sealed + pub fee_account_address: Vec, pub protocol_version: Option, } -impl StorageBlockDetails { - pub(crate) fn into_block_details(self, current_operator_address: Address) -> api::BlockDetails { - let status = if self.number == 0 || self.execute_tx_hash.is_some() { +impl From for api::BlockDetails { + fn from(details: StorageBlockDetails) -> Self { + let status = if details.number == 0 || details.execute_tx_hash.is_some() { api::BlockStatus::Verified } else { api::BlockStatus::Sealed }; let base = api::BlockDetailsBase { - timestamp: self.timestamp as u64, - l1_tx_count: self.l1_tx_count as usize, - l2_tx_count: self.l2_tx_count as usize, + timestamp: details.timestamp as u64, + l1_tx_count: details.l1_tx_count as usize, + l2_tx_count: details.l2_tx_count as usize, status, - root_hash: self.root_hash.as_deref().map(H256::from_slice), - commit_tx_hash: self + root_hash: details.root_hash.as_deref().map(H256::from_slice), + commit_tx_hash: details .commit_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")), - committed_at: self + committed_at: details .committed_at .map(|committed_at| DateTime::from_naive_utc_and_offset(committed_at, Utc)), - prove_tx_hash: self + prove_tx_hash: details .prove_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")), - proven_at: self + proven_at: details .proven_at .map(|proven_at| DateTime::::from_naive_utc_and_offset(proven_at, Utc)), - execute_tx_hash: self + execute_tx_hash: details .execute_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")), - executed_at: self + executed_at: details .executed_at .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), - l1_gas_price: self.l1_gas_price as u64, - l2_fair_gas_price: self.l2_fair_gas_price as u64, + l1_gas_price: details.l1_gas_price as u64, + l2_fair_gas_price: details.l2_fair_gas_price as u64, base_system_contracts_hashes: convert_base_system_contracts_hashes( - self.bootloader_code_hash, - 
self.default_aa_code_hash, + details.bootloader_code_hash, + details.default_aa_code_hash, ), }; api::BlockDetails { base, - number: MiniblockNumber(self.number as u32), - l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), - operator_address: self - .fee_account_address - .map(|fee_account_address| Address::from_slice(&fee_account_address)) - .unwrap_or(current_operator_address), - protocol_version: self + number: MiniblockNumber(details.number as u32), + l1_batch_number: L1BatchNumber(details.l1_batch_number as u32), + operator_address: Address::from_slice(&details.fee_account_address), + protocol_version: details .protocol_version .map(|v| (v as u16).try_into().unwrap()), } @@ -510,6 +480,7 @@ pub struct StorageMiniblockHeader { pub hash: Vec, pub l1_tx_count: i32, pub l2_tx_count: i32, + pub fee_account_address: Vec, pub base_fee_per_gas: BigDecimal, pub l1_gas_price: i64, // L1 gas price assumed in the corresponding batch @@ -559,6 +530,7 @@ impl From for MiniblockHeader { hash: H256::from_slice(&row.hash), l1_tx_count: row.l1_tx_count as u16, l2_tx_count: row.l2_tx_count as u16, + fee_account_address: Address::from_slice(&row.fee_account_address), base_fee_per_gas: row.base_fee_per_gas.to_u64().unwrap(), batch_fee_input: fee_input, base_system_contracts_hashes: convert_base_system_contracts_hashes( diff --git a/core/lib/dal/src/models/storage_log.rs b/core/lib/dal/src/models/storage_log.rs index adca6742d09..fee544a1997 100644 --- a/core/lib/dal/src/models/storage_log.rs +++ b/core/lib/dal/src/models/storage_log.rs @@ -1,37 +1,36 @@ -use sqlx::types::chrono::NaiveDateTime; -use zksync_types::{AccountTreeId, Address, StorageKey, StorageLog, StorageLogKind, H256, U256}; +use zksync_types::{L1BatchNumber, MiniblockNumber, H160, H256, U256}; -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct DBStorageLog { - pub id: i64, - pub hashed_key: Vec, - pub address: Vec, - pub key: Vec, - pub value: Vec, - pub operation_number: i32, - pub tx_hash: Vec, - pub miniblock_number: i64, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, +/// Model of the initial write record from the `initial_writes` table. Should only be used in tests. +#[derive(Debug, PartialEq)] +pub struct DbInitialWrite { + pub hashed_key: H256, + pub l1_batch_number: L1BatchNumber, + pub index: u64, } -impl From for StorageLog { - fn from(log: DBStorageLog) -> StorageLog { - StorageLog { - kind: StorageLogKind::Write, - key: StorageKey::new( - AccountTreeId::new(Address::from_slice(&log.address)), - H256::from_slice(&log.key), - ), - value: H256::from_slice(&log.value), - } - } +/// Model of the storage log record from the `storage_logs` table. Should only be used in tests. +#[derive(Debug, PartialEq)] +pub struct DbStorageLog { + pub hashed_key: H256, + pub address: H160, + pub key: H256, + pub value: H256, + pub operation_number: u64, + pub tx_hash: H256, + pub miniblock_number: MiniblockNumber, } // We don't want to rely on the Merkle tree crate to import a single type, so we duplicate `TreeEntry` here. #[derive(Debug, Clone, Copy)] -pub struct StorageTreeEntry { - pub key: U256, +pub struct StorageRecoveryLogEntry { + pub key: H256, pub value: H256, pub leaf_index: u64, } + +impl StorageRecoveryLogEntry { + /// Converts `key` to the format used by the Merkle tree (little-endian [`U256`]). 
+    pub fn tree_key(&self) -> U256 {
+        U256::from_little_endian(&self.key.0)
+    }
+}
diff --git a/core/lib/dal/src/models/storage_prover_job_info.rs b/core/lib/dal/src/models/storage_prover_job_info.rs
index 3242953b39d..efe6e8cb69d 100644
--- a/core/lib/dal/src/models/storage_prover_job_info.rs
+++ b/core/lib/dal/src/models/storage_prover_job_info.rs
@@ -1,12 +1,11 @@
 use std::{convert::TryFrom, panic, str::FromStr};
 
 use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc};
-use zksync_types::{
-    proofs::{
-        AggregationRound, JobPosition, ProverJobInfo, ProverJobStatus, ProverJobStatusFailed,
-        ProverJobStatusInProgress, ProverJobStatusSuccessful,
-    },
-    L1BatchNumber,
-};
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
+
+use crate::fri_prover_dal::types::{
+    JobPosition, ProverJobInfo, ProverJobStatus, ProverJobStatusFailed, ProverJobStatusInProgress,
+    ProverJobStatusSuccessful,
+};
 
 #[derive(sqlx::FromRow)]
diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs
index 2836d2820d8..db1487a4548 100644
--- a/core/lib/dal/src/models/storage_sync.rs
+++ b/core/lib/dal/src/models/storage_sync.rs
@@ -19,7 +19,7 @@ pub(crate) struct StorageSyncBlock {
     pub fair_pubdata_price: Option<i64>,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
-    pub fee_account_address: Option<Vec<u8>>, // May be None if the block is not yet sealed
+    pub fee_account_address: Vec<u8>,
     pub protocol_version: i32,
     pub virtual_blocks: i64,
     pub hash: Vec<u8>,
@@ -42,7 +42,7 @@ pub(crate) struct SyncBlock {
     pub l2_fair_gas_price: u64,
     pub fair_pubdata_price: Option<u64>,
     pub base_system_contracts_hashes: BaseSystemContractsHashes,
-    pub fee_account_address: Option<Address>
, + pub fee_account_address: Address, pub virtual_blocks: u32, pub hash: H256, pub protocol_version: ProtocolVersionId, @@ -85,10 +85,7 @@ impl TryFrom for SyncBlock { ) .context("default_aa_code_hash")?, }, - fee_account_address: block - .fee_account_address - .map(|a| parse_h160(&a)) - .transpose() + fee_account_address: parse_h160(&block.fee_account_address) .context("fee_account_address")?, virtual_blocks: block.virtual_blocks.try_into().context("virtual_blocks")?, hash: parse_h256(&block.hash).context("hash")?, @@ -101,11 +98,7 @@ impl TryFrom for SyncBlock { } impl SyncBlock { - pub(crate) fn into_api( - self, - current_operator_address: Address, - transactions: Option>, - ) -> en::SyncBlock { + pub(crate) fn into_api(self, transactions: Option>) -> en::SyncBlock { en::SyncBlock { number: self.number, l1_batch_number: self.l1_batch_number, @@ -115,7 +108,7 @@ impl SyncBlock { l2_fair_gas_price: self.l2_fair_gas_price, fair_pubdata_price: self.fair_pubdata_price, base_system_contracts_hashes: self.base_system_contracts_hashes, - operator_address: self.fee_account_address.unwrap_or(current_operator_address), + operator_address: self.fee_account_address, transactions, virtual_blocks: Some(self.virtual_blocks), hash: Some(self.hash), @@ -123,11 +116,7 @@ impl SyncBlock { } } - pub(crate) fn into_payload( - self, - current_operator_address: Address, - transactions: Vec, - ) -> Payload { + pub(crate) fn into_payload(self, transactions: Vec) -> Payload { Payload { protocol_version: self.protocol_version, hash: self.hash, @@ -137,7 +126,7 @@ impl SyncBlock { l2_fair_gas_price: self.l2_fair_gas_price, fair_pubdata_price: self.fair_pubdata_price, virtual_blocks: self.virtual_blocks, - operator_address: self.fee_account_address.unwrap_or(current_operator_address), + operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 1e252a4b8e4..82732f5ab99 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -9,7 +9,7 @@ use sqlx::{ }; use zksync_types::{ api, - api::{TransactionDetails, TransactionStatus}, + api::{TransactionDetails, TransactionReceipt, TransactionStatus}, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, @@ -21,7 +21,7 @@ use zksync_types::{ Nonce, PackedEthSignature, PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, }; -use zksync_utils::bigdecimal_to_u256; +use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; use crate::BigDecimal; @@ -322,6 +322,83 @@ impl From for Transaction { } } +#[derive(sqlx::FromRow)] +pub(crate) struct StorageTransactionReceipt { + pub error: Option, + pub tx_format: Option, + pub index_in_block: Option, + pub block_hash: Vec, + pub tx_hash: Vec, + pub block_number: i64, + pub l1_batch_tx_index: Option, + pub l1_batch_number: Option, + pub transfer_to: Option, + pub execute_contract_address: Option, + pub refunded_gas: i64, + pub gas_limit: Option, + pub effective_gas_price: Option, + pub contract_address: Option>, + pub initiator_address: Vec, +} + +impl From for TransactionReceipt { + fn from(storage_receipt: StorageTransactionReceipt) -> Self { + let status = storage_receipt.error.map_or_else(U64::one, |_| U64::zero()); + + let tx_type = storage_receipt + .tx_format + .map_or_else(Default::default, 
U64::from); + let transaction_index = storage_receipt + .index_in_block + .map_or_else(Default::default, U64::from); + + let block_hash = H256::from_slice(&storage_receipt.block_hash); + TransactionReceipt { + transaction_hash: H256::from_slice(&storage_receipt.tx_hash), + transaction_index, + block_hash, + block_number: storage_receipt.block_number.into(), + l1_batch_tx_index: storage_receipt.l1_batch_tx_index.map(U64::from), + l1_batch_number: storage_receipt.l1_batch_number.map(U64::from), + from: H160::from_slice(&storage_receipt.initiator_address), + to: storage_receipt + .transfer_to + .or(storage_receipt.execute_contract_address) + .map(|addr| { + serde_json::from_value::
(addr) + .expect("invalid address value in the database") + }) + // For better compatibility with various clients, we never return null. + .or_else(|| Some(Address::default())), + cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). + gas_used: { + let refunded_gas: U256 = storage_receipt.refunded_gas.into(); + storage_receipt.gas_limit.map(|val| { + let gas_limit = bigdecimal_to_u256(val); + gas_limit - refunded_gas + }) + }, + effective_gas_price: Some( + storage_receipt + .effective_gas_price + .map(bigdecimal_to_u256) + .unwrap_or_default(), + ), + contract_address: storage_receipt + .contract_address + .map(|addr| h256_to_account_address(&H256::from_slice(&addr))), + logs: vec![], + l2_to_l1_logs: vec![], + status, + root: block_hash, + logs_bloom: Default::default(), + // Even though the Rust SDK recommends us to supply "None" for legacy transactions + // we always supply some number anyway to have the same behavior as most popular RPCs + transaction_type: Some(tx_type), + } + } +} + #[derive(Serialize, Deserialize)] pub struct StorageApiTransaction { #[serde(flatten)] @@ -514,8 +591,7 @@ pub fn extract_web3_transaction(db_row: PgRow, chain_id: L2ChainId) -> api::Tran } #[derive(Debug, Clone, sqlx::FromRow)] -pub struct CallTrace { - pub tx_hash: Vec, +pub(crate) struct CallTrace { pub call_trace: Vec, } diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs index 486b9f89681..ea8e15fb9c9 100644 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ b/core/lib/dal/src/models/storage_witness_job_info.rs @@ -1,12 +1,11 @@ use std::{convert::TryFrom, str::FromStr}; use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use zksync_types::{ - proofs::{ - AggregationRound, JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, - WitnessJobStatusSuccessful, - }, - L1BatchNumber, +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::fri_prover_dal::types::{ + JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, + WitnessJobStatusSuccessful, }; #[derive(sqlx::FromRow)] diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index abf6ceb4406..af6f6a25439 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -8,7 +8,7 @@ pub struct SnapshotRecoveryDal<'a, 'c> { } impl SnapshotRecoveryDal<'_, '_> { - pub async fn set_applied_snapshot_status( + pub async fn insert_initial_recovery_status( &mut self, status: &SnapshotRecoveryStatus, ) -> sqlx::Result<()> { @@ -20,36 +20,43 @@ impl SnapshotRecoveryDal<'_, '_> { l1_batch_root_hash, miniblock_number, miniblock_root_hash, - last_finished_chunk_id, - total_chunk_count, + storage_logs_chunks_processed, updated_at, created_at ) VALUES - ($1, $2, $3, $4, $5, $6, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO - UPDATE - SET - l1_batch_number = excluded.l1_batch_number, - l1_batch_root_hash = excluded.l1_batch_root_hash, - miniblock_number = excluded.miniblock_number, - miniblock_root_hash = excluded.miniblock_root_hash, - last_finished_chunk_id = excluded.last_finished_chunk_id, - total_chunk_count = excluded.total_chunk_count, - updated_at = excluded.updated_at + ($1, $2, $3, $4, $5, NOW(), NOW()) "#, status.l1_batch_number.0 as i64, status.l1_batch_root_hash.0.as_slice(), status.miniblock_number.0 as i64, status.miniblock_root_hash.0.as_slice(), - 
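// ---
// Illustrative sketch, not part of the patch: the two receipt rules encoded
// in the `From<StorageTransactionReceipt>` impl above. `gas_used` is
// `gas_limit - refunded_gas`, and `status` is 1 unless an error string was
// recorded. Plain `U256`/`U64` arithmetic from `zksync_types` is assumed.
fn receipt_fields_sketch() {
    use zksync_types::{U256, U64};

    let gas_limit = U256::from(1_000_000_u64);
    let refunded_gas = U256::from(250_000_u64);
    assert_eq!(gas_limit - refunded_gas, U256::from(750_000_u64));

    let error: Option<String> = None;
    // Same shape as the impl: no recorded error maps to status 1.
    let status = error.map_or_else(U64::one, |_| U64::zero());
    assert_eq!(status, U64::one());
}
// ---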
status.last_finished_chunk_id.map(|v| v as i32), - status.total_chunk_count as i64, + &status.storage_logs_chunks_processed, ) .execute(self.storage.conn()) .await?; Ok(()) } + pub async fn mark_storage_logs_chunk_as_processed( + &mut self, + chunk_id: u64, + ) -> sqlx::Result<()> { + sqlx::query!( + r#" + UPDATE snapshot_recovery + SET + storage_logs_chunks_processed[$1] = TRUE, + updated_at = NOW() + "#, + chunk_id as i32 + 1 + ) + .execute(self.storage.conn()) + .await?; + + Ok(()) + } + pub async fn get_applied_snapshot_status( &mut self, ) -> sqlx::Result> { @@ -60,8 +67,7 @@ impl SnapshotRecoveryDal<'_, '_> { l1_batch_root_hash, miniblock_number, miniblock_root_hash, - last_finished_chunk_id, - total_chunk_count + storage_logs_chunks_processed FROM snapshot_recovery "#, @@ -74,8 +80,7 @@ impl SnapshotRecoveryDal<'_, '_> { l1_batch_root_hash: H256::from_slice(&r.l1_batch_root_hash), miniblock_number: MiniblockNumber(r.miniblock_number as u32), miniblock_root_hash: H256::from_slice(&r.miniblock_root_hash), - last_finished_chunk_id: r.last_finished_chunk_id.map(|v| v as u64), - total_chunk_count: r.total_chunk_count as u64, + storage_logs_chunks_processed: r.storage_logs_chunks_processed.into_iter().collect(), })) } } @@ -96,40 +101,37 @@ mod tests { .await .unwrap(); assert_eq!(None, empty_status); - let status = SnapshotRecoveryStatus { + let mut status = SnapshotRecoveryStatus { l1_batch_number: L1BatchNumber(123), l1_batch_root_hash: H256::random(), miniblock_number: MiniblockNumber(234), miniblock_root_hash: H256::random(), - last_finished_chunk_id: None, - total_chunk_count: 345, + storage_logs_chunks_processed: vec![false, false, true, false], }; applied_status_dal - .set_applied_snapshot_status(&status) + .insert_initial_recovery_status(&status) .await .unwrap(); let status_from_db = applied_status_dal .get_applied_snapshot_status() .await .unwrap(); - assert_eq!(Some(status), status_from_db); + assert_eq!(status, status_from_db.unwrap()); - let updated_status = SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(123), - l1_batch_root_hash: H256::random(), - miniblock_number: MiniblockNumber(234), - miniblock_root_hash: H256::random(), - last_finished_chunk_id: Some(2345), - total_chunk_count: 345, - }; + status.storage_logs_chunks_processed = vec![false, true, true, true]; applied_status_dal - .set_applied_snapshot_status(&updated_status) + .mark_storage_logs_chunk_as_processed(1) .await .unwrap(); + applied_status_dal + .mark_storage_logs_chunk_as_processed(3) + .await + .unwrap(); + let updated_status_from_db = applied_status_dal .get_applied_snapshot_status() .await .unwrap(); - assert_eq!(Some(updated_status), updated_status_from_db); + assert_eq!(status, updated_status_from_db.unwrap()); } } diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index 9267470878e..d09363592d1 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -1,6 +1,6 @@ use zksync_types::{ - snapshots::{SnapshotFactoryDependency, SnapshotStorageLog}, - AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256, + snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, + StorageKey, H256, }; use crate::{instrument::InstrumentExt, StorageProcessor}; @@ -99,13 +99,15 @@ impl SnapshotsCreatorDal<'_, '_> { Ok(storage_logs) } + /// Returns all factory dependencies up to and including the specified `miniblock_number`. 
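// ---
// Illustrative sketch, not part of the patch: the new chunk bookkeeping in
// `snapshot_recovery`. A single `last_finished_chunk_id`/`total_chunk_count`
// pair becomes one boolean flag per chunk, so chunks can complete in any
// order. Note that `mark_storage_logs_chunk_as_processed` binds
// `chunk_id as i32 + 1` because Postgres array indices are 1-based.
fn chunk_progress_sketch() {
    // In-memory equivalent of the `storage_logs_chunks_processed` column.
    let mut chunks_processed = vec![false; 4];

    // One flag is flipped per processed chunk (0-based on the Rust side).
    chunks_processed[1] = true;
    chunks_processed[3] = true;

    // Recovery is complete once every flag is set.
    assert!(!chunks_processed.iter().all(|&done| done));
}
// ---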
pub async fn get_all_factory_deps( &mut self, miniblock_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> sqlx::Result)>> { let rows = sqlx::query!( r#" SELECT + bytecode_hash, bytecode FROM factory_deps @@ -121,9 +123,7 @@ impl SnapshotsCreatorDal<'_, '_> { Ok(rows .into_iter() - .map(|row| SnapshotFactoryDependency { - bytecode: row.bytecode.into(), - }) + .map(|row| (H256::from_slice(&row.bytecode_hash), row.bytecode)) .collect()) } } diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index 106a7b2a5d4..1155cae4a3b 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -19,7 +19,7 @@ impl StorageDal<'_, '_> { &mut self, block_number: MiniblockNumber, factory_deps: &HashMap>, - ) { + ) -> sqlx::Result<()> { let (bytecode_hashes, bytecodes): (Vec<_>, Vec<_>) = factory_deps .iter() .map(|dep| (dep.0.as_bytes(), dep.1.as_slice())) @@ -45,8 +45,9 @@ impl StorageDal<'_, '_> { block_number.0 as i64, ) .execute(self.storage.conn()) - .await - .unwrap(); + .await?; + + Ok(()) } /// Returns bytecode for a factory dependency with the specified bytecode `hash`. @@ -134,8 +135,8 @@ impl StorageDal<'_, '_> { pub async fn get_factory_deps_for_revert( &mut self, block_number: MiniblockNumber, - ) -> Vec { - sqlx::query!( + ) -> sqlx::Result> { + Ok(sqlx::query!( r#" SELECT bytecode_hash @@ -147,11 +148,10 @@ impl StorageDal<'_, '_> { block_number.0 as i64 ) .fetch_all(self.storage.conn()) - .await - .unwrap() + .await? .into_iter() .map(|row| H256::from_slice(&row.bytecode_hash)) - .collect() + .collect()) } /// Applies the specified storage logs for a miniblock. Returns the map of unique storage updates. diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index ad756dd0819..fa09a15bdf3 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -2,11 +2,12 @@ use std::{collections::HashMap, ops, time::Instant}; use sqlx::{types::chrono::Utc, Row}; use zksync_types::{ - get_code_key, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256, + get_code_key, snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, + MiniblockNumber, StorageKey, StorageLog, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H160, H256, }; -use crate::{instrument::InstrumentExt, models::storage_log::StorageTreeEntry, StorageProcessor}; +pub use crate::models::storage_log::{DbStorageLog, StorageRecoveryLogEntry}; +use crate::{instrument::InstrumentExt, StorageProcessor}; #[derive(Debug)] pub struct StorageLogsDal<'a, 'c> { @@ -67,6 +68,46 @@ impl StorageLogsDal<'_, '_> { copy.finish().await.unwrap(); } + pub async fn insert_storage_logs_from_snapshot( + &mut self, + miniblock_number: MiniblockNumber, + snapshot_storage_logs: &[SnapshotStorageLog], + ) -> sqlx::Result<()> { + let mut copy = self + .storage + .conn() + .copy_in_raw( + "COPY storage_logs( + hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, + created_at, updated_at + ) + FROM STDIN WITH (DELIMITER '|')", + ) + .await?; + + let mut buffer = String::new(); + let now = Utc::now().naive_utc().to_string(); + for log in snapshot_storage_logs.iter() { + write_str!( + &mut buffer, + r"\\x{hashed_key:x}|\\x{address:x}|\\x{key:x}|\\x{value:x}|", + hashed_key = log.key.hashed_key(), + address = log.key.address(), + key = log.key.key(), + value = log.value + ); + writeln_str!( + &mut buffer, + 
r"{}|\\x{:x}|{miniblock_number}|{now}|{now}", + log.enumeration_index, + H256::zero() + ); + } + copy.send(buffer.as_bytes()).await?; + copy.finish().await?; + Ok(()) + } + pub async fn append_storage_logs( &mut self, block_number: MiniblockNumber, @@ -95,11 +136,14 @@ impl StorageLogsDal<'_, '_> { } /// Rolls back storage to the specified point in time. - pub async fn rollback_storage(&mut self, last_miniblock_to_keep: MiniblockNumber) { + pub async fn rollback_storage( + &mut self, + last_miniblock_to_keep: MiniblockNumber, + ) -> sqlx::Result<()> { let stage_start = Instant::now(); let modified_keys = self .modified_keys_since_miniblock(last_miniblock_to_keep) - .await; + .await?; tracing::info!( "Loaded {} keys changed after miniblock #{last_miniblock_to_keep} in {:?}", modified_keys.len(), @@ -109,7 +153,7 @@ impl StorageLogsDal<'_, '_> { let stage_start = Instant::now(); let prev_values = self .get_storage_values(&modified_keys, last_miniblock_to_keep) - .await; + .await?; tracing::info!( "Loaded previous storage values for modified keys in {:?}", stage_start.elapsed() @@ -144,8 +188,8 @@ impl StorageLogsDal<'_, '_> { &keys_to_delete as &[&[u8]], ) .execute(self.storage.conn()) - .await - .unwrap(); + .await?; + tracing::info!( "Removed {} keys in {:?}", keys_to_delete.len(), @@ -167,21 +211,22 @@ impl StorageLogsDal<'_, '_> { &values_to_update as &[&[u8]], ) .execute(self.storage.conn()) - .await - .unwrap(); + .await?; + tracing::info!( "Updated {} keys to previous values in {:?}", keys_to_update.len(), stage_start.elapsed() ); + Ok(()) } /// Returns all storage keys that were modified after the specified miniblock. async fn modified_keys_since_miniblock( &mut self, miniblock_number: MiniblockNumber, - ) -> Vec { - sqlx::query!( + ) -> sqlx::Result> { + Ok(sqlx::query!( r#" SELECT DISTINCT ON (hashed_key) hashed_key @@ -198,11 +243,10 @@ impl StorageLogsDal<'_, '_> { miniblock_number.0 as i64 ) .fetch_all(self.storage.conn()) - .await - .unwrap() + .await? .into_iter() .map(|row| H256::from_slice(&row.hashed_key)) - .collect() + .collect()) } /// Removes all storage logs with a miniblock number strictly greater than the specified `block_number`. 
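// ---
// Illustrative sketch, not part of the patch: the row format produced by the
// COPY-based writer above. Columns are `|`-delimited and byte columns are
// hex-encoded with a literal `\\x` prefix (Postgres' bytea escape in COPY
// text format). `write_str!` is the repo's macro; plain `write!` stands in
// for it here.
fn copy_row_sketch() {
    use std::fmt::Write as _;
    use zksync_types::H256;

    let hashed_key = H256::repeat_byte(0xab);
    let value = H256::zero();
    let miniblock_number = 42;

    let mut buffer = String::new();
    write!(buffer, r"\\x{hashed_key:x}|\\x{value:x}|{miniblock_number}").unwrap();
    assert!(buffer.starts_with(r"\\xabab"));
}
// ---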
@@ -258,7 +302,7 @@ impl StorageLogsDal<'_, '_> { pub async fn get_touched_slots_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> HashMap { + ) -> sqlx::Result> { let rows = sqlx::query!( r#" SELECT @@ -290,8 +334,7 @@ impl StorageLogsDal<'_, '_> { l1_batch_number.0 as i64 ) .fetch_all(self.storage.conn()) - .await - .unwrap(); + .await?; let touched_slots = rows.into_iter().map(|row| { let key = StorageKey::new( @@ -300,7 +343,7 @@ impl StorageLogsDal<'_, '_> { ); (key, H256::from_slice(&row.value)) }); - touched_slots.collect() + Ok(touched_slots.collect()) } /// Returns (hashed) storage keys and the corresponding values that need to be applied to a storage @@ -308,19 +351,18 @@ impl StorageLogsDal<'_, '_> { pub async fn get_storage_logs_for_revert( &mut self, l1_batch_number: L1BatchNumber, - ) -> HashMap> { + ) -> sqlx::Result>> { let miniblock_range = self .storage .blocks_dal() .get_miniblock_range_of_l1_batch(l1_batch_number) - .await - .unwrap(); + .await?; let Some((_, last_miniblock)) = miniblock_range else { - return HashMap::new(); + return Ok(HashMap::new()); }; let stage_start = Instant::now(); - let mut modified_keys = self.modified_keys_since_miniblock(last_miniblock).await; + let mut modified_keys = self.modified_keys_since_miniblock(last_miniblock).await?; let modified_keys_count = modified_keys.len(); tracing::info!( "Fetched {modified_keys_count} keys changed after miniblock #{last_miniblock} in {:?}", @@ -334,7 +376,7 @@ impl StorageLogsDal<'_, '_> { let stage_start = Instant::now(); let l1_batch_and_index_by_key = self .get_l1_batches_and_indices_for_initial_writes(&modified_keys) - .await; + .await?; tracing::info!( "Loaded initial write info for modified keys in {:?}", stage_start.elapsed() @@ -372,7 +414,7 @@ impl StorageLogsDal<'_, '_> { let stage_start = Instant::now(); let prev_values_for_updated_keys = self .get_storage_values(&modified_keys, last_miniblock) - .await + .await? .into_iter() .map(|(key, value)| { let value = value.unwrap(); // We already filtered out keys that weren't touched. @@ -385,15 +427,15 @@ impl StorageLogsDal<'_, '_> { stage_start.elapsed() ); output.extend(prev_values_for_updated_keys); - output + Ok(output) } pub async fn get_l1_batches_and_indices_for_initial_writes( &mut self, hashed_keys: &[H256], - ) -> HashMap { + ) -> sqlx::Result> { if hashed_keys.is_empty() { - return HashMap::new(); // Shortcut to save time on communication with DB in the common case + return Ok(HashMap::new()); // Shortcut to save time on communication with DB in the common case } let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); @@ -413,17 +455,17 @@ impl StorageLogsDal<'_, '_> { .instrument("get_l1_batches_and_indices_for_initial_writes") .report_latency() .fetch_all(self.storage.conn()) - .await - .unwrap(); + .await?; - rows.into_iter() + Ok(rows + .into_iter() .map(|row| { ( H256::from_slice(&row.hashed_key), (L1BatchNumber(row.l1_batch_number as u32), row.index as u64), ) }) - .collect() + .collect()) } /// Gets previous values for the specified storage keys before the specified L1 batch number. @@ -440,17 +482,16 @@ impl StorageLogsDal<'_, '_> { &mut self, hashed_keys: &[H256], next_l1_batch: L1BatchNumber, - ) -> HashMap> { + ) -> sqlx::Result>> { let (miniblock_number, _) = self .storage .blocks_dal() .get_miniblock_range_of_l1_batch(next_l1_batch) - .await - .unwrap() + .await? 
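// ---
// Illustrative sketch, not part of the patch: the recurring refactor in this
// file in miniature. Panicking accessors (`.await.unwrap()`) become fallible
// ones returning `sqlx::Result<_>`, and callers propagate errors with `?`.
// The helper name is hypothetical; only the shape matters.
async fn fallible_dal_sketch() -> sqlx::Result<Vec<u8>> {
    async fn load_row() -> sqlx::Result<Vec<u8>> {
        // Stands in for `sqlx::query!(...).fetch_one(conn).await?`.
        Ok(vec![1, 2, 3])
    }
    let row = load_row().await?; // was: `load_row().await.unwrap()`
    Ok(row)
}
// ---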
.unwrap(); if miniblock_number == MiniblockNumber(0) { - hashed_keys.iter().copied().map(|key| (key, None)).collect() + Ok(hashed_keys.iter().copied().map(|key| (key, None)).collect()) } else { self.get_storage_values(hashed_keys, miniblock_number - 1) .await @@ -462,7 +503,7 @@ impl StorageLogsDal<'_, '_> { &mut self, hashed_keys: &[H256], miniblock_number: MiniblockNumber, - ) -> HashMap> { + ) -> sqlx::Result>> { let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); let rows = sqlx::query!( @@ -490,15 +531,48 @@ impl StorageLogsDal<'_, '_> { miniblock_number.0 as i64 ) .fetch_all(self.storage.conn()) - .await - .unwrap(); + .await?; - rows.into_iter() + Ok(rows + .into_iter() .map(|row| { let key = H256::from_slice(&row.hashed_key); let value = row.value.map(|value| H256::from_slice(&value)); (key, value) }) + .collect()) + } + + /// Retrieves all storage log entries for testing purposes. + pub async fn dump_all_storage_logs_for_tests(&mut self) -> Vec { + let rows = sqlx::query!( + r#" + SELECT + hashed_key, + address, + key, + value, + operation_number, + tx_hash, + miniblock_number + FROM + storage_logs + "# + ) + .fetch_all(self.storage.conn()) + .await + .expect("get_all_storage_logs_for_tests"); + + rows.into_iter() + .map(|row| DbStorageLog { + hashed_key: H256::from_slice(&row.hashed_key), + address: H160::from_slice(&row.address), + key: H256::from_slice(&row.key), + value: H256::from_slice(&row.value), + operation_number: row.operation_number as u64, + tx_hash: H256::from_slice(&row.tx_hash), + miniblock_number: MiniblockNumber(row.miniblock_number as u32), + }) .collect() } @@ -531,7 +605,7 @@ impl StorageLogsDal<'_, '_> { &mut self, miniblock_number: MiniblockNumber, key_ranges: &[ops::RangeInclusive], - ) -> sqlx::Result>> { + ) -> sqlx::Result>> { let (start_keys, end_keys): (Vec<_>, Vec<_>) = key_ranges .iter() .map(|range| (range.start().as_bytes(), range.end().as_bytes())) @@ -574,8 +648,8 @@ impl StorageLogsDal<'_, '_> { .await?; let rows = rows.into_iter().map(|row| { - Some(StorageTreeEntry { - key: U256::from_little_endian(row.hashed_key.as_ref()?), + Some(StorageRecoveryLogEntry { + key: H256::from_slice(row.hashed_key.as_ref()?), value: H256::from_slice(row.value.as_ref()?), leaf_index: row.index? 
as u64, }) @@ -589,7 +663,7 @@ impl StorageLogsDal<'_, '_> { &mut self, miniblock_number: MiniblockNumber, key_range: ops::RangeInclusive, - ) -> sqlx::Result> { + ) -> sqlx::Result> { let rows = sqlx::query!( r#" SELECT @@ -613,8 +687,8 @@ impl StorageLogsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - let rows = rows.into_iter().map(|row| StorageTreeEntry { - key: U256::from_little_endian(&row.hashed_key), + let rows = rows.into_iter().map(|row| StorageRecoveryLogEntry { + key: H256::from_slice(&row.hashed_key), value: H256::from_slice(&row.value), leaf_index: row.index as u64, }); @@ -709,31 +783,20 @@ impl StorageLogsDal<'_, '_> { #[cfg(test)] mod tests { use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::{ - block::{BlockGasCount, L1BatchHeader}, - ProtocolVersion, ProtocolVersionId, - }; + use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId}; use super::*; use crate::{tests::create_miniblock_header, ConnectionPool}; - fn u256_to_h256_reversed(value: U256) -> H256 { - let mut bytes = [0_u8; 32]; - value.to_little_endian(&mut bytes); - H256(bytes) - } - async fn insert_miniblock(conn: &mut StorageProcessor<'_>, number: u32, logs: Vec) { - let mut header = L1BatchHeader::new( + let header = L1BatchHeader::new( L1BatchNumber(number), 0, - Address::default(), BaseSystemContractsHashes::default(), ProtocolVersionId::default(), ); - header.is_finished = true; conn.blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&header) .await .unwrap(); conn.blocks_dal() @@ -770,7 +833,8 @@ mod tests { let touched_slots = conn .storage_logs_dal() .get_touched_slots_for_l1_batch(L1BatchNumber(1)) - .await; + .await + .unwrap(); assert_eq!(touched_slots.len(), 2); assert_eq!(touched_slots[&first_key], H256::repeat_byte(1)); assert_eq!(touched_slots[&second_key], H256::repeat_byte(2)); @@ -786,7 +850,8 @@ mod tests { let touched_slots = conn .storage_logs_dal() .get_touched_slots_for_l1_batch(L1BatchNumber(1)) - .await; + .await + .unwrap(); assert_eq!(touched_slots.len(), 2); assert_eq!(touched_slots[&first_key], H256::repeat_byte(3)); assert_eq!(touched_slots[&second_key], H256::repeat_byte(2)); @@ -818,7 +883,8 @@ mod tests { let prev_values = conn .storage_logs_dal() .get_previous_storage_values(&prev_keys, L1BatchNumber(2)) - .await; + .await + .unwrap(); assert_eq!(prev_values.len(), 3); assert_eq!(prev_values[&prev_keys[0]], Some(H256::repeat_byte(3))); assert_eq!(prev_values[&prev_keys[1]], None); @@ -826,7 +892,8 @@ mod tests { conn.storage_logs_dal() .rollback_storage(MiniblockNumber(1)) - .await; + .await + .unwrap(); let value = conn.storage_dal().get_by_key(&key).await.unwrap(); assert_eq!(value, Some(H256::repeat_byte(3))); @@ -872,7 +939,8 @@ mod tests { let logs_for_revert = conn .storage_logs_dal() .get_storage_logs_for_revert(L1BatchNumber(1)) - .await; + .await + .unwrap(); assert_eq!(logs_for_revert.len(), 15); // 5 updated + 10 new keys for log in &logs[5..] 
{ let prev_value = logs_for_revert[&log.key.hashed_key()].unwrap().0; @@ -931,7 +999,8 @@ mod tests { let logs_for_revert = conn .storage_logs_dal() .get_storage_logs_for_revert(L1BatchNumber(1)) - .await; + .await + .unwrap(); assert_eq!(logs_for_revert.len(), 3); for (i, log) in logs.iter().enumerate() { let hashed_key = log.key.hashed_key(); @@ -974,10 +1043,7 @@ mod tests { .iter() .find(|&key| key_range.contains(key)); if let Some(chunk_start) = chunk_start { - assert_eq!( - u256_to_h256_reversed(chunk_start.key), - *expected_start_key.unwrap() - ); + assert_eq!(chunk_start.key, *expected_start_key.unwrap()); assert_ne!(chunk_start.value, H256::zero()); assert_ne!(chunk_start.leaf_index, 0); } else { @@ -1027,7 +1093,7 @@ mod tests { assert_eq!( tree_entries .iter() - .map(|entry| u256_to_h256_reversed(entry.key)) + .map(|entry| entry.key) .collect::>(), sorted_hashed_keys ); @@ -1040,7 +1106,7 @@ mod tests { .unwrap(); assert!(!tree_entries.is_empty() && tree_entries.len() < 10); for entry in &tree_entries { - assert!(key_range.contains(&u256_to_h256_reversed(entry.key))); + assert!(key_range.contains(&entry.key)); } } } diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index a7bef5aa794..9ca17176e8b 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,9 +1,13 @@ use std::collections::HashSet; use sqlx::types::chrono::Utc; -use zksync_types::{AccountTreeId, Address, L1BatchNumber, LogQuery, StorageKey, H256}; +use zksync_types::{ + snapshots::SnapshotStorageLog, zk_evm_types::LogQuery, AccountTreeId, Address, L1BatchNumber, + StorageKey, H256, +}; use zksync_utils::u256_to_h256; +pub use crate::models::storage_log::DbInitialWrite; use crate::StorageProcessor; #[derive(Debug)] @@ -44,6 +48,38 @@ impl StorageLogsDedupDal<'_, '_> { /// Insert initial writes and assigns indices to them. /// Assumes indices are already assigned for all saved initial_writes, so must be called only after the migration. + pub async fn insert_initial_writes_from_snapshot( + &mut self, + snapshot_storage_logs: &[SnapshotStorageLog], + ) -> sqlx::Result<()> { + let mut copy = self + .storage + .conn() + .copy_in_raw( + "COPY initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at) \ + FROM STDIN WITH (DELIMITER '|')", + ) + .await?; + + let mut bytes: Vec = Vec::new(); + let now = Utc::now().naive_utc().to_string(); + for log in snapshot_storage_logs.iter() { + let row = format!( + "\\\\x{:x}|{}|{}|{}|{}\n", + log.key.hashed_key(), + log.enumeration_index, + log.l1_batch_number_of_initial_write, + now, + now, + ); + bytes.extend_from_slice(row.as_bytes()); + } + copy.send(bytes).await?; + copy.finish().await?; + + Ok(()) + } + pub async fn insert_initial_writes( &mut self, l1_batch_number: L1BatchNumber, @@ -191,4 +227,29 @@ impl StorageLogsDedupDal<'_, '_> { .map(|row| H256::from_slice(&row.hashed_key)) .collect() } + + /// Retrieves all initial write entries for testing purposes. 
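// ---
// Illustrative sketch, not part of the patch: why the two COPY writers in
// these hunks spell the bytea prefix differently. COPY's text format
// unescapes `\\` to `\`, so a literal `\\x` must reach Postgres; a raw
// string needs two backslashes, a normal string four.
fn copy_escaping_sketch() {
    let raw = r"\\x"; // raw string: both backslashes are literal
    let escaped = "\\\\x"; // normal string: each `\\` encodes one backslash
    assert_eq!(raw, escaped);
    assert_eq!(raw.len(), 3); // two backslashes + 'x'
}
// ---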
+ pub async fn dump_all_initial_writes_for_tests(&mut self) -> Vec { + let rows = sqlx::query!( + r#" + SELECT + hashed_key, + l1_batch_number, + INDEX + FROM + initial_writes + "# + ) + .fetch_all(self.storage.conn()) + .await + .expect("get_all_initial_writes_for_tests"); + + rows.into_iter() + .map(|row| DbInitialWrite { + hashed_key: H256::from_slice(&row.hashed_key), + l1_batch_number: L1BatchNumber(row.l1_batch_number as u32), + index: row.index as u64, + }) + .collect() + } } diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index e5cc4fc8b97..312c46acba2 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -258,9 +258,7 @@ impl StorageWeb3Dal<'_, '_> { #[cfg(test)] mod tests { use zksync_types::{ - block::{BlockGasCount, L1BatchHeader}, - snapshots::SnapshotRecoveryStatus, - ProtocolVersion, ProtocolVersionId, + block::L1BatchHeader, snapshots::SnapshotRecoveryStatus, ProtocolVersion, ProtocolVersionId, }; use super::*; @@ -280,12 +278,11 @@ mod tests { let l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, - Address::repeat_byte(0x42), Default::default(), ProtocolVersionId::latest(), ); conn.blocks_dal() - .insert_l1_batch(&l1_batch_header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&l1_batch_header) .await .unwrap(); conn.blocks_dal() @@ -346,11 +343,10 @@ mod tests { l1_batch_root_hash: H256::zero(), miniblock_number: MiniblockNumber(42), miniblock_root_hash: H256::zero(), - last_finished_chunk_id: None, - total_chunk_count: 100, + storage_logs_chunks_processed: vec![true; 100], }; conn.snapshot_recovery_dal() - .set_applied_snapshot_status(&snapshot_recovery) + .insert_initial_recovery_status(&snapshot_recovery) .await .unwrap(); @@ -385,12 +381,11 @@ mod tests { let l1_batch_header = L1BatchHeader::new( snapshot_recovery.l1_batch_number + 1, 100, - Address::repeat_byte(0x42), Default::default(), ProtocolVersionId::latest(), ); conn.blocks_dal() - .insert_l1_batch(&l1_batch_header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&l1_batch_header) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 53310603d2c..284ce317555 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -1,4 +1,4 @@ -use zksync_types::{api::en, Address, MiniblockNumber}; +use zksync_types::{api::en, MiniblockNumber}; use crate::{ instrument::InstrumentExt, @@ -49,10 +49,9 @@ impl SyncDal<'_, '_> { miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", - l1_batches.fee_account_address AS "fee_account_address?" + miniblocks.fee_account_address AS "fee_account_address!" 
FROM miniblocks - LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number WHERE miniblocks.number = $1 "#, @@ -65,13 +64,20 @@ impl SyncDal<'_, '_> { else { return Ok(None); }; - Ok(Some(block.try_into()?)) + + let mut block = SyncBlock::try_from(block)?; + // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration + #[allow(deprecated)] + self.storage + .blocks_dal() + .maybe_load_fee_address(&mut block.fee_account_address, block.number) + .await?; + Ok(Some(block)) } pub async fn sync_block( &mut self, block_number: MiniblockNumber, - current_operator_address: Address, include_transactions: bool, ) -> anyhow::Result> { let _latency = MethodLatency::new("sync_dal_sync_block"); @@ -79,25 +85,25 @@ impl SyncDal<'_, '_> { return Ok(None); }; let transactions = if include_transactions { - Some( - self.storage - .transactions_web3_dal() - .get_raw_miniblock_transactions(block_number) - .await?, - ) + let transactions = self + .storage + .transactions_web3_dal() + .get_raw_miniblock_transactions(block_number) + .await?; + Some(transactions) } else { None }; - Ok(Some(block.into_api(current_operator_address, transactions))) + Ok(Some(block.into_api(transactions))) } } #[cfg(test)] mod tests { use zksync_types::{ - block::{BlockGasCount, L1BatchHeader}, + block::{L1BatchHeader, MiniblockHeader}, fee::TransactionExecutionMetrics, - L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, + Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, }; use super::*; @@ -122,12 +128,11 @@ mod tests { let mut l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, - Address::repeat_byte(0x42), Default::default(), ProtocolVersionId::latest(), ); conn.blocks_dal() - .insert_l1_batch(&l1_batch_header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&l1_batch_header) .await .unwrap(); conn.blocks_dal() @@ -135,16 +140,18 @@ mod tests { .await .unwrap(); - let operator_address = Address::repeat_byte(1); assert!(conn .sync_dal() - .sync_block(MiniblockNumber(1), operator_address, false) + .sync_block(MiniblockNumber(1), false) .await .unwrap() .is_none()); // Insert another block in the store. 
- let miniblock_header = create_miniblock_header(1); + let miniblock_header = MiniblockHeader { + fee_account_address: Address::repeat_byte(0x42), + ..create_miniblock_header(1) + }; let tx = mock_l2_transaction(); conn.transactions_dal() .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) @@ -163,7 +170,7 @@ mod tests { let block = conn .sync_dal() - .sync_block(MiniblockNumber(1), operator_address, false) + .sync_block(MiniblockNumber(1), false) .await .unwrap() .expect("no sync block"); @@ -187,12 +194,12 @@ mod tests { block.l2_fair_gas_price, miniblock_header.batch_fee_input.fair_l2_gas_price() ); - assert_eq!(block.operator_address, operator_address); + assert_eq!(block.operator_address, miniblock_header.fee_account_address); assert!(block.transactions.is_none()); let block = conn .sync_dal() - .sync_block(MiniblockNumber(1), operator_address, true) + .sync_block(MiniblockNumber(1), true) .await .unwrap() .expect("no sync block"); @@ -202,7 +209,7 @@ mod tests { l1_batch_header.number = L1BatchNumber(1); l1_batch_header.timestamp = 1; conn.blocks_dal() - .insert_l1_batch(&l1_batch_header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&l1_batch_header) .await .unwrap(); conn.blocks_dal() @@ -212,12 +219,12 @@ mod tests { let block = conn .sync_dal() - .sync_block(MiniblockNumber(1), operator_address, true) + .sync_block(MiniblockNumber(1), true) .await .unwrap() .expect("no sync block"); assert_eq!(block.l1_batch_number, L1BatchNumber(1)); assert!(block.last_in_batch); - assert_eq!(block.operator_address, l1_batch_header.fee_account_address); + assert_eq!(block.operator_address, miniblock_header.fee_account_address); } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 5b285ff04f8..8094f37216c 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -36,6 +36,7 @@ pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader { hash: MiniblockHasher::new(number, 0, H256::zero()).finalize(protocol_version), l1_tx_count: 0, l2_tx_count: 0, + fee_account_address: Address::default(), gas_per_pubdata_limit: 100, base_fee_per_gas: 100, batch_fee_input: BatchFeeInput::l1_pegged(100, 100), @@ -250,9 +251,10 @@ async fn remove_stuck_txs() { // We shouldn't collect executed tx let storage = transactions_dal.storage; let mut transactions_web3_dal = TransactionsWeb3Dal { storage }; - transactions_web3_dal - .get_transaction_receipt(executed_tx.hash()) + let receipts = transactions_web3_dal + .get_transaction_receipts(&[executed_tx.hash()]) .await - .unwrap() .unwrap(); + + assert_eq!(receipts.len(), 1); } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 707cc1a007b..b084a1ba01a 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1316,25 +1316,22 @@ impl TransactionsDal<'_, '_> { } } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> Option { - { - sqlx::query_as!( - CallTrace, - r#" - SELECT - * - FROM - call_traces - WHERE - tx_hash = $1 - "#, - tx_hash.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|trace| trace.into()) - } + pub async fn get_call_trace(&mut self, tx_hash: H256) -> sqlx::Result> { + Ok(sqlx::query_as!( + CallTrace, + r#" + SELECT + call_trace + FROM + call_traces + WHERE + tx_hash = $1 + "#, + tx_hash.as_bytes() + ) + .fetch_optional(self.storage.conn()) + .await? 
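// ---
// Illustrative sketch, not part of the patch: what the reworked sync tests
// above rely on. `operator_address` is now sourced from the miniblock's own
// `fee_account_address` instead of a caller-supplied address, and fixtures
// override that field with struct-update syntax. `create_miniblock_header`
// is the repo's test helper.
use zksync_types::{block::MiniblockHeader, Address};

fn header_with_fee_account(base: MiniblockHeader) -> MiniblockHeader {
    MiniblockHeader {
        fee_account_address: Address::repeat_byte(0x42),
        ..base // all other fields stay as produced by the helper
    }
}
// ---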
+ .map(Into::into)) } pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> Option { @@ -1356,3 +1353,52 @@ impl TransactionsDal<'_, '_> { .map(|tx| tx.into()) } } + +#[cfg(test)] +mod tests { + use zksync_types::ProtocolVersion; + + use super::*; + use crate::{ + tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + ConnectionPool, + }; + + #[tokio::test] + async fn getting_call_trace_for_transaction() { + let connection_pool = ConnectionPool::test_pool().await; + let mut conn = connection_pool.access_storage().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(1)) + .await + .unwrap(); + + let tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + conn.transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + let mut tx_result = mock_execution_result(tx); + tx_result.call_traces.push(Call { + from: Address::from_low_u64_be(1), + to: Address::from_low_u64_be(2), + value: 100.into(), + ..Call::default() + }); + let expected_call_trace = tx_result.call_trace().unwrap(); + conn.transactions_dal() + .mark_txs_as_executed_in_miniblock(MiniblockNumber(1), &[tx_result], 1.into()) + .await; + + let call_trace = conn + .transactions_dal() + .get_call_trace(tx_hash) + .await + .unwrap() + .expect("no call trace"); + assert_eq!(call_trace, expected_call_trace); + } +} diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index abc243dd1a1..251d1db7f49 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,18 +1,16 @@ use sqlx::types::chrono::NaiveDateTime; use zksync_types::{ - api, Address, L2ChainId, MiniblockNumber, Transaction, ACCOUNT_CODE_STORAGE_ADDRESS, - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H160, H256, U256, U64, + api, api::TransactionReceipt, Address, L2ChainId, MiniblockNumber, Transaction, + ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256, }; -use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; use crate::{ instrument::InstrumentExt, models::{ storage_block::{bind_block_where_sql_params, web3_block_where_sql}, - storage_event::StorageWeb3Log, storage_transaction::{ extract_web3_transaction, web3_transaction_select_sql, StorageTransaction, - StorageTransactionDetails, + StorageTransactionDetails, StorageTransactionReceipt, }, }, SqlxError, StorageProcessor, @@ -24,171 +22,106 @@ pub struct TransactionsWeb3Dal<'a, 'c> { } impl TransactionsWeb3Dal<'_, '_> { - pub async fn get_transaction_receipt( + /// Returns receipts by transactions hashes. + /// Hashes are expected to be unique. 
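// ---
// Illustrative sketch, not part of the patch: a hypothetical call site for
// the reworked `get_call_trace`, which now returns
// `sqlx::Result<Option<Call>>` instead of panicking. The DB error propagates
// with `?`; "no trace stored" remains an `Option`.
async fn print_call_trace(
    conn: &mut zksync_dal::StorageProcessor<'_>,
    tx_hash: zksync_types::H256,
) -> anyhow::Result<()> {
    if let Some(call) = conn.transactions_dal().get_call_trace(tx_hash).await? {
        println!("top-level call from {:?} to {:?}", call.from, call.to);
    }
    Ok(())
}
// ---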
+ pub async fn get_transaction_receipts( &mut self, - hash: H256, - ) -> Result, SqlxError> { - { - let receipt = sqlx::query!( - r#" - WITH - sl AS ( - SELECT - * - FROM - storage_logs - WHERE - storage_logs.address = $1 - AND storage_logs.tx_hash = $2 - ORDER BY - storage_logs.miniblock_number DESC, - storage_logs.operation_number DESC - LIMIT - 1 - ) - SELECT - transactions.hash AS tx_hash, - transactions.index_in_block AS index_in_block, - transactions.l1_batch_tx_index AS l1_batch_tx_index, - transactions.miniblock_number AS "block_number!", - transactions.error AS error, - transactions.effective_gas_price AS effective_gas_price, - transactions.initiator_address AS initiator_address, - transactions.data -> 'to' AS "transfer_to?", - transactions.data -> 'contractAddress' AS "execute_contract_address?", - transactions.tx_format AS "tx_format?", - transactions.refunded_gas AS refunded_gas, - transactions.gas_limit AS gas_limit, - miniblocks.hash AS "block_hash", - miniblocks.l1_batch_number AS "l1_batch_number?", - sl.key AS "contract_address?" - FROM - transactions - JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN sl ON sl.value != $3 - WHERE - transactions.hash = $2 - "#, - ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(), - hash.as_bytes(), - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes() - ) - .instrument("get_transaction_receipt") - .with_arg("hash", &hash) - .fetch_optional(self.storage.conn()) - .await? - .map(|db_row| { - let status = db_row.error.map(|_| U64::zero()).unwrap_or_else(U64::one); - - let tx_type = db_row.tx_format.map(U64::from).unwrap_or_default(); - let transaction_index = db_row.index_in_block.map(U64::from).unwrap_or_default(); - - let block_hash = H256::from_slice(&db_row.block_hash); - api::TransactionReceipt { - transaction_hash: H256::from_slice(&db_row.tx_hash), - transaction_index, - block_hash, - block_number: db_row.block_number.into(), - l1_batch_tx_index: db_row.l1_batch_tx_index.map(U64::from), - l1_batch_number: db_row.l1_batch_number.map(U64::from), - from: H160::from_slice(&db_row.initiator_address), - to: db_row - .transfer_to - .or(db_row.execute_contract_address) - .map(|addr| { - serde_json::from_value::
(addr) - .expect("invalid address value in the database") - }) - // For better compatibility with various clients, we never return null. - .or_else(|| Some(Address::default())), - cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). - gas_used: { - let refunded_gas: U256 = db_row.refunded_gas.into(); - db_row.gas_limit.map(|val| { - let gas_limit = bigdecimal_to_u256(val); - gas_limit - refunded_gas - }) - }, - effective_gas_price: Some( - db_row - .effective_gas_price - .map(bigdecimal_to_u256) - .unwrap_or_default(), - ), - contract_address: db_row - .contract_address - .map(|addr| h256_to_account_address(&H256::from_slice(&addr))), - logs: vec![], - l2_to_l1_logs: vec![], - status, - root: block_hash, - logs_bloom: Default::default(), - // Even though the Rust SDK recommends us to supply "None" for legacy transactions - // we always supply some number anyway to have the same behavior as most popular RPCs - transaction_type: Some(tx_type), - } - }); - match receipt { - Some(mut receipt) => { - let logs: Vec<_> = sqlx::query_as!( - StorageWeb3Log, - r#" - SELECT - address, - topic1, - topic2, - topic3, - topic4, - value, - NULL::bytea AS "block_hash", - NULL::BIGINT AS "l1_batch_number?", - miniblock_number, - tx_hash, - tx_index_in_block, - event_index_in_block, - event_index_in_tx - FROM - events - WHERE - tx_hash = $1 - ORDER BY - miniblock_number ASC, - event_index_in_block ASC - "#, - hash.as_bytes() - ) - .instrument("get_transaction_receipt_events") - .with_arg("hash", &hash) - .fetch_all(self.storage.conn()) - .await? + hashes: &[H256], + ) -> Result, SqlxError> { + let mut receipts: Vec = sqlx::query_as!( + StorageTransactionReceipt, + r#" + WITH + sl AS ( + SELECT DISTINCT + ON (storage_logs.tx_hash) * + FROM + storage_logs + WHERE + storage_logs.address = $1 + AND storage_logs.tx_hash = ANY ($3) + ORDER BY + storage_logs.tx_hash, + storage_logs.miniblock_number DESC, + storage_logs.operation_number DESC + ) + SELECT + transactions.hash AS tx_hash, + transactions.index_in_block AS index_in_block, + transactions.l1_batch_tx_index AS l1_batch_tx_index, + transactions.miniblock_number AS "block_number!", + transactions.error AS error, + transactions.effective_gas_price AS effective_gas_price, + transactions.initiator_address AS initiator_address, + transactions.data -> 'to' AS "transfer_to?", + transactions.data -> 'contractAddress' AS "execute_contract_address?", + transactions.tx_format AS "tx_format?", + transactions.refunded_gas AS refunded_gas, + transactions.gas_limit AS gas_limit, + miniblocks.hash AS "block_hash", + miniblocks.l1_batch_number AS "l1_batch_number?", + sl.key AS "contract_address?" + FROM + transactions + JOIN miniblocks ON miniblocks.number = transactions.miniblock_number + LEFT JOIN sl ON sl.value != $2 + AND sl.tx_hash = transactions.hash + WHERE + transactions.hash = ANY ($3) + "#, + ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(), + FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(), + &hashes + .iter() + .map(|h| h.as_bytes().to_vec()) + .collect::>()[..] + ) + .fetch_all(self.storage.conn()) + .await? 
+ .into_iter() + .map(Into::into) + .collect(); + + let mut logs = self + .storage + .events_dal() + .get_logs_by_tx_hashes(hashes) + .await?; + + let mut l2_to_l1_logs = self + .storage + .events_dal() + .get_l2_to_l1_logs_by_hashes(hashes) + .await?; + + for receipt in &mut receipts { + let logs_for_tx = logs.remove(&receipt.transaction_hash); + + if let Some(logs) = logs_for_tx { + receipt.logs = logs .into_iter() - .map(|storage_log| { - let mut log = api::Log::from(storage_log); + .map(|mut log| { log.block_hash = Some(receipt.block_hash); log.l1_batch_number = receipt.l1_batch_number; log }) .collect(); + } - receipt.logs = logs; - - let l2_to_l1_logs = self.storage.events_dal().l2_to_l1_logs(hash).await?; - let l2_to_l1_logs: Vec<_> = l2_to_l1_logs - .into_iter() - .map(|storage_l2_to_l1_log| { - let mut l2_to_l1_log = api::L2ToL1Log::from(storage_l2_to_l1_log); - l2_to_l1_log.block_hash = Some(receipt.block_hash); - l2_to_l1_log.l1_batch_number = receipt.l1_batch_number; - l2_to_l1_log - }) - .collect(); - receipt.l2_to_l1_logs = l2_to_l1_logs; - - Ok(Some(receipt)) - } - None => Ok(None), + let l2_to_l1_logs_for_tx = l2_to_l1_logs.remove(&receipt.transaction_hash); + if let Some(l2_to_l1_logs) = l2_to_l1_logs_for_tx { + receipt.l2_to_l1_logs = l2_to_l1_logs + .into_iter() + .map(|mut log| { + log.block_hash = Some(receipt.block_hash); + log.l1_batch_number = receipt.l1_batch_number; + log + }) + .collect(); } } + + Ok(receipts) } pub async fn get_transaction( @@ -413,26 +346,33 @@ mod tests { ConnectionPool, }; - async fn prepare_transaction(conn: &mut StorageProcessor<'_>, tx: L2Tx) { + async fn prepare_transactions(conn: &mut StorageProcessor<'_>, txs: Vec) { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await .unwrap(); - conn.transactions_dal() - .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) - .await; + + for tx in &txs { + conn.transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + } conn.blocks_dal() .insert_miniblock(&create_miniblock_header(0)) .await .unwrap(); let mut miniblock_header = create_miniblock_header(1); - miniblock_header.l2_tx_count = 1; + miniblock_header.l2_tx_count = txs.len() as u16; conn.blocks_dal() .insert_miniblock(&miniblock_header) .await .unwrap(); - let tx_results = [mock_execution_result(tx)]; + let tx_results = txs + .into_iter() + .map(mock_execution_result) + .collect::>(); + conn.transactions_dal() .mark_txs_as_executed_in_miniblock(MiniblockNumber(1), &tx_results, U256::from(1)) .await; @@ -447,7 +387,7 @@ mod tests { .await; let tx = mock_l2_transaction(); let tx_hash = tx.hash(); - prepare_transaction(&mut conn, tx).await; + prepare_transactions(&mut conn, vec![tx]).await; let block_hash = MiniblockHasher::new(MiniblockNumber(1), 0, H256::zero()) .finalize(ProtocolVersionId::latest()); @@ -501,6 +441,34 @@ mod tests { } } + #[tokio::test] + async fn getting_receipts() { + let connection_pool = ConnectionPool::test_pool().await; + let mut conn = connection_pool.access_storage().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + + let tx1 = mock_l2_transaction(); + let tx1_hash = tx1.hash(); + let tx2 = mock_l2_transaction(); + let tx2_hash = tx2.hash(); + + prepare_transactions(&mut conn, vec![tx1.clone(), tx2.clone()]).await; + + let mut receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx1_hash, tx2_hash]) + .await + .unwrap(); + + 
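// ---
// Illustrative sketch, not part of the patch: the attach-logs step of
// `get_transaction_receipts` in miniature. Logs are fetched once for all
// hashes, grouped by transaction hash, and moved out of the map with
// `remove` so each receipt takes ownership of its own batch.
fn attach_logs_sketch() {
    use std::collections::HashMap;
    use zksync_types::H256;

    let (tx1, tx2) = (H256::repeat_byte(1), H256::repeat_byte(2));
    let mut logs_by_tx: HashMap<H256, Vec<&str>> =
        HashMap::from([(tx1, vec!["log-a", "log-b"]), (tx2, vec!["log-c"])]);

    // Per receipt: take that tx's logs; a missing key simply means no logs.
    let tx1_logs = logs_by_tx.remove(&tx1).unwrap_or_default();
    assert_eq!(tx1_logs.len(), 2);
    assert!(logs_by_tx.remove(&H256::zero()).is_none());
}
// ---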
receipts.sort_unstable_by_key(|receipt| receipt.transaction_index);
+
+        assert_eq!(receipts.len(), 2);
+        assert_eq!(receipts[0].transaction_hash, tx1_hash);
+        assert_eq!(receipts[1].transaction_hash, tx2_hash);
+    }
+
     #[tokio::test]
     async fn getting_miniblock_transactions() {
         let connection_pool = ConnectionPool::test_pool().await;
@@ -510,7 +478,7 @@
             .await;
         let tx = mock_l2_transaction();
         let tx_hash = tx.hash();
-        prepare_transaction(&mut conn, tx).await;
+        prepare_transactions(&mut conn, vec![tx]).await;

         let raw_txs = conn
             .transactions_web3_dal()
diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs
index f0ee6030b02..c510395c3bb 100644
--- a/core/lib/env_config/src/eth_sender.rs
+++ b/core/lib/env_config/src/eth_sender.rs
@@ -64,6 +64,7 @@ mod tests {
                 internal_enforced_l1_gas_price: None,
                 poll_period: 15,
                 max_l1_gas_price: Some(100000000),
+                l1_gas_per_pubdata_byte: 17,
             },
         }
     }
diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs
index fa2bb237191..5186b6e4198 100644
--- a/core/lib/env_config/src/lib.rs
+++ b/core/lib/env_config/src/lib.rs
@@ -32,7 +32,7 @@ pub trait FromEnv: Sized {

 /// Convenience function that loads the structure from the environment variable given the prefix.
 /// Panics if the config cannot be loaded from the environment variables.
-pub(crate) fn envy_load<T: DeserializeOwned>(name: &str, prefix: &str) -> anyhow::Result<T> {
+pub fn envy_load<T: DeserializeOwned>(name: &str, prefix: &str) -> anyhow::Result<T> {
     envy::prefixed(prefix)
         .from_env()
         .with_context(|| format!("Cannot load config <{name}>"))
diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml
new file mode 100644
index 00000000000..7a8afbd1b8e
--- /dev/null
+++ b/core/lib/l1_contract_interface/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "zksync_l1_contract_interface"
+version = "0.1.0"
+edition = "2018"
+authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
+homepage = "https://zksync.io/"
+repository = "https://github.com/matter-labs/zksync-era"
+license = "MIT OR Apache-2.0"
+keywords = ["blockchain", "zksync"]
+categories = ["cryptography"]
+readme = "README.md"
+
+[dependencies]
+zksync_types = { path = "../types" }
+zksync_prover_interface = { path = "../prover_interface" }
+zksync_config = { path = "../config" }
+
+# Used to serialize proof data
+codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" }
+# Used to calculate commitment for vk from the old L1 verifier contract (backward compatibility needs)
+zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" }
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
new file mode 100644
index 00000000000..43a8152499f
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
@@ -0,0 +1,30 @@
+use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
+use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};
+
+use crate::{
+    i_executor::structures::{CommitBatchInfo, StoredBatchInfo},
+    Tokenizable, Tokenize,
+};
+
+/// Input required to encode `commitBatches` call.
+#[derive(Debug, Clone)] +pub struct CommitBatches { + pub last_committed_l1_batch: L1BatchWithMetadata, + pub l1_batches: Vec, + pub l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, +} + +impl Tokenize for CommitBatches { + fn into_tokens(self) -> Vec { + let stored_batch_info = StoredBatchInfo(&self.last_committed_l1_batch).into_token(); + let l1_batches_to_commit = self + .l1_batches + .iter() + .map(|batch| { + CommitBatchInfo::new(batch, self.l1_batch_commit_data_generator).into_token() + }) + .collect(); + + vec![stored_batch_info, Token::Array(l1_batches_to_commit)] + } +} diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs new file mode 100644 index 00000000000..9b759270a2a --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs @@ -0,0 +1,22 @@ +use zksync_types::{ + commitment::L1BatchWithMetadata, ethabi::Token, web3::contract::tokens::Tokenizable, +}; + +use crate::{i_executor::structures::StoredBatchInfo, Tokenize}; + +/// Input required to encode `executeBatches` call. +#[derive(Debug, Clone)] +pub struct ExecuteBatches { + pub l1_batches: Vec, +} + +impl Tokenize for ExecuteBatches { + fn into_tokens(self) -> Vec { + vec![Token::Array( + self.l1_batches + .iter() + .map(|batch| StoredBatchInfo(batch).into_token()) + .collect(), + )] + } +} diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/mod.rs b/core/lib/l1_contract_interface/src/i_executor/methods/mod.rs new file mode 100644 index 00000000000..765586edb3f --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/methods/mod.rs @@ -0,0 +1,9 @@ +//! Utilities for encoding input data for methods defined in `IExecutor.sol`. + +pub use self::{ + commit_batches::CommitBatches, execute_batches::ExecuteBatches, prove_batches::ProveBatches, +}; + +mod commit_batches; +mod execute_batches; +mod prove_batches; diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs new file mode 100644 index 00000000000..3c35677d240 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs @@ -0,0 +1,69 @@ +use codegen::serialize_proof; +use zksync_prover_interface::outputs::L1BatchProofForL1; +use zksync_types::{ + commitment::L1BatchWithMetadata, ethabi::Token, web3::contract::tokens::Tokenizable, U256, +}; + +use crate::{i_executor::structures::StoredBatchInfo, Tokenize}; + +/// Input required to encode `proveBatches` call. 
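// ---
// Illustrative sketch, not part of the patch: how these `Tokenize` inputs
// are meant to be consumed. Assuming a loaded `Contract` ABI that defines
// `commitBatches` (names here are illustrative), `into_tokens()` yields the
// argument list that `ethabi` encodes into calldata.
use zksync_types::ethabi::{Contract, Token};

fn encode_commit_calldata(contract: &Contract, tokens: Vec<Token>) -> Vec<u8> {
    contract
        .function("commitBatches")
        .expect("ABI must define commitBatches")
        .encode_input(&tokens)
        .expect("token shape must match the ABI")
}
// ---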
+#[derive(Debug, Clone)] +pub struct ProveBatches { + pub prev_l1_batch: L1BatchWithMetadata, + pub l1_batches: Vec, + pub proofs: Vec, + pub should_verify: bool, +} + +impl Tokenize for ProveBatches { + fn into_tokens(self) -> Vec { + let prev_l1_batch = StoredBatchInfo(&self.prev_l1_batch).into_token(); + let batches_arg = self + .l1_batches + .iter() + .map(|batch| StoredBatchInfo(batch).into_token()) + .collect(); + let batches_arg = Token::Array(batches_arg); + + if self.should_verify { + // currently we only support submitting a single proof + assert_eq!(self.proofs.len(), 1); + assert_eq!(self.l1_batches.len(), 1); + + let L1BatchProofForL1 { + aggregation_result_coords, + scheduler_proof, + } = self.proofs.first().unwrap(); + + let (_, proof) = serialize_proof(scheduler_proof); + + let aggregation_result_coords = if self.l1_batches[0] + .header + .protocol_version + .unwrap() + .is_pre_boojum() + { + Token::Array( + aggregation_result_coords + .iter() + .map(|bytes| Token::Uint(U256::from_big_endian(bytes))) + .collect(), + ) + } else { + Token::Array(Vec::new()) + }; + let proof_input = Token::Tuple(vec![ + aggregation_result_coords, + Token::Array(proof.into_iter().map(Token::Uint).collect()), + ]); + + vec![prev_l1_batch, batches_arg, proof_input] + } else { + vec![ + prev_l1_batch, + batches_arg, + Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]), + ] + } + } +} diff --git a/core/lib/l1_contract_interface/src/i_executor/mod.rs b/core/lib/l1_contract_interface/src/i_executor/mod.rs new file mode 100644 index 00000000000..a866b45fef7 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/mod.rs @@ -0,0 +1,4 @@ +//! Different interfaces exposed by the `IExecutor.sol`. + +pub mod methods; +pub mod structures; diff --git a/core/lib/types/src/l1_batch_commit_data_generator.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs similarity index 53% rename from core/lib/types/src/l1_batch_commit_data_generator.rs rename to core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 41aeb935a7a..b711aa4fc12 100644 --- a/core/lib/types/src/l1_batch_commit_data_generator.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -1,62 +1,72 @@ -use zksync_basic_types::{ethabi::Token, U256}; +use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode; +use zksync_types::{ + commitment::L1BatchWithMetadata, + ethabi::Token, + utils, + web3::{contract::Error as Web3ContractError, error::Error as Web3ApiError}, + U256, +}; -use crate::{commitment::L1BatchWithMetadata, utils}; +use crate::Tokenizable; -pub trait L1BatchCommitDataGenerator -where - Self: std::fmt::Debug + Send + Sync, -{ - fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token; - fn l1_commit_data_size(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> usize { - crate::ethabi::encode(&[Token::Array(vec![ - self.l1_commit_data(l1_batch_with_metadata) - ])]) - .len() - } +/// Encoding for `CommitBatchInfo` from `IExecutor.sol` +#[derive(Debug)] +pub struct CommitBatchInfo<'a> { + pub l1_batch_with_metadata: &'a L1BatchWithMetadata, + pub l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, } -#[derive(Debug, Clone)] -pub struct RollupModeL1BatchCommitDataGenerator {} - -#[derive(Debug, Clone)] -pub struct ValidiumModeL1BatchCommitDataGenerator {} - -impl L1BatchCommitDataGenerator for RollupModeL1BatchCommitDataGenerator { - fn l1_commit_data(&self, l1_batch_with_metadata: 
&L1BatchWithMetadata) -> Token { - let commit_data = if l1_batch_with_metadata - .header - .protocol_version - .unwrap() - .is_pre_boojum() - { - preboojum_l1_commit_data(l1_batch_with_metadata) - } else { - rollup_mode_l1_commit_data(l1_batch_with_metadata) - }; - Token::Tuple(commit_data) +impl<'a> CommitBatchInfo<'a> { + pub fn new( + l1_batch_with_metadata: &'a L1BatchWithMetadata, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, + ) -> Self { + Self { + l1_batch_with_metadata, + l1_batch_commit_data_generator, + } } } -impl L1BatchCommitDataGenerator for ValidiumModeL1BatchCommitDataGenerator { - fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token { - let commit_data = if l1_batch_with_metadata +impl<'a> Tokenizable for CommitBatchInfo<'a> { + fn from_token(_token: Token) -> Result + where + Self: Sized, + { + // Currently there is no need to decode this struct. + // We still want to implement `Tokenizable` trait for it, so that *once* it's needed + // the implementation is provided here and not in some other inconsistent way. + Err(Web3ContractError::Api(Web3ApiError::Decoder( + "Not implemented".to_string(), + ))) + } + + fn into_token(self) -> Token { + if self + .l1_batch_with_metadata .header .protocol_version .unwrap() .is_pre_boojum() { - preboojum_l1_commit_data(l1_batch_with_metadata) + pre_boojum_into_token(self.l1_batch_with_metadata) } else { - validium_mode_l1_commit_data(l1_batch_with_metadata) - }; - Token::Tuple(commit_data) + match self.l1_batch_commit_data_generator { + L1BatchCommitDataGeneratorMode::Rollup => { + Token::Tuple(rollup_mode_l1_commit_data(self.l1_batch_with_metadata)) + } + L1BatchCommitDataGeneratorMode::Validium => { + Token::Tuple(validium_mode_l1_commit_data(self.l1_batch_with_metadata)) + } + } + } } } -fn preboojum_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec { - let header = &l1_batch_with_metadata.header; - let metadata = &l1_batch_with_metadata.metadata; - let commit_data = vec![ +fn pre_boojum_into_token<'a>(l1_batch_commit_with_metadata: &'a L1BatchWithMetadata) -> Token { + let header = &l1_batch_commit_with_metadata.header; + let metadata = &l1_batch_commit_with_metadata.metadata; + Token::Tuple(vec![ Token::Uint(U256::from(header.number.0)), Token::Uint(U256::from(header.timestamp)), Token::Uint(U256::from(metadata.rollup_last_leaf_index)), @@ -75,17 +85,16 @@ fn preboojum_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec .collect(), ), Token::Array( - l1_batch_with_metadata + l1_batch_commit_with_metadata .factory_deps .iter() .map(|bytecode| Token::Bytes(bytecode.to_vec())) .collect(), ), - ]; - commit_data + ]) } -fn validium_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec { +fn validium_mode_l1_commit_data<'a>(l1_batch_with_metadata: &'a L1BatchWithMetadata) -> Vec { let header = &l1_batch_with_metadata.header; let metadata = &l1_batch_with_metadata.metadata; let commit_data = vec![ @@ -123,7 +132,7 @@ fn validium_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> commit_data } -fn rollup_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec { +fn rollup_mode_l1_commit_data<'a>(l1_batch_with_metadata: &'a L1BatchWithMetadata) -> Vec { let mut commit_data = validium_mode_l1_commit_data(l1_batch_with_metadata); commit_data.push(Token::Bytes(utils::construct_pubdata( l1_batch_with_metadata, diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs 
b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs new file mode 100644 index 00000000000..d1ed57e41f2 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -0,0 +1,6 @@ +//! Structures exposed by the `IExecutor.sol`. + +mod commit_batch_info; +mod stored_batch_info; + +pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs new file mode 100644 index 00000000000..10fccc0198d --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -0,0 +1,53 @@ +use zksync_types::{ + commitment::L1BatchWithMetadata, + ethabi::Token, + web3::{contract::Error as Web3ContractError, error::Error as Web3ApiError}, + U256, +}; + +use crate::Tokenizable; + +/// Encoding for `StoredBatchInfo` from `IExecutor.sol` +#[derive(Debug)] +pub struct StoredBatchInfo<'a>(pub &'a L1BatchWithMetadata); + +impl<'a> Tokenizable for StoredBatchInfo<'a> { + fn from_token(_token: Token) -> Result + where + Self: Sized, + { + // Currently there is no need to decode this struct. + // We still want to implement `Tokenizable` trait for it, so that *once* it's needed + // the implementation is provided here and not in some other inconsistent way. + Err(Web3ContractError::Api(Web3ApiError::Decoder( + "Not implemented".to_string(), + ))) + } + + fn into_token(self) -> Token { + Token::Tuple(vec![ + // `batchNumber` + Token::Uint(U256::from(self.0.header.number.0)), + // `batchHash` + Token::FixedBytes(self.0.metadata.root_hash.as_bytes().to_vec()), + // `indexRepeatedStorageChanges` + Token::Uint(U256::from(self.0.metadata.rollup_last_leaf_index)), + // `numberOfLayer1Txs` + Token::Uint(U256::from(self.0.header.l1_tx_count)), + // `priorityOperationsHash` + Token::FixedBytes( + self.0 + .header + .priority_ops_onchain_data_hash() + .as_bytes() + .to_vec(), + ), + // `l2LogsTreeRoot` + Token::FixedBytes(self.0.metadata.l2_l1_merkle_root.as_bytes().to_vec()), + // timestamp + Token::Uint(U256::from(self.0.header.timestamp)), + // commitment + Token::FixedBytes(self.0.metadata.commitment.as_bytes().to_vec()), + ]) + } +} diff --git a/core/lib/l1_contract_interface/src/lib.rs b/core/lib/l1_contract_interface/src/lib.rs new file mode 100644 index 00000000000..f4f9d04ef24 --- /dev/null +++ b/core/lib/l1_contract_interface/src/lib.rs @@ -0,0 +1,19 @@ +//! Utilities for interacting with the zkSync L1 contract +//! +//! Provides utilities both to encode input data for the contract and to decode +//! the data provided by the contract. +//! +//! This crate utilizes traits provided by the `web3` crate to encode and decode +//! data. `Tokenizable` trait represents items that are encoded via single `Token`, +//! while `Tokenize` trait represents items that are encoded via array of `Token`s +//! (for example, transaction input). + +pub use zksync_types::web3::contract::tokens::{Detokenize, Tokenizable, Tokenize}; + +/// Rust interface for (subset of) `IExector.sol`. +pub mod i_executor; +/// Utilities for interacting with `Multicall3` contract. +pub mod multicall3; +/// Utilities for interacting with the old verifier contract. +/// Required for backward compatibility only. 
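The `Tokenizable` half of the split documented in `lib.rs` above can be made concrete with a minimal sketch (assumed imports; the crate name is again inferred from its path): a `StoredBatchInfo` always encodes to a single tuple `Token` whose eight fields mirror the Solidity struct.

```rust
use zksync_l1_contract_interface::{i_executor::structures::StoredBatchInfo, Tokenizable};
use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};

/// Encodes one batch as the `StoredBatchInfo` tuple expected by `IExecutor.sol`.
fn encode_stored_batch_info(batch: &L1BatchWithMetadata) -> Token {
    let token = StoredBatchInfo(batch).into_token();
    // One struct <-> one `Token`: an 8-field tuple (number, hash, leaf index, ...).
    debug_assert!(matches!(&token, Token::Tuple(fields) if fields.len() == 8));
    token
}
```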
+pub mod pre_boojum_verifier; diff --git a/core/lib/types/src/contracts.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs similarity index 99% rename from core/lib/types/src/contracts.rs rename to core/lib/l1_contract_interface/src/multicall3/mod.rs index 6b72375202a..a47d034d586 100644 --- a/core/lib/types/src/contracts.rs +++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs @@ -1,6 +1,6 @@ use std::mem; -use crate::{ +use zksync_types::{ ethabi::Token, web3::contract::{tokens::Tokenizable, Error}, Address, diff --git a/core/lib/l1_contract_interface/src/pre_boojum_verifier/mod.rs b/core/lib/l1_contract_interface/src/pre_boojum_verifier/mod.rs new file mode 100644 index 00000000000..b1af0b25373 --- /dev/null +++ b/core/lib/l1_contract_interface/src/pre_boojum_verifier/mod.rs @@ -0,0 +1,3 @@ +mod vk_transform; + +pub use self::vk_transform::old_l1_vk_commitment; diff --git a/core/lib/types/src/vk_transform.rs b/core/lib/l1_contract_interface/src/pre_boojum_verifier/vk_transform.rs similarity index 92% rename from core/lib/types/src/vk_transform.rs rename to core/lib/l1_contract_interface/src/pre_boojum_verifier/vk_transform.rs index b19fdaef692..70098230d9b 100644 --- a/core/lib/types/src/vk_transform.rs +++ b/core/lib/l1_contract_interface/src/pre_boojum_verifier/vk_transform.rs @@ -1,3 +1,6 @@ +//! This module contains functions for transforming vk from the old L1 verifier contract to the hash +//! that serves as its commitment. + use std::str::FromStr; use zkevm_test_harness::{ @@ -13,16 +16,15 @@ use zkevm_test_harness::{ recursive_aggregation::{compute_vk_encoding_and_committment, erase_vk_type}, }, }; - -use crate::{ethabi::Token, H256}; +use zksync_types::{ethabi::Token, H256}; /// Calculates commitment for vk from L1 verifier contract. 
-pub fn l1_vk_commitment(token: Token) -> H256 { +pub fn old_l1_vk_commitment(token: Token) -> H256 { let vk = vk_from_token(token); generate_vk_commitment(vk) } -pub fn generate_vk_commitment( +fn generate_vk_commitment( vk: VerificationKey>>, ) -> H256 { let (_, scheduler_vk_commitment) = compute_vk_encoding_and_committment(erase_vk_type(vk)); diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index bb38021c7aa..06cc0b67871 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -14,6 +14,7 @@ vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev zksync_types = { path = "../types" } zksync_crypto = { path = "../crypto" } zksync_storage = { path = "../storage" } +zksync_prover_interface = { path = "../prover_interface" } zksync_utils = { path = "../utils" } leb128 = "0.2.5" diff --git a/core/lib/merkle_tree/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs index 185ae0543f9..53a641750d1 100644 --- a/core/lib/merkle_tree/examples/loadtest/main.rs +++ b/core/lib/merkle_tree/examples/loadtest/main.rs @@ -90,13 +90,11 @@ impl Cli { "Created temp dir for RocksDB: {}", dir.path().to_string_lossy() ); - let db = RocksDB::with_options( - dir.path(), - RocksDBOptions { - block_cache_capacity: self.block_cache, - ..RocksDBOptions::default() - }, - ); + let db_options = RocksDBOptions { + block_cache_capacity: self.block_cache, + ..RocksDBOptions::default() + }; + let db = RocksDB::with_options(dir.path(), db_options).unwrap(); rocksdb = RocksDBWrapper::from(db); if let Some(chunk_size) = self.chunk_size { diff --git a/core/lib/merkle_tree/examples/recovery.rs b/core/lib/merkle_tree/examples/recovery.rs index 1b4e634e567..8769f9a64ac 100644 --- a/core/lib/merkle_tree/examples/recovery.rs +++ b/core/lib/merkle_tree/examples/recovery.rs @@ -62,13 +62,11 @@ impl Cli { "Created temp dir for RocksDB: {}", dir.path().to_string_lossy() ); - let db = RocksDB::with_options( - dir.path(), - RocksDBOptions { - block_cache_capacity: self.block_cache, - ..RocksDBOptions::default() - }, - ); + let db_options = RocksDBOptions { + block_cache_capacity: self.block_cache, + ..RocksDBOptions::default() + }; + let db = RocksDB::with_options(dir.path(), db_options).unwrap(); rocksdb = RocksDBWrapper::from(db); _temp_dir = Some(dir); &mut rocksdb diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index 2fe4b59f821..0724804a5a7 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -2,8 +2,8 @@ use rayon::{ThreadPool, ThreadPoolBuilder}; use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; use zksync_types::{ - proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, L1BatchNumber, StorageKey, U256, }; diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 7dd4d6083d7..8fc9f202d21 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -3,7 +3,7 @@ use std::path::Path; use rayon::prelude::*; -use zksync_storage::{db::NamedColumnFamily, rocksdb::DBPinnableSlice, RocksDB}; +use zksync_storage::{db::NamedColumnFamily, rocksdb, rocksdb::DBPinnableSlice, RocksDB}; use crate::{ errors::{DeserializeError, ErrorContext}, @@ -66,8 +66,12 @@ impl RocksDBWrapper { const MANIFEST_KEY: &'static [u8] = 
&[0]; /// Creates a new wrapper, initializing RocksDB at the specified directory. - pub fn new(path: &Path) -> Self { - Self::from(RocksDB::new(path)) + /// + /// # Errors + /// + /// Propagates RocksDB I/O errors. + pub fn new(path: &Path) -> Result { + Ok(Self::from(RocksDB::new(path)?)) } /// Sets the chunk size for multi-get operations. The requested keys will be split @@ -295,7 +299,7 @@ mod tests { #[test] fn garbage_is_removed_on_db_reverts() { let dir = TempDir::new().expect("failed creating temporary dir for RocksDB"); - let mut db = RocksDBWrapper::new(dir.path()); + let mut db = RocksDBWrapper::new(dir.path()).unwrap(); // Insert some data to the database. let mut expected_keys = HashSet::new(); diff --git a/core/lib/merkle_tree/tests/integration/consistency.rs b/core/lib/merkle_tree/tests/integration/consistency.rs index b6b424e431a..33ad521bc94 100644 --- a/core/lib/merkle_tree/tests/integration/consistency.rs +++ b/core/lib/merkle_tree/tests/integration/consistency.rs @@ -21,7 +21,7 @@ fn five_thousand_angry_monkeys_vs_merkle_tree() { const RNG_SEED: u64 = 42; let dir = TempDir::new().expect("failed creating temporary dir for RocksDB"); - let mut db = RocksDBWrapper::new(dir.path()); + let mut db = RocksDBWrapper::new(dir.path()).unwrap(); let mut tree = MerkleTree::new(&mut db); let kvs = generate_key_value_pairs(0..100); diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index e96b68fdade..565b4d5f0fe 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -7,11 +7,10 @@ use serde_with::{hex::Hex, serde_as}; use tempfile::TempDir; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{domain::ZkSyncTree, HashTree, TreeEntry, TreeInstruction}; +use zksync_prover_interface::inputs::StorageLogMetadata; use zksync_storage::RocksDB; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; -use zksync_types::{ - proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, H256, -}; +use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, H256}; fn gen_storage_logs() -> Vec> { let addrs = vec![ @@ -45,7 +44,7 @@ fn basic_workflow() { let logs = gen_storage_logs(); let (metadata, expected_root_hash) = { - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new_lightweight(db.into()); let metadata = tree.process_l1_batch(&logs); tree.save(); @@ -73,7 +72,7 @@ fn basic_workflow() { ]), ); - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let tree = ZkSyncTree::new_lightweight(db.into()); tree.verify_consistency(L1BatchNumber(0)); assert_eq!(tree.root_hash(), expected_root_hash); @@ -87,7 +86,7 @@ fn basic_workflow_multiblock() { let blocks = logs.chunks(9); let expected_root_hash = { - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new_lightweight(db.into()); tree.use_dedicated_thread_pool(2); for block in blocks { @@ -105,7 +104,7 @@ fn basic_workflow_multiblock() { ]), ); - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let tree = ZkSyncTree::new_lightweight(db.into()); assert_eq!(tree.root_hash(), expected_root_hash); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(12)); @@ -114,7 +113,7 @@ fn basic_workflow_multiblock() { #[test] fn filtering_out_no_op_writes() { 
let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new(db.into()); let mut logs = gen_storage_logs(); let root_hash = tree.process_l1_batch(&logs).root_hash; @@ -152,7 +151,7 @@ fn filtering_out_no_op_writes() { #[test] fn revert_blocks() { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); // Generate logs and save them to DB. // Produce 4 blocks with distinct values and 1 block with modified values from first block @@ -210,7 +209,7 @@ fn revert_blocks() { } // Revert the last block. - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); { let mut tree = ZkSyncTree::new_lightweight(storage.into()); assert_eq!(tree.root_hash(), tree_metadata.last().unwrap().root_hash); @@ -220,7 +219,7 @@ fn revert_blocks() { } // Revert two more blocks. - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); { let mut tree = ZkSyncTree::new_lightweight(storage.into()); tree.revert_logs(L1BatchNumber(1)); @@ -229,7 +228,7 @@ fn revert_blocks() { } // Revert two more blocks second time; the result should be the same - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); { let mut tree = ZkSyncTree::new_lightweight(storage.into()); tree.revert_logs(L1BatchNumber(1)); @@ -238,7 +237,7 @@ fn revert_blocks() { } // Reapply one of the reverted logs - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); { let storage_log = mirror_logs.get(3 * block_size).unwrap(); let mut tree = ZkSyncTree::new_lightweight(storage.into()); @@ -247,7 +246,7 @@ fn revert_blocks() { } // check saved block number - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); let tree = ZkSyncTree::new_lightweight(storage.into()); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(3)); } @@ -255,7 +254,7 @@ fn revert_blocks() { #[test] fn reset_tree() { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let storage = RocksDB::new(temp_dir.as_ref()); + let storage = RocksDB::new(temp_dir.as_ref()).unwrap(); let logs = gen_storage_logs(); let mut tree = ZkSyncTree::new_lightweight(storage.into()); let empty_root_hash = tree.root_hash(); @@ -278,14 +277,14 @@ fn read_logs() { logs.truncate(5); let write_metadata = { - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new_lightweight(db.into()); let metadata = tree.process_l1_batch(&logs); tree.save(); metadata }; - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new_lightweight(db.into()); let read_logs: Vec<_> = logs .into_iter() @@ -315,7 +314,7 @@ fn subtract_from_max_value(diff: u8) -> [u8; 32] { #[test] fn root_hash_compatibility() { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new_lightweight(db.into()); assert_eq!( tree.root_hash(), @@ -372,7 +371,7 @@ fn root_hash_compatibility() { #[test] fn 
process_block_idempotency_check() { let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB"); - let rocks_db = RocksDB::new(temp_dir.as_ref()); + let rocks_db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new_lightweight(rocks_db.into()); let logs = gen_storage_logs(); let tree_metadata = tree.process_l1_batch(&logs); @@ -435,7 +434,7 @@ fn witness_workflow() { let logs = gen_storage_logs(); let (first_chunk, _) = logs.split_at(logs.len() / 2); - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new(db.into()); let metadata = tree.process_l1_batch(first_chunk); let job = metadata.witness.unwrap(); @@ -465,7 +464,7 @@ fn witnesses_with_multiple_blocks() { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let logs = gen_storage_logs(); - let db = RocksDB::new(temp_dir.as_ref()); + let db = RocksDB::new(temp_dir.as_ref()).unwrap(); let mut tree = ZkSyncTree::new(db.into()); let empty_tree_hashes: Vec<_> = (0..256) .map(|i| Blake2Hasher.empty_subtree_hash(i)) diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index 117ea0db4d9..fe6731fb441 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -550,7 +550,7 @@ mod rocksdb { impl Harness { fn new() -> Self { let dir = TempDir::new().expect("failed creating temporary dir for RocksDB"); - let db = RocksDBWrapper::new(dir.path()); + let db = RocksDBWrapper::new(dir.path()).unwrap(); Self { db, dir } } } @@ -661,7 +661,7 @@ mod rocksdb { tree.extend(vec![TreeEntry::new(U256::zero(), 1, H256::zero())]); drop(tree); - let db = RocksDBWrapper::new(dir.path()); + let db = RocksDBWrapper::new(dir.path()).unwrap(); MerkleTree::with_hasher(db, ()); } } diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs index 2bac00f02c3..2992561bb1b 100644 --- a/core/lib/merkle_tree/tests/integration/recovery.rs +++ b/core/lib/merkle_tree/tests/integration/recovery.rs @@ -131,7 +131,7 @@ mod rocksdb { #[test_casing(8, test_casing::Product((RecoveryKind::ALL, [6, 10, 17, 42])))] fn recovery_in_chunks(kind: RecoveryKind, chunk_size: usize) { let temp_dir = TempDir::new().unwrap(); - let db = RocksDBWrapper::new(temp_dir.path()); + let db = RocksDBWrapper::new(temp_dir.path()).unwrap(); test_recovery_in_chunks(db, kind, chunk_size); } } diff --git a/core/lib/multivm/src/glue/tracers/mod.rs b/core/lib/multivm/src/glue/tracers/mod.rs index 10d9cbe8ed8..3ca26892113 100644 --- a/core/lib/multivm/src/glue/tracers/mod.rs +++ b/core/lib/multivm/src/glue/tracers/mod.rs @@ -32,7 +32,7 @@ use zksync_state::WriteStorage; -use crate::HistoryMode; +use crate::{tracers::old_tracers::OldTracers, HistoryMode}; pub type MultiVmTracerPointer = Box>; @@ -41,6 +41,7 @@ pub trait MultiVMTracer: + IntoVmVirtualBlocksTracer + IntoVmRefundsEnhancementTracer + IntoVmBoojumIntegrationTracer + + IntoOldVmTracer { fn into_tracer_pointer(self) -> MultiVmTracerPointer where @@ -72,6 +73,18 @@ pub trait IntoVmBoojumIntegrationTracer { ) -> Box>; } +/// Into tracers for old VM versions. +/// Even though number of tracers is limited, we still need to have this trait to be able to convert +/// tracers to old VM tracers. +/// Unfortunately we can't implement this trait for `T`, because specialization is not stable yet. 
+/// You can follow the conversation here: https://github.com/rust-lang/rust/issues/31844 +/// For all new tracers we need to implement this trait manually. +pub trait IntoOldVmTracer { + fn old_tracer(&self) -> OldTracers { + OldTracers::None + } +} + impl IntoLatestTracer for T where S: WriteStorage, @@ -132,6 +145,7 @@ where T: IntoLatestTracer + IntoVmVirtualBlocksTracer + IntoVmRefundsEnhancementTracer - + IntoVmBoojumIntegrationTracer, + + IntoVmBoojumIntegrationTracer + + IntoOldVmTracer, { } diff --git a/core/lib/multivm/src/glue/types/mod.rs b/core/lib/multivm/src/glue/types/mod.rs index 03d003212f4..481abfdf85f 100644 --- a/core/lib/multivm/src/glue/types/mod.rs +++ b/core/lib/multivm/src/glue/types/mod.rs @@ -7,4 +7,6 @@ mod vm; mod zk_evm_1_3_1; +mod zk_evm_1_3_3; +mod zk_evm_1_4_0; mod zk_evm_1_4_1; diff --git a/core/lib/multivm/src/glue/types/vm/mod.rs b/core/lib/multivm/src/glue/types/vm/mod.rs index aa3db7f2fc5..47cddc2b8dd 100644 --- a/core/lib/multivm/src/glue/types/vm/mod.rs +++ b/core/lib/multivm/src/glue/types/vm/mod.rs @@ -1,4 +1,5 @@ mod block_context_mode; +mod storage_query; mod tx_execution_mode; mod tx_revert_reason; mod vm_block_result; diff --git a/core/lib/multivm/src/glue/types/vm/storage_query.rs b/core/lib/multivm/src/glue/types/vm/storage_query.rs new file mode 100644 index 00000000000..21a10947e09 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/storage_query.rs @@ -0,0 +1,66 @@ +use zksync_types::StorageLogQuery; + +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_m5::utils::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_m6::utils::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_1_3_2::utils::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_virtual_blocks::utils::logs::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_refunds_enhancement::utils::logs::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_boojum_integration::utils::logs::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} + +impl GlueFrom for StorageLogQuery { + fn glue_from(value: crate::vm_latest::utils::logs::StorageLogQuery) -> Self { + Self { + log_query: value.log_query.glue_into(), + log_type: value.log_type, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index cc76ce22ca0..9867c0c95b9 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -1,3 +1,6 @@ +use itertools::Itertools; +use zk_evm_1_3_1::aux_structures::LogQuery as LogQuery_1_3_1; +use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries as sort_storage_access_queries_1_3_3; use zksync_types::l2_to_l1_log::UserL2ToL1Log; use 
crate::{ @@ -15,6 +18,21 @@ use crate::{ impl GlueFrom for crate::interface::FinishedL1Batch { fn glue_from(value: crate::vm_m5::vm_instance::VmBlockResult) -> Self { + let storage_log_queries = value.full_result.storage_log_queries.clone(); + let deduplicated_storage_log_queries: Vec = + sort_storage_access_queries_1_3_3( + &storage_log_queries + .iter() + .map(|log| { + GlueInto::::glue_into(log.log_query) + }) + .collect_vec(), + ) + .1 + .into_iter() + .map(GlueInto::::glue_into) + .collect(); + crate::interface::FinishedL1Batch { block_tip_execution_result: VmExecutionResultAndLogs { result: value.block_tip_result.revert_reason.glue_into(), @@ -26,13 +44,20 @@ impl GlueFrom for crate::interface::Fi computational_gas_used: value.full_result.gas_used, gas_used: value.full_result.gas_used, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), }, final_execution_state: CurrentExecutionState { events: value.full_result.events, - storage_log_queries: value.full_result.storage_log_queries, + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduplicated_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: value.full_result.used_contract_hashes, user_l2_to_l1_logs: value .full_result @@ -54,6 +79,21 @@ impl GlueFrom for crate::interface::Fi impl GlueFrom for crate::interface::FinishedL1Batch { fn glue_from(value: crate::vm_m6::vm_instance::VmBlockResult) -> Self { + let storage_log_queries = value.full_result.storage_log_queries.clone(); + let deduplicated_storage_log_queries: Vec = + sort_storage_access_queries_1_3_3( + &storage_log_queries + .iter() + .map(|log| { + GlueInto::::glue_into(log.log_query) + }) + .collect_vec(), + ) + .1 + .into_iter() + .map(GlueInto::::glue_into) + .collect(); + crate::interface::FinishedL1Batch { block_tip_execution_result: VmExecutionResultAndLogs { result: value.block_tip_result.revert_reason.glue_into(), @@ -65,13 +105,20 @@ impl GlueFrom for crate::interface::Fi computational_gas_used: value.full_result.computational_gas_used, gas_used: value.full_result.gas_used, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), }, final_execution_state: CurrentExecutionState { events: value.full_result.events, - storage_log_queries: value.full_result.storage_log_queries, + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduplicated_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: value.full_result.used_contract_hashes, user_l2_to_l1_logs: value .full_result @@ -93,6 +140,13 @@ impl GlueFrom for crate::interface::Fi impl GlueFrom for crate::interface::FinishedL1Batch { fn glue_from(value: crate::vm_1_3_2::vm_instance::VmBlockResult) -> Self { + let storage_log_queries = value.full_result.storage_log_queries.clone(); + let deduplicated_storage_log_queries = + zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries( + storage_log_queries.iter().map(|log| &log.log_query), + ) + .1; + crate::interface::FinishedL1Batch { block_tip_execution_result: VmExecutionResultAndLogs { result: value.block_tip_result.revert_reason.glue_into(), @@ -110,13 +164,20 @@ impl GlueFrom for crate::interface: computational_gas_used: 
value.full_result.computational_gas_used, gas_used: value.full_result.gas_used, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), }, final_execution_state: CurrentExecutionState { events: value.full_result.events, - storage_log_queries: value.full_result.storage_log_queries, + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduplicated_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: value.full_result.used_contract_hashes, user_l2_to_l1_logs: value .full_result @@ -161,7 +222,12 @@ impl GlueFrom .map(UserL2ToL1Log) .collect(), system_l2_to_l1_logs: vec![], - storage_logs: value.full_result.storage_log_queries, + storage_logs: value + .full_result + .storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), total_log_queries_count: value.full_result.total_log_queries, }, statistics: VmExecutionStatistics { @@ -171,7 +237,7 @@ impl GlueFrom computational_gas_used: value.full_result.computational_gas_used, gas_used: value.full_result.gas_used, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), } @@ -202,7 +268,7 @@ impl GlueFrom computational_gas_used: 0, gas_used: value.full_result.gas_used, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), } @@ -234,7 +300,12 @@ impl GlueFrom .map(UserL2ToL1Log) .collect(), system_l2_to_l1_logs: vec![], - storage_logs: value.full_result.storage_log_queries, + storage_logs: value + .full_result + .storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), total_log_queries_count: value.full_result.total_log_queries, }, statistics: VmExecutionStatistics { @@ -244,7 +315,7 @@ impl GlueFrom computational_gas_used: value.full_result.computational_gas_used, gas_used: value.full_result.gas_used, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 7b25c1ff3e0..932b7616521 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -16,7 +16,7 @@ impl GlueFrom // There are no such fields in `m5` computational_gas_used: 0, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: crate::interface::Refunds { gas_refunded: 0, @@ -40,7 +40,7 @@ impl GlueFrom computational_gas_used: value.computational_gas_used, total_log_queries: value.logs.total_log_queries_count, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: crate::interface::Refunds { gas_refunded: 0, @@ -64,7 +64,7 @@ impl GlueFrom computational_gas_used: value.computational_gas_used, total_log_queries: value.logs.total_log_queries_count, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: crate::interface::Refunds { gas_refunded: 0, diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs b/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs index f7c1f1c359c..dfe1121c04e 100644 --- a/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs +++ 
b/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs @@ -1,14 +1,16 @@ +use zksync_utils::u256_to_h256; + use crate::glue::{GlueFrom, GlueInto}; -impl GlueFrom for zksync_types::Timestamp { +impl GlueFrom for zksync_types::zk_evm_types::Timestamp { fn glue_from(timestamp: zk_evm_1_3_1::aux_structures::Timestamp) -> Self { - zksync_types::Timestamp(timestamp.0) + zksync_types::zk_evm_types::Timestamp(timestamp.0) } } -impl GlueFrom for zksync_types::LogQuery { +impl GlueFrom for zksync_types::zk_evm_types::LogQuery { fn glue_from(query: zk_evm_1_3_1::aux_structures::LogQuery) -> Self { - zksync_types::LogQuery { + zksync_types::zk_evm_types::LogQuery { address: query.address, key: query.key, written_value: query.written_value, @@ -24,14 +26,14 @@ impl GlueFrom for zksync_types::LogQuery } } -impl GlueFrom for zk_evm_1_3_1::aux_structures::Timestamp { - fn glue_from(timestamp: zksync_types::Timestamp) -> Self { +impl GlueFrom for zk_evm_1_3_1::aux_structures::Timestamp { + fn glue_from(timestamp: zksync_types::zk_evm_types::Timestamp) -> Self { zk_evm_1_3_1::aux_structures::Timestamp(timestamp.0) } } -impl GlueFrom for zk_evm_1_3_1::aux_structures::LogQuery { - fn glue_from(query: zksync_types::LogQuery) -> Self { +impl GlueFrom for zk_evm_1_3_1::aux_structures::LogQuery { + fn glue_from(query: zksync_types::zk_evm_types::LogQuery) -> Self { zk_evm_1_3_1::aux_structures::LogQuery { address: query.address, key: query.key, @@ -48,22 +50,9 @@ impl GlueFrom for zk_evm_1_3_1::aux_structures::LogQuery } } -impl GlueFrom - for zksync_types::EventMessage +impl GlueFrom + for zksync_types::zk_evm_types::FarCallOpcode { - fn glue_from(event: zk_evm_1_3_1::reference_impls::event_sink::EventMessage) -> Self { - zksync_types::EventMessage { - shard_id: event.shard_id, - is_first: event.is_first, - tx_number_in_block: event.tx_number_in_block, - address: event.address, - key: event.key, - value: event.value, - } - } -} - -impl GlueFrom for zksync_types::FarCallOpcode { fn glue_from(value: zk_evm_1_3_1::zkevm_opcode_defs::FarCallOpcode) -> Self { match value { zk_evm_1_3_1::zkevm_opcode_defs::FarCallOpcode::Normal => Self::Normal, @@ -73,12 +62,79 @@ impl GlueFrom for zksync_types:: } } -impl GlueFrom for zk_evm_1_3_1::zkevm_opcode_defs::FarCallOpcode { - fn glue_from(value: zksync_types::FarCallOpcode) -> Self { +impl GlueFrom + for zk_evm_1_3_1::zkevm_opcode_defs::FarCallOpcode +{ + fn glue_from(value: zksync_types::zk_evm_types::FarCallOpcode) -> Self { match value { - zksync_types::FarCallOpcode::Normal => Self::Normal, - zksync_types::FarCallOpcode::Delegate => Self::Delegate, - zksync_types::FarCallOpcode::Mimic => Self::Mimic, + zksync_types::zk_evm_types::FarCallOpcode::Normal => Self::Normal, + zksync_types::zk_evm_types::FarCallOpcode::Delegate => Self::Delegate, + zksync_types::zk_evm_types::FarCallOpcode::Mimic => Self::Mimic, + } + } +} + +// Special for `zk_evm_1_3_1`: it re-used the same sorting function from `zkevm_test_harness` as the `v1.3.3` used. +// To continue calling this functions, we need to add the conversion for `Timestamp` and `LogQuery`. 
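As a hypothetical in-crate sketch (inside `multivm`, where `GlueInto` is visible) of what the conversion pair declared next enables: a `v1.3.1` timestamp can round-trip through the `v1.3.3` type, which is exactly the bridge the shared sorting function needs.

```rust
// Assumed to live inside the `multivm` crate so that `crate::glue` is in scope.
use crate::glue::GlueInto;

fn timestamp_roundtrip(ts: zk_evm_1_3_1::aux_structures::Timestamp) {
    // v1.3.1 -> v1.3.3, e.g. right before the shared sorting call...
    let newer: zk_evm_1_3_3::aux_structures::Timestamp = ts.glue_into();
    // ...and back again; both are thin wrappers over the same inner integer.
    let back: zk_evm_1_3_1::aux_structures::Timestamp = newer.glue_into();
    assert_eq!(ts.0, back.0);
}
```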
+impl GlueFrom for zk_evm_1_3_3::aux_structures::Timestamp { + fn glue_from(timestamp: zk_evm_1_3_1::aux_structures::Timestamp) -> Self { + zk_evm_1_3_3::aux_structures::Timestamp(timestamp.0) + } +} + +impl GlueFrom for zk_evm_1_3_3::aux_structures::LogQuery { + fn glue_from(query: zk_evm_1_3_1::aux_structures::LogQuery) -> Self { + zk_evm_1_3_3::aux_structures::LogQuery { + address: query.address, + key: query.key, + written_value: query.written_value, + timestamp: query.timestamp.glue_into(), + shard_id: query.shard_id, + rollback: query.rollback, + tx_number_in_block: query.tx_number_in_block, + aux_byte: query.aux_byte, + read_value: query.read_value, + rw_flag: query.rw_flag, + is_service: query.is_service, + } + } +} + +impl GlueFrom for zk_evm_1_3_1::aux_structures::Timestamp { + fn glue_from(timestamp: zk_evm_1_3_3::aux_structures::Timestamp) -> Self { + zk_evm_1_3_1::aux_structures::Timestamp(timestamp.0) + } +} + +impl GlueFrom for zk_evm_1_3_1::aux_structures::LogQuery { + fn glue_from(query: zk_evm_1_3_3::aux_structures::LogQuery) -> Self { + zk_evm_1_3_1::aux_structures::LogQuery { + address: query.address, + key: query.key, + written_value: query.written_value, + timestamp: query.timestamp.glue_into(), + shard_id: query.shard_id, + rollback: query.rollback, + tx_number_in_block: query.tx_number_in_block, + aux_byte: query.aux_byte, + read_value: query.read_value, + rw_flag: query.rw_flag, + is_service: query.is_service, + } + } +} + +impl GlueFrom + for zksync_types::l2_to_l1_log::L2ToL1Log +{ + fn glue_from(event: zk_evm_1_3_1::reference_impls::event_sink::EventMessage) -> Self { + Self { + shard_id: event.shard_id, + is_service: event.is_first, + tx_number_in_block: event.tx_number_in_block, + sender: event.address, + key: u256_to_h256(event.key), + value: u256_to_h256(event.value), } } } diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs b/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs new file mode 100644 index 00000000000..4c554c1bd53 --- /dev/null +++ b/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs @@ -0,0 +1,93 @@ +use zk_evm_1_3_3::{ + aux_structures::{LogQuery as LogQuery_1_3_3, Timestamp as Timestamp_1_3_3}, + zkevm_opcode_defs::FarCallOpcode as FarCallOpcode_1_3_3, +}; +use zksync_types::zk_evm_types::{FarCallOpcode, LogQuery, Timestamp}; +use zksync_utils::u256_to_h256; + +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for FarCallOpcode { + fn glue_from(value: FarCallOpcode_1_3_3) -> Self { + match value { + FarCallOpcode_1_3_3::Normal => FarCallOpcode::Normal, + FarCallOpcode_1_3_3::Delegate => FarCallOpcode::Delegate, + FarCallOpcode_1_3_3::Mimic => FarCallOpcode::Mimic, + } + } +} + +impl GlueFrom + for zk_evm_1_3_3::zkevm_opcode_defs::FarCallOpcode +{ + fn glue_from(value: zksync_types::zk_evm_types::FarCallOpcode) -> Self { + match value { + zksync_types::zk_evm_types::FarCallOpcode::Normal => Self::Normal, + zksync_types::zk_evm_types::FarCallOpcode::Delegate => Self::Delegate, + zksync_types::zk_evm_types::FarCallOpcode::Mimic => Self::Mimic, + } + } +} + +impl GlueFrom for Timestamp { + fn glue_from(value: Timestamp_1_3_3) -> Timestamp { + Timestamp(value.0) + } +} + +impl GlueFrom for Timestamp_1_3_3 { + fn glue_from(value: Timestamp) -> Timestamp_1_3_3 { + Timestamp_1_3_3(value.0) + } +} + +impl GlueFrom for LogQuery { + fn glue_from(value: LogQuery_1_3_3) -> LogQuery { + LogQuery { + timestamp: value.timestamp.glue_into(), + tx_number_in_block: value.tx_number_in_block, + aux_byte: value.aux_byte, + shard_id: value.shard_id, + 
address: value.address, + key: value.key, + read_value: value.read_value, + written_value: value.written_value, + rw_flag: value.rw_flag, + rollback: value.rollback, + is_service: value.is_service, + } + } +} + +impl GlueFrom for LogQuery_1_3_3 { + fn glue_from(value: LogQuery) -> LogQuery_1_3_3 { + LogQuery_1_3_3 { + timestamp: value.timestamp.glue_into(), + tx_number_in_block: value.tx_number_in_block, + aux_byte: value.aux_byte, + shard_id: value.shard_id, + address: value.address, + key: value.key, + read_value: value.read_value, + written_value: value.written_value, + rw_flag: value.rw_flag, + rollback: value.rollback, + is_service: value.is_service, + } + } +} + +impl GlueFrom + for zksync_types::l2_to_l1_log::L2ToL1Log +{ + fn glue_from(event: zk_evm_1_3_3::reference_impls::event_sink::EventMessage) -> Self { + Self { + shard_id: event.shard_id, + is_service: event.is_first, + tx_number_in_block: event.tx_number_in_block, + sender: event.address, + key: u256_to_h256(event.key), + value: u256_to_h256(event.value), + } + } +} diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs b/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs new file mode 100644 index 00000000000..5af0e57c4bf --- /dev/null +++ b/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs @@ -0,0 +1,20 @@ +use zksync_utils::u256_to_h256; + +use crate::glue::GlueFrom; + +// Most of the types between `zk_evm@1.4.0` and `zk_evm@1.3.3` are shared and so we need only the additional conversion +// for `EventMessage` only. +impl GlueFrom + for zksync_types::l2_to_l1_log::L2ToL1Log +{ + fn glue_from(event: zk_evm_1_4_0::reference_impls::event_sink::EventMessage) -> Self { + Self { + shard_id: event.shard_id, + is_service: event.is_first, + tx_number_in_block: event.tx_number_in_block, + sender: event.address, + key: u256_to_h256(event.key), + value: u256_to_h256(event.value), + } + } +} diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs b/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs index c4c4c06c7f8..933eafbb035 100644 --- a/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs +++ b/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs @@ -2,7 +2,8 @@ use zk_evm_1_4_1::{ aux_structures::{LogQuery as LogQuery_1_4_1, Timestamp as Timestamp_1_4_1}, zkevm_opcode_defs::FarCallOpcode as FarCallOpcode_1_4_1, }; -use zksync_types::{FarCallOpcode, LogQuery, Timestamp}; +use zksync_types::zk_evm_types::{FarCallOpcode, LogQuery, Timestamp}; +use zksync_utils::u256_to_h256; use crate::glue::{GlueFrom, GlueInto}; @@ -63,3 +64,18 @@ impl GlueFrom for LogQuery_1_4_1 { } } } + +impl GlueFrom + for zksync_types::l2_to_l1_log::L2ToL1Log +{ + fn glue_from(event: zk_evm_1_4_1::reference_impls::event_sink::EventMessage) -> Self { + Self { + shard_id: event.shard_id, + is_service: event.is_first, + tx_number_in_block: event.tx_number_in_block, + sender: event.address, + key: u256_to_h256(event.key), + value: u256_to_h256(event.value), + } + } +} diff --git a/core/lib/multivm/src/interface/traits/vm.rs b/core/lib/multivm/src/interface/traits/vm.rs index 1158588f849..dd31c00e98f 100644 --- a/core/lib/multivm/src/interface/traits/vm.rs +++ b/core/lib/multivm/src/interface/traits/vm.rs @@ -129,6 +129,9 @@ pub trait VmInterface { /// Record VM memory metrics. fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; + /// Whether the VM still has enough gas to execute the batch tip + fn has_enough_gas_for_batch_tip(&self) -> bool; + /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. 
fn finish_batch(&mut self) -> FinishedL1Batch { diff --git a/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs b/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs index 3af7bcd3e05..1258e6b472f 100644 --- a/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs +++ b/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs @@ -2,7 +2,11 @@ use zksync_types::{fee_model::BatchFeeInput, Address, L1BatchNumber, H256}; use super::L2BlockEnv; -/// Unique params for each batch +/// Unique params for each L1 batch. +/// +/// Eventually, most of these parameters (`l1_gas_price`, `fair_l2_gas_price`, `fee_account`, +/// `enforced_base_fee`) will be moved to [`L2BlockEnv`]. For now, the VM doesn't support changing +/// them in the middle of execution; that's why these params are specified here. #[derive(Debug, Clone)] pub struct L1BatchEnv { // If previous batch hash is None, then this is the first batch diff --git a/core/lib/multivm/src/interface/types/outputs/execution_result.rs b/core/lib/multivm/src/interface/types/outputs/execution_result.rs index 6471ca1fe19..a3c201e7970 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_result.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_result.rs @@ -101,7 +101,7 @@ impl VmExecutionResultAndLogs { cycles_used: self.statistics.cycles_used, computational_gas_used: self.statistics.computational_gas_used, pubdata_published: self.statistics.pubdata_published, - estimated_circuits_used: self.statistics.estimated_circuits_used, + circuit_statistic: self.statistics.circuit_statistic, } } } diff --git a/core/lib/multivm/src/interface/types/outputs/execution_state.rs b/core/lib/multivm/src/interface/types/outputs/execution_state.rs index 24034a96221..523d90b7fd6 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_state.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_state.rs @@ -1,6 +1,7 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - LogQuery, StorageLogQuery, VmEvent, U256, + zk_evm_types::LogQuery, + StorageLogQuery, VmEvent, U256, }; /// State of the VM since the start of the batch execution. @@ -10,6 +11,9 @@ pub struct CurrentExecutionState { pub events: Vec, /// Storage logs produced by the VM. pub storage_log_queries: Vec, + /// The deduplicated storage logs produced by the VM. + /// It is the deduplicated version of the `storage_log_queries` field. + pub deduplicated_storage_log_queries: Vec, /// Hashes of the contracts used by the VM. pub used_contract_hashes: Vec, /// L2 to L1 logs produced by the VM. diff --git a/core/lib/multivm/src/interface/types/outputs/statistic.rs b/core/lib/multivm/src/interface/types/outputs/statistic.rs index 1f5b233423c..88c45166b55 100644 --- a/core/lib/multivm/src/interface/types/outputs/statistic.rs +++ b/core/lib/multivm/src/interface/types/outputs/statistic.rs @@ -1,3 +1,5 @@ +use zksync_types::circuit::CircuitStatistic; + /// Statistics of the tx execution. #[derive(Debug, Default, Clone)] pub struct VmExecutionStatistics { @@ -12,7 +14,7 @@ pub struct VmExecutionStatistics { /// Number of log queries produced by the VM during the tx execution. pub total_log_queries: usize, pub pubdata_published: u32, - pub estimated_circuits_used: f32, + pub circuit_statistic: CircuitStatistic, } /// Oracle metrics of the VM. 
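Because `CurrentExecutionState` now carries the deduplicated queries next to the raw ones, downstream code no longer has to re-run the sorting pass itself. A minimal consumer sketch (assumed code, with the `multivm` crate name inferred from its path):

```rust
// Hypothetical consumer; `multivm` is the assumed crate name for `core/lib/multivm`.
use multivm::interface::CurrentExecutionState;

/// Counts effective storage writes straight from the deduplicated queries
/// that the VM now exposes, instead of sorting `storage_log_queries` again.
fn deduplicated_write_count(state: &CurrentExecutionState) -> usize {
    state
        .deduplicated_storage_log_queries
        .iter()
        .filter(|query| query.rw_flag)
        .count()
}
```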
diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index f5a84a1dc60..aa55026d88f 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -2,9 +2,8 @@ #![warn(unused_extern_crates)] #![warn(unused_imports)] -pub use zk_evm_1_3_1; -pub use zk_evm_1_3_3; -pub use zk_evm_1_4_1; +pub use zk_evm_1_4_1 as zk_evm_latest; +pub use zkevm_test_harness_1_4_1 as zkevm_test_harness_latest; pub use zksync_types::vm_version::VmVersion; pub use self::versions::{ diff --git a/core/lib/multivm/src/tracers/call_tracer/mod.rs b/core/lib/multivm/src/tracers/call_tracer/mod.rs index 6d0285fb97d..b3a98902f4b 100644 --- a/core/lib/multivm/src/tracers/call_tracer/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/mod.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_types::vm_trace::Call; -use crate::tracers::call_tracer::metrics::CALL_METRICS; +use crate::{glue::tracers::IntoOldVmTracer, tracers::call_tracer::metrics::CALL_METRICS}; mod metrics; pub mod vm_boojum_integration; @@ -88,3 +88,9 @@ impl CallTracer { } } } + +impl IntoOldVmTracer for CallTracer { + fn old_tracer(&self) -> crate::tracers::old_tracers::OldTracers { + crate::tracers::old_tracers::OldTracers::CallTracer(self.result.clone()) + } +} diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs index e2e884e26a1..c36bfb0f966 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs @@ -9,10 +9,12 @@ use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_types::{ vm_trace::{Call, CallType}, - FarCallOpcode, U256, + zk_evm_types::FarCallOpcode, + U256, }; use crate::{ + glue::GlueInto, interface::{ tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, VmRevertReason, @@ -45,7 +47,7 @@ impl DynTracer> for CallTracer { .unwrap_or(current_ergs); let mut current_call = Call { - r#type: CallType::Call(far_call), + r#type: CallType::Call(far_call.glue_into()), gas: 0, parent_gas, ..Default::default() @@ -90,7 +92,7 @@ impl CallTracer { // Actually it's a call of the constructor. // And at this stage caller is user and callee is deployed contract. 
let call_type = if let CallType::Call(far_call) = current_call.r#type { - if matches!(far_call, FarCallOpcode::Mimic) { + if matches!(far_call.glue_into(), FarCallOpcode::Mimic) { let previous_caller = state .vm_local_state .callstack diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs index d7889c910e2..4d5a9857c93 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs @@ -9,7 +9,8 @@ use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_types::{ vm_trace::{Call, CallType}, - FarCallOpcode, U256, + zk_evm_types::FarCallOpcode, + U256, }; use crate::{ diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs index 6a97d791e8e..4c9a9d8e8d6 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs @@ -9,10 +9,12 @@ use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_types::{ vm_trace::{Call, CallType}, - FarCallOpcode, U256, + zk_evm_types::FarCallOpcode, + U256, }; use crate::{ + glue::GlueInto, interface::{ tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, VmRevertReason, @@ -45,7 +47,7 @@ impl DynTracer> for CallTracer { .unwrap_or(current_ergs); let mut current_call = Call { - r#type: CallType::Call(far_call), + r#type: CallType::Call(far_call.glue_into()), gas: 0, parent_gas, ..Default::default() diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs index f1713fc5e9f..7ffaab1392b 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs @@ -9,10 +9,12 @@ use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_types::{ vm_trace::{Call, CallType}, - FarCallOpcode, U256, + zk_evm_types::FarCallOpcode, + U256, }; use crate::{ + glue::GlueInto, interface::{dyn_tracers::vm_1_3_3::DynTracer, VmExecutionResultAndLogs, VmRevertReason}, tracers::call_tracer::CallTracer, vm_virtual_blocks::{ @@ -44,7 +46,7 @@ impl DynTracer> for CallTracer { .unwrap_or(current_ergs); let mut current_call = Call { - r#type: CallType::Call(far_call), + r#type: CallType::Call(far_call.glue_into()), gas: 0, parent_gas, ..Default::default() diff --git a/core/lib/multivm/src/tracers/mod.rs b/core/lib/multivm/src/tracers/mod.rs index 728a15a6a6b..8d858ae03ed 100644 --- a/core/lib/multivm/src/tracers/mod.rs +++ b/core/lib/multivm/src/tracers/mod.rs @@ -1,5 +1,6 @@ pub mod call_tracer; mod multivm_dispatcher; +pub mod old_tracers; pub mod storage_invocation; pub mod validator; diff --git a/core/lib/multivm/src/tracers/multivm_dispatcher.rs b/core/lib/multivm/src/tracers/multivm_dispatcher.rs index aee09fe0f49..0e8585884fa 100644 --- a/core/lib/multivm/src/tracers/multivm_dispatcher.rs +++ b/core/lib/multivm/src/tracers/multivm_dispatcher.rs @@ -1,6 +1,6 @@ use zksync_state::WriteStorage; -use crate::{HistoryMode, MultiVmTracerPointer}; +use crate::{tracers::old_tracers, HistoryMode, MultiVmTracerPointer}; /// Tracer dispatcher is a tracer that can dispatch calls to multiple 
tracers. pub struct TracerDispatcher { @@ -83,3 +83,9 @@ impl From> impl From> for () { fn from(_value: TracerDispatcher) -> Self {} } + +impl From> for old_tracers::TracerDispatcher { + fn from(value: TracerDispatcher) -> Self { + Self::new(value.tracers.into_iter().map(|x| x.old_tracer()).collect()) + } +} diff --git a/core/lib/multivm/src/tracers/old_tracers.rs b/core/lib/multivm/src/tracers/old_tracers.rs new file mode 100644 index 00000000000..54e5e45aa2c --- /dev/null +++ b/core/lib/multivm/src/tracers/old_tracers.rs @@ -0,0 +1,48 @@ +use std::sync::Arc; + +use once_cell::sync::OnceCell; +use zksync_types::vm_trace::Call; + +/// For backward compatibility with vm before vm with virtual blocks. +/// These tracers are tightly coupled with the VM implementation and we have to pass only params for them and not tracers by itself. +#[derive(Debug, Clone)] +pub enum OldTracers { + CallTracer(Arc>>), + StorageInvocations(usize), + /// Special cases for not supported tracers. + None, +} + +impl OldTracers { + pub fn call_tracer(&self) -> Option>>> { + match self { + OldTracers::CallTracer(a) => Some(a.clone()), + _ => None, + } + } + pub fn storage_invocations(&self) -> Option { + match self { + OldTracers::StorageInvocations(a) => Some(*a), + _ => None, + } + } +} + +/// Tracer dispatcher is a tracer that can convert list of tracers to params for old VM. +#[derive(Debug, Default, Clone)] +pub struct TracerDispatcher { + pub(crate) call_tracer: Option>>>, + pub(crate) storage_invocations: Option, +} + +impl TracerDispatcher { + pub fn new(tracers: Vec) -> Self { + let call_tracer = tracers.iter().find_map(|x| x.call_tracer()); + let storage_invocations = tracers.iter().find_map(|x| x.storage_invocations()); + + Self { + call_tracer, + storage_invocations, + } + } +} diff --git a/core/lib/multivm/src/tracers/storage_invocation/mod.rs b/core/lib/multivm/src/tracers/storage_invocation/mod.rs index f48534709ad..db4e936e011 100644 --- a/core/lib/multivm/src/tracers/storage_invocation/mod.rs +++ b/core/lib/multivm/src/tracers/storage_invocation/mod.rs @@ -1,3 +1,5 @@ +use crate::{glue::tracers::IntoOldVmTracer, tracers::old_tracers::OldTracers}; + pub mod vm_boojum_integration; pub mod vm_latest; pub mod vm_refunds_enhancement; @@ -16,3 +18,9 @@ impl StorageInvocations { Self { limit, current: 0 } } } + +impl IntoOldVmTracer for StorageInvocations { + fn old_tracer(&self) -> OldTracers { + OldTracers::StorageInvocations(self.limit) + } +} diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index aef11924af8..c56424f3013 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -12,8 +12,11 @@ use zksync_types::{ }; use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; -use crate::tracers::validator::types::{NewTrustedValidationItems, ValidationTracerMode}; pub use crate::tracers::validator::types::{ValidationError, ValidationTracerParams}; +use crate::{ + glue::tracers::IntoOldVmTracer, + tracers::validator::types::{NewTrustedValidationItems, ValidationTracerMode}, +}; mod types; mod vm_boojum_integration; @@ -216,3 +219,5 @@ fn valid_eth_token_call(address: Address, msg_sender: Address) -> bool { || msg_sender == BOOTLOADER_ADDRESS; address == L2_ETH_TOKEN_ADDRESS && is_valid_caller } + +impl IntoOldVmTracer for ValidationTracer {} diff --git a/core/lib/multivm/src/versions/vm_1_3_2/mod.rs b/core/lib/multivm/src/versions/vm_1_3_2/mod.rs index 
37c5f34ffd0..45fb0cfa388 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/mod.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/mod.rs @@ -10,7 +10,6 @@ pub use self::{ oracle_tools::OracleTools, oracles::storage::StorageOracle, vm::Vm, - vm_instance::{VmBlockResult, VmExecutionResult}, }; mod bootloader_state; @@ -24,8 +23,6 @@ pub mod oracles; mod pubdata_utils; mod refunds; pub mod test_utils; -#[cfg(test)] -mod tests; pub mod transaction_data; pub mod utils; mod vm; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs index 745dcad5050..3b72f89fcbd 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs @@ -7,15 +7,18 @@ use zk_evm_1_3_3::{ }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, - StorageLogQueryType, BOOTLOADER_ADDRESS, U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQueryType, + BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; -use crate::vm_1_3_2::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, +use crate::vm_1_3_2::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, + }, + utils::StorageLogQuery, }; // While the storage does not support different shards, it was decided to write the @@ -174,7 +177,7 @@ impl StorageOracle { .unwrap_or(&[]) } - pub fn get_final_log_queries(&self) -> Vec { + pub(crate) fn get_final_log_queries(&self) -> Vec { assert_eq!( self.frames_stack.len(), 1, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs index 3f31d7b7123..8160f5911a9 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs @@ -5,18 +5,20 @@ use zk_evm_1_3_3::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, zkevm_opcode_defs::{ - FarCallABI, FarCallOpcode, FatPointer, Opcode, RetOpcode, - CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + FarCallABI, FatPointer, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_types::{ vm_trace::{Call, CallType}, + zk_evm_types::FarCallOpcode, U256, }; -use crate::vm_1_3_2::{ - errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory, +use crate::{ + glue::GlueInto, + vm_1_3_2::{errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory}, }; /// NOTE Auto implementing clone for this tracer can cause stack overflow. 
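Circling back to the `old_tracers` dispatcher added earlier: a hypothetical sketch, assumed to sit inside the `multivm` crate (the dispatcher fields are `pub(crate)`), of how a tracer list collapses into the only two parameters the legacy VMs accept.

```rust
// Assumed in-crate code: the fields below are `pub(crate)`, so these
// assertions would not compile from outside `multivm`.
use std::sync::Arc;

use once_cell::sync::OnceCell;

use crate::tracers::old_tracers::{OldTracers, TracerDispatcher};

fn build_legacy_dispatcher() {
    let call_results = Arc::new(OnceCell::new());
    let dispatcher = TracerDispatcher::new(vec![
        OldTracers::CallTracer(call_results.clone()),
        OldTracers::StorageInvocations(10_000),
        OldTracers::None, // unsupported tracers dissolve into `None`
    ]);
    // Only the call-tracer cell and the invocation limit survive the conversion.
    assert!(dispatcher.call_tracer.is_some());
    assert_eq!(dispatcher.storage_invocations, Some(10_000));
}
```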
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs
index 3f31d7b7123..8160f5911a9 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs
@@ -5,18 +5,20 @@ use zk_evm_1_3_3::{
         AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData,
     },
     zkevm_opcode_defs::{
-        FarCallABI, FarCallOpcode, FatPointer, Opcode, RetOpcode,
-        CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER,
+        FarCallABI, FatPointer, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER,
+        RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER,
     },
 };
 use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
 use zksync_types::{
     vm_trace::{Call, CallType},
+    zk_evm_types::FarCallOpcode,
     U256,
 };
 
-use crate::vm_1_3_2::{
-    errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory,
+use crate::{
+    glue::GlueInto,
+    vm_1_3_2::{errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory},
 };
 
 /// NOTE Auto implementing clone for this tracer can cause stack overflow.
@@ -69,7 +71,7 @@ impl<H: HistoryMode> Tracer for CallTracer<H> {
     ) {
         let call_type = match data.opcode.variant.opcode {
            Opcode::NearCall(_) => CallType::NearCall,
-            Opcode::FarCall(far_call) => CallType::Call(far_call),
+            Opcode::FarCall(far_call) => CallType::Call(far_call.glue_into()),
            Opcode::Ret(ret_code) => {
                 self.handle_ret_op_code(state, data, memory, ret_code);
                 return;
@@ -285,7 +287,7 @@ fn filter_near_call(mut call: Call) -> Vec<Call> {
 
 #[cfg(test)]
 mod tests {
-    use zk_evm_1_3_3::zkevm_opcode_defs::FarCallOpcode;
+    use zksync_types::zk_evm_types::FarCallOpcode;
 
     use crate::vm_1_3_2::oracles::tracer::call::{filter_near_call, Call, CallType};
 
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
index c3aa161543a..3c4367232aa 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
@@ -20,8 +20,7 @@ use zksync_types::{
     fee::Fee,
     l2::L2Tx,
     web3::signing::keccak256,
-    Execute, L2ChainId, Nonce, StorageKey, StorageLogQuery, StorageValue,
-    CONTRACT_DEPLOYER_ADDRESS, H256, U256,
+    Execute, L2ChainId, Nonce, StorageKey, StorageValue, CONTRACT_DEPLOYER_ADDRESS, H256, U256,
 };
 use zksync_utils::{
     address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256,
@@ -34,6 +33,7 @@ use crate::vm_1_3_2::{
         AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode, HistoryRecorder,
     },
     memory::SimpleMemory,
+    utils::StorageLogQuery,
     vm_instance::ZkSyncVmState,
     VmInstance,
 };
@@ -69,7 +69,7 @@ pub struct DecommitterTestInnerState<H: HistoryMode> {
 }
 
 #[derive(Clone, PartialEq, Debug)]
-pub struct StorageOracleInnerState<H: HistoryMode> {
+pub(crate) struct StorageOracleInnerState<H: HistoryMode> {
     /// There is no way to "truly" compare the storage pointer,
     /// so we just compare the modified keys. This is reasonable enough.
     pub modified_storage_keys: ModifiedKeysMap,
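Both `oracles/storage.rs` (earlier) and `test_utils.rs` (above) now import `StorageLogQuery` from `vm_1_3_2::utils` instead of `zksync_types`, so the shared types crate no longer has to mirror the `LogQuery` of every pinned `zk_evm` revision. A compilable sketch of that version-local wrapper pattern follows; the field list is hypothetical, inferred from how the type is used in this diff rather than copied from the crate.

```rust
// Stand-ins for zk_evm_1_3_3::aux_structures::LogQuery and the shared
// StorageLogQueryType; only the overall shape matters here.
#[derive(Debug, Clone, Copy, PartialEq)]
enum StorageLogQueryType {
    Read,
    InitialWrite,
    RepeatedWrite,
}

#[derive(Debug, Clone, Copy)]
struct RawLogQuery {
    key: u64,
    written_value: u64,
    rw_flag: bool,
}

// Hypothetical mirror of vm_1_3_2::utils::StorageLogQuery: the VM version
// wraps the raw query of the zk_evm revision it is pinned to, instead of
// exporting a one-size-fits-all struct from zksync_types.
#[derive(Debug, Clone, Copy)]
struct StorageLogQuery {
    log_query: RawLogQuery,
    log_type: StorageLogQueryType,
}

fn main() {
    let log = StorageLogQuery {
        log_query: RawLogQuery { key: 7, written_value: 42, rw_flag: true },
        log_type: StorageLogQueryType::InitialWrite,
    };
    assert_eq!(log.log_type, StorageLogQueryType::InitialWrite);
    println!("{log:?}");
}
```

Narrowing `get_final_log_queries` and `StorageOracleInnerState` to `pub(crate)` in the same change keeps the wrapper type from leaking outside the multivm crate.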
-// use itertools::Itertools;
-// use std::{
-//     collections::{HashMap, HashSet},
-//     convert::{TryFrom, TryInto},
-// };
-// use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner, PrivateKeySigner};
-//
-// use crate::{
-//     errors::VmRevertReason,
-//     history_recorder::HistoryMode,
-//     oracles::tracer::{StorageInvocationTracer, TransactionResultTracer},
-//     test_utils::{
-//         get_create_zksync_address, get_deploy_tx, get_error_tx, mock_loadnext_test_call,
-//         verify_required_storage,
-//     },
-//     tests::utils::{
-//         get_l1_deploy_tx, get_l1_execute_test_contract_tx_with_sender, read_error_contract,
-//         read_long_return_data_contract, read_test_contract,
-//     },
-//     transaction_data::TransactionData,
-//     utils::{
-//         create_test_block_params, read_bootloader_test_code, BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT,
-//     },
-//     vm::{tx_has_failed, VmExecutionStopReason, ZkSyncVmState},
-//     vm_with_bootloader::{
-//         bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx,
-//         push_raw_transaction_to_bootloader_memory, BlockContext, BlockContextMode,
-//         BootloaderJobType, TxExecutionMode,
-//     },
-//     vm_with_bootloader::{
-//         init_vm_inner, push_transaction_to_bootloader_memory, DerivedBlockContext,
-//         BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET,
-//     },
-//     HistoryEnabled, OracleTools, TxRevertReason, VmBlockResult, VmExecutionResult, VmInstance,
-// };
-//
-// use zk_evm_1_3_3::{
-//     aux_structures::Timestamp, block_properties::BlockProperties, zkevm_opcode_defs::FarCallOpcode,
-// };
-// use zksync_state::{InMemoryStorage, ReadStorage, StoragePtr, StorageView, WriteStorage};
-// use zksync_types::{
-//     block::DeployedContract,
-//     ethabi::encode,
-//     ethabi::Token,
-//     fee::Fee,
-//     get_code_key, get_is_account_key, get_known_code_key, get_nonce_key,
-//     l2::L2Tx,
-//     l2_to_l1_log::L2ToL1Log,
-//     storage_writes_deduplicator::StorageWritesDeduplicator,
-//     system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT},
-//     transaction_request::TransactionRequest,
-//     tx::tx_execution_info::TxExecutionStatus,
-//     utils::{
-//         deployed_address_create, storage_key_for_eth_balance,
-//         storage_key_for_standard_token_balance,
-//     },
-//     vm_trace::{Call, CallType},
-//     AccountTreeId, Address, Eip712Domain, Execute, ExecuteTransactionCommon, L1TxCommonData,
-//     L2ChainId, Nonce, PackedEthSignature, Transaction, BOOTLOADER_ADDRESS, H160, H256,
-//     L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE,
-//     REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, SYSTEM_CONTEXT_ADDRESS, U256,
-// };
-// use zksync_utils::{
-//     bytecode::CompressedBytecodeInfo,
-//     test_utils::LoadnextContractExecutionParams,
-//     {bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256},
-// };
-//
-// use zksync_contracts::{
-//     get_loadnext_contract, load_contract, SystemContractCode, PLAYGROUND_BLOCK_BOOTLOADER_CODE,
-// };
-//
-// use super::utils::{read_many_owners_custom_account_contract, read_nonce_holder_tester};
-// /// Helper struct for tests, that takes care of setting the database and provides some functions to get and set balances.
-// /// Example use: -// ///```ignore -// /// let test_env = VmTestEnv::default(); -// /// test_env.set_rich_account(address); -// /// // To create VM and run a single transaction: -// /// test_env.run_vm_or_die(transaction_data); -// /// // To create VM: -// /// let mut helper = VmTestHelper::new(&test_env); -// /// let mut vm = helper.vm(); -// /// ``` -// #[derive(Debug)] -// pub struct VmTestEnv { -// pub block_context: DerivedBlockContext, -// pub block_properties: BlockProperties, -// pub storage_ptr: Box>, -// } -// -// impl VmTestEnv { -// /// Creates a new test helper with a bunch of already deployed contracts. -// pub fn new_with_contracts(contracts: &[(H160, Vec)]) -> Self { -// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { -// let (block_context, block_properties) = create_test_block_params(); -// (block_context.into(), block_properties) -// }; -// -// let mut raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); -// for (address, bytecode) in contracts { -// let account = DeployedContract { -// account_id: AccountTreeId::new(*address), -// bytecode: bytecode.clone(), -// }; -// -// insert_contracts(&mut raw_storage, vec![(account, true)]); -// } -// -// let storage_ptr = Box::new(StorageView::new(raw_storage)); -// -// VmTestEnv { -// block_context, -// block_properties, -// storage_ptr, -// } -// } -// -// /// Gets the current ETH balance for a given account. -// pub fn get_eth_balance(&mut self, address: &H160) -> U256 { -// get_eth_balance(address, self.storage_ptr.as_mut()) -// } -// -// /// Sets a large balance for a given account. -// pub fn set_rich_account(&mut self, address: &H160) { -// let key = storage_key_for_eth_balance(address); -// -// self.storage_ptr -// .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -// } -// -// /// Runs a given transaction in a VM. -// // Note: that storage changes will be preserved, but not changed to events etc. -// // Strongly suggest to use this function only if this is the only transaction executed within the test. -// pub fn run_vm(&mut self, transaction_data: TransactionData) -> (VmExecutionResult, bool) { -// let mut oracle_tools = OracleTools::new(self.storage_ptr.as_mut(), HistoryEnabled); -// let (result, tx_has_failed) = run_vm_with_raw_tx( -// &mut oracle_tools, -// self.block_context, -// &self.block_properties, -// transaction_data, -// ); -// (result, tx_has_failed) -// } -// -// /// Runs a given transaction in a VM and asserts if it fails. -// pub fn run_vm_or_die(&mut self, transaction_data: TransactionData) { -// let (result, tx_has_failed) = self.run_vm(transaction_data); -// assert!( -// !tx_has_failed, -// "Transaction failed with: {:?}", -// result.revert_reason -// ); -// } -// } -// -// impl Default for VmTestEnv { -// fn default() -> Self { -// VmTestEnv::new_with_contracts(&[]) -// } -// } -// -// /// Helper struct to create a default VM for a given environment. 
-// #[derive(Debug)] -// pub struct VmTestHelper<'a> { -// pub oracle_tools: OracleTools<'a, false, HistoryEnabled>, -// pub block_context: DerivedBlockContext, -// pub block_properties: BlockProperties, -// vm_created: bool, -// } -// -// impl<'a> VmTestHelper<'a> { -// pub fn new(test_env: &'a mut VmTestEnv) -> Self { -// let block_context = test_env.block_context; -// let block_properties = test_env.block_properties; -// -// let oracle_tools = OracleTools::new(test_env.storage_ptr.as_mut(), HistoryEnabled); -// VmTestHelper { -// oracle_tools, -// block_context, -// block_properties, -// vm_created: false, -// } -// } -// -// /// Creates the VM that can be used in tests. -// pub fn vm(&'a mut self) -> Box> { -// assert!(!self.vm_created, "Vm can be created only once"); -// let vm = init_vm_inner( -// &mut self.oracle_tools, -// BlockContextMode::NewBlock(self.block_context, Default::default()), -// &self.block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// self.vm_created = true; -// vm -// } -// } -// -// fn run_vm_with_custom_factory_deps<'a, H: HistoryMode>( -// oracle_tools: &'a mut OracleTools<'a, false, H>, -// block_context: BlockContext, -// block_properties: &'a BlockProperties, -// encoded_tx: Vec, -// predefined_overhead: u32, -// expected_error: Option, -// ) { -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); -// let mut vm = init_vm_inner( -// oracle_tools, -// BlockContextMode::OverrideCurrent(block_context.into()), -// block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// vm.bootloader_state.add_tx_data(encoded_tx.len()); -// vm.state.memory.populate_page( -// BOOTLOADER_HEAP_PAGE as usize, -// get_bootloader_memory_for_encoded_tx( -// encoded_tx, -// 0, -// TxExecutionMode::VerifyExecute, -// 0, -// 0, -// predefined_overhead, -// u32::MAX, -// 0, -// vec![], -// ), -// Timestamp(0), -// ); -// -// let result = vm.execute_next_tx(u32::MAX, false).err(); -// -// assert_eq!(expected_error, result); -// } -// -// fn get_balance(token_id: AccountTreeId, account: &Address, main_storage: StoragePtr) -> U256 { -// let key = storage_key_for_standard_token_balance(token_id, account); -// h256_to_u256(main_storage.borrow_mut().read_value(&key)) -// } -// -// fn get_eth_balance(account: &Address, main_storage: &mut StorageView) -> U256 { -// let key = -// storage_key_for_standard_token_balance(AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), account); -// h256_to_u256(main_storage.read_value(&key)) -// } -// -// #[test] -// fn test_dummy_bootloader() { -// let mut vm_test_env = VmTestEnv::default(); -// let mut oracle_tools = OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// let bootloader_code = read_bootloader_test_code("dummy"); -// let bootloader_hash = hash_bytecode(&bootloader_code); -// -// base_system_contracts.bootloader = SystemContractCode { -// code: bytes_to_be_words(bootloader_code), -// hash: bootloader_hash, -// }; -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(vm_test_env.block_context, Default::default()), -// &vm_test_env.block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let VmBlockResult { -// full_result: res, .. 
-// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); -// -// // Dummy bootloader should not panic -// assert!(res.revert_reason.is_none()); -// -// let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); -// -// verify_required_memory( -// &vm.state, -// vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], -// ); -// } -// -// #[test] -// fn test_bootloader_out_of_gas() { -// let mut vm_test_env = VmTestEnv::default(); -// let mut oracle_tools = OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); -// -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// -// let bootloader_code = read_bootloader_test_code("dummy"); -// let bootloader_hash = hash_bytecode(&bootloader_code); -// -// base_system_contracts.bootloader = SystemContractCode { -// code: bytes_to_be_words(bootloader_code), -// hash: bootloader_hash, -// }; -// -// // init vm with only 10 ergs -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(vm_test_env.block_context, Default::default()), -// &vm_test_env.block_properties, -// 10, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let res = vm.execute_block_tip(); -// -// assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); -// } -// -// fn verify_required_memory( -// state: &ZkSyncVmState<'_, H>, -// required_values: Vec<(U256, u32, u32)>, -// ) { -// for (required_value, memory_page, cell) in required_values { -// let current_value = state -// .memory -// .read_slot(memory_page as usize, cell as usize) -// .value; -// assert_eq!(current_value, required_value); -// } -// } -// -// #[test] -// fn test_default_aa_interaction() { -// // In this test, we aim to test whether a simple account interaction (without any fee logic) -// // will work. The account will try to deploy a simple contract from integration tests. -// -// let mut vm_test_env = VmTestEnv::default(); -// -// let operator_address = vm_test_env.block_context.context.operator_address; -// let base_fee = vm_test_env.block_context.base_fee; -// // We deploy here counter contract, because its logic is trivial -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(20000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_data: TransactionData = tx.clone().into(); -// -// let maximal_fee = tx_data.gas_limit * tx_data.max_fee_per_gas; -// let sender_address = tx_data.from(); -// -// vm_test_env.set_rich_account(&sender_address); -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let tx_execution_result = vm -// .execute_next_tx(u32::MAX, false) -// .expect("Bootloader failed while processing transaction"); -// -// assert_eq!( -// tx_execution_result.status, -// TxExecutionStatus::Success, -// "Transaction wasn't successful" -// ); -// -// let VmBlockResult { -// full_result: res, .. 
-// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// // Should not panic -// assert!( -// res.revert_reason.is_none(), -// "Bootloader was not expected to revert: {:?}", -// res.revert_reason -// ); -// -// // Both deployment and ordinary nonce should be incremented by one. -// let account_nonce_key = get_nonce_key(&sender_address); -// let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; -// -// // The code hash of the deployed contract should be marked as republished. -// let known_codes_key = get_known_code_key(&contract_code_hash); -// -// // The contract should be deployed successfully. -// let deployed_address = deployed_address_create(sender_address, U256::zero()); -// let account_code_key = get_code_key(&deployed_address); -// -// let expected_slots = vec![ -// (u256_to_h256(expected_nonce), account_nonce_key), -// (u256_to_h256(U256::from(1u32)), known_codes_key), -// (contract_code_hash, account_code_key), -// ]; -// -// verify_required_storage(&vm.state, expected_slots); -// -// assert!(!tx_has_failed(&vm.state, 0)); -// -// let expected_fee = -// maximal_fee - U256::from(tx_execution_result.gas_refunded) * U256::from(base_fee); -// let operator_balance = get_balance( -// AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), -// &operator_address, -// vm.state.storage.storage.get_ptr(), -// ); -// -// assert_eq!( -// operator_balance, expected_fee, -// "Operator did not receive his fee" -// ); -// } -// -// fn execute_vm_with_predetermined_refund( -// txs: Vec, -// refunds: Vec, -// compressed_bytecodes: Vec>, -// ) -> VmBlockResult { -// let mut vm_test_env = VmTestEnv::default(); -// let block_context = vm_test_env.block_context; -// -// for tx in txs.iter() { -// let sender_address = tx.initiator_account(); -// vm_test_env.set_rich_account(&sender_address); -// } -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// let codes_for_decommiter = txs -// .iter() -// .flat_map(|tx| { -// tx.execute -// .factory_deps -// .clone() -// .unwrap_or_default() -// .iter() -// .map(|dep| bytecode_to_factory_dep(dep.clone())) -// .collect::)>>() -// }) -// .collect(); -// -// vm.state.decommittment_processor.populate( -// codes_for_decommiter, -// Timestamp(vm.state.local_state.timestamp), -// ); -// -// let memory_with_suggested_refund = get_bootloader_memory( -// txs.into_iter().map(Into::into).collect(), -// refunds, -// compressed_bytecodes, -// TxExecutionMode::VerifyExecute, -// BlockContextMode::NewBlock(block_context, Default::default()), -// ); -// -// vm.state.memory.populate_page( -// BOOTLOADER_HEAP_PAGE as usize, -// memory_with_suggested_refund, -// Timestamp(0), -// ); -// -// vm.execute_till_block_end(BootloaderJobType::TransactionExecution) -// } -// -// #[test] -// fn test_predetermined_refunded_gas() { -// // In this test, we compare the execution of the bootloader with the predefined -// // refunded gas and without them -// -// let mut vm_test_env = VmTestEnv::default(); -// let base_fee = vm_test_env.block_context.base_fee; -// -// // We deploy here counter contract, because its logic is trivial -// let contract_code = read_test_contract(); -// let published_bytecode = CompressedBytecodeInfo::from_original(contract_code.clone()).unwrap(); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(20000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: 
U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let sender_address = tx.initiator_account(); -// -// // set balance -// vm_test_env.set_rich_account(&sender_address); -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let tx_execution_result = vm -// .execute_next_tx(u32::MAX, false) -// .expect("Bootloader failed while processing transaction"); -// -// assert_eq!( -// tx_execution_result.status, -// TxExecutionStatus::Success, -// "Transaction wasn't successful" -// ); -// -// // If the refund provided by the operator or the final refund are the 0 -// // there is no impact of the operator's refund at all and so this test does not -// // make much sense. -// assert!( -// tx_execution_result.operator_suggested_refund > 0, -// "The operator's refund is 0" -// ); -// assert!( -// tx_execution_result.gas_refunded > 0, -// "The final refund is 0" -// ); -// -// let mut result = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// assert!( -// result.full_result.revert_reason.is_none(), -// "Bootloader was not expected to revert: {:?}", -// result.full_result.revert_reason -// ); -// -// let mut result_with_predetermined_refund = execute_vm_with_predetermined_refund( -// vec![tx], -// vec![tx_execution_result.operator_suggested_refund], -// vec![vec![published_bytecode]], -// ); -// // We need to sort these lists as those are flattened from HashMaps -// result.full_result.used_contract_hashes.sort(); -// result_with_predetermined_refund -// .full_result -// .used_contract_hashes -// .sort(); -// -// assert_eq!( -// result.full_result.events, -// result_with_predetermined_refund.full_result.events -// ); -// assert_eq!( -// result.full_result.l2_to_l1_logs, -// result_with_predetermined_refund.full_result.l2_to_l1_logs -// ); -// assert_eq!( -// result.full_result.storage_log_queries, -// result_with_predetermined_refund -// .full_result -// .storage_log_queries -// ); -// assert_eq!( -// result.full_result.used_contract_hashes, -// result_with_predetermined_refund -// .full_result -// .used_contract_hashes -// ); -// } -// -// #[derive(Debug, Clone)] -// enum TransactionRollbackTestInfo { -// Rejected(Transaction, TxRevertReason), -// Processed(Transaction, bool, TxExecutionStatus), -// } -// -// impl TransactionRollbackTestInfo { -// fn new_rejected(transaction: Transaction, revert_reason: TxRevertReason) -> Self { -// Self::Rejected(transaction, revert_reason) -// } -// -// fn new_processed( -// transaction: Transaction, -// should_be_rollbacked: bool, -// expected_status: TxExecutionStatus, -// ) -> Self { -// Self::Processed(transaction, should_be_rollbacked, expected_status) -// } -// -// fn get_transaction(&self) -> &Transaction { -// match self { -// TransactionRollbackTestInfo::Rejected(tx, _) => tx, -// TransactionRollbackTestInfo::Processed(tx, _, _) => tx, -// } -// } -// -// fn rejection_reason(&self) -> Option { -// match self { -// TransactionRollbackTestInfo::Rejected(_, revert_reason) => Some(revert_reason.clone()), -// TransactionRollbackTestInfo::Processed(_, _, _) => None, -// } -// } -// -// fn should_rollback(&self) -> bool { -// match self { -// TransactionRollbackTestInfo::Rejected(_, _) => true, -// TransactionRollbackTestInfo::Processed(_, x, _) => *x, -// } -// } -// -// fn expected_status(&self) -> TxExecutionStatus { -// match self { 
-// TransactionRollbackTestInfo::Rejected(_, _) => { -// panic!("There is no execution status for rejected transaction") -// } -// TransactionRollbackTestInfo::Processed(_, _, status) => *status, -// } -// } -// } -// -// // Accepts the address of the sender as well as the list of pairs of its transactions -// // and whether these transactions should succeed. -// fn execute_vm_with_possible_rollbacks( -// sender_address: Address, -// transactions: Vec, -// block_context: DerivedBlockContext, -// block_properties: BlockProperties, -// ) -> VmExecutionResult { -// let mut vm_test_env = VmTestEnv { -// block_context, -// block_properties, -// ..Default::default() -// }; -// -// // Setting infinite balance for the sender. -// vm_test_env.set_rich_account(&sender_address); -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// for test_info in transactions { -// vm.save_current_vm_as_snapshot(); -// let vm_state_before_tx = vm.dump_inner_state(); -// push_transaction_to_bootloader_memory( -// &mut vm, -// test_info.get_transaction(), -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// match vm.execute_next_tx(u32::MAX, false) { -// Err(reason) => { -// assert_eq!(test_info.rejection_reason(), Some(reason)); -// } -// Ok(res) => { -// assert_eq!(test_info.rejection_reason(), None); -// assert_eq!( -// res.status, -// test_info.expected_status(), -// "Transaction status is not correct" -// ); -// } -// }; -// -// if test_info.should_rollback() { -// // Some error has occurred, we should reject the transaction -// vm.rollback_to_latest_snapshot(); -// -// // vm_state_before_tx. -// let state_after_rollback = vm.dump_inner_state(); -// assert_eq!( -// vm_state_before_tx, state_after_rollback, -// "Did not rollback VM state correctly" -// ); -// } -// } -// -// let VmBlockResult { -// full_result: mut result, -// .. -// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); -// // Used contract hashes are retrieved in unordered manner. -// // However it must be sorted for the comparisons in tests to work -// result.used_contract_hashes.sort(); -// -// result -// } -// -// // Sets the signature for an L2 transaction and returns the same transaction -// // but this different signature. 
-// fn change_signature(mut tx: Transaction, signature: Vec) -> Transaction { -// tx.common_data = match tx.common_data { -// ExecuteTransactionCommon::L2(mut data) => { -// data.signature = signature; -// ExecuteTransactionCommon::L2(data) -// } -// _ => unreachable!(), -// }; -// -// tx -// } -// -// #[test] -// fn test_vm_rollbacks() { -// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { -// let (block_context, block_properties) = create_test_block_params(); -// (block_context.into(), block_properties) -// }; -// -// let base_fee = U256::from(block_context.base_fee); -// -// let sender_private_key = H256::random(); -// let contract_code = read_test_contract(); -// -// let tx_nonce_0: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(12000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_nonce_1: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(1), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(12000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_nonce_2: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(2), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(12000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let wrong_signature_length_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 32]); -// let wrong_v_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 65]); -// let wrong_signature_tx = change_signature(tx_nonce_0.clone(), vec![27u8; 65]); -// -// let sender_address = tx_nonce_0.initiator_account(); -// -// let result_without_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// // The nonces are ordered correctly, all the transactions should succeed. 
-// TransactionRollbackTestInfo::new_processed( -// tx_nonce_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_2.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// ], -// block_context, -// block_properties, -// ); -// -// let incorrect_nonce = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Incorrect nonce".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, -// 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// }); -// let reusing_nonce_twice = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Reusing the same nonce twice".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, -// 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, -// 0, 0, 0, -// ], -// }); -// let signature_length_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Signature length is incorrect".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, -// 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, -// 116, 0, 0, 0, -// ], -// }); -// let v_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "v is neither 27 nor 28".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, -// 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// }); -// let signature_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), -// data: vec![], -// }); -// -// let result_with_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// TransactionRollbackTestInfo::new_rejected( -// wrong_signature_length_tx, -// signature_length_is_incorrect, -// ), -// TransactionRollbackTestInfo::new_rejected(wrong_v_tx, v_is_incorrect), -// TransactionRollbackTestInfo::new_rejected(wrong_signature_tx, signature_is_incorrect), -// // The correct nonce is 0, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce.clone()), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The correct nonce is 1, this tx will fail -// TransactionRollbackTestInfo::new_rejected( -// tx_nonce_0.clone(), -// reusing_nonce_twice.clone(), -// ), -// // The correct nonce is 1, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_1, -// false, -// TxExecutionStatus::Success, -// ), -// // The correct nonce is 2, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_0, reusing_nonce_twice.clone()), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_2.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // This tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2, reusing_nonce_twice.clone()), -// ], -// block_context, -// block_properties, -// ); -// -// assert_eq!(result_without_rollbacks, result_with_rollbacks); -// -// let loadnext_contract = get_loadnext_contract(); -// -// let loadnext_constructor_data = encode(&[Token::Uint(U256::from(100))]); -// let loadnext_deploy_tx: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(0), -// &loadnext_contract.bytecode, -// loadnext_contract.factory_deps, -// &loadnext_constructor_data, -// Fee { -// gas_limit: U256::from(70000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let loadnext_contract_address = -// get_create_zksync_address(loadnext_deploy_tx.initiator_account(), Nonce(0)); -// let deploy_loadnext_tx_info = TransactionRollbackTestInfo::new_processed( -// loadnext_deploy_tx, -// false, -// TxExecutionStatus::Success, -// ); -// -// let get_load_next_tx = |params: LoadnextContractExecutionParams, nonce: Nonce| { -// // Here we test loadnext with various kinds of operations -// let tx: Transaction = mock_loadnext_test_call( -// sender_private_key, -// nonce, -// loadnext_contract_address, -// Fee { -// gas_limit: U256::from(100000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// params, -// ) -// .into(); -// -// tx -// }; -// -// let loadnext_tx_0 = get_load_next_tx( -// LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// }, -// Nonce(1), -// ); -// let loadnext_tx_1 = get_load_next_tx( -// LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// }, -// Nonce(2), -// ); -// -// let result_without_rollbacks = 
execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// deploy_loadnext_tx_info.clone(), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// ], -// block_context, -// block_properties, -// ); -// -// let result_with_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// deploy_loadnext_tx_info, -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The nonce has been bumped up, this transaction should now fail -// TransactionRollbackTestInfo::new_rejected(loadnext_tx_0, reusing_nonce_twice.clone()), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The nonce has been bumped up, this transaction should now fail -// TransactionRollbackTestInfo::new_rejected(loadnext_tx_1, reusing_nonce_twice), -// ], -// block_context, -// block_properties, -// ); -// -// assert_eq!(result_without_rollbacks, result_with_rollbacks); -// } -// -// // Inserts the contracts into the test environment, bypassing the -// // deployer system contract. Besides the reference to storage -// // it accepts a `contracts` tuple of information about the contract -// // and whether or not it is an account. 
-// fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: Vec<(DeployedContract, bool)>) { -// for (contract, is_account) in contracts { -// let deployer_code_key = get_code_key(contract.account_id.address()); -// raw_storage.set_value(deployer_code_key, hash_bytecode(&contract.bytecode)); -// -// if is_account { -// let is_account_key = get_is_account_key(contract.account_id.address()); -// raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); -// } -// -// raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); -// } -// } -// -// enum NonceHolderTestMode { -// SetValueUnderNonce, -// IncreaseMinNonceBy5, -// IncreaseMinNonceTooMuch, -// LeaveNonceUnused, -// IncreaseMinNonceBy1, -// SwitchToArbitraryOrdering, -// } -// -// impl From for u8 { -// fn from(mode: NonceHolderTestMode) -> u8 { -// match mode { -// NonceHolderTestMode::SetValueUnderNonce => 0, -// NonceHolderTestMode::IncreaseMinNonceBy5 => 1, -// NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, -// NonceHolderTestMode::LeaveNonceUnused => 3, -// NonceHolderTestMode::IncreaseMinNonceBy1 => 4, -// NonceHolderTestMode::SwitchToArbitraryOrdering => 5, -// } -// } -// } -// -// fn get_nonce_holder_test_tx( -// nonce: U256, -// account_address: Address, -// test_mode: NonceHolderTestMode, -// block_context: &DerivedBlockContext, -// ) -> TransactionData { -// TransactionData { -// tx_type: 113, -// from: account_address, -// to: account_address, -// gas_limit: U256::from(10000000u32), -// pubdata_price_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// max_fee_per_gas: U256::from(block_context.base_fee), -// max_priority_fee_per_gas: U256::zero(), -// nonce, -// // The reserved fields that are unique for different types of transactions. -// // E.g. nonce is currently used in all transaction, but it should not be mandatory -// // in the long run. -// reserved: [U256::zero(); 4], -// data: vec![12], -// signature: vec![test_mode.into()], -// -// ..Default::default() -// } -// } -// -// fn run_vm_with_raw_tx<'a, H: HistoryMode>( -// oracle_tools: &'a mut OracleTools<'a, false, H>, -// block_context: DerivedBlockContext, -// block_properties: &'a BlockProperties, -// tx: TransactionData, -// ) -> (VmExecutionResult, bool) { -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); -// let mut vm = init_vm_inner( -// oracle_tools, -// BlockContextMode::OverrideCurrent(block_context), -// block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let block_gas_price_per_pubdata = block_context.context.block_gas_price_per_pubdata(); -// -// let overhead = tx.overhead_gas(block_gas_price_per_pubdata as u32); -// push_raw_transaction_to_bootloader_memory( -// &mut vm, -// tx, -// TxExecutionMode::VerifyExecute, -// overhead, -// None, -// ); -// let VmBlockResult { -// full_result: result, -// .. 
-// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// -// (result, tx_has_failed(&vm.state, 0)) -// } -// -// #[test] -// fn test_nonce_holder() { -// let account_address = H160::random(); -// let mut vm_test_env = -// VmTestEnv::new_with_contracts(&[(account_address, read_nonce_holder_tester())]); -// -// vm_test_env.set_rich_account(&account_address); -// -// let mut run_nonce_test = |nonce: U256, -// test_mode: NonceHolderTestMode, -// error_message: Option, -// comment: &'static str| { -// let tx = get_nonce_holder_test_tx( -// nonce, -// account_address, -// test_mode, -// &vm_test_env.block_context, -// ); -// -// let (result, tx_has_failed) = vm_test_env.run_vm(tx); -// if let Some(msg) = error_message { -// let expected_error = -// TxRevertReason::ValidationFailed(VmRevertReason::General { msg, data: vec![] }); -// assert_eq!( -// result -// .revert_reason -// .expect("No revert reason") -// .revert_reason -// .to_string(), -// expected_error.to_string(), -// "{}", -// comment -// ); -// } else { -// assert!(!tx_has_failed, "{}", comment); -// } -// }; -// -// // Test 1: trying to set value under non sequential nonce value. -// run_nonce_test( -// 1u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// Some("Previous nonce has not been used".to_string()), -// "Allowed to set value under non sequential value", -// ); -// -// // Test 2: increase min nonce by 1 with sequential nonce ordering: -// run_nonce_test( -// 0u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy1, -// None, -// "Failed to increment nonce by 1 for sequential account", -// ); -// -// // Test 3: correctly set value under nonce with sequential nonce ordering: -// run_nonce_test( -// 1u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// None, -// "Failed to set value under nonce sequential value", -// ); -// -// // Test 5: migrate to the arbitrary nonce ordering: -// run_nonce_test( -// 2u32.into(), -// NonceHolderTestMode::SwitchToArbitraryOrdering, -// None, -// "Failed to switch to arbitrary ordering", -// ); -// -// // Test 6: increase min nonce by 5 -// run_nonce_test( -// 6u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// None, -// "Failed to increase min nonce by 5", -// ); -// -// // Test 7: since the nonces in range [6,10] are no longer allowed, the -// // tx with nonce 10 should not be allowed -// run_nonce_test( -// 10u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// Some("Reusing the same nonce twice".to_string()), -// "Allowed to reuse nonce below the minimal one", -// ); -// -// // Test 8: we should be able to use nonce 13 -// run_nonce_test( -// 13u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// None, -// "Did not allow to use unused nonce 10", -// ); -// -// // Test 9: we should not be able to reuse nonce 13 -// run_nonce_test( -// 13u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// Some("Reusing the same nonce twice".to_string()), -// "Allowed to reuse the same nonce twice", -// ); -// -// // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 -// run_nonce_test( -// 14u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// None, -// "Did not allow to use a bumped nonce", -// ); -// -// // Test 6: Do not allow bumping nonce by too much -// run_nonce_test( -// 16u32.into(), -// NonceHolderTestMode::IncreaseMinNonceTooMuch, -// Some("The value for incrementing the nonce is too high".to_string()), -// "Allowed for incrementing min nonce too much", -// ); -// -// // Test 
7: Do not allow not setting a nonce as used -// run_nonce_test( -// 16u32.into(), -// NonceHolderTestMode::LeaveNonceUnused, -// Some("The nonce was not set as used".to_string()), -// "Allowed to leave nonce as unused", -// ); -// } -// -// #[test] -// fn test_l1_tx_execution() { -// // In this test, we try to execute a contract deployment from L1 -// let mut vm_test_env = VmTestEnv::default(); -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// // Here instead of marking code hash via the bootloader means, we will -// // using L1->L2 communication, the same it would likely be done during the priority mode. -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); -// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); -// -// let required_l2_to_l1_logs = vec![ -// L2ToL1Log { -// shard_id: 0, -// is_service: false, -// tx_number_in_block: 0, -// sender: SYSTEM_CONTEXT_ADDRESS, -// key: u256_to_h256(U256::from(vm_helper.block_context.context.block_timestamp)), -// value: Default::default(), -// }, -// L2ToL1Log { -// shard_id: 0, -// is_service: true, -// tx_number_in_block: 0, -// sender: BOOTLOADER_ADDRESS, -// key: l1_deploy_tx_data.canonical_l1_tx_hash(), -// value: u256_to_h256(U256::from(1u32)), -// }, -// ]; -// -// let sender_address = l1_deploy_tx_data.from(); -// -// vm_helper.oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let mut vm = vm_helper.vm(); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &l1_deploy_tx, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// let res = vm.execute_next_tx(u32::MAX, false).unwrap(); -// -// // The code hash of the deployed contract should be marked as republished. -// let known_codes_key = get_known_code_key(&contract_code_hash); -// -// // The contract should be deployed successfully. 
-// let deployed_address = deployed_address_create(sender_address, U256::zero()); -// let account_code_key = get_code_key(&deployed_address); -// -// let expected_slots = vec![ -// (u256_to_h256(U256::from(1u32)), known_codes_key), -// (contract_code_hash, account_code_key), -// ]; -// assert!(!tx_has_failed(&vm.state, 0)); -// -// verify_required_storage(&vm.state, expected_slots); -// -// assert_eq!(res.result.logs.l2_to_l1_logs, required_l2_to_l1_logs); -// -// let tx = get_l1_execute_test_contract_tx(deployed_address, true); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let res = StorageWritesDeduplicator::apply_on_empty_state( -// &vm.execute_next_tx(u32::MAX, false) -// .unwrap() -// .result -// .logs -// .storage_logs, -// ); -// assert_eq!(res.initial_storage_writes, 0); -// -// let tx = get_l1_execute_test_contract_tx(deployed_address, false); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// let res = StorageWritesDeduplicator::apply_on_empty_state( -// &vm.execute_next_tx(u32::MAX, false) -// .unwrap() -// .result -// .logs -// .storage_logs, -// ); -// assert_eq!(res.initial_storage_writes, 2); -// -// let repeated_writes = res.repeated_storage_writes; -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// let res = StorageWritesDeduplicator::apply_on_empty_state( -// &vm.execute_next_tx(u32::MAX, false) -// .unwrap() -// .result -// .logs -// .storage_logs, -// ); -// assert_eq!(res.initial_storage_writes, 1); -// // We do the same storage write, so it will be deduplicated -// assert_eq!(res.repeated_storage_writes, repeated_writes); -// -// let mut tx = get_l1_execute_test_contract_tx(deployed_address, false); -// tx.execute.value = U256::from(1); -// match &mut tx.common_data { -// ExecuteTransactionCommon::L1(l1_data) => { -// l1_data.to_mint = U256::from(4); -// } -// _ => unreachable!(), -// } -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// let execution_result = vm.execute_next_tx(u32::MAX, false).unwrap(); -// // The method is not payable, so the transaction with non-zero value should fail -// assert_eq!( -// execution_result.status, -// TxExecutionStatus::Failure, -// "The transaction should fail" -// ); -// -// let res = -// StorageWritesDeduplicator::apply_on_empty_state(&execution_result.result.logs.storage_logs); -// -// // There are 2 initial writes here: -// // - totalSupply of ETH token -// // - balance of the refund recipient -// assert_eq!(res.initial_storage_writes, 2); -// } -// -// #[test] -// fn test_invalid_bytecode() { -// let mut vm_test_env = VmTestEnv::default(); -// -// let block_gas_per_pubdata = vm_test_env -// .block_context -// .context -// .block_gas_price_per_pubdata(); -// -// let mut test_vm_with_custom_bytecode_hash = -// |bytecode_hash: H256, expected_revert_reason: Option| { -// let mut oracle_tools = -// OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); -// -// let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( -// h256_to_u256(bytecode_hash), -// block_gas_per_pubdata as u32, -// ); -// -// run_vm_with_custom_factory_deps( -// &mut oracle_tools, -// vm_test_env.block_context.context, -// &vm_test_env.block_properties, -// encoded_tx, -// predefined_overhead, -// expected_revert_reason, -// ); -// }; -// -// let failed_to_mark_factory_deps = |msg: &str, data: Vec| { -// 
TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { -// msg: msg.to_string(), -// data, -// }) -// }; -// -// // Here we provide the correctly-formatted bytecode hash of -// // odd length, so it should work. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// None, -// ); -// -// // Here we provide correctly formatted bytecode of even length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Code length in words must be odd", -// vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, -// 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, -// 32, 98, 101, 32, 111, 100, 100, -// ], -// )), -// ); -// -// // Here we provide incorrectly formatted bytecode of odd length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Incorrectly formatted bytecodeHash", -// vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, -// 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, -// 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// )), -// ); -// -// // Here we provide incorrectly formatted bytecode of odd length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Incorrectly formatted bytecodeHash", -// vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, -// 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, -// 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// )), -// ); -// } -// -// #[test] -// fn test_tracing_of_execution_errors() { -// // In this test, we are checking that the execution errors are transmitted correctly from the bootloader. 
-// let contract_address = Address::random(); -// -// let mut vm_test_env = -// VmTestEnv::new_with_contracts(&[(contract_address, read_error_contract())]); -// -// let private_key = H256::random(); -// -// let tx = get_error_tx( -// private_key, -// Nonce(0), -// contract_address, -// Fee { -// gas_limit: U256::from(1000000u32), -// max_fee_per_gas: U256::from(10000000000u64), -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ); -// -// vm_test_env.set_rich_account(&tx.common_data.initiator_address); -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &tx.into(), -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// let mut tracer = TransactionResultTracer::new(usize::MAX, false); -// assert_eq!( -// vm.execute_with_custom_tracer(&mut tracer), -// VmExecutionStopReason::VmFinished, -// "Tracer should never request stop" -// ); -// -// match tracer.revert_reason { -// Some(revert_reason) => { -// let revert_reason = VmRevertReason::try_from(&revert_reason as &[u8]).unwrap(); -// assert_eq!( -// revert_reason, -// VmRevertReason::General { -// msg: "short".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, -// 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0 -// ], -// } -// ) -// } -// _ => panic!( -// "Tracer captured incorrect result {:#?}", -// tracer.revert_reason -// ), -// } -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// let tx = get_error_tx( -// private_key, -// Nonce(1), -// contract_address, -// Fee { -// gas_limit: U256::from(1000000u32), -// max_fee_per_gas: U256::from(10000000000u64), -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &tx.into(), -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// let mut tracer = TransactionResultTracer::new(10, false); -// assert_eq!( -// vm.execute_with_custom_tracer(&mut tracer), -// VmExecutionStopReason::TracerRequestedStop, -// ); -// assert!(tracer.is_limit_reached()); -// } -// -// /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -// #[test] -// fn test_tx_gas_limit_offset() { -// let gas_limit = U256::from(999999); -// let mut vm_test_env = VmTestEnv::default(); -// -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// Default::default(), -// Default::default(), -// Fee { -// gas_limit, -// ..Default::default() -// }, -// ) -// .into(); -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let gas_limit_from_memory = vm -// .state -// .memory -// .read_slot( -// BOOTLOADER_HEAP_PAGE as usize, -// TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, -// ) -// .value; -// assert_eq!(gas_limit_from_memory, gas_limit); -// } -// -// #[test] -// fn test_is_write_initial_behaviour() { -// // In this test, we check result of `is_write_initial` at different stages. 
-// let mut vm_test_env = VmTestEnv::default(); -// -// let base_fee = vm_test_env.block_context.base_fee; -// let account_pk = H256::random(); -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// account_pk, -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(20000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let sender_address = tx.initiator_account(); -// let nonce_key = get_nonce_key(&sender_address); -// -// // Check that the next write to the nonce key will be initial. -// assert!(vm_test_env.storage_ptr.is_write_initial(&nonce_key)); -// -// // Set balance to be able to pay fee for txs. -// vm_test_env.set_rich_account(&sender_address); -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// vm.execute_next_tx(u32::MAX, false) -// .expect("Bootloader failed while processing the first transaction"); -// // Check that `is_write_initial` still returns true for the nonce key. -// assert!(vm_test_env.storage_ptr.is_write_initial(&nonce_key)); -// } -// -// pub fn get_l1_tx_with_custom_bytecode_hash( -// bytecode_hash: U256, -// block_gas_per_pubdata: u32, -// ) -> (Vec, u32) { -// let tx: TransactionData = get_l1_execute_test_contract_tx(Default::default(), false).into(); -// let predefined_overhead = -// tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash], block_gas_per_pubdata); -// let tx_bytes = tx.abi_encode_with_custom_factory_deps(vec![bytecode_hash]); -// -// (bytes_to_be_words(tx_bytes), predefined_overhead) -// } -// -// pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { -// let sender = H160::random(); -// get_l1_execute_test_contract_tx_with_sender( -// sender, -// deployed_address, -// with_panic, -// U256::zero(), -// false, -// ) -// } -// -// pub fn get_l1_tx_with_large_output(sender: Address, deployed_address: Address) -> Transaction { -// let test_contract = load_contract( -// "etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json", -// ); -// -// let function = test_contract.function("longReturnData").unwrap(); -// -// let calldata = function -// .encode_input(&[]) -// .expect("failed to encode parameters"); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender, -// gas_limit: U256::from(100000000u32), -// gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute: Execute { -// contract_address: deployed_address, -// calldata, -// value: U256::zero(), -// factory_deps: None, -// }, -// received_timestamp_ms: 0, -// } -// } -// -// #[test] -// fn test_call_tracer() { -// let mut vm_test_env = VmTestEnv::default(); -// -// let sender = H160::random(); -// -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); -// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); -// -// let sender_address_counter = l1_deploy_tx_data.from(); -// -// vm_test_env.set_rich_account(&sender_address_counter); -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// -// 
vm_helper.oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let contract_code = read_long_return_data_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_long_return_data_tx = get_l1_deploy_tx(&contract_code, &[]); -// vm_helper.oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let tx_data: TransactionData = l1_deploy_long_return_data_tx.clone().into(); -// let sender_long_return_address = tx_data.from(); -// // The contract should be deployed successfully. -// let deployed_address_long_return_data = -// deployed_address_create(sender_long_return_address, U256::zero()); -// let mut vm = vm_helper.vm(); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &l1_deploy_tx, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// // The contract should be deployed successfully. -// let deployed_address = deployed_address_create(sender_address_counter, U256::zero()); -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// let mut create_call = None; -// // The first MIMIC call is a call to the value simulator; all calls go through it. -// // The second MIMIC call is a call to the Deployer contract. -// // Only the third-level call is the constructor call to the newly deployed contract, and we call it `create_call`. -// for call in &calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in &call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in &call.calls { -// if let CallType::Create = call.r#type { -// create_call = Some(call.clone()); -// } -// } -// } -// } -// } -// } -// let expected = Call { -// r#type: CallType::Create, -// to: deployed_address, -// from: sender_address_counter, -// parent_gas: 0, -// gas_used: 0, -// gas: 0, -// value: U256::zero(), -// input: vec![], -// output: vec![ -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, -// ], -// error: None, -// revert_reason: None, -// calls: vec![], -// }; -// assert_eq!(create_call.unwrap(), expected); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &l1_deploy_long_return_data_tx, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// vm.execute_next_tx(u32::MAX, false).unwrap(); -// -// let tx = get_l1_execute_test_contract_tx_with_sender( -// sender, -// deployed_address, -// false, -// U256::from(1u8), -// true, -// ); -// -// let tx_data: TransactionData = tx.clone().into(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// -// // We don't want to compare gas used, because it's not fully deterministic.
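As an aside, the nesting described in the comments above (value simulator MIMIC frame, then Deployer MIMIC frame, then the constructor CREATE frame) is easier to see as a standalone depth-first search. The following is a minimal sketch with simplified stand-in types; `CallKind`/`TraceCall` are assumptions for illustration, not the crate's `Call`/`CallType`. The expected `Call` value right below is then compared against the frame such a search finds.

```rust
// Illustrative only: simplified stand-ins for the crate's call-trace types.
#[derive(Clone, Debug, PartialEq)]
enum CallKind {
    Mimic,
    Create,
    Other,
}

#[derive(Clone, Debug)]
struct TraceCall {
    kind: CallKind,
    calls: Vec<TraceCall>,
}

// Depth-first search for the first CREATE frame, mirroring the nested loops in the test.
fn find_create(call: &TraceCall) -> Option<&TraceCall> {
    if call.kind == CallKind::Create {
        return Some(call);
    }
    call.calls.iter().find_map(find_create)
}

fn main() {
    // value simulator (MIMIC) -> Deployer (MIMIC) -> constructor (CREATE)
    let trace = TraceCall {
        kind: CallKind::Mimic,
        calls: vec![
            TraceCall { kind: CallKind::Other, calls: vec![] },
            TraceCall {
                kind: CallKind::Mimic,
                calls: vec![TraceCall { kind: CallKind::Create, calls: vec![] }],
            },
        ],
    };
    assert_eq!(find_create(&trace).unwrap().kind, CallKind::Create);
}
```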
-// let expected = Call { -// r#type: CallType::Call(FarCallOpcode::Mimic), -// to: deployed_address, -// from: tx_data.from(), -// parent_gas: 0, -// gas_used: 0, -// gas: 0, -// value: U256::from(1), -// input: tx_data.data, -// output: vec![ -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 1, -// ], -// error: None, -// revert_reason: None, -// calls: vec![], -// }; -// -// // The first loop filters out the bootloader's calls, and -// // the second loop filters out the msg value simulator's calls -// for call in calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// assert_eq!(expected, call); -// } -// } -// } -// } -// -// let tx = get_l1_execute_test_contract_tx_with_sender( -// sender, -// deployed_address, -// true, -// U256::from(1u8), -// true, -// ); -// -// let tx_data: TransactionData = tx.clone().into(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// -// let expected = Call { -// r#type: CallType::Call(FarCallOpcode::Mimic), -// to: deployed_address, -// from: tx_data.from(), -// parent_gas: 257030, -// gas_used: 348, -// gas: 253008, -// value: U256::from(1u8), -// input: tx_data.data, -// output: vec![], -// error: None, -// revert_reason: Some("This method always reverts".to_string()), -// calls: vec![], -// }; -// -// for call in calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// assert_eq!(expected, call); -// } -// } -// } -// } -// -// let tx = get_l1_tx_with_large_output(sender, deployed_address_long_return_data); -// -// let tx_data: TransactionData = tx.clone().into(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// assert_ne!(deployed_address_long_return_data, deployed_address); -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// for call in calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// assert_eq!(call.input, tx_data.data); -// assert_eq!( -// call.revert_reason, -// Some("Unknown revert reason".to_string()) -// ); -// } -// } -// } -// } -// } -// -// #[test] -// fn test_get_used_contracts() { -// let mut vm_test_env = VmTestEnv::default(); -// -// let mut vm_helper = VmTestHelper::new(&mut vm_test_env); -// let mut vm = vm_helper.vm(); -// -// assert!(known_bytecodes_without_aa_code(&vm).is_empty()); -// -// // Create, push, and execute a non-empty factory deps transaction with success status -// // to check that get_used_contracts() updates -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let tx1 = get_l1_deploy_tx(&contract_code, &[]); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx1, TxExecutionMode::VerifyExecute, None); -// -// let res1 = vm.execute_next_tx(u32::MAX, true).unwrap(); -// assert_eq!(res1.status, TxExecutionStatus::Success); -// assert!(vm -// .get_used_contracts() -// .contains(&h256_to_u256(contract_code_hash))); -// -// assert_eq!( -// vm.get_used_contracts() -// .into_iter() -// .collect::<HashSet<U256>>(), -//
known_bytecodes_without_aa_code(&vm) -// .keys() -// .cloned() -// .collect::<HashSet<U256>>() -// ); -// -// // Create, push, and execute a non-empty factory deps transaction that fails -// // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) -// -// let mut tx2 = tx1; -// tx2.execute.contract_address = L1_MESSENGER_ADDRESS; -// -// let calldata = vec![1, 2, 3]; -// let big_calldata: Vec<u8> = calldata -// .iter() -// .cycle() -// .take(calldata.len() * 1024) -// .cloned() -// .collect(); -// -// tx2.execute.calldata = big_calldata; -// tx2.execute.factory_deps = Some(vec![vec![1; 32]]); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx2, TxExecutionMode::VerifyExecute, None); -// -// let res2 = vm.execute_next_tx(u32::MAX, false).unwrap(); -// -// assert_eq!(res2.status, TxExecutionStatus::Failure); -// -// for factory_dep in tx2.execute.factory_deps.unwrap() { -// let hash = hash_bytecode(&factory_dep); -// let hash_to_u256 = h256_to_u256(hash); -// assert!(known_bytecodes_without_aa_code(&vm) -// .keys() -// .contains(&hash_to_u256)); -// assert!(!vm.get_used_contracts().contains(&hash_to_u256)); -// } -// } -// -// fn known_bytecodes_without_aa_code(vm: &VmInstance) -> HashMap<U256, Vec<U256>> { -// let mut known_bytecodes_without_aa_code = vm -// .state -// .decommittment_processor -// .known_bytecodes -// .inner() -// .clone(); -// -// known_bytecodes_without_aa_code -// .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) -// .unwrap(); -// -// known_bytecodes_without_aa_code -// } -// -// #[tokio::test] -// /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -// /// and EIP712 transactions. -// /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -// async fn test_require_eip712() { -// // Use 3 accounts: -// // - private_address - EOA account, where we have the key -// // - account_address - AA account, where the contract is deployed -// // - beneficiary - an EOA account, where we'll try to transfer the tokens. -// let account_address = H160::random(); -// -// let (bytecode, contract) = read_many_owners_custom_account_contract(); -// -// let mut vm_test_env = VmTestEnv::new_with_contracts(&[(account_address, bytecode)]); -// -// let beneficiary = H160::random(); -// -// assert_eq!(vm_test_env.get_eth_balance(&beneficiary), U256::from(0)); -// -// let private_key = H256::random(); -// let private_address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); -// let pk_signer = PrivateKeySigner::new(private_key); -// -// vm_test_env.set_rich_account(&account_address); -// vm_test_env.set_rich_account(&private_address); -// -// let chain_id: u16 = 270; -// -// // First, let's set the owners of the AA account to the private_address. -// // (so that messages signed by private_address are authorized to act on behalf of the AA account). -// { -// let set_owners_function = contract.function("setOwners").unwrap(); -// let encoded_input = set_owners_function -// .encode_input(&[Token::Array(vec![Token::Address(private_address)])]); -// -// // Create a legacy transaction to set the owners.
-// let raw_tx = TransactionParameters { -// nonce: U256::from(0), -// to: Some(account_address), -// gas: U256::from(100000000), -// gas_price: Some(U256::from(10000000)), -// value: U256::from(0), -// data: encoded_input.unwrap(), -// chain_id: chain_id as u64, -// transaction_type: None, -// access_list: None, -// max_fee_per_gas: U256::from(1000000000), -// max_priority_fee_per_gas: U256::from(1000000000), -// }; -// let txn = pk_signer.sign_transaction(raw_tx).await.unwrap(); -// -// let (txn_request, hash) = TransactionRequest::from_bytes(&txn, chain_id).unwrap(); -// -// let mut l2_tx: L2Tx = L2Tx::from_request(txn_request, 100000).unwrap(); -// l2_tx.set_input(txn, hash); -// let transaction: Transaction = l2_tx.try_into().unwrap(); -// let transaction_data: TransactionData = transaction.try_into().unwrap(); -// -// vm_test_env.run_vm_or_die(transaction_data); -// } -// -// let private_account_balance = vm_test_env.get_eth_balance(&private_address); -// -// // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). -// // Normally this would not work - unless the operator is malicious. -// { -// let aa_raw_tx = TransactionParameters { -// nonce: U256::from(0), -// to: Some(beneficiary), -// gas: U256::from(100000000), -// gas_price: Some(U256::from(10000000)), -// value: U256::from(888000088), -// data: vec![], -// chain_id: 270, -// transaction_type: None, -// access_list: None, -// max_fee_per_gas: U256::from(1000000000), -// max_priority_fee_per_gas: U256::from(1000000000), -// }; -// -// let aa_txn = pk_signer.sign_transaction(aa_raw_tx).await.unwrap(); -// -// let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&aa_txn, 270).unwrap(); -// -// let mut l2_tx: L2Tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); -// l2_tx.set_input(aa_txn, aa_hash); -// // Pretend that operator is malicious and sets the initiator to the AA account. -// l2_tx.common_data.initiator_address = account_address; -// -// let transaction: Transaction = l2_tx.try_into().unwrap(); -// -// let transaction_data: TransactionData = transaction.try_into().unwrap(); -// -// vm_test_env.run_vm_or_die(transaction_data); -// assert_eq!( -// vm_test_env.get_eth_balance(&beneficiary), -// U256::from(888000088) -// ); -// // Make sure that the tokens were transferred from the AA account. 
-// assert_eq!( -// private_account_balance, -// vm_test_env.get_eth_balance(&private_address) -// ) -// } -// -// // Now send the 'classic' EIP712 transaction -// { -// let tx_712 = L2Tx::new( -// beneficiary, -// vec![], -// Nonce(1), -// Fee { -// gas_limit: U256::from(1000000000), -// max_fee_per_gas: U256::from(1000000000), -// max_priority_fee_per_gas: U256::from(1000000000), -// gas_per_pubdata_limit: U256::from(1000000000), -// }, -// account_address, -// U256::from(28374938), -// None, -// Default::default(), -// ); -// -// let transaction_request: TransactionRequest = tx_712.into(); -// -// let domain = Eip712Domain::new(L2ChainId(chain_id)); -// let signature = pk_signer -// .sign_typed_data(&domain, &transaction_request) -// .await -// .unwrap(); -// let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(chain_id)); -// -// let (aa_txn_request, aa_hash) = -// TransactionRequest::from_bytes(&encoded_tx, chain_id).unwrap(); -// -// let mut l2_tx: L2Tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); -// l2_tx.set_input(encoded_tx, aa_hash); -// -// let transaction: Transaction = l2_tx.try_into().unwrap(); -// let transaction_data: TransactionData = transaction.try_into().unwrap(); -// -// vm_test_env.run_vm_or_die(transaction_data); -// -// assert_eq!( -// vm_test_env.get_eth_balance(&beneficiary), -// U256::from(916375026) -// ); -// assert_eq!( -// private_account_balance, -// vm_test_env.get_eth_balance(&private_address) -// ); -// } -// } -// ``` \ No newline at end of file diff --git a/core/lib/multivm/src/versions/vm_1_3_2/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_3_2/tests/mod.rs deleted file mode 100644 index 04448987b1c..00000000000 --- a/core/lib/multivm/src/versions/vm_1_3_2/tests/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -// ``` -// mod bootloader; -// mod upgrades; - -// mod utils; -// ``` diff --git a/core/lib/multivm/src/versions/vm_1_3_2/tests/upgrades.rs b/core/lib/multivm/src/versions/vm_1_3_2/tests/upgrades.rs deleted file mode 100644 index cd3857d46da..00000000000 --- a/core/lib/multivm/src/versions/vm_1_3_2/tests/upgrades.rs +++ /dev/null @@ -1,379 +0,0 @@ -// ``` -// use crate::{ -// test_utils::verify_required_storage, -// tests::utils::get_l1_deploy_tx, -// utils::{create_test_block_params, BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT}, -// vm::tx_has_failed, -// vm_with_bootloader::{init_vm_inner, push_transaction_to_bootloader_memory}, -// vm_with_bootloader::{BlockContextMode, TxExecutionMode}, -// HistoryEnabled, OracleTools, TxRevertReason, -// }; -// -// use zk_evm_1_3_3::aux_structures::Timestamp; -// -// use zksync_types::{ -// ethabi::Contract, -// tx::tx_execution_info::TxExecutionStatus, -// Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, -// REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -// {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, -// {get_code_key, get_known_code_key, H160}, -// }; -// -// use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -// -// use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -// use zksync_state::WriteStorage; -// -// use crate::tests::utils::create_storage_view; -// use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; -// -// use super::utils::read_test_contract; -// -// /// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -// /// - This transaction must be the only 
one in the block -// /// - If present, this transaction must be the first one in the block -// #[test] -// fn test_protocol_upgrade_is_first() { -// let mut storage_view = create_storage_view(); -// let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); -// let (block_context, block_properties) = create_test_block_params(); -// -// let bytecode_hash = hash_bytecode(&read_test_contract()); -// -// // Here we just use some random transaction of protocol upgrade type: -// let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { -// // The bytecode hash to put on an address -// bytecode_hash, -// // The address on which to deploy the bytecode hash -// address: H160::random(), -// // Whether to run the constructor on the force deployment -// call_constructor: false, -// // The value with which to initialize a contract -// value: U256::zero(), -// // The constructor calldata -// input: vec![], -// }]); -// -// let normal_l1_transaction = get_l1_deploy_tx(&read_test_contract(), &[]); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// let expected_error = TxRevertReason::UnexpectedVMBehavior( -// "Assertion error: Protocol upgrade tx not first".to_string(), -// ); -// -// // Test 1: there must be only one system transaction in the block -// vm.save_current_vm_as_snapshot(); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &protocol_upgrade_transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &normal_l1_transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &protocol_upgrade_transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// vm.execute_next_tx(u32::MAX, false).unwrap(); -// vm.execute_next_tx(u32::MAX, false).unwrap(); -// let res = vm.execute_next_tx(u32::MAX, false); -// assert_eq!(res, Err(expected_error.clone())); -// -// // Test 2: the protocol upgrade tx must be the first one in the block -// vm.rollback_to_latest_snapshot(); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &normal_l1_transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &protocol_upgrade_transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// vm.execute_next_tx(u32::MAX, false).unwrap(); -// let res = vm.execute_next_tx(u32::MAX, false); -// assert_eq!(res, Err(expected_error)); -// } -// -// /// In this test we check how force deployments can be done via protocol upgrade transactions. -// #[test] -// fn test_force_deploy_upgrade() { -// let mut storage_view = create_storage_view(); -// -// let bytecode_hash = hash_bytecode(&read_test_contract()); -// -// let known_code_key = get_known_code_key(&bytecode_hash); -// // It is generally expected that all the keys will be set as known prior to the protocol upgrade.
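Before moving on to the force-deployment test that continues below, the two invariants asserted in `test_protocol_upgrade_is_first` above can be stated compactly. The following is a minimal sketch, not the bootloader's actual enforcement code; `is_upgrade` is a hypothetical stand-in for checking whether a transaction's common data is `ExecuteTransactionCommon::ProtocolUpgrade`.

```rust
// Sketch of the placement rule: at most one protocol upgrade tx per block,
// and if one is present it must be the first transaction.
fn upgrade_tx_placement_is_valid(is_upgrade: &[bool]) -> bool {
    let upgrade_count = is_upgrade.iter().filter(|&&x| x).count();
    match upgrade_count {
        0 => true,          // no upgrade tx: nothing to enforce
        1 => is_upgrade[0], // the single upgrade tx must come first
        _ => false,         // more than one upgrade tx is never allowed
    }
}

fn main() {
    assert!(upgrade_tx_placement_is_valid(&[true, false, false]));
    assert!(!upgrade_tx_placement_is_valid(&[false, true]));       // not first
    assert!(!upgrade_tx_placement_is_valid(&[true, false, true])); // not unique
}
```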
-// storage_view.set_value(known_code_key, u256_to_h256(1.into())); -// -// let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); -// let (block_context, block_properties) = create_test_block_params(); -// -// let address_to_deploy = H160::random(); -// // Here we just use some random transaction of protocol upgrade type: -// let transaction = get_forced_deploy_tx(&[ForceDeployment { -// // The bytecode hash to put on an address -// bytecode_hash, -// // The address on which to deploy the bytecodehash to -// address: address_to_deploy, -// // Whether to run the constructor on the force deployment -// call_constructor: false, -// // The value with which to initialize a contract -// value: U256::zero(), -// // The constructor calldata -// input: vec![], -// }]); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// let result = vm.execute_next_tx(u32::MAX, false).unwrap(); -// assert_eq!( -// result.status, -// TxExecutionStatus::Success, -// "The force upgrade was not successful" -// ); -// assert!(!tx_has_failed(&vm.state, 0)); -// -// let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; -// -// // Verify that the bytecode has been set correctly -// verify_required_storage(&vm.state, expected_slots); -// } -// -// /// Here we show how the work with the complex upgrader could be done -// #[test] -// fn test_complex_upgrader() { -// let mut storage_view = create_storage_view(); -// -// let bytecode_hash = hash_bytecode(&read_complex_upgrade()); -// let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); -// -// // Let's assume that the bytecode for the implementation of the complex upgrade -// // is already deployed in some address in userspace -// let upgrade_impl = H160::random(); -// let account_code_key = get_code_key(&upgrade_impl); -// -// storage_view.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); -// storage_view.set_value( -// get_known_code_key(&msg_sender_test_hash), -// u256_to_h256(1.into()), -// ); -// storage_view.set_value(account_code_key, bytecode_hash); -// -// let mut oracle_tools: OracleTools = -// OracleTools::new(&mut storage_view, HistoryEnabled); -// oracle_tools.decommittment_processor.populate( -// vec![ -// ( -// h256_to_u256(bytecode_hash), -// bytes_to_be_words(read_complex_upgrade()), -// ), -// ( -// h256_to_u256(msg_sender_test_hash), -// bytes_to_be_words(read_msg_sender_test()), -// ), -// ], -// Timestamp(0), -// ); -// -// let (block_context, block_properties) = create_test_block_params(); -// -// let address_to_deploy1 = H160::random(); -// let address_to_deploy2 = H160::random(); -// -// let transaction = get_complex_upgrade_tx( -// upgrade_impl, -// address_to_deploy1, -// address_to_deploy2, -// bytecode_hash, -// ); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &transaction, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// let result = vm.execute_next_tx(u32::MAX, false).unwrap(); -// 
assert_eq!( -// result.status, -// TxExecutionStatus::Success, -// "The force upgrade was not successful" -// ); -// assert!(!tx_has_failed(&vm.state, 0)); -// -// let expected_slots = vec![ -// (bytecode_hash, get_code_key(&address_to_deploy1)), -// (bytecode_hash, get_code_key(&address_to_deploy2)), -// ]; -// -// // Verify that the bytecode has been set correctly -// verify_required_storage(&vm.state, expected_slots); -// } -// -// #[derive(Debug, Clone)] -// struct ForceDeployment { -// // The bytecode hash to put on an address -// bytecode_hash: H256, -// // The address on which to deploy the bytecodehash to -// address: Address, -// // Whether to run the constructor on the force deployment -// call_constructor: bool, -// // The value with which to initialize a contract -// value: U256, -// // The constructor calldata -// input: Vec, -// } -// -// fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { -// let deployer = deployer_contract(); -// let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); -// -// let encoded_deployments: Vec<_> = deployment -// .iter() -// .map(|deployment| { -// Token::Tuple(vec![ -// Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), -// Token::Address(deployment.address), -// Token::Bool(deployment.call_constructor), -// Token::Uint(deployment.value), -// Token::Bytes(deployment.input.clone()), -// ]) -// }) -// .collect(); -// -// let params = [Token::Array(encoded_deployments)]; -// -// let calldata = contract_function -// .encode_input(¶ms) -// .expect("failed to encode parameters"); -// -// let execute = Execute { -// contract_address: CONTRACT_DEPLOYER_ADDRESS, -// calldata, -// factory_deps: None, -// value: U256::zero(), -// }; -// -// Transaction { -// common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { -// sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, -// gas_limit: U256::from(200_000_000u32), -// gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// // Returns the transaction that performs a complex protocol upgrade. 
-// // The first param is the address of the implementation of the complex upgrade -// // in user-space, while the next 3 params are params of the implementation itself. -// // For an explanation of the parameters, please refer to: -// // etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -// fn get_complex_upgrade_tx( -// implementation_address: Address, -// address1: Address, -// address2: Address, -// bytecode_hash: H256, -// ) -> Transaction { -// let impl_contract = get_complex_upgrade_abi(); -// let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); -// let impl_calldata = impl_function -// .encode_input(&[ -// Token::Address(address1), -// Token::Address(address2), -// Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), -// ]) -// .unwrap(); -// -// let complex_upgrader = get_complex_upgrader_abi(); -// let upgrade_function = complex_upgrader.function("upgrade").unwrap(); -// let complex_upgrader_calldata = upgrade_function -// .encode_input(&[ -// Token::Address(implementation_address), -// Token::Bytes(impl_calldata), -// ]) -// .unwrap(); -// -// let execute = Execute { -// contract_address: COMPLEX_UPGRADER_ADDRESS, -// calldata: complex_upgrader_calldata, -// factory_deps: None, -// value: U256::zero(), -// }; -// -// Transaction { -// common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { -// sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, -// gas_limit: U256::from(200_000_000u32), -// gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// fn read_complex_upgrade() -> Vec<u8> { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -// } -// -// fn read_msg_sender_test() -> Vec<u8> { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -// } -// -// fn get_complex_upgrade_abi() -> Contract { -// load_contract( -// "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" -// ) -// } -// -// fn get_complex_upgrader_abi() -> Contract { -// load_sys_contract("ComplexUpgrader") -// } -// ``` \ No newline at end of file diff --git a/core/lib/multivm/src/versions/vm_1_3_2/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/tests/utils.rs deleted file mode 100644 index b2231f05cf1..00000000000 --- a/core/lib/multivm/src/versions/vm_1_3_2/tests/utils.rs +++ /dev/null @@ -1,112 +0,0 @@ -// ``` -// //! -// //! Tests for the bootloader -// //! The description for each of the tests can be found in the corresponding `.yul` file. -// //!
-// use zksync_types::{ -// ethabi::Contract, -// Execute, L1TxCommonData, H160, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -// {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, U256}, -// }; -// -// use zksync_contracts::{load_contract, read_bytecode}; -// use zksync_state::{InMemoryStorage, StorageView}; -// use zksync_utils::bytecode::hash_bytecode; -// -// use crate::test_utils::get_create_execute; -// -// pub fn read_test_contract() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -// } -// -// pub fn read_long_return_data_contract() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json") -// } -// -// pub fn read_nonce_holder_tester() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -// } -// -// pub fn read_error_contract() -> Vec { -// read_bytecode( -// "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", -// ) -// } -// -// pub fn read_many_owners_custom_account_contract() -> (Vec, Contract) { -// let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; -// (read_bytecode(path), load_contract(path)) -// } -// -// pub fn get_l1_execute_test_contract_tx_with_sender( -// sender: Address, -// deployed_address: Address, -// with_panic: bool, -// value: U256, -// payable: bool, -// ) -> Transaction { -// let execute = execute_test_contract(deployed_address, with_panic, value, payable); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender, -// gas_limit: U256::from(200_000_000u32), -// gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), -// to_mint: value, -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// fn execute_test_contract( -// address: Address, -// with_panic: bool, -// value: U256, -// payable: bool, -// ) -> Execute { -// let test_contract = load_contract( -// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", -// ); -// -// let function = if payable { -// test_contract -// .function("incrementWithRevertPayable") -// .unwrap() -// } else { -// test_contract.function("incrementWithRevert").unwrap() -// }; -// -// let calldata = function -// .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) -// .expect("failed to encode parameters"); -// -// Execute { -// contract_address: address, -// calldata, -// value, -// factory_deps: None, -// } -// } -// -// pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { -// let execute = get_create_execute(code, calldata); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender: H160::random(), -// gas_limit: U256::from(2000000u32), -// gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// pub fn create_storage_view() -> StorageView { -// let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); -// StorageView::new(raw_storage) -// } -// ``` \ No newline at end of file diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs index a7956d473ab..51732ccaa79 100644 --- 
a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs @@ -1,6 +1,6 @@ use once_cell::sync::Lazy; use zk_evm_1_3_3::{ - aux_structures::{MemoryPage, Timestamp}, + aux_structures::{LogQuery, MemoryPage, Timestamp}, block_properties::BlockProperties, vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, @@ -8,7 +8,7 @@ use zk_evm_1_3_3::{ use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; use zksync_state::WriteStorage; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -use zksync_types::{Address, H160, MAX_L2_TX_GAS_LIMIT, U256}; +use zksync_types::{Address, StorageLogQueryType, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; use crate::vm_1_3_2::{ @@ -252,3 +252,10 @@ pub(crate) fn calculate_computational_gas_used< 0 }) } + +/// Log query, which handles initial and repeated writes to the storage +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct StorageLogQuery { + pub log_query: LogQuery, + pub log_type: StorageLogQueryType, +} diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index be99537187b..ea5647c5636 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,5 +1,6 @@ use std::collections::HashSet; +use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, @@ -17,6 +18,7 @@ use crate::{ L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, + tracers::old_tracers::TracerDispatcher, vm_1_3_2::{events::merge_events, VmInstance}, }; @@ -29,8 +31,7 @@ pub struct Vm { } impl VmInterface for Vm { - /// Tracers are not supported for vm 1.3.2.
So we use `()` as a placeholder - type TracerDispatcher = (); + type TracerDispatcher = TracerDispatcher; fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { let oracle_tools = crate::vm_1_3_2::OracleTools::new(storage.clone()); @@ -68,18 +69,31 @@ impl VmInterface for Vm { fn inspect( &mut self, - _tracer: Self::TracerDispatcher, + tracer: Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { + if let Some(storage_invocations) = tracer.storage_invocations { + self.vm + .execution_mode + .set_invocation_limit(storage_invocations); + } + match execution_mode { VmExecutionMode::OneTx => { match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { - // Even that call tracer is supported here, we don't use it now - self.vm.execute_next_tx( + let enable_call_tracer = tracer + .call_tracer.is_some(); + let result = self.vm.execute_next_tx( self.system_env.default_validation_computational_gas_limit, - false, - ).glue_into() + enable_call_tracer, + ); + if let (Ok(result), Some(call_tracer)) = (&result, &tracer.call_tracer) { + call_tracer.set( result.call_traces.clone()).unwrap(); + + } + result.glue_into() + } TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self.vm .execute_till_block_end( @@ -143,9 +157,21 @@ impl VmInterface for Vm { .cloned() .collect(); + let storage_log_queries = self.vm.state.storage.get_final_log_queries(); + + let deduped_storage_log_queries = + sort_storage_access_queries(storage_log_queries.iter().map(|log| &log.log_query)).1; + CurrentExecutionState { events, - storage_log_queries: self.vm.state.storage.get_final_log_queries(), + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduped_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes, user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], @@ -159,13 +185,18 @@ impl VmInterface for Vm { fn inspect_transaction_with_bytecode_compression( &mut self, - _tracer: Self::TracerDispatcher, + tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> ( Result<(), BytecodeCompressionError>, VmExecutionResultAndLogs, ) { + if let Some(storage_invocations) = tracer.storage_invocations { + self.vm + .execution_mode + .set_invocation_limit(storage_invocations); + } self.last_tx_compressed_bytecodes = vec![]; let bytecodes = if with_compression { let deps = tx.execute.factory_deps.as_deref().unwrap_or_default(); @@ -205,13 +236,17 @@ impl VmInterface for Vm { // Even that call tracer is supported here, we don't use it. let result = match self.system_env.execution_mode { - TxExecutionMode::VerifyExecute => self - .vm - .execute_next_tx( + TxExecutionMode::VerifyExecute => { + let enable_call_tracer = tracer.call_tracer.is_some(); + let result = self.vm.execute_next_tx( self.system_env.default_validation_computational_gas_limit, - false, - ) - .glue_into(), + enable_call_tracer, + ); + if let (Ok(result), Some(call_tracer)) = (&result, &tracer.call_tracer) { + call_tracer.set(result.call_traces.clone()).unwrap(); + } + result.glue_into() + } TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm .execute_till_block_end( @@ -249,6 +284,12 @@ impl VmInterface for Vm { } } + fn has_enough_gas_for_batch_tip(&self) -> bool { + // For this version this overhead has not been calculated and it has not been used with those versions. 
+ // We return some value just in case for backwards compatibility + true + } + fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index 3fe3f1929fd..fabdf541b71 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -1,5 +1,6 @@ use std::{convert::TryFrom, fmt::Debug}; +use itertools::Itertools; use zk_evm_1_3_3::{ aux_structures::Timestamp, vm_state::{PrimitiveValue, VmLocalState, VmState}, @@ -14,10 +15,11 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, tx::tx_execution_info::TxExecutionStatus, vm_trace::{Call, VmExecutionTrace, VmTrace}, - L1BatchNumber, StorageLogQuery, VmEvent, H256, U256, + L1BatchNumber, VmEvent, H256, U256, }; use crate::{ + glue::GlueInto, interface::types::outputs::VmExecutionLogs, vm_1_3_2::{ bootloader_state::BootloaderState, @@ -39,7 +41,7 @@ use crate::{ }, utils::{ calculate_computational_gas_used, dump_memory_page_using_primitive_value, - precompile_calls_count_after_timestamp, + precompile_calls_count_after_timestamp, StorageLogQuery, }, vm_with_bootloader::{ BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, @@ -417,7 +419,7 @@ impl VmInstance { .collect(); ( events, - l1_messages.into_iter().map(L2ToL1Log::from).collect(), + l1_messages.into_iter().map(GlueInto::glue_into).collect(), ) } @@ -428,7 +430,7 @@ impl VmInstance { .storage_log_queries_after_timestamp(from_timestamp) .to_vec(); let storage_logs_count = storage_logs.len(); - let storage_logs = storage_logs.iter().map(|x| **x).collect(); + let storage_logs = storage_logs.iter().map(|x| **x).collect_vec(); let (events, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); @@ -443,7 +445,7 @@ impl VmInstance { from_timestamp, ); VmExecutionLogs { - storage_logs, + storage_logs: storage_logs.into_iter().map(GlueInto::glue_into).collect(), events, user_l2_to_l1_logs: l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), total_log_queries_count: storage_logs_count @@ -770,7 +772,8 @@ impl VmInstance { e.into_vm_event(L1BatchNumber(self.block_context.context.block_number)) }) .collect(); - full_result.l2_to_l1_logs = l1_messages.into_iter().map(L2ToL1Log::from).collect(); + full_result.l2_to_l1_logs = + l1_messages.into_iter().map(GlueInto::glue_into).collect(); full_result.computational_gas_used = block_tip_result.computational_gas_used; VmBlockResult { full_result, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index ceaf3d634ad..b01cf7f12ef 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -245,6 +245,18 @@ impl TxExecutionMode { } => *missed_storage_invocation_limit, } } + + pub fn set_invocation_limit(&mut self, limit: usize) { + match self { + Self::VerifyExecute => {} + TxExecutionMode::EstimateFee { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit = limit, + TxExecutionMode::EthCall { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit = limit, + } + } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/constants.rs b/core/lib/multivm/src/versions/vm_boojum_integration/constants.rs index 
29a67aa20a6..bf6a4947359 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/constants.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/constants.rs @@ -6,6 +6,9 @@ use zksync_system_constants::{L1_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_ use crate::vm_boojum_integration::old_vm::utils::heap_page_from_base; +/// The amount of ergs to be reserved at the end of the batch to ensure that it has enough ergs to verify compression, etc. +pub(crate) const BOOTLOADER_BATCH_TIP_OVERHEAD: u32 = 80_000_000; + /// The size of the bootloader memory in bytes which is used by the protocol. /// While the maximal possible size is a lot higher, we restrict ourselves to a certain limit to reduce /// the requirements on RAM. diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 1d1d19f92b7..0f608c83333 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -9,7 +9,8 @@ use crate::{ vm_boojum_integration::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, tracers::{ - dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, + circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, + DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, vm::Vm, }, @@ -80,7 +81,7 @@ impl Vm { spent_pubdata_counter_before, pubdata_published, logs.total_log_queries_count, - tx_tracer.circuits_tracer.estimated_circuits_used, + circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs index af307af55e2..73be046d797 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs @@ -7,6 +7,7 @@ use zksync_types::{ }; use crate::{ + glue::GlueInto, interface::types::outputs::VmExecutionLogs, vm_boojum_integration::{ old_vm::utils::precompile_calls_count_after_timestamp, utils::logs, vm::Vm, @@ -47,7 +48,10 @@ impl Vm { storage_logs_count + log_queries.len() + precompile_calls_count; VmExecutionLogs { - storage_logs, + storage_logs: storage_logs + .into_iter() + .map(|log| log.glue_into()) + .collect(), events, user_l2_to_l1_logs: user_logs .into_iter() diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 36780c8b845..2bb0f1dc0f7 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -1,6 +1,6 @@ use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::U256; +use zksync_types::{circuit::CircuitStatistic, U256}; use crate::{ interface::{VmExecutionStatistics, VmMemoryMetrics}, @@ -24,7 +24,7 @@ impl Vm { spent_pubdata_counter_before: u32, pubdata_published: u32, total_log_queries_count: usize, - estimated_circuits_used: f32, + circuit_statistic: CircuitStatistic, ) -> VmExecutionStatistics { let computational_gas_used = self.calculate_computational_gas_used( tracer, @@ 
-41,7 +41,7 @@ impl Vm { computational_gas_used, total_log_queries: total_log_queries_count, pubdata_published, - estimated_circuits_used, + circuit_statistic, } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs b/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs index 1367f83f4e5..1ea8c7822e1 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs @@ -12,17 +12,19 @@ use zksync_types::{ compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, BYTES_PER_ENUMERATION_INDEX, }, - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + AccountTreeId, Address, StorageKey, StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; -use crate::vm_boojum_integration::old_vm::{ - history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, +use crate::vm_boojum_integration::{ + old_vm::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, + }, + oracles::OracleWithHistory, }, - oracles::OracleWithHistory, + utils::logs::StorageLogQuery, }; // While the storage does not support different shards, it was decided to write the diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs index 7c5b8ee2a2d..7d429b22513 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs @@ -1,3 +1,4 @@ +use zkevm_test_harness_1_4_0::geometry_config::get_geometry_config; use zksync_types::{Address, Execute, U256}; use crate::{ @@ -30,15 +31,34 @@ fn test_circuits() { vm.vm.push_transaction(tx); let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - const EXPECTED_CIRCUITS_USED: f32 = 4.8685; - let delta = - (res.statistics.estimated_circuits_used - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; + let statistic = res.statistics.circuit_statistic; + // Check `circuit_statistic`. + assert!(statistic.main_vm > f32::EPSILON); + assert!(statistic.ram_permutation > f32::EPSILON); + assert!(statistic.storage_application > f32::EPSILON); + assert!(statistic.storage_sorter > f32::EPSILON); + assert!(statistic.code_decommitter > f32::EPSILON); + assert!(statistic.code_decommitter_sorter > f32::EPSILON); + assert!(statistic.log_demuxer > f32::EPSILON); + assert!(statistic.events_sorter > f32::EPSILON); + assert!(statistic.keccak256 > f32::EPSILON); + // Single `ecrecover` should be used to validate tx signature. + assert_eq!( + statistic.ecrecover, + 1.0 / get_geometry_config().cycles_per_ecrecover_circuit as f32 + ); + // `sha256` shouldn't be used. 
+ assert_eq!(statistic.sha256, 0.0); + + const EXPECTED_CIRCUITS_USED: f32 = 4.6363; + let delta = (statistic.total_f32() - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; if delta.abs() > 0.1 { panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}", + "Estimation differs from expected result by too much: {}%, expected value: {}, got {}", delta * 100.0, - res.statistics.estimated_circuits_used + EXPECTED_CIRCUITS_USED, + statistic.total_f32(), ); } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs index 33fa6677de2..5ba932a2a10 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs @@ -1,85 +1,67 @@ use zkevm_test_harness_1_4_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; - -const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); -const OVERESTIMATE_PERCENT: f32 = 1.05; - -const MAIN_VM_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32; - -const CODE_DECOMMITTER_SORTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_code_decommitter_sorter as f32; - -const LOG_DEMUXER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_log_demuxer as f32; - -const STORAGE_SORTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_storage_sorter as f32; - -const EVENTS_OR_L1_MESSAGES_SORTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_events_or_l1_messages_sorter as f32; - -const RAM_PERMUTATION_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_ram_permutation as f32; - -pub(crate) const CODE_DECOMMITTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_code_decommitter as f32; - -pub(crate) const STORAGE_APPLICATION_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_storage_application as f32; - -pub(crate) const KECCAK256_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_keccak256_circuit as f32; - -pub(crate) const SHA256_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32; - -pub(crate) const ECRECOVER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32; +use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. // In the worst case, a rich addressing may take 3 ram permutations // (1 for reading the opcode, 1 for writing input value, 1 for writing output value). -pub(crate) const RICH_ADDRESSING_OPCODE_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + 3.0 * RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const RICH_ADDRESSING_OPCODE_RAM_CYCLES: u32 = 3; -pub(crate) const AVERAGE_OPCODE_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const AVERAGE_OPCODE_RAM_CYCLES: u32 = 1; -// Here "base" fraction is a fraction that will be used unconditionally. -// Usage of `StorageApplication` is being tracked separately as it depends on whether slot was read before or not. 
-pub(crate) const STORAGE_READ_BASE_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + LOG_DEMUXER_CYCLE_FRACTION - + STORAGE_SORTER_CYCLE_FRACTION; +pub(crate) const STORAGE_READ_RAM_CYCLES: u32 = 1; +pub(crate) const STORAGE_READ_LOG_DEMUXER_CYCLES: u32 = 1; +pub(crate) const STORAGE_READ_STORAGE_SORTER_CYCLES: u32 = 1; +pub(crate) const STORAGE_READ_STORAGE_APPLICATION_CYCLES: u32 = 1; -pub(crate) const EVENT_OR_L1_MESSAGE_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + 2.0 * LOG_DEMUXER_CYCLE_FRACTION - + 2.0 * EVENTS_OR_L1_MESSAGES_SORTER_CYCLE_FRACTION; +pub(crate) const EVENT_RAM_CYCLES: u32 = 1; +pub(crate) const EVENT_LOG_DEMUXER_CYCLES: u32 = 2; +pub(crate) const EVENT_EVENTS_SORTER_CYCLES: u32 = 2; -// Here "base" fraction is a fraction that will be used unconditionally. -// Usage of `StorageApplication` is being tracked separately as it depends on whether slot was written before or not. -pub(crate) const STORAGE_WRITE_BASE_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + 2.0 * LOG_DEMUXER_CYCLE_FRACTION - + 2.0 * STORAGE_SORTER_CYCLE_FRACTION; +pub(crate) const STORAGE_WRITE_RAM_CYCLES: u32 = 1; +pub(crate) const STORAGE_WRITE_LOG_DEMUXER_CYCLES: u32 = 2; +pub(crate) const STORAGE_WRITE_STORAGE_SORTER_CYCLES: u32 = 2; +pub(crate) const STORAGE_WRITE_STORAGE_APPLICATION_CYCLES: u32 = 2; -pub(crate) const FAR_CALL_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + STORAGE_SORTER_CYCLE_FRACTION - + CODE_DECOMMITTER_SORTER_CYCLE_FRACTION; +pub(crate) const FAR_CALL_RAM_CYCLES: u32 = 1; +pub(crate) const FAR_CALL_STORAGE_SORTER_CYCLES: u32 = 1; +pub(crate) const FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES: u32 = 1; // 5 RAM permutations, because: 1 to read opcode + 2 reads + 2 writes. // 2 reads and 2 writes are needed because unaligned access is implemented with // aligned queries. -pub(crate) const UMA_WRITE_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + 5.0 * RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const UMA_WRITE_RAM_CYCLES: u32 = 5; // 3 RAM permutations, because: 1 to read opcode + 2 reads. // 2 reads are needed because unaligned access is implemented with aligned queries. 
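To make the "2 reads because unaligned access is implemented with aligned queries" reasoning above concrete, here is a small standalone sketch; the 32-byte word size and the helper are assumptions for illustration, not VM internals. The deleted and replacement constants that follow encode exactly this bookkeeping.

```rust
// Illustration only: a 32-byte access starting at `offset` touches these
// aligned word indices (inclusive). An unaligned access spans two words,
// hence two aligned read queries in the worst case.
fn aligned_words_touched(offset: usize) -> (usize, usize) {
    const WORD_SIZE: usize = 32; // assumed word size for the sketch
    let first = offset / WORD_SIZE;
    let last = (offset + WORD_SIZE - 1) / WORD_SIZE;
    (first, last)
}

fn main() {
    assert_eq!(aligned_words_touched(64), (2, 2)); // aligned: one word suffices
    assert_eq!(aligned_words_touched(70), (2, 3)); // unaligned: spills into two words
}
```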
-pub(crate) const UMA_READ_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + 3.0 * RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const UMA_READ_RAM_CYCLES: u32 = 3; + +pub(crate) const PRECOMPILE_RAM_CYCLES: u32 = 1; +pub(crate) const PRECOMPILE_LOG_DEMUXER_CYCLES: u32 = 1; + +const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); -pub(crate) const PRECOMPILE_CALL_COMMON_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + RAM_PERMUTATION_CYCLE_FRACTION + LOG_DEMUXER_CYCLE_FRACTION; +pub(crate) fn circuit_statistic_from_cycles(cycles: CircuitCycleStatistic) -> CircuitStatistic { + CircuitStatistic { + main_vm: cycles.main_vm_cycles as f32 / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32, + ram_permutation: cycles.ram_permutation_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ram_permutation as f32, + storage_application: cycles.storage_application_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_application as f32, + storage_sorter: cycles.storage_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_sorter as f32, + code_decommitter: cycles.code_decommitter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_code_decommitter as f32, + code_decommitter_sorter: cycles.code_decommitter_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_code_decommitter_sorter as f32, + log_demuxer: cycles.log_demuxer_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_log_demuxer as f32, + events_sorter: cycles.events_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_events_or_l1_messages_sorter as f32, + keccak256: cycles.keccak256_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_keccak256_circuit as f32, + ecrecover: cycles.ecrecover_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, + sha256: cycles.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + } +} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs index e6b52221e02..27f4cc6db00 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs @@ -6,6 +6,7 @@ use zk_evm_1_4_0::{ zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -20,29 +21,16 @@ use crate::{ /// Tracer responsible for collecting information about the circuits used during execution.
#[derive(Debug)] -pub(crate) struct CircuitsTracer { - pub(crate) estimated_circuits_used: f32, +pub(crate) struct CircuitsTracer { + pub(crate) statistics: CircuitCycleStatistic, last_decommitment_history_entry_checked: Option, last_written_keys_history_entry_checked: Option, last_read_keys_history_entry_checked: Option, last_precompile_inner_entry_checked: Option, - _phantom_data: PhantomData, + _phantom_data: PhantomData<(S, H)>, } -impl CircuitsTracer { - pub(crate) fn new() -> Self { - Self { - estimated_circuits_used: 0.0, - last_decommitment_history_entry_checked: None, - last_written_keys_history_entry_checked: None, - last_read_keys_history_entry_checked: None, - last_precompile_inner_entry_checked: None, - _phantom_data: Default::default(), - } - } -} - -impl DynTracer> for CircuitsTracer { +impl DynTracer> for CircuitsTracer { fn before_execution( &mut self, _state: VmLocalStateData<'_>, @@ -50,7 +38,9 @@ impl DynTracer> for Circuits _memory: &SimpleMemory, _storage: StoragePtr, ) { - let used = match data.opcode.variant.opcode { + self.statistics.main_vm_cycles += 1; + + match data.opcode.variant.opcode { Opcode::Nop(_) | Opcode::Add(_) | Opcode::Sub(_) @@ -59,27 +49,51 @@ impl DynTracer> for Circuits | Opcode::Jump(_) | Opcode::Binop(_) | Opcode::Shift(_) - | Opcode::Ptr(_) => RICH_ADDRESSING_OPCODE_FRACTION, - Opcode::Context(_) | Opcode::Ret(_) | Opcode::NearCall(_) => AVERAGE_OPCODE_FRACTION, - Opcode::Log(LogOpcode::StorageRead) => STORAGE_READ_BASE_FRACTION, - Opcode::Log(LogOpcode::StorageWrite) => STORAGE_WRITE_BASE_FRACTION, + | Opcode::Ptr(_) => { + self.statistics.ram_permutation_cycles += RICH_ADDRESSING_OPCODE_RAM_CYCLES; + } + Opcode::Context(_) | Opcode::Ret(_) | Opcode::NearCall(_) => { + self.statistics.ram_permutation_cycles += AVERAGE_OPCODE_RAM_CYCLES; + } + Opcode::Log(LogOpcode::StorageRead) => { + self.statistics.ram_permutation_cycles += STORAGE_READ_RAM_CYCLES; + self.statistics.log_demuxer_cycles += STORAGE_READ_LOG_DEMUXER_CYCLES; + self.statistics.storage_sorter_cycles += STORAGE_READ_STORAGE_SORTER_CYCLES; + } + Opcode::Log(LogOpcode::StorageWrite) => { + self.statistics.ram_permutation_cycles += STORAGE_WRITE_RAM_CYCLES; + self.statistics.log_demuxer_cycles += STORAGE_WRITE_LOG_DEMUXER_CYCLES; + self.statistics.storage_sorter_cycles += STORAGE_WRITE_STORAGE_SORTER_CYCLES; + } Opcode::Log(LogOpcode::ToL1Message) | Opcode::Log(LogOpcode::Event) => { - EVENT_OR_L1_MESSAGE_FRACTION + self.statistics.ram_permutation_cycles += EVENT_RAM_CYCLES; + self.statistics.log_demuxer_cycles += EVENT_LOG_DEMUXER_CYCLES; + self.statistics.events_sorter_cycles += EVENT_EVENTS_SORTER_CYCLES; + } + Opcode::Log(LogOpcode::PrecompileCall) => { + self.statistics.ram_permutation_cycles += PRECOMPILE_RAM_CYCLES; + self.statistics.log_demuxer_cycles += PRECOMPILE_LOG_DEMUXER_CYCLES; + } + Opcode::FarCall(_) => { + self.statistics.ram_permutation_cycles += FAR_CALL_RAM_CYCLES; + self.statistics.code_decommitter_sorter_cycles += + FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES; + self.statistics.storage_sorter_cycles += FAR_CALL_STORAGE_SORTER_CYCLES; + } + Opcode::UMA(UMAOpcode::AuxHeapWrite | UMAOpcode::HeapWrite) => { + self.statistics.ram_permutation_cycles += UMA_WRITE_RAM_CYCLES; } - Opcode::Log(LogOpcode::PrecompileCall) => PRECOMPILE_CALL_COMMON_FRACTION, - Opcode::FarCall(_) => FAR_CALL_FRACTION, - Opcode::UMA(UMAOpcode::AuxHeapWrite | UMAOpcode::HeapWrite) => UMA_WRITE_FRACTION, Opcode::UMA( UMAOpcode::AuxHeapRead | UMAOpcode::HeapRead | UMAOpcode::FatPointerRead, - ) => 
UMA_READ_FRACTION, + ) => { + self.statistics.ram_permutation_cycles += UMA_READ_RAM_CYCLES; + } Opcode::Invalid(_) => unreachable!(), // invalid opcodes are never executed }; - - self.estimated_circuits_used += used; } } -impl VmTracer for CircuitsTracer { +impl VmTracer for CircuitsTracer { fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { self.last_decommitment_history_entry_checked = Some( state @@ -108,7 +122,28 @@ impl VmTracer for CircuitsTracer { state: &mut ZkSyncVmState, _bootloader_state: &mut BootloaderState, ) -> TracerExecutionStatus { - // Trace decommitments. + self.trace_decommitments(state); + self.trace_storage_writes(state); + self.trace_storage_reads(state); + self.trace_precompile_calls(state); + + TracerExecutionStatus::Continue + } +} + +impl CircuitsTracer { + pub(crate) fn new() -> Self { + Self { + statistics: CircuitCycleStatistic::new(), + last_decommitment_history_entry_checked: None, + last_written_keys_history_entry_checked: None, + last_read_keys_history_entry_checked: None, + last_precompile_inner_entry_checked: None, + _phantom_data: Default::default(), + } + } + + fn trace_decommitments(&mut self, state: &ZkSyncVmState) { let last_decommitment_history_entry_checked = self .last_decommitment_history_entry_checked .expect("Value must be set during init"); @@ -130,12 +165,12 @@ impl VmTracer for CircuitsTracer { // Each cycle of `CodeDecommitter` processes 2 words. // If the number of words in bytecode is odd, then number of cycles must be rounded up. let decommitter_cycles_used = (bytecode_len + 1) / 2; - self.estimated_circuits_used += - (decommitter_cycles_used as f32) * CODE_DECOMMITTER_CYCLE_FRACTION; + self.statistics.code_decommitter_cycles += decommitter_cycles_used as u32; } self.last_decommitment_history_entry_checked = Some(history.len()); + } - // Process storage writes. + fn trace_storage_writes(&mut self, state: &ZkSyncVmState) { let last_writes_history_entry_checked = self .last_written_keys_history_entry_checked .expect("Value must be set during init"); @@ -144,11 +179,12 @@ impl VmTracer for CircuitsTracer { // We assume that only insertions may happen during a single VM inspection. assert!(history_event.value.is_none()); - self.estimated_circuits_used += 2.0 * STORAGE_APPLICATION_CYCLE_FRACTION; + self.statistics.storage_application_cycles += STORAGE_WRITE_STORAGE_APPLICATION_CYCLES; } self.last_written_keys_history_entry_checked = Some(history.len()); + } - // Process storage reads. + fn trace_storage_reads(&mut self, state: &ZkSyncVmState) { let last_reads_history_entry_checked = self .last_read_keys_history_entry_checked .expect("Value must be set during init"); @@ -164,12 +200,14 @@ impl VmTracer for CircuitsTracer { .inner() .contains_key(&history_event.key) { - self.estimated_circuits_used += STORAGE_APPLICATION_CYCLE_FRACTION; + self.statistics.storage_application_cycles += + STORAGE_READ_STORAGE_APPLICATION_CYCLES; } } self.last_read_keys_history_entry_checked = Some(history.len()); + } - // Process precompiles. + fn trace_precompile_calls(&mut self, state: &ZkSyncVmState) { let last_precompile_inner_entry_checked = self .last_precompile_inner_entry_checked .expect("Value must be set during init"); @@ -178,15 +216,18 @@ impl VmTracer for CircuitsTracer { .precompile_cycles_history .inner(); for (precompile, cycles) in &inner[last_precompile_inner_entry_checked..] 
{ - let fraction = match precompile { - PrecompileAddress::Ecrecover => ECRECOVER_CYCLE_FRACTION, - PrecompileAddress::SHA256 => SHA256_CYCLE_FRACTION, - PrecompileAddress::Keccak256 => KECCAK256_CYCLE_FRACTION, + match precompile { + PrecompileAddress::Ecrecover => { + self.statistics.ecrecover_cycles += *cycles as u32; + } + PrecompileAddress::SHA256 => { + self.statistics.sha256_cycles += *cycles as u32; + } + PrecompileAddress::Keccak256 => { + self.statistics.keccak256_cycles += *cycles as u32; + } }; - self.estimated_circuits_used += (*cycles as f32) * fraction; } self.last_precompile_inner_entry_checked = Some(inner.len()); - - TracerExecutionStatus::Continue } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/default_tracers.rs index 422463d2921..01b21d80950 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/default_tracers.rs @@ -4,6 +4,7 @@ use std::{ }; use zk_evm_1_4_0::{ + aux_structures::Timestamp, tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, @@ -12,7 +13,6 @@ use zk_evm_1_4_0::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::Timestamp; use super::PubdataTracer; use crate::{ @@ -65,7 +65,7 @@ pub(crate) struct DefaultExecutionTracer {
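The accounting pattern above is purely additive, which is what makes it cheap enough to run on every executed opcode: bump `main_vm_cycles` by one, then add a small constant to each circuit the opcode touches. A minimal, self-contained sketch of the same idea, using invented opcode kinds (`ToyOpcode`, `ToyCycleStatistic`) rather than the real `zk_evm` types:

#[derive(Default, Debug)]
struct ToyCycleStatistic {
    main_vm_cycles: u32,
    ram_permutation_cycles: u32,
    log_demuxer_cycles: u32,
}

enum ToyOpcode {
    RichAddressing, // e.g. Add/Sub/Mul: up to 3 extra RAM queries
    StorageRead,    // touches the log demuxer as well
}

fn track(stats: &mut ToyCycleStatistic, opcode: &ToyOpcode) {
    stats.main_vm_cycles += 1; // every executed opcode is one MainVM cycle
    match opcode {
        ToyOpcode::RichAddressing => stats.ram_permutation_cycles += 3,
        ToyOpcode::StorageRead => {
            stats.ram_permutation_cycles += 1;
            stats.log_demuxer_cycles += 1;
        }
    }
}

fn main() {
    let mut stats = ToyCycleStatistic::default();
    for opcode in [ToyOpcode::RichAddressing, ToyOpcode::StorageRead] {
        track(&mut stats, &opcode);
    }
    assert_eq!(stats.main_vm_cycles, 2);
    assert_eq!(stats.ram_permutation_cycles, 4);
    assert_eq!(stats.log_demuxer_cycles, 1);
}

// This tracer tracks what opcodes were executed and calculates how many circuits will be generated.
// It only takes into account circuits that are generated for actual execution. It doesn't
// take into account e.g. circuits produced by the initial bootloader memory commitment.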
- pub(crate) circuits_tracer: CircuitsTracer, + pub(crate) circuits_tracer: CircuitsTracer, storage: StoragePtr, _phantom: PhantomData, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/mod.rs index 1bdb1b6ccdb..fe916e19e8c 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/mod.rs @@ -10,7 +10,7 @@ pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; -mod circuits_capacity; +pub(crate) mod circuits_capacity; pub mod dispatcher; pub(crate) mod traits; pub(crate) mod utils; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index a8c5889a2ff..55a58ccd1b0 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -4,7 +4,7 @@ use zk_evm_1_4_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; +use zkevm_test_harness_1_4_0::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ event::{ diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs b/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs index 0461b4a8887..1507f2d5e22 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs @@ -1,7 +1,10 @@ +use zk_evm_1_3_3::aux_structures::LogQuery; +use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::{l2_to_l1_log::L2ToL1Log, Timestamp, VmEvent}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogQueryType, VmEvent}; use crate::{ + glue::GlueInto, interface::L1BatchEnv, vm_boojum_integration::{ old_vm::{events::merge_events, history_recorder::HistoryMode}, @@ -21,5 +24,15 @@ pub(crate) fn collect_events_and_l1_system_logs_after_timestamp VmInterface for Vm { let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events); let system_logs = l1_messages .into_iter() - .map(|log| SystemL2ToL1Log(log.into())) + .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); let total_log_queries = self.state.event_sink.get_log_queries() + self @@ -103,9 +106,21 @@ impl VmInterface for Vm { .len() + self.state.storage.get_final_log_queries().len(); + let storage_log_queries = self.state.storage.get_final_log_queries(); + + let deduped_storage_log_queries = + sort_storage_access_queries(storage_log_queries.iter().map(|log| &log.log_query)).1; + CurrentExecutionState { events, - storage_log_queries: self.state.storage.get_final_log_queries(), + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduped_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: user_l2_to_l1_logs .into_iter() @@ -114,7 +129,10 @@ impl VmInterface for Vm { system_logs, total_log_queries, cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs, + deduplicated_events_logs: deduplicated_events_logs + .into_iter() + 
.map(GlueInto::glue_into) + .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), } } @@ -147,6 +165,10 @@ impl VmInterface for Vm { self.record_vm_memory_metrics_inner() } + fn has_enough_gas_for_batch_tip(&self) -> bool { + self.state.local_state.callstack.current.ergs_remaining >= BOOTLOADER_BATCH_TIP_OVERHEAD + } + fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.execute(VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs index 1652a2f9424..5a12c0f2252 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -6,6 +6,9 @@ use zksync_system_constants::{MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS}; use crate::vm_latest::old_vm::utils::heap_page_from_base; +/// The amount of ergs to be reserved at the end of the batch to ensure that it has enough ergs to verify compression, etc. +pub(crate) const BOOTLOADER_BATCH_TIP_OVERHEAD: u32 = 80_000_000; + /// The size of the bootloader memory in bytes which is used by the protocol. /// While the maximal possible size is a lot higher, we restrict ourselves to a certain limit to reduce /// the requirements on RAM. diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 9bda37e20dd..21e5b16cd2d 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -9,7 +9,8 @@ use crate::{ vm_latest::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, tracers::{ - dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, + circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, + DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, vm::Vm, }, @@ -21,6 +22,7 @@ impl Vm { &mut self, dispatcher: TracerDispatcher, execution_mode: VmExecutionMode, + custom_pubdata_tracer: Option>, ) -> VmExecutionResultAndLogs { let mut enable_refund_tracer = false; if let VmExecutionMode::OneTx = execution_mode { @@ -29,8 +31,12 @@ impl Vm { enable_refund_tracer = true; } - let (_, result) = - self.inspect_and_collect_results(dispatcher, execution_mode, enable_refund_tracer); + let (_, result) = self.inspect_and_collect_results( + dispatcher, + execution_mode, + enable_refund_tracer, + custom_pubdata_tracer, + ); result } @@ -41,6 +47,7 @@ impl Vm { dispatcher: TracerDispatcher, execution_mode: VmExecutionMode, with_refund_tracer: bool, + custom_pubdata_tracer: Option>, ) -> (VmExecutionStopReason, VmExecutionResultAndLogs) { let refund_tracers = with_refund_tracer.then_some(RefundsTracer::new(self.batch_env.clone())); @@ -50,7 +57,8 @@ impl Vm { dispatcher, self.storage.clone(), refund_tracers, - Some(PubdataTracer::new(self.batch_env.clone(), execution_mode)), + custom_pubdata_tracer + .or_else(|| Some(PubdataTracer::new(self.batch_env.clone(), execution_mode))), ); let timestamp_initial = Timestamp(self.state.local_state.timestamp); @@ -79,7 +87,7 @@ impl Vm { spent_pubdata_counter_before, pubdata_published, logs.total_log_queries_count, - tx_tracer.circuits_tracer.estimated_circuits_used, + circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); diff --git 
a/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs b/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs index a2682ef03af..99fa413938a 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs @@ -46,7 +46,10 @@ impl Vm { storage_logs_count + log_queries.len() + precompile_calls_count; VmExecutionLogs { - storage_logs, + storage_logs: storage_logs + .into_iter() + .map(|log| log.glue_into()) + .collect(), events, user_l2_to_l1_logs: user_logs .into_iter() diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index bdaf425ee27..0349b7d8cda 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -1,6 +1,6 @@ use zk_evm_1_4_1::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::U256; +use zksync_types::{circuit::CircuitStatistic, U256}; use crate::{ interface::{VmExecutionStatistics, VmMemoryMetrics}, @@ -24,7 +24,7 @@ impl Vm { spent_pubdata_counter_before: u32, pubdata_published: u32, total_log_queries_count: usize, - estimated_circuits_used: f32, + circuit_statistic: CircuitStatistic, ) -> VmExecutionStatistics { let computational_gas_used = self.calculate_computational_gas_used( tracer, @@ -41,7 +41,7 @@ impl Vm { computational_gas_used, total_log_queries: total_log_queries_count, pubdata_published, - estimated_circuits_used, + circuit_statistic, } } diff --git a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs index 72d6e1f696b..07ec473624b 100644 --- a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs @@ -12,19 +12,21 @@ use zksync_types::{ compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, BYTES_PER_ENUMERATION_INDEX, }, - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + AccountTreeId, Address, StorageKey, StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, - vm_latest::old_vm::{ - history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, + vm_latest::{ + old_vm::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, + }, + oracles::OracleWithHistory, }, - oracles::OracleWithHistory, + utils::logs::StorageLogQuery, }, }; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs new file mode 100644 index 00000000000..fc6a2f26d6e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -0,0 +1,284 @@ +use std::borrow::BorrowMut; + +use ethabi::Token; +use zk_evm_1_4_1::{ + aux_structures::Timestamp, zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK, +}; +use zksync_contracts::load_sys_contract; +use zksync_system_constants::{ + CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, +}; +use zksync_types::{ + commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, + writes::StateDiffRecord, Address, 
Execute, H256, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; + +use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::BOOTLOADER_BATCH_TIP_OVERHEAD, + tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, + tracers::PubdataTracer, + HistoryEnabled, TracerDispatcher, + }, +}; + +#[derive(Debug, Clone, Default)] +struct L1MessengerTestData { + l2_to_l1_logs: usize, + messages: Vec>, + bytecodes: Vec>, + state_diffs: Vec, +} + +struct MimicCallInfo { + to: Address, + who_to_mimic: Address, + data: Vec, +} + +fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { + let complex_upgrade = get_complex_upgrade_abi(); + let l1_messenger = load_sys_contract("L1Messenger"); + + let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendL2ToL1Log") + .unwrap() + .encode_input(&[ + Token::Bool(false), + Token::FixedBytes(H256::random().0.to_vec()), + Token::FixedBytes(H256::random().0.to_vec()), + ]) + .unwrap(), + }); + let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(message.clone())]) + .unwrap(), + }); + let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("requestBytecodeL1Publication") + .unwrap() + .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) + .unwrap(), + }); + + let encoded_calls = logs_mimic_calls + .chain(messages_mimic_calls) + .chain(bytecodes_mimic_calls) + .map(|call| { + Token::Tuple(vec![ + Token::Address(call.to), + Token::Address(call.who_to_mimic), + Token::Bytes(call.data), + ]) + }) + .collect::>(); + + complex_upgrade + .function("mimicCalls") + .unwrap() + .encode_input(&[Token::Array(encoded_calls)]) + .unwrap() +} + +fn execute_test(test_data: L1MessengerTestData) -> u32 { + let mut storage = get_empty_storage(); + let complex_upgrade_code = read_complex_upgrade(); + + // For this test we'll just put the bytecode onto the force deployer address + storage.borrow_mut().set_value( + get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), + hash_bytecode(&complex_upgrade_code), + ); + storage + .borrow_mut() + .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let bytecodes = test_data + .bytecodes + .iter() + .map(|bytecode| { + let hash = hash_bytecode(bytecode); + let words = bytes_to_be_words(bytecode.clone()); + (h256_to_u256(hash), words) + }) + .collect(); + vm.vm + .state + .decommittment_processor + .populate(bytecodes, Timestamp(0)); + + let data = populate_mimic_calls(test_data.clone()); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + calldata: data, + value: U256::zero(), + factory_deps: None, + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + 
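+ // The transaction itself must succeed first; the measurement of interest is the batch-tip cost below.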
assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + // Now we count how many ergs were spent at the end of the batch + // It is assumed that the top level frame is the bootloader + + let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; + + // We ensure that indeed the provided state diffs are used + let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( + vm.vm.batch_env.clone(), + VmExecutionMode::Batch, + test_data.state_diffs, + ); + + let result = vm.vm.inspect_inner( + TracerDispatcher::default(), + VmExecutionMode::Batch, + Some(pubdata_tracer), + ); + + assert!(!result.result.is_failed(), "Batch wasn't successful"); + + let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; + + ergs_before - ergs_after +} + +fn generate_state_diffs( + repeated_writes: bool, + small_diff: bool, + number_of_state_diffs: usize, +) -> Vec<StateDiffRecord> { + (0..number_of_state_diffs) + .map(|i| { + let address = Address::from_low_u64_be(i as u64); + let key = U256::from(i); + let enumeration_index = if repeated_writes { i + 1 } else { 0 }; + + let (initial_value, final_value) = if small_diff { + // As small as it gets, one byte to denote zeroing out the value + (U256::from(1), U256::from(0)) + } else { + // As large as it gets + (U256::from(0), U256::from(2).pow(255.into())) + }; + + StateDiffRecord { + address, + key, + derived_key: u256_to_h256(i.into()).0, + enumeration_index: enumeration_index as u64, + initial_value, + final_value, + } + }) + .collect() +} + +#[test] +fn test_dry_run_upper_bound() { + // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. + // To get the upper bound, we'll try to do the following: + // 1. Max number of logs. + // 2. Lots of small L2->L1 messages / one large L2->L1 message. + // 3. Lots of small bytecodes / one large bytecode. + // 4. Lots of storage slot updates. + // (All scenario sizes are derived from `MAX_PUBDATA_PER_BLOCK`; see the sketch that follows.)
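Since the scenario counts below all come from dividing the pubdata budget by a per-item byte cost, here is that arithmetic as a tiny self-contained program. The `MAX_PUBDATA_PER_BLOCK` value used here is an assumed stand-in for illustration only; the test imports the real constant from `zk_evm`'s system params. The per-item costs mirror the comments in the test itself:

// Stand-in for the real constant imported from zk_evm's system params.
const MAX_PUBDATA_PER_BLOCK: usize = 110_000; // assumed value, for illustration only

fn main() {
    // Repeated small write: ~5 bytes (enum index + 4 bytes for the key).
    let small_repeated = MAX_PUBDATA_PER_BLOCK / 5;
    // Repeated big write: ~32 bytes to encode.
    let big_repeated = MAX_PUBDATA_PER_BLOCK / 32;
    // Initial small write: 32-byte derived key + ~5 bytes of value.
    let small_initial = MAX_PUBDATA_PER_BLOCK / 37;
    // Initial big write: 32-byte derived key + 32-byte value.
    let big_initial = MAX_PUBDATA_PER_BLOCK / 64;
    println!("{small_repeated} {big_repeated} {small_initial} {big_initial}");
}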
+ + let max_logs = execute_test(L1MessengerTestData { + l2_to_l1_logs: L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE, + ..Default::default() + }); + + let max_messages = execute_test(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the maximum amount of pubdata is bounded by it + messages: vec![vec![0; 0]; MAX_PUBDATA_PER_BLOCK as usize / L2ToL1Log::SERIALIZED_SIZE], + ..Default::default() + }); + + let long_message = execute_test(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the maximum amount of pubdata is bounded by it + messages: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize]; 1], + ..Default::default() + }); + + let max_bytecodes = execute_test(L1MessengerTestData { + // Each bytecode must be at least 32 bytes long + bytecodes: vec![vec![0; 32]; MAX_PUBDATA_PER_BLOCK as usize / 32], + ..Default::default() + }); + + let long_bytecode = execute_test(L1MessengerTestData { + // We have to add 48 since a valid bytecode must have an odd number of 32-byte words + bytecodes: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize + 48]; 1], + ..Default::default() + }); + + let lots_of_small_repeated_writes = execute_test(L1MessengerTestData { + // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) + state_diffs: generate_state_diffs(true, true, MAX_PUBDATA_PER_BLOCK as usize / 5), + ..Default::default() + }); + + let lots_of_big_repeated_writes = execute_test(L1MessengerTestData { + // Each big write will approximately require 32 bytes to encode + state_diffs: generate_state_diffs(true, false, MAX_PUBDATA_PER_BLOCK as usize / 32), + ..Default::default() + }); + + let lots_of_small_initial_writes = execute_test(L1MessengerTestData { + // Each initial write will take at least 32 bytes for derived key + 5 bytes for value + state_diffs: generate_state_diffs(false, true, MAX_PUBDATA_PER_BLOCK as usize / 37), + ..Default::default() + }); + + let lots_of_large_initial_writes = execute_test(L1MessengerTestData { + // Each big write will take at least 32 bytes for derived key + 32 bytes for value + state_diffs: generate_state_diffs(false, false, MAX_PUBDATA_PER_BLOCK as usize / 64), + ..Default::default() + }); + + let max_used_gas = vec![ + max_logs, + max_messages, + long_message, + max_bytecodes, + long_bytecode, + lots_of_small_repeated_writes, + lots_of_big_repeated_writes, + lots_of_small_initial_writes, + lots_of_large_initial_writes, + ] + .into_iter() + .max() + .unwrap(); + + // We use 2x overhead for the batch tip compared to the worst estimated scenario. + assert!( + max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, + "BOOTLOADER_BATCH_TIP_OVERHEAD is too low" + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index bc19fc8793a..3821817135b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -30,15 +30,40 @@ fn test_circuits() { vm.vm.push_transaction(tx); let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - const EXPECTED_CIRCUITS_USED: f32 = 4.8685; - let delta = - (res.statistics.estimated_circuits_used - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; - - if delta.abs() > 0.1 { - panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}", - delta * 100.0, - res.statistics.estimated_circuits_used - ); + let s = res.statistics.circuit_statistic; + // Check `circuit_statistic`.
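+ // Zero entries must match exactly; nonzero entries may deviate from the expected value
+ // by at most 10% (relative), as checked below.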
+ const EXPECTED: [f32; 11] = [ + 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, + ]; + let actual = [ + (s.main_vm, "main_vm"), + (s.ram_permutation, "ram_permutation"), + (s.storage_application, "storage_application"), + (s.storage_sorter, "storage_sorter"), + (s.code_decommitter, "code_decommitter"), + (s.code_decommitter_sorter, "code_decommitter_sorter"), + (s.log_demuxer, "log_demuxer"), + (s.events_sorter, "events_sorter"), + (s.keccak256, "keccak256"), + (s.ecrecover, "ecrecover"), + (s.sha256, "sha256"), + ]; + for ((actual, name), expected) in actual.iter().zip(EXPECTED) { + if expected == 0.0 { + assert_eq!( + *actual, expected, + "Check failed for {}, expected {}, actual {}", + name, expected, actual + ); + } else { + let diff = (actual - expected) / expected; + assert!( + diff.abs() < 0.1, + "Check failed for {}, expected {}, actual {}", + name, + expected, + actual + ); + } } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index b6c2cb654a8..a07608121bc 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -2,6 +2,7 @@ mod bootloader; mod default_aa; // TODO - fix this test // `mod invalid_bytecode;` +mod block_tip; mod bytecode_publishing; mod call_tracer; mod circuits; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs index 9dbb72d56d8..dbe0afa33fa 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; use zksync_state::WriteStorage; -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; +use zksync_types::{StorageKey, StorageValue, U256}; use crate::{ vm_latest::{ @@ -10,6 +10,7 @@ use crate::{ event_sink::InMemoryEventSink, history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, }, + utils::logs::StorageLogQuery, HistoryEnabled, HistoryMode, SimpleMemory, Vm, }, HistoryMode as CommonHistoryMode, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs index dfe8905a7e0..c3cc5d8d980 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs @@ -1,5 +1,7 @@ pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; +pub(crate) use vm_tester::{ + default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, +}; pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; mod inner_state; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 1e2bdcb4515..8ab728e8ce3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -1,5 +1,5 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; +use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; use zksync_state::WriteStorage; 
use zksync_test_account::TxType; use zksync_types::{ @@ -12,14 +12,17 @@ use zksync_types::{ }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use super::utils::read_test_contract; +use super::utils::{get_complex_upgrade_abi, read_test_contract}; use crate::{ interface::{ ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, }, vm_latest::{ - tests::{tester::VmTesterBuilder, utils::verify_required_storage}, + tests::{ + tester::VmTesterBuilder, + utils::{read_complex_upgrade, verify_required_storage}, + }, HistoryEnabled, }, }; @@ -343,20 +346,10 @@ fn get_complex_upgrade_tx( } } -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - fn read_msg_sender_test() -> Vec { read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") } -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - fn get_complex_upgrader_abi() -> Contract { load_sys_contract("ComplexUpgrader") } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index 7c937033a21..80d59ab709f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -109,3 +109,13 @@ pub(crate) fn read_precompiles_contract() -> Vec { "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", ) } + +pub(crate) fn read_complex_upgrade() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") +} + +pub(crate) fn get_complex_upgrade_abi() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" + ) +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs index 6b63a38c12f..422294dd206 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs @@ -1,85 +1,67 @@ use zkevm_test_harness_1_4_1::{geometry_config::get_geometry_config, toolset::GeometryConfig}; - -const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); -const OVERESTIMATE_PERCENT: f32 = 1.05; - -const MAIN_VM_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32; - -const CODE_DECOMMITTER_SORTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_code_decommitter_sorter as f32; - -const LOG_DEMUXER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_log_demuxer as f32; - -const STORAGE_SORTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_storage_sorter as f32; - -const EVENTS_OR_L1_MESSAGES_SORTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_events_or_l1_messages_sorter as f32; - -const RAM_PERMUTATION_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_ram_permutation as f32; - -pub(crate) const CODE_DECOMMITTER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_code_decommitter as f32; - -pub(crate) const 
STORAGE_APPLICATION_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_storage_application as f32; - -pub(crate) const KECCAK256_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_keccak256_circuit as f32; - -pub(crate) const SHA256_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32; - -pub(crate) const ECRECOVER_CYCLE_FRACTION: f32 = - OVERESTIMATE_PERCENT / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32; +use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. // In the worst case, a rich addressing opcode may take 3 RAM permutations // (1 for reading the opcode, 1 for reading an input value, 1 for writing the output value). -pub(crate) const RICH_ADDRESSING_OPCODE_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + 3.0 * RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const RICH_ADDRESSING_OPCODE_RAM_CYCLES: u32 = 3; -pub(crate) const AVERAGE_OPCODE_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const AVERAGE_OPCODE_RAM_CYCLES: u32 = 1; -// Here "base" fraction is a fraction that will be used unconditionally. -// Usage of `StorageApplication` is being tracked separately as it depends on whether slot was read before or not. -pub(crate) const STORAGE_READ_BASE_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + LOG_DEMUXER_CYCLE_FRACTION - + STORAGE_SORTER_CYCLE_FRACTION; +pub(crate) const STORAGE_READ_RAM_CYCLES: u32 = 1; +pub(crate) const STORAGE_READ_LOG_DEMUXER_CYCLES: u32 = 1; +pub(crate) const STORAGE_READ_STORAGE_SORTER_CYCLES: u32 = 1; +pub(crate) const STORAGE_READ_STORAGE_APPLICATION_CYCLES: u32 = 1; -pub(crate) const EVENT_OR_L1_MESSAGE_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + 2.0 * LOG_DEMUXER_CYCLE_FRACTION - + 2.0 * EVENTS_OR_L1_MESSAGES_SORTER_CYCLE_FRACTION; +pub(crate) const EVENT_RAM_CYCLES: u32 = 1; +pub(crate) const EVENT_LOG_DEMUXER_CYCLES: u32 = 2; +pub(crate) const EVENT_EVENTS_SORTER_CYCLES: u32 = 2; -// Here "base" fraction is a fraction that will be used unconditionally. -// Usage of `StorageApplication` is being tracked separately as it depends on whether slot was written before or not. -pub(crate) const STORAGE_WRITE_BASE_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + 2.0 * LOG_DEMUXER_CYCLE_FRACTION - + 2.0 * STORAGE_SORTER_CYCLE_FRACTION; +pub(crate) const STORAGE_WRITE_RAM_CYCLES: u32 = 1; +pub(crate) const STORAGE_WRITE_LOG_DEMUXER_CYCLES: u32 = 2; +pub(crate) const STORAGE_WRITE_STORAGE_SORTER_CYCLES: u32 = 2; +pub(crate) const STORAGE_WRITE_STORAGE_APPLICATION_CYCLES: u32 = 2; -pub(crate) const FAR_CALL_FRACTION: f32 = MAIN_VM_CYCLE_FRACTION - + RAM_PERMUTATION_CYCLE_FRACTION - + STORAGE_SORTER_CYCLE_FRACTION - + CODE_DECOMMITTER_SORTER_CYCLE_FRACTION; +pub(crate) const FAR_CALL_RAM_CYCLES: u32 = 1; +pub(crate) const FAR_CALL_STORAGE_SORTER_CYCLES: u32 = 1; +pub(crate) const FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES: u32 = 1; // 5 RAM permutations, because: 1 to read opcode + 2 reads + 2 writes. // 2 reads and 2 writes are needed because unaligned access is implemented with // aligned queries.
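To make the "aligned queries" argument concrete: a 32-byte access at an offset that is not a multiple of 32 straddles two aligned memory words, so it needs two aligned reads (and, for a write, two writes to put the modified words back). A quick standalone illustration (helper names are ours, not from the codebase):

const WORD: usize = 32;

/// Returns the aligned word indices touched by a 32-byte access at `offset`.
fn touched_words(offset: usize) -> (usize, Option<usize>) {
    let first = offset / WORD;
    let last = (offset + WORD - 1) / WORD;
    (first, (last != first).then_some(last))
}

fn main() {
    assert_eq!(touched_words(64), (2, None)); // aligned: one word suffices
    assert_eq!(touched_words(65), (2, Some(3))); // unaligned: two words touched
}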
-pub(crate) const UMA_WRITE_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + 5.0 * RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const UMA_WRITE_RAM_CYCLES: u32 = 5; // 3 RAM permutations, because: 1 to read opcode + 2 reads. // 2 reads are needed because unaligned access is implemented with aligned queries. -pub(crate) const UMA_READ_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + 3.0 * RAM_PERMUTATION_CYCLE_FRACTION; +pub(crate) const UMA_READ_RAM_CYCLES: u32 = 3; + +pub(crate) const PRECOMPILE_RAM_CYCLES: u32 = 1; +pub(crate) const PRECOMPILE_LOG_DEMUXER_CYCLES: u32 = 1; + +const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); -pub(crate) const PRECOMPILE_CALL_COMMON_FRACTION: f32 = - MAIN_VM_CYCLE_FRACTION + RAM_PERMUTATION_CYCLE_FRACTION + LOG_DEMUXER_CYCLE_FRACTION; +pub(crate) fn circuit_statistic_from_cycles(cycles: CircuitCycleStatistic) -> CircuitStatistic { + CircuitStatistic { + main_vm: cycles.main_vm_cycles as f32 / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32, + ram_permutation: cycles.ram_permutation_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ram_permutation as f32, + storage_application: cycles.storage_application_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_application as f32, + storage_sorter: cycles.storage_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_sorter as f32, + code_decommitter: cycles.code_decommitter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_code_decommitter as f32, + code_decommitter_sorter: cycles.code_decommitter_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_code_decommitter_sorter as f32, + log_demuxer: cycles.log_demuxer_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_log_demuxer as f32, + events_sorter: cycles.events_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_events_or_l1_messages_sorter as f32, + keccak256: cycles.keccak256_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_keccak256_circuit as f32, + ecrecover: cycles.ecrecover_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, + sha256: cycles.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs index acd2d191c65..3c4214f1409 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs @@ -6,6 +6,7 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; +use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -20,29 +21,16 @@ use crate::{ /// Tracer responsible for collecting statistics about circuit usage.
#[derive(Debug)] -pub(crate) struct CircuitsTracer { - pub(crate) estimated_circuits_used: f32, +pub(crate) struct CircuitsTracer { + pub(crate) statistics: CircuitCycleStatistic, last_decommitment_history_entry_checked: Option, last_written_keys_history_entry_checked: Option, last_read_keys_history_entry_checked: Option, last_precompile_inner_entry_checked: Option, - _phantom_data: PhantomData, + _phantom_data: PhantomData<(S, H)>, } -impl CircuitsTracer { - pub(crate) fn new() -> Self { - Self { - estimated_circuits_used: 0.0, - last_decommitment_history_entry_checked: None, - last_written_keys_history_entry_checked: None, - last_read_keys_history_entry_checked: None, - last_precompile_inner_entry_checked: None, - _phantom_data: Default::default(), - } - } -} - -impl DynTracer> for CircuitsTracer { +impl DynTracer> for CircuitsTracer { fn before_execution( &mut self, _state: VmLocalStateData<'_>, @@ -50,7 +38,9 @@ impl DynTracer> for Circuits _memory: &SimpleMemory, _storage: StoragePtr, ) { - let used = match data.opcode.variant.opcode { + self.statistics.main_vm_cycles += 1; + + match data.opcode.variant.opcode { Opcode::Nop(_) | Opcode::Add(_) | Opcode::Sub(_) @@ -59,27 +49,51 @@ impl DynTracer> for Circuits | Opcode::Jump(_) | Opcode::Binop(_) | Opcode::Shift(_) - | Opcode::Ptr(_) => RICH_ADDRESSING_OPCODE_FRACTION, - Opcode::Context(_) | Opcode::Ret(_) | Opcode::NearCall(_) => AVERAGE_OPCODE_FRACTION, - Opcode::Log(LogOpcode::StorageRead) => STORAGE_READ_BASE_FRACTION, - Opcode::Log(LogOpcode::StorageWrite) => STORAGE_WRITE_BASE_FRACTION, + | Opcode::Ptr(_) => { + self.statistics.ram_permutation_cycles += RICH_ADDRESSING_OPCODE_RAM_CYCLES; + } + Opcode::Context(_) | Opcode::Ret(_) | Opcode::NearCall(_) => { + self.statistics.ram_permutation_cycles += AVERAGE_OPCODE_RAM_CYCLES; + } + Opcode::Log(LogOpcode::StorageRead) => { + self.statistics.ram_permutation_cycles += STORAGE_READ_RAM_CYCLES; + self.statistics.log_demuxer_cycles += STORAGE_READ_LOG_DEMUXER_CYCLES; + self.statistics.storage_sorter_cycles += STORAGE_READ_STORAGE_SORTER_CYCLES; + } + Opcode::Log(LogOpcode::StorageWrite) => { + self.statistics.ram_permutation_cycles += STORAGE_WRITE_RAM_CYCLES; + self.statistics.log_demuxer_cycles += STORAGE_WRITE_LOG_DEMUXER_CYCLES; + self.statistics.storage_sorter_cycles += STORAGE_WRITE_STORAGE_SORTER_CYCLES; + } Opcode::Log(LogOpcode::ToL1Message) | Opcode::Log(LogOpcode::Event) => { - EVENT_OR_L1_MESSAGE_FRACTION + self.statistics.ram_permutation_cycles += EVENT_RAM_CYCLES; + self.statistics.log_demuxer_cycles += EVENT_LOG_DEMUXER_CYCLES; + self.statistics.events_sorter_cycles += EVENT_EVENTS_SORTER_CYCLES; + } + Opcode::Log(LogOpcode::PrecompileCall) => { + self.statistics.ram_permutation_cycles += PRECOMPILE_RAM_CYCLES; + self.statistics.log_demuxer_cycles += PRECOMPILE_LOG_DEMUXER_CYCLES; + } + Opcode::FarCall(_) => { + self.statistics.ram_permutation_cycles += FAR_CALL_RAM_CYCLES; + self.statistics.code_decommitter_sorter_cycles += + FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES; + self.statistics.storage_sorter_cycles += FAR_CALL_STORAGE_SORTER_CYCLES; + } + Opcode::UMA(UMAOpcode::AuxHeapWrite | UMAOpcode::HeapWrite) => { + self.statistics.ram_permutation_cycles += UMA_WRITE_RAM_CYCLES; } - Opcode::Log(LogOpcode::PrecompileCall) => PRECOMPILE_CALL_COMMON_FRACTION, - Opcode::FarCall(_) => FAR_CALL_FRACTION, - Opcode::UMA(UMAOpcode::AuxHeapWrite | UMAOpcode::HeapWrite) => UMA_WRITE_FRACTION, Opcode::UMA( UMAOpcode::AuxHeapRead | UMAOpcode::HeapRead | UMAOpcode::FatPointerRead, - ) => 
UMA_READ_FRACTION, + ) => { + self.statistics.ram_permutation_cycles += UMA_READ_RAM_CYCLES; + } Opcode::Invalid(_) => unreachable!(), // invalid opcodes are never executed }; - - self.estimated_circuits_used += used; } } -impl VmTracer for CircuitsTracer { +impl VmTracer for CircuitsTracer { fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { self.last_decommitment_history_entry_checked = Some( state @@ -108,7 +122,28 @@ impl VmTracer for CircuitsTracer { state: &mut ZkSyncVmState, _bootloader_state: &mut BootloaderState, ) -> TracerExecutionStatus { - // Trace decommitments. + self.trace_decommitments(state); + self.trace_storage_writes(state); + self.trace_storage_reads(state); + self.trace_precompile_calls(state); + + TracerExecutionStatus::Continue + } +} + +impl CircuitsTracer { + pub(crate) fn new() -> Self { + Self { + statistics: CircuitCycleStatistic::new(), + last_decommitment_history_entry_checked: None, + last_written_keys_history_entry_checked: None, + last_read_keys_history_entry_checked: None, + last_precompile_inner_entry_checked: None, + _phantom_data: Default::default(), + } + } + + fn trace_decommitments(&mut self, state: &ZkSyncVmState) { let last_decommitment_history_entry_checked = self .last_decommitment_history_entry_checked .expect("Value must be set during init"); @@ -130,12 +165,12 @@ impl VmTracer for CircuitsTracer { // Each cycle of `CodeDecommitter` processes 2 words. // If the number of words in bytecode is odd, then number of cycles must be rounded up. let decommitter_cycles_used = (bytecode_len + 1) / 2; - self.estimated_circuits_used += - (decommitter_cycles_used as f32) * CODE_DECOMMITTER_CYCLE_FRACTION; + self.statistics.code_decommitter_cycles += decommitter_cycles_used as u32; } self.last_decommitment_history_entry_checked = Some(history.len()); + } - // Process storage writes. + fn trace_storage_writes(&mut self, state: &ZkSyncVmState) { let last_writes_history_entry_checked = self .last_written_keys_history_entry_checked .expect("Value must be set during init"); @@ -144,11 +179,12 @@ impl VmTracer for CircuitsTracer { // We assume that only insertions may happen during a single VM inspection. assert!(history_event.value.is_none()); - self.estimated_circuits_used += 2.0 * STORAGE_APPLICATION_CYCLE_FRACTION; + self.statistics.storage_application_cycles += STORAGE_WRITE_STORAGE_APPLICATION_CYCLES; } self.last_written_keys_history_entry_checked = Some(history.len()); + } - // Process storage reads. + fn trace_storage_reads(&mut self, state: &ZkSyncVmState) { let last_reads_history_entry_checked = self .last_read_keys_history_entry_checked .expect("Value must be set during init"); @@ -164,12 +200,14 @@ impl VmTracer for CircuitsTracer { .inner() .contains_key(&history_event.key) { - self.estimated_circuits_used += STORAGE_APPLICATION_CYCLE_FRACTION; + self.statistics.storage_application_cycles += + STORAGE_READ_STORAGE_APPLICATION_CYCLES; } } self.last_read_keys_history_entry_checked = Some(history.len()); + } - // Process precompiles. + fn trace_precompile_calls(&mut self, state: &ZkSyncVmState) { let last_precompile_inner_entry_checked = self .last_precompile_inner_entry_checked .expect("Value must be set during init"); @@ -178,15 +216,18 @@ impl VmTracer for CircuitsTracer { .precompile_cycles_history .inner(); for (precompile, cycles) in &inner[last_precompile_inner_entry_checked..] 
{ - let fraction = match precompile { - PrecompileAddress::Ecrecover => ECRECOVER_CYCLE_FRACTION, - PrecompileAddress::SHA256 => SHA256_CYCLE_FRACTION, - PrecompileAddress::Keccak256 => KECCAK256_CYCLE_FRACTION, + match precompile { + PrecompileAddress::Ecrecover => { + self.statistics.ecrecover_cycles += *cycles as u32; + } + PrecompileAddress::SHA256 => { + self.statistics.sha256_cycles += *cycles as u32; + } + PrecompileAddress::Keccak256 => { + self.statistics.keccak256_cycles += *cycles as u32; + } }; - self.estimated_circuits_used += (*cycles as f32) * fraction; } self.last_precompile_inner_entry_checked = Some(inner.len()); - - TracerExecutionStatus::Continue } } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 1988cc9f027..0c61bae00a5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -4,6 +4,7 @@ use std::{ }; use zk_evm_1_4_1::{ + aux_structures::Timestamp, tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, @@ -12,7 +13,6 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::Timestamp; use super::PubdataTracer; use crate::{ @@ -66,7 +66,7 @@ pub(crate) struct DefaultExecutionTracer { // This tracer tracks what opcodes were executed and calculates how many circuits will be generated. // It only takes into account circuits that are generated for actual execution. It doesn't // take into account e.g. circuits produced by the initial bootloader memory commitment. - pub(crate) circuits_tracer: CircuitsTracer, + pub(crate) circuits_tracer: CircuitsTracer, storage: StoragePtr, _phantom: PhantomData, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs index 1bdb1b6ccdb..fe916e19e8c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs @@ -10,7 +10,7 @@ pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; -mod circuits_capacity; +pub(crate) mod circuits_capacity; pub mod dispatcher; pub(crate) mod traits; pub(crate) mod utils; diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 09fade1b8f5..2ff8c389608 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -4,7 +4,7 @@ use zk_evm_1_4_1::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; +use zkevm_test_harness_1_4_1::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ event::{ @@ -40,6 +40,9 @@ pub(crate) struct PubdataTracer { l1_batch_env: L1BatchEnv, pubdata_info_requested: bool, execution_mode: VmExecutionMode, + // For testing purposes it might be helpful to supply an exact set of state diffs to be provided + // to the L1Messenger.
+ enforced_state_diffs: Option>, _phantom_data: PhantomData, } @@ -49,12 +52,28 @@ impl PubdataTracer { l1_batch_env, pubdata_info_requested: false, execution_mode, + enforced_state_diffs: None, + _phantom_data: Default::default(), + } + } + + // Creates the pubdata tracer with constant state diffs. + // To be used in tests only. + #[cfg(test)] + pub(crate) fn new_with_forced_state_diffs( + l1_batch_env: L1BatchEnv, + execution_mode: VmExecutionMode, + forced_state_diffs: Vec, + ) -> Self { + Self { + l1_batch_env, + pubdata_info_requested: false, + execution_mode, + enforced_state_diffs: Some(forced_state_diffs), _phantom_data: Default::default(), } } -} -impl PubdataTracer { // Packs part of L1 Messenger total pubdata that corresponds to // `L2toL1Logs` sent in the block fn get_total_user_logs( @@ -117,7 +136,14 @@ impl PubdataTracer { // Packs part of L1Messenger total pubdata that corresponds to // State diffs needed to be published on L1 - fn get_state_diffs(storage: &StorageOracle) -> Vec { + fn get_state_diffs( + &self, + storage: &StorageOracle, + ) -> Vec { + if let Some(enforced_state_diffs) = &self.enforced_state_diffs { + return enforced_state_diffs.clone(); + } + sort_storage_access_queries( storage .storage_log_queries_after_timestamp(Timestamp(0)) @@ -153,7 +179,7 @@ impl PubdataTracer { user_logs: self.get_total_user_logs(state), l2_to_l1_messages: self.get_total_l1_messenger_messages(state), published_bytecodes: self.get_total_published_bytecodes(state), - state_diffs: Self::get_state_diffs(&state.storage), + state_diffs: self.get_state_diffs(&state.storage), } } } diff --git a/core/lib/multivm/src/versions/vm_latest/utils/logs.rs b/core/lib/multivm/src/versions/vm_latest/utils/logs.rs index c379b44088f..27effbdbbea 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/logs.rs @@ -1,8 +1,9 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; +use zk_evm_1_4_1::aux_structures::{LogQuery, Timestamp}; use zksync_state::WriteStorage; -use zksync_types::{l2_to_l1_log::L2ToL1Log, VmEvent}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogQueryType, VmEvent}; use crate::{ + glue::GlueInto, interface::L1BatchEnv, vm_latest::{ old_vm::{events::merge_events, history_recorder::HistoryMode}, @@ -22,5 +23,15 @@ pub(crate) fn collect_events_and_l1_system_logs_after_timestamp VmInterface for Vm { tracer: Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode, None) } /// Get current state of bootloader memory. 
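One consumer of the new `BOOTLOADER_BATCH_TIP_OVERHEAD` constant is the `has_enough_gas_for_batch_tip` method added in the hunk below: it reports whether the bootloader frame still has enough ergs to pay for end-of-batch work. Inverted, it is a natural batch-sealing criterion. A self-contained sketch of that decision rule (the constant value is the one defined earlier in this diff; the free-function shape and how a caller would use it are assumptions here):

// Mirrors BOOTLOADER_BATCH_TIP_OVERHEAD from vm_latest/constants.rs.
const TIP_OVERHEAD: u32 = 80_000_000;

/// Stand-in for `has_enough_gas_for_batch_tip`: true while the remaining
/// ergs still cover compression verification and pubdata publishing.
fn has_enough_gas_for_batch_tip(ergs_remaining: u32) -> bool {
    ergs_remaining >= TIP_OVERHEAD
}

fn main() {
    assert!(has_enough_gas_for_batch_tip(100_000_000)); // keep executing txs
    assert!(!has_enough_gas_for_batch_tip(79_999_999)); // time to seal the batch
}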
@@ -94,7 +96,7 @@ impl VmInterface for Vm { let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events); let system_logs = l1_messages .into_iter() - .map(|log| SystemL2ToL1Log(log.into())) + .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); let total_log_queries = self.state.event_sink.get_log_queries() + self @@ -104,9 +106,21 @@ impl VmInterface for Vm { .len() + self.state.storage.get_final_log_queries().len(); + let storage_log_queries = self.state.storage.get_final_log_queries(); + + let deduped_storage_log_queries = + sort_storage_access_queries(storage_log_queries.iter().map(|log| &log.log_query)).1; + CurrentExecutionState { events, - storage_log_queries: self.state.storage.get_final_log_queries(), + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduped_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: user_l2_to_l1_logs .into_iter() @@ -136,7 +150,7 @@ impl VmInterface for Vm { VmExecutionResultAndLogs, ) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); + let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -151,6 +165,10 @@ impl VmInterface for Vm { self.record_vm_memory_metrics_inner() } + fn has_enough_gas_for_batch_tip(&self) -> bool { + self.state.local_state.callstack.current.ergs_remaining >= BOOTLOADER_BATCH_TIP_OVERHEAD + } + fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.execute(VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_m5/mod.rs b/core/lib/multivm/src/versions/vm_m5/mod.rs index fc549761e03..946b2e4bf56 100644 --- a/core/lib/multivm/src/versions/vm_m5/mod.rs +++ b/core/lib/multivm/src/versions/vm_m5/mod.rs @@ -23,8 +23,6 @@ mod pubdata_utils; mod refunds; pub mod storage; pub mod test_utils; -#[cfg(test)] -mod tests; pub mod transaction_data; pub mod utils; mod vm; diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs b/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs index b38da4051f3..02cf5e9cdbc 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs @@ -7,21 +7,19 @@ use zk_evm_1_3_1::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, - StorageLogQueryType, BOOTLOADER_ADDRESS, U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQueryType, + BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; -use crate::{ - glue::GlueInto, - vm_m5::{ - history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryRecorder, StorageWrapper, - }, - storage::{Storage, StoragePtr}, - vm_instance::MultiVMSubversion, +use crate::vm_m5::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryRecorder, StorageWrapper, }, + storage::{Storage, StoragePtr}, + utils::StorageLogQuery, + vm_instance::MultiVMSubversion, }; // While the storage does not support different shards, it was decided to write the @@ -95,7 +93,7 @@ impl StorageOracle { 
self.frames_stack.push_forward( StorageLogQuery { - log_query: query.glue_into(), + log_query: query, log_type: StorageLogQueryType::Read, }, query.timestamp, @@ -119,7 +117,7 @@ impl StorageOracle { query.read_value = current_value; let mut storage_log_query = StorageLogQuery { - log_query: query.glue_into(), + log_query: query, log_type: log_query_type, }; self.frames_stack @@ -262,7 +260,7 @@ impl VmStorageOracle for StorageOracle { } }; - let LogQuery { written_value, .. } = query.log_query.glue_into(); + let LogQuery { written_value, .. } = query.log_query; let key = triplet_to_storage_key( query.log_query.shard_id, query.log_query.address, diff --git a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs index e38e5d3bffd..1f8f9922c14 100644 --- a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; -use zk_evm_1_3_1::aux_structures::Timestamp; +use itertools::Itertools; +use zk_evm_1_3_1::aux_structures::{LogQuery, Timestamp}; use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_types::{ event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, @@ -85,14 +86,27 @@ impl VmInstance { .forward, from_timestamp, ); - let (_, deduplicated_logs) = - sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query)); + + // To call the `vm-1.3.3` version of this method, the `vm-1.3.1` `LogQuery` has to be converted + // to the `vm-1.3.3` `LogQuery` and then converted back afterwards. + let deduplicated_logs: Vec<LogQuery> = sort_storage_access_queries( + &storage_logs + .iter() + .map(|log| { + GlueInto::::glue_into(log.log_query) + }) + .collect_vec(), + ) + .1 + .into_iter() + .map(GlueInto::::glue_into) + .collect(); deduplicated_logs .into_iter() .filter_map(|log| { if log.rw_flag { - let key = storage_key_of_log(&log.glue_into()); + let key = storage_key_of_log(&log); + let pre_paid = pre_paid_before_tx(&key); let to_pay_by_user = self.state.storage.base_price_for_write(&log.glue_into()); diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 6920e77b8a8..c052353ecb5 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -21,13 +21,13 @@ use zksync_types::{ fee::Fee, l2::L2Tx, web3::signing::keccak256, - Execute, L2ChainId, Nonce, StorageKey, StorageLogQuery, StorageValue, - CONTRACT_DEPLOYER_ADDRESS, H256, U256, + Execute, L2ChainId, Nonce, StorageKey, StorageValue, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; use zksync_utils::{ address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, }; +use super::utils::StorageLogQuery; use crate::vm_m5::{ event_sink::InMemoryEventSink, history_recorder::{FrameManager, HistoryRecorder}, diff --git a/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs deleted file mode 100644 index d9e07c5068d..00000000000 --- a/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs +++ /dev/null @@ -1,1657 +0,0 @@ -// ``` -// //! -// //! Tests for the bootloader -// //! The description for each of the tests can be found in the corresponding `.yul` file. -// //!
-// #![cfg_attr(test, allow(unused_imports))] -// -// use crate::errors::{VmRevertReason, VmRevertReasonParsingResult}; -// use crate::memory::SimpleMemory; -// use crate::oracles::tracer::{ -// read_pointer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, -// TransactionResultTracer, VmHook, -// }; -// use crate::storage::{Storage, StoragePtr}; -// use crate::test_utils::{ -// get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx, -// mock_loadnext_test_call, VmInstanceInnerState, -// }; -// use crate::utils::{ -// create_test_block_params, insert_system_contracts, read_bootloader_test_code, -// BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, -// }; -// use crate::vm::{ -// get_vm_hook_params, tx_has_failed, VmBlockResult, VmExecutionStopReason, ZkSyncVmState, -// MAX_MEM_SIZE_BYTES, -// }; -// use crate::vm_with_bootloader::{ -// bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx, -// init_vm_inner, push_raw_transaction_to_bootloader_memory, -// push_transaction_to_bootloader_memory, BlockContext, DerivedBlockContext, BOOTLOADER_HEAP_PAGE, -// BOOTLOADER_TX_DESCRIPTION_OFFSET, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -// }; -// use crate::vm_with_bootloader::{BlockContextMode, BootloaderJobType, TxExecutionMode}; -// use crate::{test_utils, VmInstance}; -// use crate::{TxRevertReason, VmExecutionResult}; -// use itertools::Itertools; -// use std::cell::RefCell; -// use std::convert::TryFrom; -// use std::ops::{Add, DivAssign}; -// use std::rc::Rc; -// use tempfile::TempDir; -// use zk_evm_1_3_1::abstractions::{ -// AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, -// MAX_HEAP_PAGE_SIZE_IN_WORDS, MAX_MEMORY_BYTES, -// }; -// use zk_evm_1_3_1::aux_structures::Timestamp; -// use zk_evm_1_3_1::block_properties::BlockProperties; -// use zk_evm_1_3_1::sha3::digest::typenum::U830; -// use zk_evm_1_3_1::witness_trace::VmWitnessTracer; -// use zk_evm_1_3_1::zkevm_opcode_defs::decoding::VmEncodingMode; -// use zk_evm_1_3_1::zkevm_opcode_defs::FatPointer; -// use zksync_types::block::DeployedContract; -// use zksync_types::ethabi::encode; -// use zksync_types::l1::L1Tx; -// use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; -// use zksync_utils::test_utils::LoadnextContractExecutionParams; -// use zksync_utils::{ -// address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, bytes_to_le_words, h256_to_u256, -// u256_to_h256, -// }; -// use zksync_utils::{h256_to_account_address, u256_to_account_address}; -// -// use crate::{transaction_data::TransactionData, OracleTools}; -// use std::time; -// use zksync_contracts::{ -// default_erc20_bytecode, get_loadnext_contract, known_codes_contract, load_contract, -// load_sys_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, -// BaseSystemContracts, SystemContractCode, PLAYGROUND_BLOCK_BOOTLOADER_CODE, -// }; -// use zksync_crypto::rand::random; -// use zksync_state::secondary_storage::SecondaryStateStorage; -// use zksync_state::storage_view::StorageView; -// use zksync_storage::db::Database; -// use zksync_storage::RocksDB; -// use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; -// use zksync_types::utils::{ -// deployed_address_create, storage_key_for_eth_balance, storage_key_for_standard_token_balance, -// }; -// use zksync_types::{ -// ethabi::Token, AccountTreeId, Address, Execute, ExecuteTransactionCommon, L1BatchNumber, -// L2ChainId, PackedEthSignature, StorageKey, 
StorageLogQueryType, Transaction, H256, -// KNOWN_CODES_STORAGE_ADDRESS, U256, -// }; -// use zksync_types::{fee::Fee, l2::L2Tx, l2_to_l1_log::L2ToL1Log, tx::ExecutionMetrics}; -// use zksync_types::{ -// get_code_key, get_is_account_key, get_known_code_key, get_nonce_key, L1TxCommonData, Nonce, -// PriorityOpId, SerialId, StorageLog, ZkSyncReadStorage, BOOTLOADER_ADDRESS, -// CONTRACT_DEPLOYER_ADDRESS, H160, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, -// MAX_TXS_IN_BLOCK, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, -// SYSTEM_CONTEXT_MINIMAL_BASE_FEE, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, -// }; -// -// use once_cell::sync::Lazy; -// use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -// -// fn run_vm_with_custom_factory_deps<'a>( -// oracle_tools: &'a mut OracleTools<'a, false>, -// block_context: BlockContext, -// block_properties: &'a BlockProperties, -// encoded_tx: Vec, -// predefined_overhead: u32, -// expected_error: Option, -// ) { -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); -// let mut vm = init_vm_inner( -// oracle_tools, -// BlockContextMode::OverrideCurrent(block_context.into()), -// block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// vm.bootloader_state.add_tx_data(encoded_tx.len()); -// vm.state.memory.populate_page( -// BOOTLOADER_HEAP_PAGE as usize, -// get_bootloader_memory_for_encoded_tx( -// encoded_tx, -// 0, -// TxExecutionMode::VerifyExecute, -// 0, -// 0, -// predefined_overhead, -// ), -// Timestamp(0), -// ); -// -// let result = vm.execute_next_tx().err(); -// -// assert_eq!(expected_error, result); -// } -// -// fn get_balance(token_id: AccountTreeId, account: &Address, main_storage: StoragePtr<'_>) -> U256 { -// let key = storage_key_for_standard_token_balance(token_id, account); -// h256_to_u256(main_storage.borrow_mut().get_value(&key)) -// } -// -// #[test] -// fn test_dummy_bootloader() { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// let (block_context, block_properties) = create_test_block_params(); -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// let bootloader_code = read_bootloader_test_code("dummy"); -// let bootloader_hash = hash_bytecode(&bootloader_code); -// -// base_system_contracts.bootloader = SystemContractCode { -// code: bytes_to_be_words(bootloader_code), -// hash: bootloader_hash, -// }; -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let VmBlockResult { -// full_result: res, .. 
-// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); -// -// // Dummy bootloader should not panic -// assert!(res.revert_reason.is_none()); -// -// let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); -// -// verify_required_memory( -// &vm.state, -// vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], -// ); -// } -// -// #[test] -// fn test_bootloader_out_of_gas() { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// let (block_context, block_properties) = create_test_block_params(); -// -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// -// let bootloader_code = read_bootloader_test_code("dummy"); -// let bootloader_hash = hash_bytecode(&bootloader_code); -// -// base_system_contracts.bootloader = SystemContractCode { -// code: bytes_to_be_words(bootloader_code), -// hash: bootloader_hash, -// }; -// -// // init vm with only 100 ergs -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// 10, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let res = vm.execute_block_tip(); -// -// assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); -// } -// -// fn verify_required_storage(state: &ZkSyncVmState<'_>, required_values: Vec<(H256, StorageKey)>) { -// for (required_value, key) in required_values { -// let current_value = state.storage.storage.read_from_storage(&key); -// -// assert_eq!( -// u256_to_h256(current_value), -// required_value, -// "Invalid value at key {key:?}" -// ); -// } -// } -// -// fn verify_required_memory(state: &ZkSyncVmState<'_>, required_values: Vec<(U256, u32, u32)>) { -// for (required_value, memory_page, cell) in required_values { -// let current_value = state -// .memory -// .dump_page_content_as_u256_words(memory_page, cell..cell + 1)[0]; -// assert_eq!(current_value, required_value); -// } -// } -// -// #[test] -// fn test_default_aa_interaction() { -// // In this test, we aim to test whether a simple account interaction (without any fee logic) -// // will work. The account will try to deploy a simple contract from integration tests. 
-// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let operator_address = block_context.context.operator_address; -// let base_fee = block_context.base_fee; -// // We deploy here counter contract, because its logic is trivial -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(10000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_data: TransactionData = tx.clone().into(); -// -// let maximal_fee = tx_data.gas_limit * tx_data.max_fee_per_gas; -// let sender_address = tx_data.from(); -// // set balance -// -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// -// let tx_execution_result = vm -// .execute_next_tx() -// .expect("Bootloader failed while processing transaction"); -// -// assert_eq!( -// tx_execution_result.status, -// TxExecutionStatus::Success, -// "Transaction wasn't successful" -// ); -// -// let VmBlockResult { -// full_result: res, .. -// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// // Should not panic -// assert!( -// res.revert_reason.is_none(), -// "Bootloader was not expected to revert: {:?}", -// res.revert_reason -// ); -// -// // Both deployment and ordinary nonce should be incremented by one. -// let account_nonce_key = get_nonce_key(&sender_address); -// let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; -// -// // The code hash of the deployed contract should be marked as republished. -// let known_codes_key = get_known_code_key(&contract_code_hash); -// -// // The contract should be deployed successfully. 
-// let deployed_address = deployed_address_create(sender_address, U256::zero()); -// let account_code_key = get_code_key(&deployed_address); -// -// let expected_slots = vec![ -// (u256_to_h256(expected_nonce), account_nonce_key), -// (u256_to_h256(U256::from(1u32)), known_codes_key), -// (contract_code_hash, account_code_key), -// ]; -// -// verify_required_storage(&vm.state, expected_slots); -// -// assert!(!tx_has_failed(&vm.state, 0)); -// -// let expected_fee = -// maximal_fee - U256::from(tx_execution_result.gas_refunded) * U256::from(base_fee); -// let operator_balance = get_balance( -// AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), -// &operator_address, -// vm.state.storage.storage.get_ptr(), -// ); -// -// assert!( -// operator_balance == expected_fee, -// "Operator did not receive his fee" -// ); -// } -// -// fn execute_vm_with_predetermined_refund(txs: Vec, refunds: Vec) -> VmBlockResult { -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// // set balance -// for tx in txs.iter() { -// let sender_address = tx.initiator_account(); -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// } -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// let codes_for_decommiter = txs -// .iter() -// .flat_map(|tx| { -// tx.execute -// .factory_deps -// .clone() -// .unwrap_or_default() -// .iter() -// .map(|dep| bytecode_to_factory_dep(dep.clone())) -// .collect::)>>() -// }) -// .collect(); -// -// vm.state.decommittment_processor.populate( -// codes_for_decommiter, -// Timestamp(vm.state.local_state.timestamp), -// ); -// -// let memory_with_suggested_refund = get_bootloader_memory( -// txs.into_iter().map(Into::into).collect(), -// refunds, -// TxExecutionMode::VerifyExecute, -// BlockContextMode::NewBlock(block_context, Default::default()), -// ); -// -// vm.state.memory.populate_page( -// BOOTLOADER_HEAP_PAGE as usize, -// memory_with_suggested_refund, -// Timestamp(0), -// ); -// -// vm.execute_till_block_end(BootloaderJobType::TransactionExecution) -// } -// -// #[test] -// fn test_predetermined_refunded_gas() { -// // In this test, we compare the execution of the bootloader with the predefined -// // refunded gas and without them -// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let base_fee = block_context.base_fee; -// // We deploy here counter contract, because its logic is 
trivial -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(10000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let sender_address = tx.initiator_account(); -// -// // set balance -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// -// let tx_execution_result = vm -// .execute_next_tx() -// .expect("Bootloader failed while processing transaction"); -// -// assert_eq!( -// tx_execution_result.status, -// TxExecutionStatus::Success, -// "Transaction wasn't successful" -// ); -// -// // If the refund provided by the operator or the final refund are the 0 -// // there is no impact of the operator's refund at all and so this test does not -// // make much sense. -// assert!( -// tx_execution_result.operator_suggested_refund > 0, -// "The operator's refund is 0" -// ); -// assert!( -// tx_execution_result.gas_refunded > 0, -// "The final refund is 0" -// ); -// -// let mut result = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// assert!( -// result.full_result.revert_reason.is_none(), -// "Bootloader was not expected to revert: {:?}", -// result.full_result.revert_reason -// ); -// -// let mut result_with_predetermined_refund = execute_vm_with_predetermined_refund( -// vec![tx], -// vec![tx_execution_result.operator_suggested_refund], -// ); -// // We need to sort these lists as those are flattened from HashMaps -// result.full_result.used_contract_hashes.sort(); -// result_with_predetermined_refund -// .full_result -// .used_contract_hashes -// .sort(); -// -// assert_eq!( -// result.full_result.events, -// result_with_predetermined_refund.full_result.events -// ); -// assert_eq!( -// result.full_result.l2_to_l1_logs, -// result_with_predetermined_refund.full_result.l2_to_l1_logs -// ); -// assert_eq!( -// result.full_result.storage_log_queries, -// result_with_predetermined_refund -// .full_result -// .storage_log_queries -// ); -// assert_eq!( -// result.full_result.used_contract_hashes, -// result_with_predetermined_refund -// .full_result -// .used_contract_hashes -// ); -// } -// -// #[derive(Debug, Clone)] -// enum TransactionRollbackTestInfo { -// Rejected(Transaction, TxRevertReason), -// Processed(Transaction, bool, TxExecutionStatus), -// } -// -// impl TransactionRollbackTestInfo { -// fn new_rejected(transaction: Transaction, revert_reason: TxRevertReason) -> Self { -// Self::Rejected(transaction, revert_reason) -// } -// -// fn new_processed( -// transaction: Transaction, -// should_be_rollbacked: bool, -// expected_status: TxExecutionStatus, -// ) -> Self { -// Self::Processed(transaction, should_be_rollbacked, expected_status) -// } -// -// fn get_transaction(&self) -> &Transaction { -// match self { -// TransactionRollbackTestInfo::Rejected(tx, _) => tx, -// TransactionRollbackTestInfo::Processed(tx, 
_, _) => tx, -// } -// } -// -// fn rejection_reason(&self) -> Option { -// match self { -// TransactionRollbackTestInfo::Rejected(_, revert_reason) => Some(revert_reason.clone()), -// TransactionRollbackTestInfo::Processed(_, _, _) => None, -// } -// } -// -// fn should_rollback(&self) -> bool { -// match self { -// TransactionRollbackTestInfo::Rejected(_, _) => true, -// TransactionRollbackTestInfo::Processed(_, x, _) => *x, -// } -// } -// -// fn expected_status(&self) -> TxExecutionStatus { -// match self { -// TransactionRollbackTestInfo::Rejected(_, _) => { -// panic!("There is no execution status for rejected transaction") -// } -// TransactionRollbackTestInfo::Processed(_, _, status) => *status, -// } -// } -// } -// -// // Accepts the address of the sender as well as the list of pairs of its transactions -// // and whether these transactions should succeed. -// fn execute_vm_with_possible_rollbacks( -// sender_address: Address, -// transactions: Vec, -// block_context: DerivedBlockContext, -// block_properties: BlockProperties, -// ) -> VmExecutionResult { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// // Setting infinite balance for the sender. -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// for test_info in transactions { -// vm.save_current_vm_as_snapshot(); -// let vm_state_before_tx = vm.dump_inner_state(); -// push_transaction_to_bootloader_memory( -// &mut vm, -// test_info.get_transaction(), -// TxExecutionMode::VerifyExecute, -// ); -// -// match vm.execute_next_tx() { -// Err(reason) => { -// assert_eq!(test_info.rejection_reason(), Some(reason)); -// } -// Ok(res) => { -// assert_eq!(test_info.rejection_reason(), None); -// assert_eq!( -// res.status, -// test_info.expected_status(), -// "Transaction status is not correct" -// ); -// } -// }; -// -// if test_info.should_rollback() { -// // Some error has occurred, we should reject the transaction -// vm.rollback_to_latest_snapshot(); -// -// // vm_state_before_tx. -// let state_after_rollback = vm.dump_inner_state(); -// assert_eq!( -// vm_state_before_tx, state_after_rollback, -// "Did not rollback VM state correctly" -// ); -// } -// } -// -// let VmBlockResult { -// full_result: mut result, -// .. -// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); -// // Used contract hashes are retrieved in unordered manner. -// // However it must be sorted for the comparisons in tests to work -// result.used_contract_hashes.sort(); -// -// result -// } -// -// // Sets the signature for an L2 transaction and returns the same transaction -// // but this different signature. 
-// fn change_signature(mut tx: Transaction, signature: Vec) -> Transaction { -// tx.common_data = match tx.common_data { -// ExecuteTransactionCommon::L2(mut data) => { -// data.signature = signature; -// ExecuteTransactionCommon::L2(data) -// } -// _ => unreachable!(), -// }; -// -// tx -// } -// -// #[test] -// fn test_vm_rollbacks() { -// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { -// let (block_context, block_properties) = create_test_block_params(); -// (block_context.into(), block_properties) -// }; -// -// let base_fee = U256::from(block_context.base_fee); -// -// let sender_private_key = H256::random(); -// let contract_code = read_test_contract(); -// -// let tx_nonce_0: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(10000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_nonce_1: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(1), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(10000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_nonce_2: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(2), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(10000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let wrong_signature_length_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 32]); -// let wrong_v_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 65]); -// let wrong_signature_tx = change_signature(tx_nonce_0.clone(), vec![27u8; 65]); -// -// let sender_address = tx_nonce_0.initiator_account(); -// -// let result_without_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// // The nonces are ordered correctly, all the transactions should succeed. -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_2.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// ], -// block_context, -// block_properties, -// ); -// -// let incorrect_nonce = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Incorrect nonce".to_string(), -// }); -// let reusing_nonce_twice = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Reusing the same nonce twice".to_string(), -// }); -// let signature_length_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Signature length is incorrect".to_string(), -// }); -// let v_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "v is neither 27 nor 28".to_string(), -// }); -// let signature_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), -// }); -// -// let result_with_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// TransactionRollbackTestInfo::new_rejected( -// wrong_signature_length_tx, -// signature_length_is_incorrect, -// ), -// TransactionRollbackTestInfo::new_rejected(wrong_v_tx, v_is_incorrect), -// TransactionRollbackTestInfo::new_rejected(wrong_signature_tx, signature_is_incorrect), -// // The correct nonce is 0, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce.clone()), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The correct nonce is 1, this tx will fail -// TransactionRollbackTestInfo::new_rejected( -// tx_nonce_0.clone(), -// reusing_nonce_twice.clone(), -// ), -// // The correct nonce is 1, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_1, -// false, -// TxExecutionStatus::Success, -// ), -// // The correct nonce is 2, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_0, reusing_nonce_twice.clone()), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_2.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // This tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2, reusing_nonce_twice.clone()), -// ], -// block_context, -// block_properties, -// ); -// -// assert_eq!(result_without_rollbacks, result_with_rollbacks); -// -// let loadnext_contract = get_loadnext_contract(); -// -// let loadnext_constructor_data = encode(&[Token::Uint(U256::from(100))]); -// let loadnext_deploy_tx: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(0), -// &loadnext_contract.bytecode, -// loadnext_contract.factory_deps, -// &loadnext_constructor_data, -// Fee { -// gas_limit: U256::from(60000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let loadnext_contract_address = -// get_create_zksync_address(loadnext_deploy_tx.initiator_account(), Nonce(0)); -// let deploy_loadnext_tx_info = TransactionRollbackTestInfo::new_processed( -// loadnext_deploy_tx, -// false, -// TxExecutionStatus::Success, -// ); -// -// let get_load_next_tx = |params: LoadnextContractExecutionParams, nonce: Nonce| { -// // Here we test loadnext with various kinds of operations -// let tx: Transaction = mock_loadnext_test_call( -// sender_private_key, -// nonce, -// loadnext_contract_address, -// Fee { -// gas_limit: U256::from(80000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// params, -// ) -// .into(); -// -// tx -// }; -// -// let loadnext_tx_0 = get_load_next_tx( -// LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// }, -// Nonce(1), -// ); -// let loadnext_tx_1 = get_load_next_tx( -// LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// }, -// Nonce(2), -// ); -// -// let result_without_rollbacks = 
execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// deploy_loadnext_tx_info.clone(), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// ], -// block_context, -// block_properties, -// ); -// -// let result_with_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// deploy_loadnext_tx_info, -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The nonce has been bumped up, this transaction should now fail -// TransactionRollbackTestInfo::new_rejected(loadnext_tx_0, reusing_nonce_twice.clone()), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The nonce has been bumped up, this transaction should now fail -// TransactionRollbackTestInfo::new_rejected(loadnext_tx_1, reusing_nonce_twice), -// ], -// block_context, -// block_properties, -// ); -// -// assert_eq!(result_without_rollbacks, result_with_rollbacks); -// } -// -// // Inserts the contracts into the test environment, bypassing the -// // deployer system contract. Besides the reference to storage -// // it accepts a `contracts` tuple of information about the contract -// // and whether or not it is an account. 
-// fn insert_contracts( -// raw_storage: &mut SecondaryStateStorage, -// contracts: Vec<(DeployedContract, bool)>, -// ) { -// let logs: Vec = contracts -// .iter() -// .flat_map(|(contract, is_account)| { -// let mut new_logs = vec![]; -// -// let deployer_code_key = get_code_key(contract.account_id.address()); -// new_logs.push(StorageLog::new_write_log( -// deployer_code_key, -// hash_bytecode(&contract.bytecode), -// )); -// -// if *is_account { -// let is_account_key = get_is_account_key(contract.account_id.address()); -// new_logs.push(StorageLog::new_write_log( -// is_account_key, -// u256_to_h256(1u32.into()), -// )); -// } -// -// new_logs -// }) -// .collect(); -// raw_storage.process_transaction_logs(&logs); -// -// for (contract, _) in contracts { -// raw_storage.store_contract(*contract.account_id.address(), contract.bytecode.clone()); -// raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); -// } -// raw_storage.save(L1BatchNumber(0)); -// } -// -// enum NonceHolderTestMode { -// SetValueUnderNonce, -// IncreaseMinNonceBy5, -// IncreaseMinNonceTooMuch, -// LeaveNonceUnused, -// IncreaseMinNonceBy1, -// SwitchToArbitraryOrdering, -// } -// -// impl From for u8 { -// fn from(mode: NonceHolderTestMode) -> u8 { -// match mode { -// NonceHolderTestMode::SetValueUnderNonce => 0, -// NonceHolderTestMode::IncreaseMinNonceBy5 => 1, -// NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, -// NonceHolderTestMode::LeaveNonceUnused => 3, -// NonceHolderTestMode::IncreaseMinNonceBy1 => 4, -// NonceHolderTestMode::SwitchToArbitraryOrdering => 5, -// } -// } -// } -// -// fn get_nonce_holder_test_tx( -// nonce: U256, -// account_address: Address, -// test_mode: NonceHolderTestMode, -// block_context: &DerivedBlockContext, -// ) -> TransactionData { -// TransactionData { -// tx_type: 113, -// from: account_address, -// to: account_address, -// gas_limit: U256::from(10000000u32), -// pubdata_price_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// max_fee_per_gas: U256::from(block_context.base_fee), -// max_priority_fee_per_gas: U256::zero(), -// nonce, -// // The reserved fields that are unique for different types of transactions. -// // E.g. nonce is currently used in all transaction, but it should not be mandatory -// // in the long run. -// reserved: [U256::zero(); 4], -// data: vec![12], -// signature: vec![test_mode.into()], -// -// ..Default::default() -// } -// } -// -// fn run_vm_with_raw_tx<'a>( -// oracle_tools: &'a mut OracleTools<'a, false>, -// block_context: DerivedBlockContext, -// block_properties: &'a BlockProperties, -// tx: TransactionData, -// ) -> (VmExecutionResult, bool) { -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); -// let mut vm = init_vm_inner( -// oracle_tools, -// BlockContextMode::OverrideCurrent(block_context), -// block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let overhead = tx.overhead_gas(); -// push_raw_transaction_to_bootloader_memory( -// &mut vm, -// tx, -// TxExecutionMode::VerifyExecute, -// overhead, -// ); -// let VmBlockResult { -// full_result: result, -// .. 
-// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// -// (result, tx_has_failed(&vm.state, 0)) -// } -// -// #[test] -// fn test_nonce_holder() { -// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { -// let (block_context, block_properties) = create_test_block_params(); -// (block_context.into(), block_properties) -// }; -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// -// let account_address = H160::random(); -// let account = DeployedContract { -// account_id: AccountTreeId::new(account_address), -// bytecode: read_nonce_holder_tester(), -// }; -// -// insert_contracts(&mut raw_storage, vec![(account, true)]); -// -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// // We deploy here counter contract, because its logic is trivial -// -// let key = storage_key_for_eth_balance(&account_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut run_nonce_test = |nonce: U256, -// test_mode: NonceHolderTestMode, -// error_message: Option, -// comment: &'static str| { -// let tx = get_nonce_holder_test_tx(nonce, account_address, test_mode, &block_context); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// let (result, tx_has_failed) = -// run_vm_with_raw_tx(&mut oracle_tools, block_context, &block_properties, tx); -// if let Some(msg) = error_message { -// let expected_error = TxRevertReason::ValidationFailed(VmRevertReason::General { msg }); -// assert_eq!( -// result -// .revert_reason -// .expect("No revert reason") -// .revert_reason, -// expected_error, -// "{}", -// comment -// ); -// } else { -// assert!(!tx_has_failed, "{}", comment); -// } -// }; -// -// // Test 1: trying to set value under non sequential nonce value. 
-// run_nonce_test( -// 1u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// Some("Previous nonce has not been used".to_string()), -// "Allowed to set value under non sequential value", -// ); -// -// // Test 2: increase min nonce by 1 with sequential nonce ordering: -// run_nonce_test( -// 0u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy1, -// None, -// "Failed to increment nonce by 1 for sequential account", -// ); -// -// // Test 3: correctly set value under nonce with sequential nonce ordering: -// run_nonce_test( -// 1u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// None, -// "Failed to set value under nonce sequential value", -// ); -// -// // Test 5: migrate to the arbitrary nonce ordering: -// run_nonce_test( -// 2u32.into(), -// NonceHolderTestMode::SwitchToArbitraryOrdering, -// None, -// "Failed to switch to arbitrary ordering", -// ); -// -// // Test 6: increase min nonce by 5 -// run_nonce_test( -// 6u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// None, -// "Failed to increase min nonce by 5", -// ); -// -// // Test 7: since the nonces in range [6,10] are no longer allowed, the -// // tx with nonce 10 should not be allowed -// run_nonce_test( -// 10u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// Some("Reusing the same nonce twice".to_string()), -// "Allowed to reuse nonce below the minimal one", -// ); -// -// // Test 8: we should be able to use nonce 13 -// run_nonce_test( -// 13u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// None, -// "Did not allow to use unused nonce 10", -// ); -// -// // Test 9: we should not be able to reuse nonce 13 -// run_nonce_test( -// 13u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// Some("Reusing the same nonce twice".to_string()), -// "Allowed to reuse the same nonce twice", -// ); -// -// // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 -// run_nonce_test( -// 14u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// None, -// "Did not allow to use a bumped nonce", -// ); -// -// // Test 6: Do not allow bumping nonce by too much -// run_nonce_test( -// 16u32.into(), -// NonceHolderTestMode::IncreaseMinNonceTooMuch, -// Some("The value for incrementing the nonce is too high".to_string()), -// "Allowed for incrementing min nonce too much", -// ); -// -// // Test 7: Do not allow not setting a nonce as used -// run_nonce_test( -// 16u32.into(), -// NonceHolderTestMode::LeaveNonceUnused, -// Some("The nonce was not set as used".to_string()), -// "Allowed to leave nonce as unused", -// ); -// } -// -// #[test] -// fn test_l1_tx_execution() { -// // In this test, we try to execute a contract deployment from L1 -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// let (block_context, block_properties) = create_test_block_params(); -// -// // Here instead of marking code hash via the bootloader means, we will -// // using L1->L2 communication, the same it would likely be done during the priority mode. 
-// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); -// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); -// -// let required_l2_to_l1_logs = vec![ -// L2ToL1Log { -// shard_id: 0, -// is_service: false, -// tx_number_in_block: 0, -// sender: SYSTEM_CONTEXT_ADDRESS, -// key: u256_to_h256(U256::from(block_context.block_timestamp)), -// value: Default::default(), -// }, -// L2ToL1Log { -// shard_id: 0, -// is_service: true, -// tx_number_in_block: 0, -// sender: BOOTLOADER_ADDRESS, -// key: l1_deploy_tx_data.canonical_l1_tx_hash(), -// value: u256_to_h256(U256::from(1u32)), -// }, -// ]; -// -// let sender_address = l1_deploy_tx_data.from(); -// -// oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory(&mut vm, &l1_deploy_tx, TxExecutionMode::VerifyExecute); -// -// let res = vm.execute_next_tx().unwrap(); -// -// // The code hash of the deployed contract should be marked as republished. -// let known_codes_key = get_known_code_key(&contract_code_hash); -// -// // The contract should be deployed successfully. -// let deployed_address = deployed_address_create(sender_address, U256::zero()); -// let account_code_key = get_code_key(&deployed_address); -// -// let expected_slots = vec![ -// (u256_to_h256(U256::from(1u32)), known_codes_key), -// (contract_code_hash, account_code_key), -// ]; -// assert!(!tx_has_failed(&vm.state, 0)); -// -// verify_required_storage(&vm.state, expected_slots); -// -// assert_eq!(res.result.logs.l2_to_l1_logs, required_l2_to_l1_logs); -// -// let tx = get_l1_execute_test_contract_tx(deployed_address, true); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); -// assert_eq!(res.initial_storage_writes, 0); -// -// let tx = get_l1_execute_test_contract_tx(deployed_address, false); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); -// assert_eq!(res.initial_storage_writes, 2); -// -// let repeated_writes = res.repeated_storage_writes; -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); -// assert_eq!(res.initial_storage_writes, 1); -// // We do the same storage write, so it will be deduplicated -// assert_eq!(res.repeated_storage_writes, repeated_writes); -// -// let mut tx = get_l1_execute_test_contract_tx(deployed_address, false); -// tx.execute.value = U256::from(1); -// match &mut tx.common_data { -// ExecuteTransactionCommon::L1(l1_data) => { -// l1_data.to_mint = U256::from(4); -// } -// _ => unreachable!(), -// } -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// let execution_result = vm.execute_next_tx().unwrap(); -// // The method is not payable, so the transaction with non-zero value should fail -// 
assert_eq!( -// execution_result.status, -// TxExecutionStatus::Failure, -// "The transaction should fail" -// ); -// -// let res = ExecutionMetrics::new(&execution_result.result.logs, 0, 0, 0, 0); -// -// // There are 2 initial writes here: -// // - totalSupply of ETH token -// // - balance of the refund recipient -// assert_eq!(res.initial_storage_writes, 2); -// } -// -// #[test] -// fn test_invalid_bytecode() { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let (block_context, block_properties) = create_test_block_params(); -// -// let test_vm_with_custom_bytecode_hash = -// |bytecode_hash: H256, expected_revert_reason: Option| { -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let (encoded_tx, predefined_overhead) = -// get_l1_tx_with_custom_bytecode_hash(h256_to_u256(bytecode_hash)); -// -// run_vm_with_custom_factory_deps( -// &mut oracle_tools, -// block_context, -// &block_properties, -// encoded_tx, -// predefined_overhead, -// expected_revert_reason, -// ); -// }; -// -// let failed_to_mark_factory_deps = |msg: &str| { -// TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { -// msg: msg.to_string(), -// }) -// }; -// -// // Here we provide the correctly-formatted bytecode hash of -// // odd length, so it should work. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// None, -// ); -// -// // Here we provide correctly formatted bytecode of even length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Code length in words must be odd", -// )), -// ); -// -// // Here we provide incorrectly formatted bytecode of odd length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Incorrectly formatted bytecodeHash", -// )), -// ); -// -// // Here we provide incorrectly formatted bytecode of odd length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Incorrectly formatted bytecodeHash", -// )), -// ); -// } -// -// #[test] -// fn test_tracing_of_execution_errors() { -// // In this test, we are checking that the execution errors are transmitted correctly from the bootloader. 
-// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// -// let contract_address = Address::random(); -// let error_contract = DeployedContract { -// account_id: AccountTreeId::new(contract_address), -// bytecode: read_error_contract(), -// }; -// -// let tx = get_error_tx( -// H256::random(), -// Nonce(0), -// contract_address, -// Fee { -// gas_limit: U256::from(1000000u32), -// max_fee_per_gas: U256::from(10000000000u64), -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(50000u32), -// }, -// ); -// -// insert_contracts(&mut raw_storage, vec![(error_contract, false)]); -// -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let key = storage_key_for_eth_balance(&tx.common_data.initiator_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory(&mut vm, &tx.into(), TxExecutionMode::VerifyExecute); -// -// let mut tracer = TransactionResultTracer::default(); -// assert_eq!( -// vm.execute_with_custom_tracer(&mut tracer), -// VmExecutionStopReason::VmFinished, -// "Tracer should never request stop" -// ); -// -// match tracer.revert_reason { -// Some(revert_reason) => { -// let revert_reason = VmRevertReason::try_from(&revert_reason as &[u8]).unwrap(); -// assert_eq!( -// revert_reason, -// VmRevertReason::General { -// msg: "short".to_string() -// } -// ) -// } -// _ => panic!( -// "Tracer captured incorrect result {:#?}", -// tracer.revert_reason -// ), -// } -// } -// -// /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
-// #[test] -// fn test_tx_gas_limit_offset() { -// let gas_limit = U256::from(999999); -// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let raw_storage = SecondaryStateStorage::new(db); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// Default::default(), -// Default::default(), -// Fee { -// gas_limit, -// ..Default::default() -// }, -// ) -// .into(); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// -// let gas_limit_from_memory = vm -// .state -// .memory -// .read_slot( -// BOOTLOADER_HEAP_PAGE as usize, -// TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, -// ) -// .value; -// assert_eq!(gas_limit_from_memory, gas_limit); -// } -// -// #[test] -// fn test_is_write_initial_behaviour() { -// // In this test, we check result of `is_write_initial` at different stages. -// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let base_fee = block_context.base_fee; -// let account_pk = H256::random(); -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// account_pk, -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(10000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let sender_address = tx.initiator_account(); -// let nonce_key = get_nonce_key(&sender_address); -// -// // Check that the next write to the nonce key will be initial. -// assert!(storage_ptr.is_write_initial(&nonce_key)); -// -// // Set balance to be able to pay fee for txs. -// let balance_key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&balance_key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); -// -// vm.execute_next_tx() -// .expect("Bootloader failed while processing the first transaction"); -// // Check that `is_write_initial` still returns true for the nonce key. 
-// assert!(storage_ptr.is_write_initial(&nonce_key)); -// } -// -// pub fn get_l1_tx_with_custom_bytecode_hash(bytecode_hash: U256) -> (Vec, u32) { -// let tx: TransactionData = get_l1_execute_test_contract_tx(Default::default(), false).into(); -// let predefined_overhead = tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash]); -// let tx_bytes = tx.abi_encode_with_custom_factory_deps(vec![bytecode_hash]); -// -// (bytes_to_be_words(tx_bytes), predefined_overhead) -// } -// -// const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; -// -// pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { -// let execute = execute_test_contract(deployed_address, with_panic); -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender: H160::random(), -// gas_limit: U256::from(1000000u32), -// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { -// let execute = get_create_execute(code, calldata); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender: H160::random(), -// gas_limit: U256::from(2000000u32), -// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// fn read_test_contract() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -// } -// -// fn read_nonce_holder_tester() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -// } -// -// fn read_error_contract() -> Vec { -// read_bytecode( -// "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", -// ) -// } -// -// fn execute_test_contract(address: Address, with_panic: bool) -> Execute { -// let test_contract = load_contract( -// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", -// ); -// -// let function = test_contract.function("incrementWithRevert").unwrap(); -// -// let calldata = function -// .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) -// .expect("failed to encode parameters"); -// Execute { -// contract_address: address, -// calldata, -// value: U256::zero(), -// factory_deps: None, -// } -// } -// ``` diff --git a/core/lib/multivm/src/versions/vm_m5/tests/mod.rs b/core/lib/multivm/src/versions/vm_m5/tests/mod.rs deleted file mode 100644 index 3900135abea..00000000000 --- a/core/lib/multivm/src/versions/vm_m5/tests/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod bootloader; diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index addab5c78af..2e401b9de8b 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -7,7 +7,7 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -use zksync_types::{Address, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, U256}; +use zksync_types::{Address, StorageLogQueryType, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; use crate::{ @@ -259,3 +259,10 @@ pub fn read_bootloader_test_code(test: &str) -> Vec { test )) } + +/// Log query, which handle initial and repeated writes to the 
storage +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct StorageLogQuery { + pub log_query: LogQuery, + pub log_type: StorageLogQueryType, +} diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 08fa783cbdb..472f0688248 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,3 +1,6 @@ +use itertools::Itertools; +use zk_evm_1_3_1::aux_structures::LogQuery; +use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::StoragePtr; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, @@ -153,9 +156,33 @@ impl VmInterface for Vm { .cloned() .collect(); + let storage_log_queries = self.vm.get_final_log_queries(); + + // To allow calling the `vm-1.3.3`s. method, the `v1.3.1`'s `LogQuery` has to be converted + // to the `vm-1.3.3`'s `LogQuery`. Then, we need to convert it back. + let deduplicated_logs: Vec = sort_storage_access_queries( + &storage_log_queries + .iter() + .map(|log| { + GlueInto::::glue_into(log.log_query) + }) + .collect_vec(), + ) + .1 + .into_iter() + .map(GlueInto::::glue_into) + .collect(); + CurrentExecutionState { events, - storage_log_queries: self.vm.get_final_log_queries(), + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduplicated_logs + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes, system_logs: vec![], user_l2_to_l1_logs: l2_to_l1_logs, @@ -197,6 +224,12 @@ impl VmInterface for Vm { } } + fn has_enough_gas_for_batch_tip(&self) -> bool { + // For this version this overhead has not been calculated and it has not been used with those versions. + // We return some value just in case for backwards compatibility + true + } + fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index 2a78a817999..ed7d8078e19 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -13,7 +13,7 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, tx::tx_execution_info::TxExecutionStatus, vm_trace::VmExecutionTrace, - L1BatchNumber, StorageLogQuery, VmEvent, U256, + L1BatchNumber, VmEvent, U256, }; use crate::{ @@ -40,6 +40,7 @@ use crate::{ utils::{ collect_log_queries_after_timestamp, collect_storage_log_queries_after_timestamp, dump_memory_page_using_primitive_value, precompile_calls_count_after_timestamp, + StorageLogQuery, }, vm_with_bootloader::{ BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, @@ -468,10 +469,7 @@ impl VmInstance { .collect(); ( events, - l1_messages - .into_iter() - .map(|log| L2ToL1Log::from(GlueInto::::glue_into(log))) - .collect(), + l1_messages.into_iter().map(GlueInto::glue_into).collect(), ) } @@ -507,7 +505,7 @@ impl VmInstance { from_timestamp, ); VmExecutionLogs { - storage_logs, + storage_logs: storage_logs.into_iter().map(GlueInto::glue_into).collect(), events, user_l2_to_l1_logs: l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), system_l2_to_l1_logs: vec![], @@ -752,12 +750,8 @@ impl VmInstance { e.into_vm_event(L1BatchNumber(self.block_context.context.block_number)) }) .collect(); - full_result.l2_to_l1_logs = l1_messages - .into_iter() - .map(|log| { - L2ToL1Log::from(GlueInto::::glue_into(log)) - }) - .collect(); + 
full_result.l2_to_l1_logs = + l1_messages.into_iter().map(GlueInto::glue_into).collect(); VmBlockResult { full_result, block_tip_result, diff --git a/core/lib/multivm/src/versions/vm_m6/mod.rs b/core/lib/multivm/src/versions/vm_m6/mod.rs index 88367cf3857..3aeff47dbdc 100644 --- a/core/lib/multivm/src/versions/vm_m6/mod.rs +++ b/core/lib/multivm/src/versions/vm_m6/mod.rs @@ -17,8 +17,6 @@ pub mod utils; pub mod vm_instance; pub mod vm_with_bootloader; -#[cfg(test)] -mod tests; mod vm; pub use errors::TxRevertReason; diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs b/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs index 4eafbacbebd..a354ef627e3 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs @@ -6,21 +6,19 @@ use zk_evm_1_3_1::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, - StorageLogQueryType, BOOTLOADER_ADDRESS, U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQueryType, + BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; -use crate::{ - glue::GlueInto, - vm_m6::{ - history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, - }, - storage::{Storage, StoragePtr}, +use crate::vm_m6::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, }, + storage::{Storage, StoragePtr}, + utils::StorageLogQuery, }; // While the storage does not support different shards, it was decided to write the @@ -85,7 +83,7 @@ impl StorageOracle { self.frames_stack.push_forward( StorageLogQuery { - log_query: query.glue_into(), + log_query: query, log_type: StorageLogQueryType::Read, }, query.timestamp, @@ -109,7 +107,7 @@ impl StorageOracle { query.read_value = current_value; let mut storage_log_query = StorageLogQuery { - log_query: query.glue_into(), + log_query: query, log_type: log_query_type, }; self.frames_stack @@ -266,7 +264,7 @@ impl VmStorageOracle for StorageOracle { } }; - let LogQuery { written_value, .. } = query.log_query.glue_into(); + let LogQuery { written_value, .. } = query.log_query; let key = triplet_to_storage_key( query.log_query.shard_id, query.log_query.address, diff --git a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs index 81d11b94f1a..b9b89a38f04 100644 --- a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; -use zk_evm_1_3_1::aux_structures::Timestamp; +use itertools::Itertools; +use zk_evm_1_3_1::aux_structures::{LogQuery, Timestamp}; use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_types::{ event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, @@ -79,8 +80,21 @@ impl VmInstance { self.state.storage.frames_stack.forward().current_frame(), from_timestamp, ); - let (_, deduplicated_logs) = - sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query)); + + // To allow calling the `vm-1.3.3`s. method, the `v1.3.1`'s `LogQuery` has to be converted + // to the `vm-1.3.3`'s `LogQuery`. Then, we need to convert it back. 
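The comment above describes the same 1.3.1 → 1.3.3 → 1.3.1 round-trip used in `vm_m5/vm.rs` earlier in this diff. With the elided type parameters of `GlueInto::::glue_into` written back in, the pattern reads roughly as below; the exact path of the 1.3.3 `LogQuery` is an assumption based on the `zkevm_test_harness_1_3_3` import and should be checked against the glue layer:

```rust
use itertools::Itertools;
use zk_evm_1_3_1::aux_structures::LogQuery;
use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries;

use crate::{glue::GlueInto, vm_m6::utils::StorageLogQuery};

/// Illustrative: deduplicate 1.3.1 storage queries via the 1.3.3 test harness.
fn deduplicate_storage_logs(storage_logs: &[StorageLogQuery]) -> Vec<LogQuery> {
    let (_, deduplicated) = sort_storage_access_queries(
        &storage_logs
            .iter()
            // Up-convert each query to the shape the harness expects
            // (assumed to be `zk_evm_1_3_3::aux_structures::LogQuery`).
            .map(|log| {
                GlueInto::<zk_evm_1_3_3::aux_structures::LogQuery>::glue_into(log.log_query)
            })
            .collect_vec(),
    );
    // Down-convert the deduplicated queries back to the 1.3.1 type.
    deduplicated
        .into_iter()
        .map(GlueInto::<LogQuery>::glue_into)
        .collect()
}
```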
+ let deduplicated_logs: Vec = sort_storage_access_queries( + &storage_logs + .iter() + .map(|log| { + GlueInto::::glue_into(log.log_query) + }) + .collect_vec(), + ) + .1 + .into_iter() + .map(GlueInto::::glue_into) + .collect(); deduplicated_logs .into_iter() diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index 55e5add1164..528731ee888 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -19,13 +19,13 @@ use zksync_types::{ fee::Fee, l2::L2Tx, web3::signing::keccak256, - Execute, L2ChainId, Nonce, StorageKey, StorageLogQuery, StorageValue, - CONTRACT_DEPLOYER_ADDRESS, H256, U256, + Execute, L2ChainId, Nonce, StorageKey, StorageValue, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; use zksync_utils::{ address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, }; +use super::utils::StorageLogQuery; use crate::vm_m6::{ event_sink::InMemoryEventSink, history_recorder::{ diff --git a/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs deleted file mode 100644 index 16d2b7f47d2..00000000000 --- a/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs +++ /dev/null @@ -1,2176 +0,0 @@ -// ``` -// //! -// //! Tests for the bootloader -// //! The description for each of the tests can be found in the corresponding `.yul` file. -// //! -// use itertools::Itertools; -// use std::{ -// collections::{HashMap, HashSet}, -// convert::TryFrom, -// }; -// use tempfile::TempDir; -// -// use crate::{ -// errors::VmRevertReason, -// history_recorder::HistoryMode, -// oracles::tracer::{StorageInvocationTracer, TransactionResultTracer}, -// storage::{Storage, StoragePtr}, -// test_utils::{ -// get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx, -// mock_loadnext_test_call, -// }, -// transaction_data::TransactionData, -// utils::{ -// create_test_block_params, insert_system_contracts, read_bootloader_test_code, -// BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, -// }, -// vm::{tx_has_failed, VmExecutionStopReason, ZkSyncVmState}, -// vm_with_bootloader::{ -// bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx, -// push_raw_transaction_to_bootloader_memory, BlockContext, BlockContextMode, -// BootloaderJobType, TxExecutionMode, -// }, -// vm_with_bootloader::{ -// init_vm_inner, push_transaction_to_bootloader_memory, DerivedBlockContext, -// BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -// }, -// HistoryEnabled, OracleTools, TxRevertReason, VmBlockResult, VmExecutionResult, VmInstance, -// }; -// -// use zk_evm_1_3_1::{ -// aux_structures::Timestamp, block_properties::BlockProperties, zkevm_opcode_defs::FarCallOpcode, -// }; -// -// use zksync_types::{ -// block::DeployedContract, -// ethabi::encode, -// get_is_account_key, -// storage_writes_deduplicator::StorageWritesDeduplicator, -// system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, -// tx::tx_execution_info::TxExecutionStatus, -// utils::{ -// deployed_address_create, storage_key_for_eth_balance, -// storage_key_for_standard_token_balance, -// }, -// vm_trace::{Call, CallType}, -// Execute, L1BatchNumber, L1TxCommonData, StorageKey, StorageLog, L1_MESSENGER_ADDRESS, -// {ethabi::Token, AccountTreeId, Address, ExecuteTransactionCommon, Transaction, H256, U256}, -// {fee::Fee, l2_to_l1_log::L2ToL1Log}, -// { -// get_code_key, get_known_code_key, get_nonce_key, 
Nonce, BOOTLOADER_ADDRESS, H160, -// L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, SYSTEM_CONTEXT_ADDRESS, -// }, -// }; -// -// use zksync_utils::{ -// bytecode::CompressedBytecodeInfo, -// test_utils::LoadnextContractExecutionParams, -// {bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}, -// }; -// -// use zksync_contracts::{ -// get_loadnext_contract, load_contract, read_bytecode, SystemContractCode, -// PLAYGROUND_BLOCK_BOOTLOADER_CODE, -// }; -// -// use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView}; -// use zksync_storage::{db::Database, RocksDB}; -// -// fn run_vm_with_custom_factory_deps<'a, H: HistoryMode>( -// oracle_tools: &'a mut OracleTools<'a, false, H>, -// block_context: BlockContext, -// block_properties: &'a BlockProperties, -// encoded_tx: Vec, -// predefined_overhead: u32, -// expected_error: Option, -// ) { -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); -// let mut vm = init_vm_inner( -// oracle_tools, -// BlockContextMode::OverrideCurrent(block_context.into()), -// block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// vm.bootloader_state.add_tx_data(encoded_tx.len()); -// vm.state.memory.populate_page( -// BOOTLOADER_HEAP_PAGE as usize, -// get_bootloader_memory_for_encoded_tx( -// encoded_tx, -// 0, -// TxExecutionMode::VerifyExecute, -// 0, -// 0, -// predefined_overhead, -// u32::MAX, -// 0, -// vec![], -// ), -// Timestamp(0), -// ); -// -// let result = vm.execute_next_tx(u32::MAX, false).err(); -// -// assert_eq!(expected_error, result); -// } -// -// fn get_balance(token_id: AccountTreeId, account: &Address, main_storage: StoragePtr<'_>) -> U256 { -// let key = storage_key_for_standard_token_balance(token_id, account); -// h256_to_u256(main_storage.borrow_mut().get_value(&key)) -// } -// -// #[test] -// fn test_dummy_bootloader() { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// let (block_context, block_properties) = create_test_block_params(); -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// let bootloader_code = read_bootloader_test_code("dummy"); -// let bootloader_hash = hash_bytecode(&bootloader_code); -// -// base_system_contracts.bootloader = SystemContractCode { -// code: bytes_to_be_words(bootloader_code), -// hash: bootloader_hash, -// }; -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let VmBlockResult { -// full_result: res, .. 
-// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); -// -// // Dummy bootloader should not panic -// assert!(res.revert_reason.is_none()); -// -// let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); -// -// verify_required_memory( -// &vm.state, -// vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], -// ); -// } -// -// #[test] -// fn test_bootloader_out_of_gas() { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// let (block_context, block_properties) = create_test_block_params(); -// -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// -// let bootloader_code = read_bootloader_test_code("dummy"); -// let bootloader_hash = hash_bytecode(&bootloader_code); -// -// base_system_contracts.bootloader = SystemContractCode { -// code: bytes_to_be_words(bootloader_code), -// hash: bootloader_hash, -// }; -// -// // init vm with only 10 ergs -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// 10, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let res = vm.execute_block_tip(); -// -// assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); -// } -// -// fn verify_required_storage( -// state: &ZkSyncVmState<'_, H>, -// required_values: Vec<(H256, StorageKey)>, -// ) { -// for (required_value, key) in required_values { -// let current_value = state.storage.storage.read_from_storage(&key); -// -// assert_eq!( -// u256_to_h256(current_value), -// required_value, -// "Invalid value at key {key:?}" -// ); -// } -// } -// -// fn verify_required_memory( -// state: &ZkSyncVmState<'_, H>, -// required_values: Vec<(U256, u32, u32)>, -// ) { -// for (required_value, memory_page, cell) in required_values { -// let current_value = state -// .memory -// .read_slot(memory_page as usize, cell as usize) -// .value; -// assert_eq!(current_value, required_value); -// } -// } -// -// #[test] -// fn test_default_aa_interaction() { -// // In this test, we aim to test whether a simple account interaction (without any fee logic) -// // will work. The account will try to deploy a simple contract from integration tests. 
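The two `verify_required_*` helpers in the deleted tests above lost their generic parameter lists in rendering; judging by the surviving `&ZkSyncVmState<'_, H>` arguments, they were declared with an `H: HistoryMode` bound, roughly:

```rust
fn verify_required_storage<H: HistoryMode>(
    state: &ZkSyncVmState<'_, H>,
    required_values: Vec<(H256, StorageKey)>,
) {
    for (required_value, key) in required_values {
        // Read the slot directly from the VM's storage oracle.
        let current_value = state.storage.storage.read_from_storage(&key);
        assert_eq!(
            u256_to_h256(current_value),
            required_value,
            "Invalid value at key {key:?}"
        );
    }
}

fn verify_required_memory<H: HistoryMode>(
    state: &ZkSyncVmState<'_, H>,
    required_values: Vec<(U256, u32, u32)>,
) {
    for (required_value, memory_page, cell) in required_values {
        // Read the (page, cell) slot from VM memory and compare.
        let current_value = state
            .memory
            .read_slot(memory_page as usize, cell as usize)
            .value;
        assert_eq!(current_value, required_value);
    }
}
```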
-// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let operator_address = block_context.context.operator_address; -// let base_fee = block_context.base_fee; -// // We deploy here counter contract, because its logic is trivial -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(20000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_data: TransactionData = tx.clone().into(); -// -// let maximal_fee = tx_data.gas_limit * tx_data.max_fee_per_gas; -// let sender_address = tx_data.from(); -// // set balance -// -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let tx_execution_result = vm -// .execute_next_tx(u32::MAX, false) -// .expect("Bootloader failed while processing transaction"); -// -// assert_eq!( -// tx_execution_result.status, -// TxExecutionStatus::Success, -// "Transaction wasn't successful" -// ); -// -// let VmBlockResult { -// full_result: res, .. -// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// // Should not panic -// assert!( -// res.revert_reason.is_none(), -// "Bootloader was not expected to revert: {:?}", -// res.revert_reason -// ); -// -// // Both deployment and ordinary nonce should be incremented by one. -// let account_nonce_key = get_nonce_key(&sender_address); -// let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; -// -// // The code hash of the deployed contract should be marked as republished. -// let known_codes_key = get_known_code_key(&contract_code_hash); -// -// // The contract should be deployed successfully. 
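The `deployed_address_create(sender_address, U256::zero())` call that follows derives the zkSync CREATE address from the deployer and its deployment nonce. A sketch of the scheme; the `"zksyncCreate"` domain separator and the 96-byte preimage layout are assumptions about the `zksync_types` helper, not taken from this diff:

```rust
use zksync_types::{web3::signing::keccak256, Address, U256};
use zksync_utils::{address_to_h256, u256_to_h256};

/// Illustrative: address = keccak256(keccak256("zksyncCreate") ++ sender ++ nonce)[12..].
fn create_address(sender: Address, deploy_nonce: U256) -> Address {
    let prefix = keccak256(b"zksyncCreate");
    let sender_h256 = address_to_h256(&sender);
    let nonce_h256 = u256_to_h256(deploy_nonce);

    let mut preimage = Vec::with_capacity(96);
    preimage.extend_from_slice(&prefix);
    preimage.extend_from_slice(sender_h256.as_bytes());
    preimage.extend_from_slice(nonce_h256.as_bytes());

    Address::from_slice(&keccak256(&preimage)[12..])
}
```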
-// let deployed_address = deployed_address_create(sender_address, U256::zero()); -// let account_code_key = get_code_key(&deployed_address); -// -// let expected_slots = vec![ -// (u256_to_h256(expected_nonce), account_nonce_key), -// (u256_to_h256(U256::from(1u32)), known_codes_key), -// (contract_code_hash, account_code_key), -// ]; -// -// verify_required_storage(&vm.state, expected_slots); -// -// assert!(!tx_has_failed(&vm.state, 0)); -// -// let expected_fee = -// maximal_fee - U256::from(tx_execution_result.gas_refunded) * U256::from(base_fee); -// let operator_balance = get_balance( -// AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), -// &operator_address, -// vm.state.storage.storage.get_ptr(), -// ); -// -// assert!( -// operator_balance == expected_fee, -// "Operator did not receive his fee" -// ); -// } -// -// fn execute_vm_with_predetermined_refund( -// txs: Vec, -// refunds: Vec, -// compressed_bytecodes: Vec>, -// ) -> VmBlockResult { -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// // set balance -// for tx in txs.iter() { -// let sender_address = tx.initiator_account(); -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// } -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// let codes_for_decommiter = txs -// .iter() -// .flat_map(|tx| { -// tx.execute -// .factory_deps -// .clone() -// .unwrap_or_default() -// .iter() -// .map(|dep| bytecode_to_factory_dep(dep.clone())) -// .collect::)>>() -// }) -// .collect(); -// -// vm.state.decommittment_processor.populate( -// codes_for_decommiter, -// Timestamp(vm.state.local_state.timestamp), -// ); -// -// let memory_with_suggested_refund = get_bootloader_memory( -// txs.into_iter().map(Into::into).collect(), -// refunds, -// compressed_bytecodes, -// TxExecutionMode::VerifyExecute, -// BlockContextMode::NewBlock(block_context, Default::default()), -// ); -// -// vm.state.memory.populate_page( -// BOOTLOADER_HEAP_PAGE as usize, -// memory_with_suggested_refund, -// Timestamp(0), -// ); -// -// vm.execute_till_block_end(BootloaderJobType::TransactionExecution) -// } -// -// #[test] -// fn test_predetermined_refunded_gas() { -// // In this test, we compare the execution of the bootloader with the predefined -// // refunded gas and without them -// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let base_fee = 
block_context.base_fee; -// -// // We deploy here counter contract, because its logic is trivial -// let contract_code = read_test_contract(); -// let published_bytecode = CompressedBytecodeInfo::from_original(contract_code.clone()).unwrap(); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(20000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let sender_address = tx.initiator_account(); -// -// // set balance -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let tx_execution_result = vm -// .execute_next_tx(u32::MAX, false) -// .expect("Bootloader failed while processing transaction"); -// -// assert_eq!( -// tx_execution_result.status, -// TxExecutionStatus::Success, -// "Transaction wasn't successful" -// ); -// -// // If the refund provided by the operator or the final refund are the 0 -// // there is no impact of the operator's refund at all and so this test does not -// // make much sense. -// assert!( -// tx_execution_result.operator_suggested_refund > 0, -// "The operator's refund is 0" -// ); -// assert!( -// tx_execution_result.gas_refunded > 0, -// "The final refund is 0" -// ); -// -// let mut result = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// assert!( -// result.full_result.revert_reason.is_none(), -// "Bootloader was not expected to revert: {:?}", -// result.full_result.revert_reason -// ); -// -// let mut result_with_predetermined_refund = execute_vm_with_predetermined_refund( -// vec![tx], -// vec![tx_execution_result.operator_suggested_refund], -// vec![vec![published_bytecode]], -// ); -// // We need to sort these lists as those are flattened from HashMaps -// result.full_result.used_contract_hashes.sort(); -// result_with_predetermined_refund -// .full_result -// .used_contract_hashes -// .sort(); -// -// assert_eq!( -// result.full_result.events, -// result_with_predetermined_refund.full_result.events -// ); -// assert_eq!( -// result.full_result.l2_to_l1_logs, -// result_with_predetermined_refund.full_result.l2_to_l1_logs -// ); -// assert_eq!( -// result.full_result.storage_log_queries, -// result_with_predetermined_refund -// .full_result -// .storage_log_queries -// ); -// assert_eq!( -// result.full_result.used_contract_hashes, -// result_with_predetermined_refund -// .full_result -// .used_contract_hashes -// ); -// } -// -// #[derive(Debug, Clone)] -// enum TransactionRollbackTestInfo { -// Rejected(Transaction, TxRevertReason), -// Processed(Transaction, bool, TxExecutionStatus), -// } -// -// impl TransactionRollbackTestInfo { -// fn new_rejected(transaction: Transaction, revert_reason: TxRevertReason) -> Self { -// Self::Rejected(transaction, revert_reason) -// } -// -// fn new_processed( -// transaction: Transaction, -// should_be_rollbacked: bool, -// expected_status: TxExecutionStatus, -// 
) -> Self { -// Self::Processed(transaction, should_be_rollbacked, expected_status) -// } -// -// fn get_transaction(&self) -> &Transaction { -// match self { -// TransactionRollbackTestInfo::Rejected(tx, _) => tx, -// TransactionRollbackTestInfo::Processed(tx, _, _) => tx, -// } -// } -// -// fn rejection_reason(&self) -> Option { -// match self { -// TransactionRollbackTestInfo::Rejected(_, revert_reason) => Some(revert_reason.clone()), -// TransactionRollbackTestInfo::Processed(_, _, _) => None, -// } -// } -// -// fn should_rollback(&self) -> bool { -// match self { -// TransactionRollbackTestInfo::Rejected(_, _) => true, -// TransactionRollbackTestInfo::Processed(_, x, _) => *x, -// } -// } -// -// fn expected_status(&self) -> TxExecutionStatus { -// match self { -// TransactionRollbackTestInfo::Rejected(_, _) => { -// panic!("There is no execution status for rejected transaction") -// } -// TransactionRollbackTestInfo::Processed(_, _, status) => *status, -// } -// } -// } -// -// // Accepts the address of the sender as well as the list of pairs of its transactions -// // and whether these transactions should succeed. -// fn execute_vm_with_possible_rollbacks( -// sender_address: Address, -// transactions: Vec, -// block_context: DerivedBlockContext, -// block_properties: BlockProperties, -// ) -> VmExecutionResult { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// // Setting infinite balance for the sender. -// let key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// for test_info in transactions { -// vm.save_current_vm_as_snapshot(); -// let vm_state_before_tx = vm.dump_inner_state(); -// push_transaction_to_bootloader_memory( -// &mut vm, -// test_info.get_transaction(), -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// match vm.execute_next_tx(u32::MAX, false) { -// Err(reason) => { -// assert_eq!(test_info.rejection_reason(), Some(reason)); -// } -// Ok(res) => { -// assert_eq!(test_info.rejection_reason(), None); -// assert_eq!( -// res.status, -// test_info.expected_status(), -// "Transaction status is not correct" -// ); -// } -// }; -// -// if test_info.should_rollback() { -// // Some error has occurred, we should reject the transaction -// vm.rollback_to_latest_snapshot(); -// -// // vm_state_before_tx. -// let state_after_rollback = vm.dump_inner_state(); -// assert_eq!( -// vm_state_before_tx, state_after_rollback, -// "Did not rollback VM state correctly" -// ); -// } -// } -// -// let VmBlockResult { -// full_result: mut result, -// .. -// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); -// // Used contract hashes are retrieved in unordered manner. 
-// // However it must be sorted for the comparisons in tests to work -// result.used_contract_hashes.sort(); -// -// result -// } -// -// // Sets the signature for an L2 transaction and returns the same transaction -// // but this different signature. -// fn change_signature(mut tx: Transaction, signature: Vec) -> Transaction { -// tx.common_data = match tx.common_data { -// ExecuteTransactionCommon::L2(mut data) => { -// data.signature = signature; -// ExecuteTransactionCommon::L2(data) -// } -// _ => unreachable!(), -// }; -// -// tx -// } -// -// #[test] -// fn test_vm_rollbacks() { -// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { -// let (block_context, block_properties) = create_test_block_params(); -// (block_context.into(), block_properties) -// }; -// -// let base_fee = U256::from(block_context.base_fee); -// -// let sender_private_key = H256::random(); -// let contract_code = read_test_contract(); -// -// let tx_nonce_0: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(12000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_nonce_1: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(1), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(12000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let tx_nonce_2: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(2), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(12000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let wrong_signature_length_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 32]); -// let wrong_v_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 65]); -// let wrong_signature_tx = change_signature(tx_nonce_0.clone(), vec![27u8; 65]); -// -// let sender_address = tx_nonce_0.initiator_account(); -// -// let result_without_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// // The nonces are ordered correctly, all the transactions should succeed. 
-// TransactionRollbackTestInfo::new_processed( -// tx_nonce_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_2.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// ], -// block_context, -// block_properties, -// ); -// -// let incorrect_nonce = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Incorrect nonce".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, -// 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// }); -// let reusing_nonce_twice = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Reusing the same nonce twice".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, -// 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, -// 0, 0, 0, -// ], -// }); -// let signature_length_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Signature length is incorrect".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, -// 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, -// 116, 0, 0, 0, -// ], -// }); -// let v_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "v is neither 27 nor 28".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, -// 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// }); -// let signature_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { -// msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), -// data: vec![], -// }); -// -// let result_with_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// TransactionRollbackTestInfo::new_rejected( -// wrong_signature_length_tx, -// signature_length_is_incorrect, -// ), -// TransactionRollbackTestInfo::new_rejected(wrong_v_tx, v_is_incorrect), -// TransactionRollbackTestInfo::new_rejected(wrong_signature_tx, signature_is_incorrect), -// // The correct nonce is 0, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce.clone()), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The correct nonce is 1, this tx will fail -// TransactionRollbackTestInfo::new_rejected( -// tx_nonce_0.clone(), -// reusing_nonce_twice.clone(), -// ), -// // The correct nonce is 1, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_1, -// false, -// TxExecutionStatus::Success, -// ), -// // The correct nonce is 2, this tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_0, reusing_nonce_twice.clone()), -// // This tx will succeed -// TransactionRollbackTestInfo::new_processed( -// tx_nonce_2.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // This tx will fail -// TransactionRollbackTestInfo::new_rejected(tx_nonce_2, reusing_nonce_twice.clone()), -// ], -// block_context, -// block_properties, -// ); -// -// assert_eq!(result_without_rollbacks, result_with_rollbacks); -// -// let loadnext_contract = get_loadnext_contract(); -// -// let loadnext_constructor_data = encode(&[Token::Uint(U256::from(100))]); -// let loadnext_deploy_tx: Transaction = get_deploy_tx( -// sender_private_key, -// Nonce(0), -// &loadnext_contract.bytecode, -// loadnext_contract.factory_deps, -// &loadnext_constructor_data, -// Fee { -// gas_limit: U256::from(70000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// let loadnext_contract_address = -// get_create_zksync_address(loadnext_deploy_tx.initiator_account(), Nonce(0)); -// let deploy_loadnext_tx_info = TransactionRollbackTestInfo::new_processed( -// loadnext_deploy_tx, -// false, -// TxExecutionStatus::Success, -// ); -// -// let get_load_next_tx = |params: LoadnextContractExecutionParams, nonce: Nonce| { -// // Here we test loadnext with various kinds of operations -// let tx: Transaction = mock_loadnext_test_call( -// sender_private_key, -// nonce, -// loadnext_contract_address, -// Fee { -// gas_limit: U256::from(100000000u32), -// max_fee_per_gas: base_fee, -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// params, -// ) -// .into(); -// -// tx -// }; -// -// let loadnext_tx_0 = get_load_next_tx( -// LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// }, -// Nonce(1), -// ); -// let loadnext_tx_1 = get_load_next_tx( -// LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// }, -// Nonce(2), -// ); -// -// let result_without_rollbacks = 
execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// deploy_loadnext_tx_info.clone(), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// ], -// block_context, -// block_properties, -// ); -// -// let result_with_rollbacks = execute_vm_with_possible_rollbacks( -// sender_address, -// vec![ -// deploy_loadnext_tx_info, -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_0.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The nonce has been bumped up, this transaction should now fail -// TransactionRollbackTestInfo::new_rejected(loadnext_tx_0, reusing_nonce_twice.clone()), -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// true, -// TxExecutionStatus::Success, -// ), -// // After the previous tx has been rolled back, this one should succeed -// TransactionRollbackTestInfo::new_processed( -// loadnext_tx_1.clone(), -// false, -// TxExecutionStatus::Success, -// ), -// // The nonce has been bumped up, this transaction should now fail -// TransactionRollbackTestInfo::new_rejected(loadnext_tx_1, reusing_nonce_twice), -// ], -// block_context, -// block_properties, -// ); -// -// assert_eq!(result_without_rollbacks, result_with_rollbacks); -// } -// -// // Inserts the contracts into the test environment, bypassing the -// // deployer system contract. Besides the reference to storage -// // it accepts a `contracts` tuple of information about the contract -// // and whether or not it is an account. 
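A sketch of how the helper defined next is typically called, mirroring the nonce-holder test further below (`read_nonce_holder_tester`, `DeployedContract` and `SecondaryStateStorage` as in the surrounding deleted code):

```rust
fn register_nonce_holder_account(raw_storage: &mut SecondaryStateStorage) -> H160 {
    let account_address = H160::random();
    let account = DeployedContract {
        account_id: AccountTreeId::new(account_address),
        bytecode: read_nonce_holder_tester(),
    };
    // `true` marks the deployed contract as an account,
    // so the is-account flag is written alongside its code hash.
    insert_contracts(raw_storage, vec![(account, true)]);
    account_address
}
```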
-// fn insert_contracts( -// raw_storage: &mut SecondaryStateStorage, -// contracts: Vec<(DeployedContract, bool)>, -// ) { -// let logs: Vec = contracts -// .iter() -// .flat_map(|(contract, is_account)| { -// let mut new_logs = vec![]; -// -// let deployer_code_key = get_code_key(contract.account_id.address()); -// new_logs.push(StorageLog::new_write_log( -// deployer_code_key, -// hash_bytecode(&contract.bytecode), -// )); -// -// if *is_account { -// let is_account_key = get_is_account_key(contract.account_id.address()); -// new_logs.push(StorageLog::new_write_log( -// is_account_key, -// u256_to_h256(1u32.into()), -// )); -// } -// -// new_logs -// }) -// .collect(); -// raw_storage.process_transaction_logs(&logs); -// -// for (contract, _) in contracts { -// raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); -// } -// raw_storage.save(L1BatchNumber(0)); -// } -// -// enum NonceHolderTestMode { -// SetValueUnderNonce, -// IncreaseMinNonceBy5, -// IncreaseMinNonceTooMuch, -// LeaveNonceUnused, -// IncreaseMinNonceBy1, -// SwitchToArbitraryOrdering, -// } -// -// impl From for u8 { -// fn from(mode: NonceHolderTestMode) -> u8 { -// match mode { -// NonceHolderTestMode::SetValueUnderNonce => 0, -// NonceHolderTestMode::IncreaseMinNonceBy5 => 1, -// NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, -// NonceHolderTestMode::LeaveNonceUnused => 3, -// NonceHolderTestMode::IncreaseMinNonceBy1 => 4, -// NonceHolderTestMode::SwitchToArbitraryOrdering => 5, -// } -// } -// } -// -// fn get_nonce_holder_test_tx( -// nonce: U256, -// account_address: Address, -// test_mode: NonceHolderTestMode, -// block_context: &DerivedBlockContext, -// ) -> TransactionData { -// TransactionData { -// tx_type: 113, -// from: account_address, -// to: account_address, -// gas_limit: U256::from(10000000u32), -// pubdata_price_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// max_fee_per_gas: U256::from(block_context.base_fee), -// max_priority_fee_per_gas: U256::zero(), -// nonce, -// // The reserved fields that are unique for different types of transactions. -// // E.g. nonce is currently used in all transaction, but it should not be mandatory -// // in the long run. -// reserved: [U256::zero(); 4], -// data: vec![12], -// signature: vec![test_mode.into()], -// -// ..Default::default() -// } -// } -// -// fn run_vm_with_raw_tx<'a, H: HistoryMode>( -// oracle_tools: &'a mut OracleTools<'a, false, H>, -// block_context: DerivedBlockContext, -// block_properties: &'a BlockProperties, -// tx: TransactionData, -// ) -> (VmExecutionResult, bool) { -// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); -// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); -// let mut vm = init_vm_inner( -// oracle_tools, -// BlockContextMode::OverrideCurrent(block_context), -// block_properties, -// BLOCK_GAS_LIMIT, -// &base_system_contracts, -// TxExecutionMode::VerifyExecute, -// ); -// -// let block_gas_price_per_pubdata = block_context.context.block_gas_price_per_pubdata(); -// -// let overhead = tx.overhead_gas(block_gas_price_per_pubdata as u32); -// push_raw_transaction_to_bootloader_memory( -// &mut vm, -// tx, -// TxExecutionMode::VerifyExecute, -// overhead, -// None, -// ); -// let VmBlockResult { -// full_result: result, -// .. 
-// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); -// -// (result, tx_has_failed(&vm.state, 0)) -// } -// -// #[test] -// fn test_nonce_holder() { -// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { -// let (block_context, block_properties) = create_test_block_params(); -// (block_context.into(), block_properties) -// }; -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// -// let account_address = H160::random(); -// let account = DeployedContract { -// account_id: AccountTreeId::new(account_address), -// bytecode: read_nonce_holder_tester(), -// }; -// -// insert_contracts(&mut raw_storage, vec![(account, true)]); -// -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// // We deploy here counter contract, because its logic is trivial -// -// let key = storage_key_for_eth_balance(&account_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut run_nonce_test = |nonce: U256, -// test_mode: NonceHolderTestMode, -// error_message: Option, -// comment: &'static str| { -// let tx = get_nonce_holder_test_tx(nonce, account_address, test_mode, &block_context); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// let (result, tx_has_failed) = -// run_vm_with_raw_tx(&mut oracle_tools, block_context, &block_properties, tx); -// if let Some(msg) = error_message { -// let expected_error = -// TxRevertReason::ValidationFailed(VmRevertReason::General { msg, data: vec![] }); -// assert_eq!( -// result -// .revert_reason -// .expect("No revert reason") -// .revert_reason -// .to_string(), -// expected_error.to_string(), -// "{}", -// comment -// ); -// } else { -// assert!(!tx_has_failed, "{}", comment); -// } -// }; -// -// // Test 1: trying to set value under non sequential nonce value. 
-// run_nonce_test( -// 1u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// Some("Previous nonce has not been used".to_string()), -// "Allowed to set value under non sequential value", -// ); -// -// // Test 2: increase min nonce by 1 with sequential nonce ordering: -// run_nonce_test( -// 0u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy1, -// None, -// "Failed to increment nonce by 1 for sequential account", -// ); -// -// // Test 3: correctly set value under nonce with sequential nonce ordering: -// run_nonce_test( -// 1u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// None, -// "Failed to set value under nonce sequential value", -// ); -// -// // Test 5: migrate to the arbitrary nonce ordering: -// run_nonce_test( -// 2u32.into(), -// NonceHolderTestMode::SwitchToArbitraryOrdering, -// None, -// "Failed to switch to arbitrary ordering", -// ); -// -// // Test 6: increase min nonce by 5 -// run_nonce_test( -// 6u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// None, -// "Failed to increase min nonce by 5", -// ); -// -// // Test 7: since the nonces in range [6,10] are no longer allowed, the -// // tx with nonce 10 should not be allowed -// run_nonce_test( -// 10u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// Some("Reusing the same nonce twice".to_string()), -// "Allowed to reuse nonce below the minimal one", -// ); -// -// // Test 8: we should be able to use nonce 13 -// run_nonce_test( -// 13u32.into(), -// NonceHolderTestMode::SetValueUnderNonce, -// None, -// "Did not allow to use unused nonce 10", -// ); -// -// // Test 9: we should not be able to reuse nonce 13 -// run_nonce_test( -// 13u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// Some("Reusing the same nonce twice".to_string()), -// "Allowed to reuse the same nonce twice", -// ); -// -// // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 -// run_nonce_test( -// 14u32.into(), -// NonceHolderTestMode::IncreaseMinNonceBy5, -// None, -// "Did not allow to use a bumped nonce", -// ); -// -// // Test 6: Do not allow bumping nonce by too much -// run_nonce_test( -// 16u32.into(), -// NonceHolderTestMode::IncreaseMinNonceTooMuch, -// Some("The value for incrementing the nonce is too high".to_string()), -// "Allowed for incrementing min nonce too much", -// ); -// -// // Test 7: Do not allow not setting a nonce as used -// run_nonce_test( -// 16u32.into(), -// NonceHolderTestMode::LeaveNonceUnused, -// Some("The nonce was not set as used".to_string()), -// "Allowed to leave nonce as unused", -// ); -// } -// -// #[test] -// fn test_l1_tx_execution() { -// // In this test, we try to execute a contract deployment from L1 -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// let (block_context, block_properties) = create_test_block_params(); -// -// // Here instead of marking code hash via the bootloader means, we will -// // using L1->L2 communication, the same it would likely be done during the priority mode. 
-// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); -// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); -// -// let required_l2_to_l1_logs = vec![ -// L2ToL1Log { -// shard_id: 0, -// is_service: false, -// tx_number_in_block: 0, -// sender: SYSTEM_CONTEXT_ADDRESS, -// key: u256_to_h256(U256::from(block_context.block_timestamp)), -// value: Default::default(), -// }, -// L2ToL1Log { -// shard_id: 0, -// is_service: true, -// tx_number_in_block: 0, -// sender: BOOTLOADER_ADDRESS, -// key: l1_deploy_tx_data.canonical_l1_tx_hash(), -// value: u256_to_h256(U256::from(1u32)), -// }, -// ]; -// -// let sender_address = l1_deploy_tx_data.from(); -// -// oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &l1_deploy_tx, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// let res = vm.execute_next_tx(u32::MAX, false).unwrap(); -// -// // The code hash of the deployed contract should be marked as republished. -// let known_codes_key = get_known_code_key(&contract_code_hash); -// -// // The contract should be deployed successfully. -// let deployed_address = deployed_address_create(sender_address, U256::zero()); -// let account_code_key = get_code_key(&deployed_address); -// -// let expected_slots = vec![ -// (u256_to_h256(U256::from(1u32)), known_codes_key), -// (contract_code_hash, account_code_key), -// ]; -// assert!(!tx_has_failed(&vm.state, 0)); -// -// verify_required_storage(&vm.state, expected_slots); -// -// assert_eq!(res.result.logs.l2_to_l1_logs, required_l2_to_l1_logs); -// -// let tx = get_l1_execute_test_contract_tx(deployed_address, true); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let res = StorageWritesDeduplicator::apply_on_empty_state( -// &vm.execute_next_tx(u32::MAX, false) -// .unwrap() -// .result -// .logs -// .storage_logs, -// ); -// assert_eq!(res.initial_storage_writes, 0); -// -// let tx = get_l1_execute_test_contract_tx(deployed_address, false); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// let res = StorageWritesDeduplicator::apply_on_empty_state( -// &vm.execute_next_tx(u32::MAX, false) -// .unwrap() -// .result -// .logs -// .storage_logs, -// ); -// assert_eq!(res.initial_storage_writes, 2); -// -// let repeated_writes = res.repeated_storage_writes; -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// let res = StorageWritesDeduplicator::apply_on_empty_state( -// &vm.execute_next_tx(u32::MAX, false) -// .unwrap() -// .result -// .logs -// .storage_logs, -// ); -// assert_eq!(res.initial_storage_writes, 1); -// // We do the same storage write, so it will be deduplicated -// assert_eq!(res.repeated_storage_writes, repeated_writes); -// -// let mut tx = get_l1_execute_test_contract_tx(deployed_address, false); -// tx.execute.value = U256::from(1); -// match &mut tx.common_data { -// ExecuteTransactionCommon::L1(l1_data) => { -// l1_data.to_mint = 
U256::from(4); -// } -// _ => unreachable!(), -// } -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// let execution_result = vm.execute_next_tx(u32::MAX, false).unwrap(); -// // The method is not payable, so the transaction with non-zero value should fail -// assert_eq!( -// execution_result.status, -// TxExecutionStatus::Failure, -// "The transaction should fail" -// ); -// -// let res = -// StorageWritesDeduplicator::apply_on_empty_state(&execution_result.result.logs.storage_logs); -// -// // There are 2 initial writes here: -// // - totalSupply of ETH token -// // - balance of the refund recipient -// assert_eq!(res.initial_storage_writes, 2); -// } -// -// #[test] -// fn test_invalid_bytecode() { -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let (block_context, block_properties) = create_test_block_params(); -// let block_gas_per_pubdata = block_context.block_gas_price_per_pubdata(); -// -// let test_vm_with_custom_bytecode_hash = -// |bytecode_hash: H256, expected_revert_reason: Option| { -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( -// h256_to_u256(bytecode_hash), -// block_gas_per_pubdata as u32, -// ); -// -// run_vm_with_custom_factory_deps( -// &mut oracle_tools, -// block_context, -// &block_properties, -// encoded_tx, -// predefined_overhead, -// expected_revert_reason, -// ); -// }; -// -// let failed_to_mark_factory_deps = |msg: &str, data: Vec| { -// TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { -// msg: msg.to_string(), -// data, -// }) -// }; -// -// // Here we provide the correctly-formatted bytecode hash of -// // odd length, so it should work. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// None, -// ); -// -// // Here we provide correctly formatted bytecode of even length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Code length in words must be odd", -// vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, -// 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, -// 32, 98, 101, 32, 111, 100, 100, -// ], -// )), -// ); -// -// // Here we provide incorrectly formatted bytecode of odd length, so -// // it should fail. 
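The `H256` vectors in these cases exercise the zkEVM bytecode-hash layout: byte 0 is the version (must be `1`), byte 1 must be zero, and bytes 2..4 are the big-endian bytecode length in 32-byte words, which must be odd. A sketch of that rule, as a paraphrase of the system contract's check rather than its code, with the error strings taken from the expected reverts in this test:

```rust
use zksync_types::H256;

/// Illustrative validation mirroring the test vectors in this section.
fn bytecode_hash_is_well_formed(hash: H256) -> Result<(), &'static str> {
    let bytes = hash.as_bytes();
    // Version byte must be 1 and the second byte must be zero.
    if bytes[0] != 1 || bytes[1] != 0 {
        return Err("Incorrectly formatted bytecodeHash");
    }
    // Bytes 2..4 encode the length in 32-byte words, big-endian.
    let len_in_words = u16::from_be_bytes([bytes[2], bytes[3]]);
    if len_in_words % 2 == 0 {
        return Err("Code length in words must be odd");
    }
    Ok(())
}
```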
-// test_vm_with_custom_bytecode_hash( -// H256([ -// 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Incorrectly formatted bytecodeHash", -// vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, -// 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, -// 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// )), -// ); -// -// // Here we provide incorrectly formatted bytecode of odd length, so -// // it should fail. -// test_vm_with_custom_bytecode_hash( -// H256([ -// 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, -// ]), -// Some(failed_to_mark_factory_deps( -// "Incorrectly formatted bytecodeHash", -// vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, -// 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, -// 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// ], -// )), -// ); -// } -// -// #[test] -// fn test_tracing_of_execution_errors() { -// // In this test, we are checking that the execution errors are transmitted correctly from the bootloader. 
-// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let private_key = H256::random(); -// -// let contract_address = Address::random(); -// let error_contract = DeployedContract { -// account_id: AccountTreeId::new(contract_address), -// bytecode: read_error_contract(), -// }; -// -// let tx = get_error_tx( -// private_key, -// Nonce(0), -// contract_address, -// Fee { -// gas_limit: U256::from(1000000u32), -// max_fee_per_gas: U256::from(10000000000u64), -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ); -// -// insert_contracts(&mut raw_storage, vec![(error_contract, false)]); -// -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let key = storage_key_for_eth_balance(&tx.common_data.initiator_address); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &tx.into(), -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// let mut tracer = TransactionResultTracer::new(usize::MAX, false); -// assert_eq!( -// vm.execute_with_custom_tracer(&mut tracer), -// VmExecutionStopReason::VmFinished, -// "Tracer should never request stop" -// ); -// -// match tracer.revert_reason { -// Some(revert_reason) => { -// let revert_reason = VmRevertReason::try_from(&revert_reason as &[u8]).unwrap(); -// assert_eq!( -// revert_reason, -// VmRevertReason::General { -// msg: "short".to_string(), -// data: vec![ -// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, -// 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0 -// ], -// } -// ) -// } -// _ => panic!( -// "Tracer captured incorrect result {:#?}", -// tracer.revert_reason -// ), -// } -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// let tx = get_error_tx( -// private_key, -// Nonce(1), -// contract_address, -// Fee { -// gas_limit: U256::from(1000000u32), -// max_fee_per_gas: U256::from(10000000000u64), -// max_priority_fee_per_gas: U256::zero(), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ); -// push_transaction_to_bootloader_memory( -// &mut vm, -// &tx.into(), -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// let mut tracer = TransactionResultTracer::new(10, false); -// assert_eq!( -// vm.execute_with_custom_tracer(&mut tracer), -// 
VmExecutionStopReason::TracerRequestedStop, -// ); -// assert!(tracer.is_limit_reached()); -// } -// -// /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -// #[test] -// fn test_tx_gas_limit_offset() { -// let gas_limit = U256::from(999999); -// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let raw_storage = SecondaryStateStorage::new(db); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// H256::random(), -// Nonce(0), -// &contract_code, -// Default::default(), -// Default::default(), -// Fee { -// gas_limit, -// ..Default::default() -// }, -// ) -// .into(); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let gas_limit_from_memory = vm -// .state -// .memory -// .read_slot( -// BOOTLOADER_HEAP_PAGE as usize, -// TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, -// ) -// .value; -// assert_eq!(gas_limit_from_memory, gas_limit); -// } -// -// #[test] -// fn test_is_write_initial_behaviour() { -// // In this test, we check result of `is_write_initial` at different stages. -// -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// -// let base_fee = block_context.base_fee; -// let account_pk = H256::random(); -// let contract_code = read_test_contract(); -// let tx: Transaction = get_deploy_tx( -// account_pk, -// Nonce(0), -// &contract_code, -// vec![], -// &[], -// Fee { -// gas_limit: U256::from(20000000u32), -// max_fee_per_gas: U256::from(base_fee), -// max_priority_fee_per_gas: U256::from(0), -// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), -// }, -// ) -// .into(); -// -// let sender_address = tx.initiator_account(); -// let nonce_key = get_nonce_key(&sender_address); -// -// // Check that the next write to the nonce key will be initial. -// assert!(storage_ptr.is_write_initial(&nonce_key)); -// -// // Set balance to be able to pay fee for txs. 
-// let balance_key = storage_key_for_eth_balance(&sender_address); -// storage_ptr.set_value(&balance_key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// vm.execute_next_tx(u32::MAX, false) -// .expect("Bootloader failed while processing the first transaction"); -// // Check that `is_write_initial` still returns true for the nonce key. -// assert!(storage_ptr.is_write_initial(&nonce_key)); -// } -// -// pub fn get_l1_tx_with_custom_bytecode_hash( -// bytecode_hash: U256, -// block_gas_per_pubdata: u32, -// ) -> (Vec, u32) { -// let tx: TransactionData = get_l1_execute_test_contract_tx(Default::default(), false).into(); -// let predefined_overhead = -// tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash], block_gas_per_pubdata); -// let tx_bytes = tx.abi_encode_with_custom_factory_deps(vec![bytecode_hash]); -// -// (bytes_to_be_words(tx_bytes), predefined_overhead) -// } -// -// const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; -// -// pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { -// let sender = H160::random(); -// get_l1_execute_test_contract_tx_with_sender( -// sender, -// deployed_address, -// with_panic, -// U256::zero(), -// false, -// ) -// } -// -// pub fn get_l1_tx_with_large_output(sender: Address, deployed_address: Address) -> Transaction { -// let test_contract = load_contract( -// "etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json", -// ); -// -// let function = test_contract.function("longReturnData").unwrap(); -// -// let calldata = function -// .encode_input(&[]) -// .expect("failed to encode parameters"); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender, -// gas_limit: U256::from(100000000u32), -// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute: Execute { -// contract_address: deployed_address, -// calldata, -// value: U256::zero(), -// factory_deps: None, -// }, -// received_timestamp_ms: 0, -// } -// } -// -// pub fn get_l1_execute_test_contract_tx_with_sender( -// sender: Address, -// deployed_address: Address, -// with_panic: bool, -// value: U256, -// payable: bool, -// ) -> Transaction { -// let execute = execute_test_contract(deployed_address, with_panic, value, payable); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender, -// gas_limit: U256::from(200_000_000u32), -// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), -// to_mint: value, -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { -// let execute = get_create_execute(code, calldata); -// -// Transaction { -// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { -// sender: H160::random(), -// gas_limit: U256::from(2000000u32), -// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), -// ..Default::default() -// }), -// execute, -// received_timestamp_ms: 0, -// } -// } -// -// fn read_test_contract() -> 
Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -// } -// -// fn read_long_return_data_contract() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json") -// } -// -// fn read_nonce_holder_tester() -> Vec { -// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -// } -// -// fn read_error_contract() -> Vec { -// read_bytecode( -// "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", -// ) -// } -// -// fn execute_test_contract( -// address: Address, -// with_panic: bool, -// value: U256, -// payable: bool, -// ) -> Execute { -// let test_contract = load_contract( -// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", -// ); -// -// let function = if payable { -// test_contract -// .function("incrementWithRevertPayable") -// .unwrap() -// } else { -// test_contract.function("incrementWithRevert").unwrap() -// }; -// -// let calldata = function -// .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) -// .expect("failed to encode parameters"); -// -// Execute { -// contract_address: address, -// calldata, -// value, -// factory_deps: None, -// } -// } -// -// #[test] -// fn test_call_tracer() { -// let sender = H160::random(); -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// -// let (block_context, block_properties) = create_test_block_params(); -// -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); -// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); -// -// let sender_address_counter = l1_deploy_tx_data.from(); -// let mut storage_accessor = StorageView::new(&raw_storage); -// let storage_ptr: &mut dyn Storage = &mut storage_accessor; -// -// let key = storage_key_for_eth_balance(&sender_address_counter); -// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); -// -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let contract_code = read_long_return_data_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let l1_deploy_long_return_data_tx = get_l1_deploy_tx(&contract_code, &[]); -// oracle_tools.decommittment_processor.populate( -// vec![( -// h256_to_u256(contract_code_hash), -// bytes_to_be_words(contract_code), -// )], -// Timestamp(0), -// ); -// -// let tx_data: TransactionData = l1_deploy_long_return_data_tx.clone().into(); -// let sender_long_return_address = tx_data.from(); -// // The contract should be deployed successfully. 
-// let deployed_address_long_return_data = -// deployed_address_create(sender_long_return_address, U256::zero()); -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context.into(), Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &l1_deploy_tx, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// // The contract should be deployed successfully. -// let deployed_address = deployed_address_create(sender_address_counter, U256::zero()); -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// let mut create_call = None; -// // The first MIMIC call is call to value simulator. All calls goes through it. -// // The second MIMIC call is call to Deployer contract. -// // And only third level call is construct call to the newly deployed contract And we call it create_call. -// for call in &calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in &call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in &call.calls { -// if let CallType::Create = call.r#type { -// create_call = Some(call.clone()); -// } -// } -// } -// } -// } -// } -// let expected = Call { -// r#type: CallType::Create, -// to: deployed_address, -// from: sender_address_counter, -// parent_gas: 0, -// gas_used: 0, -// gas: 0, -// value: U256::zero(), -// input: vec![], -// output: vec![ -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 0, 0, 0, 0, -// ], -// error: None, -// revert_reason: None, -// calls: vec![], -// }; -// assert_eq!(create_call.unwrap(), expected); -// -// push_transaction_to_bootloader_memory( -// &mut vm, -// &l1_deploy_long_return_data_tx, -// TxExecutionMode::VerifyExecute, -// None, -// ); -// -// vm.execute_next_tx(u32::MAX, false).unwrap(); -// -// let tx = get_l1_execute_test_contract_tx_with_sender( -// sender, -// deployed_address, -// false, -// U256::from(1u8), -// true, -// ); -// -// let tx_data: TransactionData = tx.clone().into(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// -// // We don't want to compare gas used, because it's not fully deterministic. 
-// let expected = Call { -// r#type: CallType::Call(FarCallOpcode::Mimic), -// to: deployed_address, -// from: tx_data.from(), -// parent_gas: 0, -// gas_used: 0, -// gas: 0, -// value: U256::from(1), -// input: tx_data.data, -// output: vec![ -// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// 0, 0, 1, -// ], -// error: None, -// revert_reason: None, -// calls: vec![], -// }; -// -// // First loop filter out the bootloaders calls and -// // the second loop filters out the calls msg value simulator calls -// for call in calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// assert_eq!(expected, call); -// } -// } -// } -// } -// -// let tx = get_l1_execute_test_contract_tx_with_sender( -// sender, -// deployed_address, -// true, -// U256::from(1u8), -// true, -// ); -// -// let tx_data: TransactionData = tx.clone().into(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// -// let expected = Call { -// r#type: CallType::Call(FarCallOpcode::Mimic), -// to: deployed_address, -// from: tx_data.from(), -// parent_gas: 257030, -// gas_used: 348, -// gas: 253008, -// value: U256::from(1u8), -// input: tx_data.data, -// output: vec![], -// error: None, -// revert_reason: Some("This method always reverts".to_string()), -// calls: vec![], -// }; -// -// for call in calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// assert_eq!(expected, call); -// } -// } -// } -// } -// -// let tx = get_l1_tx_with_large_output(sender, deployed_address_long_return_data); -// -// let tx_data: TransactionData = tx.clone().into(); -// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); -// -// assert_ne!(deployed_address_long_return_data, deployed_address); -// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); -// let calls = res.call_traces; -// for call in calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// for call in call.calls { -// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { -// assert_eq!(call.input, tx_data.data); -// assert_eq!( -// call.revert_reason, -// Some("Unknown revert reason".to_string()) -// ); -// } -// } -// } -// } -// } -// -// #[test] -// fn test_get_used_contracts() { -// // get block context -// let (block_context, block_properties) = create_test_block_params(); -// let block_context: DerivedBlockContext = block_context.into(); -// -// // insert system contracts to avoid vm errors during initialization -// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); -// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); -// let mut raw_storage = SecondaryStateStorage::new(db); -// insert_system_contracts(&mut raw_storage); -// -// // get oracle tools -// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); -// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); -// -// // init vm -// let mut vm = init_vm_inner( -// &mut oracle_tools, -// BlockContextMode::NewBlock(block_context, Default::default()), -// &block_properties, -// BLOCK_GAS_LIMIT, -// &BASE_SYSTEM_CONTRACTS, -// TxExecutionMode::VerifyExecute, -// ); -// 
-// assert!(known_bytecodes_without_aa_code(&vm).is_empty()); -// -// // create and push and execute some not-empty factory deps transaction with success status -// // to check that get_used_contracts() updates -// let contract_code = read_test_contract(); -// let contract_code_hash = hash_bytecode(&contract_code); -// let tx1 = get_l1_deploy_tx(&contract_code, &[]); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx1, TxExecutionMode::VerifyExecute, None); -// -// let res1 = vm.execute_next_tx(u32::MAX, true).unwrap(); -// assert_eq!(res1.status, TxExecutionStatus::Success); -// assert!(vm -// .get_used_contracts() -// .contains(&h256_to_u256(contract_code_hash))); -// -// assert_eq!( -// vm.get_used_contracts() -// .into_iter() -// .collect::>(), -// known_bytecodes_without_aa_code(&vm) -// .keys() -// .cloned() -// .collect::>() -// ); -// -// // create push and execute some non-empty factory deps transaction that fails -// // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) -// -// let mut tx2 = tx1; -// tx2.execute.contract_address = L1_MESSENGER_ADDRESS; -// -// let calldata = vec![1, 2, 3]; -// let big_calldata: Vec = calldata -// .iter() -// .cycle() -// .take(calldata.len() * 1024) -// .cloned() -// .collect(); -// -// tx2.execute.calldata = big_calldata; -// tx2.execute.factory_deps = Some(vec![vec![1; 32]]); -// -// push_transaction_to_bootloader_memory(&mut vm, &tx2, TxExecutionMode::VerifyExecute, None); -// -// let res2 = vm.execute_next_tx(u32::MAX, false).unwrap(); -// -// assert_eq!(res2.status, TxExecutionStatus::Failure); -// -// for factory_dep in tx2.execute.factory_deps.unwrap() { -// let hash = hash_bytecode(&factory_dep); -// let hash_to_u256 = h256_to_u256(hash); -// assert!(known_bytecodes_without_aa_code(&vm) -// .keys() -// .contains(&hash_to_u256)); -// assert!(!vm.get_used_contracts().contains(&hash_to_u256)); -// } -// } -// -// fn known_bytecodes_without_aa_code(vm: &VmInstance) -> HashMap> { -// let mut known_bytecodes_without_aa_code = vm -// .state -// .decommittment_processor -// .known_bytecodes -// .inner() -// .clone(); -// -// known_bytecodes_without_aa_code -// .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) -// .unwrap(); -// -// known_bytecodes_without_aa_code -// } -// ``` diff --git a/core/lib/multivm/src/versions/vm_m6/tests/mod.rs b/core/lib/multivm/src/versions/vm_m6/tests/mod.rs deleted file mode 100644 index 3900135abea..00000000000 --- a/core/lib/multivm/src/versions/vm_m6/tests/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod bootloader; diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index 4cabab82c9c..59bf6bb1d29 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -7,7 +7,7 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -use zksync_types::{Address, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, U256}; +use zksync_types::{Address, StorageLogQueryType, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; use crate::{ @@ -289,3 +289,10 @@ pub(crate) fn calculate_computational_gas_used< 0 }) } + +/// Log query, which handles initial and repeated writes to the storage +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct StorageLogQuery { + pub log_query: LogQuery, + pub log_type: StorageLogQueryType, +} diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs
b/core/lib/multivm/src/versions/vm_m6/vm.rs index 25a922ee510..698041d6cb2 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,5 +1,8 @@ use std::collections::HashSet; +use itertools::Itertools; +use zk_evm_1_3_1::aux_structures::LogQuery; +use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::StoragePtr; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, @@ -17,6 +20,7 @@ use crate::{ L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, + tracers::old_tracers::TracerDispatcher, vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, }; @@ -61,8 +65,7 @@ impl Vm { } impl VmInterface for Vm { - /// Tracers are not supported. So we use `()` as a placeholder - type TracerDispatcher = (); + type TracerDispatcher = TracerDispatcher; fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { let vm_version: VmVersion = system_env.version.into(); @@ -85,26 +88,35 @@ impl VmInterface for Vm { fn inspect( &mut self, - _tracer: Self::TracerDispatcher, + tracer: Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { + if let Some(storage_invocations) = tracer.storage_invocations { + self.vm + .execution_mode + .set_invocation_limit(storage_invocations); + } + match execution_mode { - VmExecutionMode::OneTx => { - match self.system_env.execution_mode { - TxExecutionMode::VerifyExecute => { - // Even that call tracer is supported here, we don't use it now - self.vm.execute_next_tx( - self.system_env.default_validation_computational_gas_limit, - false, - ).glue_into() + VmExecutionMode::OneTx => match self.system_env.execution_mode { + TxExecutionMode::VerifyExecute => { + let enable_call_tracer = tracer.call_tracer.is_some(); + let result = self.vm.execute_next_tx( + self.system_env.default_validation_computational_gas_limit, + enable_call_tracer, + ); + if let (Ok(result), Some(call_tracer)) = (&result, &tracer.call_tracer) { + call_tracer.set(result.call_traces.clone()).unwrap(); } - TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self.vm - .execute_till_block_end( - crate::vm_m6::vm_with_bootloader::BootloaderJobType::TransactionExecution, - ) - .glue_into(), + result.glue_into() } - } + TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self + .vm + .execute_till_block_end( + crate::vm_m6::vm_with_bootloader::BootloaderJobType::TransactionExecution, + ) + .glue_into(), + }, VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } @@ -160,9 +172,35 @@ impl VmInterface for Vm { .cloned() .collect(); + let storage_log_queries = self.vm.get_final_log_queries(); + + // To allow calling the `vm-1.3.3` method, the `vm-1.3.1` `LogQuery` has to be converted + // to the `vm-1.3.3` `LogQuery`. Then, we need to convert it back.
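+ // (The two `LogQuery` types are assumed to be field-for-field identical; they merely + // come from different `zk_evm` crate versions, so each `glue_into` below is taken to be + // a plain struct rebuild rather than a data transformation.)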
+ let deduplicated_logs: Vec<LogQuery> = sort_storage_access_queries( + &storage_log_queries + .iter() + .map(|log| { + GlueInto::<zk_evm_1_3_3::aux_structures::LogQuery>::glue_into(log.log_query) + }) + .collect_vec(), + ) + .1 + .into_iter() + .map(GlueInto::<zk_evm_1_3_1::aux_structures::LogQuery>::glue_into) + .collect(); + CurrentExecutionState { events, - storage_log_queries: self.vm.get_final_log_queries(), + storage_log_queries: self + .vm + .get_final_log_queries() + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduplicated_logs + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes, system_logs: vec![], total_log_queries, @@ -176,13 +214,19 @@ impl VmInterface for Vm { fn inspect_transaction_with_bytecode_compression( &mut self, - _tracer: Self::TracerDispatcher, + tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> ( Result<(), BytecodeCompressionError>, VmExecutionResultAndLogs, ) { + if let Some(storage_invocations) = tracer.storage_invocations { + self.vm + .execution_mode + .set_invocation_limit(storage_invocations); + } + self.last_tx_compressed_bytecodes = vec![]; let bytecodes = if with_compression { let deps = tx.execute.factory_deps.as_deref().unwrap_or_default(); @@ -222,13 +266,17 @@ impl VmInterface for Vm { // Even that call tracer is supported here, we don't use it. let result = match self.system_env.execution_mode { - TxExecutionMode::VerifyExecute => self - .vm - .execute_next_tx( + TxExecutionMode::VerifyExecute => { + let enable_call_tracer = tracer.call_tracer.is_some(); + let result = self.vm.execute_next_tx( self.system_env.default_validation_computational_gas_limit, - false, - ) - .glue_into(), + enable_call_tracer, + ); + if let (Ok(result), Some(call_tracer)) = (&result, &tracer.call_tracer) { + call_tracer.set(result.call_traces.clone()).unwrap(); + } + result.glue_into() + } TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm .execute_till_block_end( @@ -266,6 +314,12 @@ impl VmInterface for Vm { } } + fn has_enough_gas_for_batch_tip(&self) -> bool { + // The batch tip overhead has not been calculated for this VM version and was never used with it.
+ // We return `true` for backwards compatibility. + true + } + fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index 1e792d308f1..68416970ee8 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -13,7 +13,7 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, tx::tx_execution_info::TxExecutionStatus, vm_trace::{Call, VmExecutionTrace, VmTrace}, - L1BatchNumber, StorageLogQuery, VmEvent, H256, U256, + L1BatchNumber, VmEvent, H256, U256, }; use crate::{ @@ -41,7 +41,7 @@ use crate::{ utils::{ calculate_computational_gas_used, collect_log_queries_after_timestamp, collect_storage_log_queries_after_timestamp, dump_memory_page_using_primitive_value, - precompile_calls_count_after_timestamp, + precompile_calls_count_after_timestamp, StorageLogQuery, }, vm_with_bootloader::{ BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, @@ -436,10 +436,7 @@ impl VmInstance { .collect(); ( events, - l1_messages - .into_iter() - .map(|log| L2ToL1Log::from(GlueInto::::glue_into(log))) - .collect(), + l1_messages.into_iter().map(GlueInto::glue_into).collect(), ) } @@ -463,7 +460,7 @@ impl VmInstance { from_timestamp, ); VmExecutionLogs { - storage_logs, + storage_logs: storage_logs.into_iter().map(GlueInto::glue_into).collect(), events, user_l2_to_l1_logs: l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), system_l2_to_l1_logs: vec![], @@ -788,12 +785,8 @@ impl VmInstance { e.into_vm_event(L1BatchNumber(self.block_context.context.block_number)) }) .collect(); - full_result.l2_to_l1_logs = l1_messages - .into_iter() - .map(|log| { - L2ToL1Log::from(GlueInto::::glue_into(log)) - }) - .collect(); + full_result.l2_to_l1_logs = + l1_messages.into_iter().map(GlueInto::glue_into).collect(); full_result.computational_gas_used = block_tip_result.computational_gas_used; VmBlockResult { full_result, diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 988cfaefdf5..3601e480ee4 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -244,6 +244,18 @@ impl TxExecutionMode { } => *missed_storage_invocation_limit, } } + + pub fn set_invocation_limit(&mut self, limit: usize) { + match self { + Self::VerifyExecute => {} + TxExecutionMode::EstimateFee { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit = limit, + TxExecutionMode::EthCall { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit = limit, + } + } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs index bded1c19041..dee06ee6180 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs @@ -6,6 +6,7 @@ use zksync_types::{ }; use crate::{ + glue::GlueInto, interface::types::outputs::VmExecutionLogs, vm_refunds_enhancement::{ old_vm::{events::merge_events, utils::precompile_calls_count_after_timestamp}, @@ -44,7 +45,7 @@ impl Vm { let total_log_queries_count = storage_logs_count + log_queries.len() +
precompile_calls_count; VmExecutionLogs { - storage_logs, + storage_logs: storage_logs.into_iter().map(GlueInto::glue_into).collect(), events, user_l2_to_l1_logs: l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), system_l2_to_l1_logs: vec![], @@ -64,6 +65,9 @@ impl Vm { .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) .collect(); - (events, l1_messages.into_iter().map(Into::into).collect()) + ( + events, + l1_messages.into_iter().map(GlueInto::glue_into).collect(), + ) } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index d64e71c3ff2..49d4b02de8b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -40,7 +40,7 @@ impl Vm { computational_gas_used, total_log_queries: total_log_queries_count, pubdata_published, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs index 6e58b8b3092..40b2d83030a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs @@ -7,17 +7,20 @@ use zk_evm_1_3_3::{ }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, - StorageLogQueryType, BOOTLOADER_ADDRESS, U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQueryType, + BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; -use crate::vm_refunds_enhancement::old_vm::{ - history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, +use crate::vm_refunds_enhancement::{ + old_vm::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, + }, + oracles::OracleWithHistory, }, - oracles::OracleWithHistory, + utils::logs::StorageLogQuery, }; // While the storage does not support different shards, it was decided to write the diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs index 47fe3142aba..b5787a6ec47 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs @@ -1,6 +1,7 @@ use std::fmt::{Debug, Formatter}; use zk_evm_1_3_3::{ + aux_structures::Timestamp, tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, @@ -9,7 +10,6 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::Timestamp; use crate::{ interface::{ diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/logs.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/logs.rs new file mode 100644 index 00000000000..ba1ed871f52 --- /dev/null +++ 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/logs.rs @@ -0,0 +1,9 @@ +use zk_evm_1_3_3::aux_structures::LogQuery; +use zksync_types::StorageLogQueryType; + +/// Log query, which handles initial and repeated writes to the storage +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct StorageLogQuery { + pub log_query: LogQuery, + pub log_type: StorageLogQueryType, +} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/mod.rs index 15ffa92b549..1648736ae43 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/mod.rs @@ -1,5 +1,6 @@ /// Utility functions for the VM. pub mod fee; pub mod l2_blocks; +pub mod logs; pub mod overhead; pub mod transaction_encoding; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index f1554ee1761..2e4df854e5a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,8 +1,10 @@ +use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ + glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, @@ -89,7 +91,7 @@ impl VmInterface for Vm { let l2_to_l1_logs = l1_messages .into_iter() - .map(|log| UserL2ToL1Log(log.into())) + .map(|log| UserL2ToL1Log(log.glue_into())) .collect(); let total_log_queries = self.state.event_sink.get_log_queries() + self @@ -99,15 +101,30 @@ .state .precompiles_processor .precompile_cycles_history .inner() .len() + self.state.storage.get_final_log_queries().len(); + let storage_log_queries = self.state.storage.get_final_log_queries(); + + let deduped_storage_log_queries = + sort_storage_access_queries(storage_log_queries.iter().map(|log| &log.log_query)).1; + CurrentExecutionState { events, - storage_log_queries: self.state.storage.get_final_log_queries(), + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduped_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], total_log_queries, cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs, + deduplicated_events_logs: deduplicated_events_logs + .into_iter() + .map(GlueInto::glue_into) + .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), } } @@ -134,6 +151,12 @@ impl VmInterface for Vm { } } + fn has_enough_gas_for_batch_tip(&self) -> bool { + // The batch tip overhead has not been calculated for this VM version and was never used with it.
+ // We return `true` for backwards compatibility. + true + } + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { self.record_vm_memory_metrics_inner() } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs index 0d407efd041..6c2b801c1d0 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs @@ -6,6 +6,7 @@ use zksync_types::{ }; use crate::{ + glue::GlueInto, interface::types::outputs::VmExecutionLogs, vm_virtual_blocks::{ old_vm::{events::merge_events, utils::precompile_calls_count_after_timestamp}, @@ -44,7 +45,7 @@ impl Vm { let total_log_queries_count = storage_logs_count + log_queries.len() + precompile_calls_count; VmExecutionLogs { - storage_logs, + storage_logs: storage_logs.into_iter().map(GlueInto::glue_into).collect(), events, user_l2_to_l1_logs: l2_to_l1_logs.into_iter().map(UserL2ToL1Log).collect(), system_l2_to_l1_logs: vec![], @@ -64,6 +65,9 @@ impl Vm { .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) .collect(); - (events, l1_messages.into_iter().map(Into::into).collect()) + ( + events, + l1_messages.into_iter().map(GlueInto::glue_into).collect(), + ) } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index 1421a7b35f4..06ef4f90340 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -40,7 +40,7 @@ impl Vm { total_log_queries: total_log_queries_count, // This field will be populated by the `RefundTracer` pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs index 2555f57fc7e..b099930cbed 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs @@ -7,15 +7,18 @@ use zk_evm_1_3_3::{ }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, - StorageLogQueryType, BOOTLOADER_ADDRESS, U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQueryType, + BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, +use crate::vm_virtual_blocks::{ + old_vm::history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, + }, + utils::logs::StorageLogQuery, }; // While the storage does not support different shards, it was decided to write the diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs index f6007214494..1e7780edda2 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs +++
b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs @@ -4,6 +4,7 @@ use std::{ }; use zk_evm_1_3_3::{ + aux_structures::Timestamp, tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, @@ -12,7 +13,6 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::{Opcode, RetOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::Timestamp; use crate::{ interface::{dyn_tracers::vm_1_3_3::DynTracer, tracer::VmExecutionStopReason, VmExecutionMode}, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/logs.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/logs.rs new file mode 100644 index 00000000000..ba1ed871f52 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/logs.rs @@ -0,0 +1,9 @@ +use zk_evm_1_3_3::aux_structures::LogQuery; +use zksync_types::StorageLogQueryType; + +/// Log query, which handles initial and repeated writes to the storage +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct StorageLogQuery { + pub log_query: LogQuery, + pub log_type: StorageLogQueryType, +} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/mod.rs index 15ffa92b549..1648736ae43 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/mod.rs @@ -1,5 +1,6 @@ /// Utility functions for the VM. pub mod fee; pub mod l2_blocks; +pub mod logs; pub mod overhead; pub mod transaction_encoding; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 3bb43669f00..7afbaab076d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,8 +1,10 @@ +use zkevm_test_harness_1_3_3::witness::sort_storage_access::sort_storage_access_queries; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ + glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, @@ -89,7 +91,7 @@ impl VmInterface for Vm { let l2_to_l1_logs = l1_messages .into_iter() - .map(|log| UserL2ToL1Log(log.into())) + .map(|log| UserL2ToL1Log(log.glue_into())) .collect(); let total_log_queries = self.state.event_sink.get_log_queries() + self @@ -99,15 +101,30 @@ .state .precompiles_processor .precompile_cycles_history .inner() .len() + self.state.storage.get_final_log_queries().len(); + let storage_log_queries = self.state.storage.get_final_log_queries(); + + let deduped_storage_log_queries = + sort_storage_access_queries(storage_log_queries.iter().map(|log| &log.log_query)).1; + CurrentExecutionState { events, - storage_log_queries: self.state.storage.get_final_log_queries(), + storage_log_queries: storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + deduplicated_storage_log_queries: deduped_storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], total_log_queries, cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs, + deduplicated_events_logs: deduplicated_events_logs + .into_iter() + .map(GlueInto::glue_into) + .collect(), storage_refunds:
Vec::new(), } } @@ -134,6 +151,12 @@ impl VmInterface for Vm { } } + fn has_enough_gas_for_batch_tip(&self) -> bool { + // The batch tip overhead has not been calculated for this VM version and was never used with it. + // We return `true` for backwards compatibility. + true + } + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { self.record_vm_memory_metrics_inner() } diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 4eaca6f44b0..44b70db07d8 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -116,6 +116,10 @@ impl VmInterface for VmInstance { dispatch_vm!(self.record_vm_memory_metrics()) } + fn has_enough_gas_for_batch_tip(&self) -> bool { + dispatch_vm!(self.has_enough_gas_for_batch_tip()) + } + /// Return the results of execution of all batch fn finish_batch(&mut self) -> FinishedL1Batch { dispatch_vm!(self.finish_batch()) } diff --git a/core/lib/node/src/implementations/task/metadata_calculator.rs b/core/lib/node/src/implementations/task/metadata_calculator.rs index 4bfe18167b4..0a61ad00abb 100644 --- a/core/lib/node/src/implementations/task/metadata_calculator.rs +++ b/core/lib/node/src/implementations/task/metadata_calculator.rs @@ -45,7 +45,7 @@ impl IntoZkSyncTask for MetadataCalculatorTaskBuilder { } let metadata_calculator = - MetadataCalculator::new(self.0, object_store.map(|os| os.0)).await; + MetadataCalculator::new(self.0, object_store.map(|os| os.0)).await?; let healthchecks = node .get_resource_or_default::>() diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index ec42f47c6bf..d09fe10975e 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -13,7 +13,7 @@ categories = ["cryptography"] vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } zksync_config = { path = "../config" } zksync_types = { path = "../types" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } anyhow = "1.0" async-trait = "0.1" diff --git a/core/lib/object_store/src/lib.rs b/core/lib/object_store/src/lib.rs index bf6630ef060..0eddf3a61d5 100644 --- a/core/lib/object_store/src/lib.rs +++ b/core/lib/object_store/src/lib.rs @@ -39,6 +39,6 @@ pub mod _reexports { } pub use self::{ - objects::{AggregationsKey, CircuitKey, ClosedFormInputKey, FriCircuitKey, StoredObject}, + objects::StoredObject, raw::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}, }; diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index e01519fa71d..75c0f5460ad 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -7,8 +7,6 @@ use flate2::{read::GzDecoder, write::GzEncoder, Compression}; use prost::Message; use zksync_protobuf::{decode, ProtoFmt}; use zksync_types::{ - aggregated_operations::L1BatchProofForL1, - proofs::{AggregationRound, PrepareBasicCircuitsJob}, snapshots::{ SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, }, @@ -131,62 +129,6 @@ impl StoredObject for WitnessBlockState { serialize_using_bincode!(); } -impl StoredObject for PrepareBasicCircuitsJob { - const BUCKET: Bucket = Bucket::WitnessInput; - type
Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("merkel_tree_paths_{key}.bin") - } - - serialize_using_bincode!(); -} - -/// Storage key for a [AggregationWrapper`]. -#[derive(Debug, Clone, Copy)] -pub struct AggregationsKey { - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub depth: u16, -} - -/// Storage key for a [ClosedFormInputWrapper`]. -#[derive(Debug, Clone, Copy)] -pub struct ClosedFormInputKey { - pub block_number: L1BatchNumber, - pub circuit_id: u8, -} - -/// Storage key for a [`CircuitWrapper`]. -#[derive(Debug, Clone, Copy)] -pub struct FriCircuitKey { - pub block_number: L1BatchNumber, - pub sequence_number: usize, - pub circuit_id: u8, - pub aggregation_round: AggregationRound, - pub depth: u16, -} - -/// Storage key for a [`ZkSyncCircuit`]. -#[derive(Debug, Clone, Copy)] -pub struct CircuitKey<'a> { - pub block_number: L1BatchNumber, - pub sequence_number: usize, - pub circuit_type: &'a str, - pub aggregation_round: AggregationRound, -} - -impl StoredObject for L1BatchProofForL1 { - const BUCKET: Bucket = Bucket::ProofsFri; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("l1_batch_proof_{key}.bin") - } - - serialize_using_bincode!(); -} - impl dyn ObjectStore + '_ { /// Fetches the value for the given key if it exists. /// diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml new file mode 100644 index 00000000000..a91ca218842 --- /dev/null +++ b/core/lib/protobuf_config/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "zksync_protobuf_config" +version = "0.1.0" +edition = "2021" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +links = "zksync_protobuf_config_proto" + +[dependencies] +serde_json = "1.0" +zksync_basic_types = { path = "../basic_types" } +zksync_config = { path = "../config" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_types = { path = "../types" } + +anyhow = "1.0" +prost = "0.12.1" + +[dev-dependencies] +rand = "0.8" +pretty_assertions = "1.4.0" + +[build-dependencies] +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } + diff --git a/core/lib/protobuf_config/build.rs b/core/lib/protobuf_config/build.rs new file mode 100644 index 00000000000..66afd8fea6d --- /dev/null +++ b/core/lib/protobuf_config/build.rs @@ -0,0 +1,12 @@ +//! Generates rust code from protobufs. 
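+//! +//! For orientation (assumed from the `Config` fields below, not stated in the patch itself): +//! `zksync_protobuf_build` reads the `.proto` definitions under `input_root`, resolves them +//! against the `proto_root` namespace, and generates the corresponding Rust types, which this +//! crate then exposes through its `proto` module.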
+fn main() { + zksync_protobuf_build::Config { + input_root: "src/proto".into(), + proto_root: "zksync/config".into(), + dependencies: vec![], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: true, + } + .generate() + .unwrap(); +} diff --git a/core/lib/protobuf_config/src/alerts.rs b/core/lib/protobuf_config/src/alerts.rs new file mode 100644 index 00000000000..9c314301934 --- /dev/null +++ b/core/lib/protobuf_config/src/alerts.rs @@ -0,0 +1,18 @@ +use zksync_config::configs::AlertsConfig; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::Alerts { + type Type = AlertsConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + sporadic_crypto_errors_substrs: self.sporadic_crypto_errors_substrs.clone(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + sporadic_crypto_errors_substrs: this.sporadic_crypto_errors_substrs.clone(), + } + } +} diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs new file mode 100644 index 00000000000..26173de29ff --- /dev/null +++ b/core/lib/protobuf_config/src/api.rs @@ -0,0 +1,222 @@ +use anyhow::Context as _; +use zksync_config::configs::{api, ApiConfig}; +use zksync_protobuf::required; + +use crate::{ + parse_h256, proto, + repr::{read_required_repr, ProtoRepr}, +}; + +impl ProtoRepr for proto::Api { + type Type = ApiConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + web3_json_rpc: read_required_repr(&self.web3_json_rpc).context("web3_json_rpc")?, + contract_verification: read_required_repr(&self.contract_verification) + .context("contract_verification")?, + prometheus: read_required_repr(&self.prometheus).context("prometheus")?, + healthcheck: read_required_repr(&self.healthcheck).context("healthcheck")?, + merkle_tree: read_required_repr(&self.merkle_tree).context("merkle_tree")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + web3_json_rpc: Some(ProtoRepr::build(&this.web3_json_rpc)), + contract_verification: Some(ProtoRepr::build(&this.contract_verification)), + prometheus: Some(ProtoRepr::build(&this.prometheus)), + healthcheck: Some(ProtoRepr::build(&this.healthcheck)), + merkle_tree: Some(ProtoRepr::build(&this.merkle_tree)), + } + } +} + +impl ProtoRepr for proto::Web3JsonRpc { + type Type = api::Web3JsonRpcConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + http_port: required(&self.http_port) + .and_then(|p| Ok((*p).try_into()?)) + .context("http_port")?, + http_url: required(&self.http_url).context("http_url")?.clone(), + ws_port: required(&self.ws_port) + .and_then(|p| Ok((*p).try_into()?)) + .context("ws_port")?, + ws_url: required(&self.ws_url).context("ws_url")?.clone(), + req_entities_limit: self.req_entities_limit, + filters_limit: self.filters_limit, + subscriptions_limit: self.subscriptions_limit, + pubsub_polling_interval: self.pubsub_polling_interval, + max_nonce_ahead: *required(&self.max_nonce_ahead).context("max_nonce_ahead")?, + gas_price_scale_factor: *required(&self.gas_price_scale_factor) + .context("gas_price_scale_factor")?, + request_timeout: self.request_timeout, + account_pks: self + .account_pks + .as_ref() + .map(|keys| { + keys.keys + .iter() + .enumerate() + .map(|(i, k)| parse_h256(k).context(i)) + .collect::>() + .context("keys") + }) + .transpose() + .context("account_pks")?, + estimate_gas_scale_factor: *required(&self.estimate_gas_scale_factor) + .context("estimate_gas_scale_factor")?, + estimate_gas_acceptable_overestimation: *required( + &self.estimate_gas_acceptable_overestimation, + 
) + .context("estimate_gas_acceptable_overestimation")?, + l1_to_l2_transactions_compatibility_mode: *required( + &self.l1_to_l2_transactions_compatibility_mode, + ) + .context("l1_to_l2_transactions_compatibility_mode")?, + max_tx_size: required(&self.max_tx_size) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_tx_size")?, + vm_execution_cache_misses_limit: self + .vm_execution_cache_misses_limit + .map(|x| x.try_into()) + .transpose() + .context("vm_execution_cache_misses_limit")?, + vm_concurrency_limit: self + .vm_concurrency_limit + .map(|x| x.try_into()) + .transpose() + .context("vm_concurrency_limit")?, + factory_deps_cache_size_mb: self + .factory_deps_cache_size_mb + .map(|x| x.try_into()) + .transpose() + .context("factory_deps_cache_size_mb")?, + initial_writes_cache_size_mb: self + .initial_writes_cache_size_mb + .map(|x| x.try_into()) + .transpose() + .context("initial_writes_cache_size_mb")?, + latest_values_cache_size_mb: self + .latest_values_cache_size_mb + .map(|x| x.try_into()) + .transpose() + .context("latest_values_cache_size_mb")?, + fee_history_limit: self.fee_history_limit, + max_batch_request_size: self + .max_batch_request_size + .map(|x| x.try_into()) + .transpose() + .context("max_batch_request_size")?, + max_response_body_size_mb: self + .max_response_body_size_mb + .map(|x| x.try_into()) + .transpose() + .context("max_response_body_size_mb")?, + websocket_requests_per_minute_limit: self + .websocket_requests_per_minute_limit + .map(|x| x.try_into()) + .transpose() + .context("websocket_requests_per_minute_limit")?, + tree_api_url: self.tree_api_url.clone(), + }) + } + fn build(this: &Self::Type) -> Self { + Self { + http_port: Some(this.http_port.into()), + http_url: Some(this.http_url.clone()), + ws_port: Some(this.ws_port.into()), + ws_url: Some(this.ws_url.clone()), + req_entities_limit: this.req_entities_limit, + filters_limit: this.filters_limit, + subscriptions_limit: this.subscriptions_limit, + pubsub_polling_interval: this.pubsub_polling_interval, + max_nonce_ahead: Some(this.max_nonce_ahead), + gas_price_scale_factor: Some(this.gas_price_scale_factor), + request_timeout: this.request_timeout, + account_pks: this.account_pks.as_ref().map(|keys| proto::PrivateKeys { + keys: keys.iter().map(|k| k.as_bytes().into()).collect(), + }), + estimate_gas_scale_factor: Some(this.estimate_gas_scale_factor), + estimate_gas_acceptable_overestimation: Some( + this.estimate_gas_acceptable_overestimation, + ), + l1_to_l2_transactions_compatibility_mode: Some( + this.l1_to_l2_transactions_compatibility_mode, + ), + max_tx_size: Some(this.max_tx_size.try_into().unwrap()), + vm_execution_cache_misses_limit: this + .vm_execution_cache_misses_limit + .map(|x| x.try_into().unwrap()), + vm_concurrency_limit: this.vm_concurrency_limit.map(|x| x.try_into().unwrap()), + factory_deps_cache_size_mb: this + .factory_deps_cache_size_mb + .map(|x| x.try_into().unwrap()), + initial_writes_cache_size_mb: this + .initial_writes_cache_size_mb + .map(|x| x.try_into().unwrap()), + latest_values_cache_size_mb: this + .latest_values_cache_size_mb + .map(|x| x.try_into().unwrap()), + fee_history_limit: this.fee_history_limit, + max_batch_request_size: this.max_batch_request_size.map(|x| x.try_into().unwrap()), + max_response_body_size_mb: this + .max_response_body_size_mb + .map(|x| x.try_into().unwrap()), + websocket_requests_per_minute_limit: this + .websocket_requests_per_minute_limit + .map(|x| x.into()), + tree_api_url: this.tree_api_url.clone(), + } + } +} + +impl ProtoRepr for
proto::ContractVerificationApi { + type Type = api::ContractVerificationApiConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + port: required(&self.port) + .and_then(|p| Ok((*p).try_into()?)) + .context("port")?, + url: required(&self.url).context("url")?.clone(), + }) + } + fn build(this: &Self::Type) -> Self { + Self { + port: Some(this.port.into()), + url: Some(this.url.clone()), + } + } +} + +impl ProtoRepr for proto::HealthCheck { + type Type = api::HealthCheckConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + port: required(&self.port) + .and_then(|p| Ok((*p).try_into()?)) + .context("port")?, + }) + } + fn build(this: &Self::Type) -> Self { + Self { + port: Some(this.port.into()), + } + } +} + +impl ProtoRepr for proto::MerkleTreeApi { + type Type = api::MerkleTreeApiConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + port: required(&self.port) + .and_then(|p| Ok((*p).try_into()?)) + .context("port")?, + }) + } + fn build(this: &Self::Type) -> Self { + Self { + port: Some(this.port.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs new file mode 100644 index 00000000000..155803ad7eb --- /dev/null +++ b/core/lib/protobuf_config/src/chain.rs @@ -0,0 +1,276 @@ +use anyhow::Context as _; +use zksync_basic_types::network::Network; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{parse_h160, proto, repr::ProtoRepr}; + +impl proto::Network { + fn new(n: &Network) -> Self { + match n { + Network::Mainnet => Self::Mainnet, + Network::Rinkeby => Self::Rinkeby, + Network::Ropsten => Self::Ropsten, + Network::Goerli => Self::Goerli, + Network::Sepolia => Self::Sepolia, + Network::Localhost => Self::Localhost, + Network::Unknown => Self::Unknown, + Network::Test => Self::Test, + } + } + + fn parse(&self) -> Network { + match self { + Self::Mainnet => Network::Mainnet, + Self::Rinkeby => Network::Rinkeby, + Self::Ropsten => Network::Ropsten, + Self::Goerli => Network::Goerli, + Self::Sepolia => Network::Sepolia, + Self::Localhost => Network::Localhost, + Self::Unknown => Network::Unknown, + Self::Test => Network::Test, + } + } +} + +impl proto::FeeModelVersion { + fn new(n: &configs::chain::FeeModelVersion) -> Self { + use configs::chain::FeeModelVersion as From; + match n { + From::V1 => Self::V1, + From::V2 => Self::V2, + } + } + + fn parse(&self) -> configs::chain::FeeModelVersion { + use configs::chain::FeeModelVersion as To; + match self { + Self::V1 => To::V1, + Self::V2 => To::V2, + } + } +} + +impl proto::L1BatchCommitDataGeneratorMode { + fn new(n: &configs::chain::L1BatchCommitDataGeneratorMode) -> Self { + use configs::chain::L1BatchCommitDataGeneratorMode as From; + match n { + From::Rollup => Self::Rollup, + From::Validium => Self::Validium, + } + } + + fn parse(&self) -> configs::chain::L1BatchCommitDataGeneratorMode { + use configs::chain::L1BatchCommitDataGeneratorMode as To; + match self { + Self::Rollup => To::Rollup, + Self::Validium => To::Validium, + } + } +} + +impl ProtoRepr for proto::EthNetwork { + type Type = configs::chain::NetworkConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + network: required(&self.network) + .and_then(|x| Ok(proto::Network::try_from(*x)?)) + .context("network")? + .parse(), + zksync_network: required(&self.zksync_network) + .context("zksync_network")? 
+ .clone(), + zksync_network_id: required(&self.zksync_network_id) + .and_then(|x| (*x).try_into().map_err(anyhow::Error::msg)) + .context("zksync_network_id")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + network: Some(proto::Network::new(&this.network).into()), + zksync_network: Some(this.zksync_network.clone()), + zksync_network_id: Some(this.zksync_network_id.as_u64()), + } + } +} + +impl ProtoRepr for proto::StateKeeper { + type Type = configs::chain::StateKeeperConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + transaction_slots: required(&self.transaction_slots) + .and_then(|x| Ok((*x).try_into()?)) + .context("transaction_slots")?, + block_commit_deadline_ms: *required(&self.block_commit_deadline_ms) + .context("block_commit_deadline_ms")?, + miniblock_commit_deadline_ms: *required(&self.miniblock_commit_deadline_ms) + .context("miniblock_commit_deadline_ms")?, + miniblock_seal_queue_capacity: required(&self.miniblock_seal_queue_capacity) + .and_then(|x| Ok((*x).try_into()?)) + .context("miniblock_seal_queue_capacity")?, + max_single_tx_gas: *required(&self.max_single_tx_gas).context("max_single_tx_gas")?, + max_allowed_l2_tx_gas_limit: *required(&self.max_allowed_l2_tx_gas_limit) + .context("max_allowed_l2_tx_gas_limit")?, + reject_tx_at_geometry_percentage: *required(&self.reject_tx_at_geometry_percentage) + .context("reject_tx_at_geometry_percentage")?, + reject_tx_at_eth_params_percentage: *required(&self.reject_tx_at_eth_params_percentage) + .context("reject_tx_at_eth_params_percentage")?, + reject_tx_at_gas_percentage: *required(&self.reject_tx_at_gas_percentage) + .context("reject_tx_at_gas_percentage")?, + close_block_at_geometry_percentage: *required(&self.close_block_at_geometry_percentage) + .context("close_block_at_geometry_percentage")?, + close_block_at_eth_params_percentage: *required( + &self.close_block_at_eth_params_percentage, + ) + .context("close_block_at_eth_params_percentage")?, + close_block_at_gas_percentage: *required(&self.close_block_at_gas_percentage) + .context("close_block_at_gas_percentage")?, + fee_account_addr: required(&self.fee_account_addr) + .and_then(|a| parse_h160(a)) + .context("fee_account_addr")?, + minimal_l2_gas_price: *required(&self.minimal_l2_gas_price) + .context("minimal_l2_gas_price")?, + compute_overhead_part: *required(&self.compute_overhead_part) + .context("compute_overhead_part")?, + pubdata_overhead_part: *required(&self.pubdata_overhead_part) + .context("pubdata_overhead_part")?, + batch_overhead_l1_gas: *required(&self.batch_overhead_l1_gas) + .context("batch_overhead_l1_gas")?, + max_gas_per_batch: *required(&self.max_gas_per_batch).context("max_gas_per_batch")?, + max_pubdata_per_batch: *required(&self.max_pubdata_per_batch) + .context("max_pubdata_per_batch")?, + fee_model_version: required(&self.fee_model_version) + .and_then(|x| Ok(proto::FeeModelVersion::try_from(*x)?)) + .context("fee_model_version")? 
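+ // The enum arrives on the wire as a raw i32 tag: `try_from` above rejects unknown tags, and `parse()` below maps the proto enum onto the domain enum.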
+ .parse(), + validation_computational_gas_limit: *required(&self.validation_computational_gas_limit) + .context("validation_computational_gas_limit")?, + save_call_traces: *required(&self.save_call_traces).context("save_call_traces")?, + virtual_blocks_interval: *required(&self.virtual_blocks_interval) + .context("virtual_blocks_interval")?, + virtual_blocks_per_miniblock: *required(&self.virtual_blocks_per_miniblock) + .context("virtual_blocks_per_miniblock")?, + upload_witness_inputs_to_gcs: *required(&self.upload_witness_inputs_to_gcs) + .context("upload_witness_inputs_to_gcs")?, + enum_index_migration_chunk_size: self + .enum_index_migration_chunk_size + .map(|x| x.try_into()) + .transpose() + .context("enum_index_migration_chunk_size")?, + l1_batch_commit_data_generator_mode: required(&self.l1_batch_commit_data_generator_mode) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("l1_batch_commit_data_generator_mode")? + .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + transaction_slots: Some(this.transaction_slots.try_into().unwrap()), + block_commit_deadline_ms: Some(this.block_commit_deadline_ms), + miniblock_commit_deadline_ms: Some(this.miniblock_commit_deadline_ms), + miniblock_seal_queue_capacity: Some( + this.miniblock_seal_queue_capacity.try_into().unwrap(), + ), + max_single_tx_gas: Some(this.max_single_tx_gas), + max_allowed_l2_tx_gas_limit: Some(this.max_allowed_l2_tx_gas_limit), + reject_tx_at_geometry_percentage: Some(this.reject_tx_at_geometry_percentage), + reject_tx_at_eth_params_percentage: Some(this.reject_tx_at_eth_params_percentage), + reject_tx_at_gas_percentage: Some(this.reject_tx_at_gas_percentage), + close_block_at_geometry_percentage: Some(this.close_block_at_geometry_percentage), + close_block_at_eth_params_percentage: Some(this.close_block_at_eth_params_percentage), + close_block_at_gas_percentage: Some(this.close_block_at_gas_percentage), + fee_account_addr: Some(this.fee_account_addr.as_bytes().into()), + minimal_l2_gas_price: Some(this.minimal_l2_gas_price), + compute_overhead_part: Some(this.compute_overhead_part), + pubdata_overhead_part: Some(this.pubdata_overhead_part), + batch_overhead_l1_gas: Some(this.batch_overhead_l1_gas), + max_gas_per_batch: Some(this.max_gas_per_batch), + max_pubdata_per_batch: Some(this.max_pubdata_per_batch), + fee_model_version: Some(proto::FeeModelVersion::new(&this.fee_model_version).into()), + validation_computational_gas_limit: Some(this.validation_computational_gas_limit), + save_call_traces: Some(this.save_call_traces), + virtual_blocks_interval: Some(this.virtual_blocks_interval), + virtual_blocks_per_miniblock: Some(this.virtual_blocks_per_miniblock), + upload_witness_inputs_to_gcs: Some(this.upload_witness_inputs_to_gcs), + enum_index_migration_chunk_size: this + .enum_index_migration_chunk_size + .as_ref() + .map(|x| (*x).try_into().unwrap()), + l1_batch_commit_data_generator_mode: Some( + proto::L1BatchCommitDataGeneratorMode::new( + &this.l1_batch_commit_data_generator_mode, + ) + .into(), + ), + } + } +} + +impl ProtoRepr for proto::OperationsManager { + type Type = configs::chain::OperationsManagerConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + delay_interval: *required(&self.delay_interval).context("delay_interval")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + delay_interval: Some(this.delay_interval), + } + } +} + +impl ProtoRepr for proto::Mempool { + type Type = configs::chain::MempoolConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + 
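+ // Every mempool setting is mandatory here: `required` turns a missing field into a descriptive `anyhow` error instead of a silent default.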
Ok(Self::Type { + sync_interval_ms: *required(&self.sync_interval_ms).context("sync_interval_ms")?, + sync_batch_size: required(&self.sync_batch_size) + .and_then(|x| Ok((*x).try_into()?)) + .context("sync_batch_size")?, + capacity: *required(&self.capacity).context("capacity")?, + stuck_tx_timeout: *required(&self.stuck_tx_timeout).context("stuck_tx_timeout")?, + remove_stuck_txs: *required(&self.remove_stuck_txs).context("remove_stuck_txs")?, + delay_interval: *required(&self.delay_interval).context("delay_interval")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + sync_interval_ms: Some(this.sync_interval_ms), + sync_batch_size: Some(this.sync_batch_size.try_into().unwrap()), + capacity: Some(this.capacity), + stuck_tx_timeout: Some(this.stuck_tx_timeout), + remove_stuck_txs: Some(this.remove_stuck_txs), + delay_interval: Some(this.delay_interval), + } + } +} + +impl ProtoRepr for proto::CircuitBreaker { + type Type = configs::chain::CircuitBreakerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + sync_interval_ms: *required(&self.sync_interval_ms).context("sync_interval_ms")?, + http_req_max_retry_number: required(&self.http_req_max_retry_number) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_req_max_retry_number")?, + http_req_retry_interval_sec: required(&self.http_req_retry_interval_sec) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_req_retry_interval_sec")?, + replication_lag_limit_sec: self.replication_lag_limit_sec, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + sync_interval_ms: Some(this.sync_interval_ms), + http_req_max_retry_number: Some(this.http_req_max_retry_number.try_into().unwrap()), + http_req_retry_interval_sec: Some(this.http_req_retry_interval_sec.into()), + replication_lag_limit_sec: this.replication_lag_limit_sec, + } + } +} diff --git a/core/lib/protobuf_config/src/contract_verifier.rs b/core/lib/protobuf_config/src/contract_verifier.rs new file mode 100644 index 00000000000..caa109fdca3 --- /dev/null +++ b/core/lib/protobuf_config/src/contract_verifier.rs @@ -0,0 +1,27 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::ContractVerifier { + type Type = configs::ContractVerifierConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + compilation_timeout: *required(&self.compilation_timeout) + .context("compilation_timeout")?, + polling_interval: self.polling_interval, + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + compilation_timeout: Some(this.compilation_timeout), + polling_interval: this.polling_interval, + prometheus_port: Some(this.prometheus_port.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs new file mode 100644 index 00000000000..6521bd0bf81 --- /dev/null +++ b/core/lib/protobuf_config/src/contracts.rs @@ -0,0 +1,179 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{parse_h160, parse_h256, proto, repr::ProtoRepr}; + +impl proto::ProverAtGenesis { + fn new(x: &configs::contracts::ProverAtGenesis) -> Self { + use configs::contracts::ProverAtGenesis as From; + match x { + From::Fri => Self::Fri, + From::Old => Self::Old, + } + } + + fn parse(&self) -> configs::contracts::ProverAtGenesis { + use 
configs::contracts::ProverAtGenesis as To; + match self { + Self::Fri => To::Fri, + Self::Old => To::Old, + } + } +} + +impl ProtoRepr for proto::Contracts { + type Type = configs::ContractsConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + governance_addr: required(&self.governance_addr) + .and_then(|x| parse_h160(x)) + .context("governance_addr")?, + mailbox_facet_addr: required(&self.mailbox_facet_addr) + .and_then(|x| parse_h160(x)) + .context("mailbox_facet_addr")?, + executor_facet_addr: required(&self.executor_facet_addr) + .and_then(|x| parse_h160(x)) + .context("executor_facet_addr")?, + admin_facet_addr: required(&self.admin_facet_addr) + .and_then(|x| parse_h160(x)) + .context("admin_facet_addr")?, + getters_facet_addr: required(&self.getters_facet_addr) + .and_then(|x| parse_h160(x)) + .context("getters_facet_addr")?, + verifier_addr: required(&self.verifier_addr) + .and_then(|x| parse_h160(x)) + .context("verifier_addr")?, + diamond_init_addr: required(&self.diamond_init_addr) + .and_then(|x| parse_h160(x)) + .context("diamond_init_addr")?, + diamond_upgrade_init_addr: required(&self.diamond_upgrade_init_addr) + .and_then(|x| parse_h160(x)) + .context("diamond_upgrade_init_addr")?, + diamond_proxy_addr: required(&self.diamond_proxy_addr) + .and_then(|x| parse_h160(x)) + .context("diamond_proxy_addr")?, + validator_timelock_addr: required(&self.validator_timelock_addr) + .and_then(|x| parse_h160(x)) + .context("validator_timelock_addr")?, + genesis_tx_hash: required(&self.genesis_tx_hash) + .and_then(|x| parse_h256(x)) + .context("genesis_tx_hash")?, + l1_erc20_bridge_proxy_addr: required(&self.l1_erc20_bridge_proxy_addr) + .and_then(|x| parse_h160(x)) + .context("l1_erc20_bridge_proxy_addr")?, + l1_erc20_bridge_impl_addr: required(&self.l1_erc20_bridge_impl_addr) + .and_then(|x| parse_h160(x)) + .context("l1_erc20_bridge_impl_addr")?, + l2_erc20_bridge_addr: required(&self.l2_erc20_bridge_addr) + .and_then(|x| parse_h160(x)) + .context("l2_erc20_bridge_addr")?, + l1_weth_bridge_proxy_addr: self + .l1_weth_bridge_proxy_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l1_weth_bridge_proxy_addr")?, + l2_weth_bridge_addr: self + .l2_weth_bridge_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_weth_bridge_addr")?, + l1_allow_list_addr: required(&self.l1_allow_list_addr) + .and_then(|x| parse_h160(x)) + .context("l1_allow_list_addr")?, + l2_testnet_paymaster_addr: self + .l2_testnet_paymaster_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_testnet_paymaster_addr")?, + recursion_scheduler_level_vk_hash: required(&self.recursion_scheduler_level_vk_hash) + .and_then(|x| parse_h256(x)) + .context("recursion_scheduler_level_vk_hash")?, + recursion_node_level_vk_hash: required(&self.recursion_node_level_vk_hash) + .and_then(|x| parse_h256(x)) + .context("recursion_node_level_vk_hash")?, + recursion_leaf_level_vk_hash: required(&self.recursion_leaf_level_vk_hash) + .and_then(|x| parse_h256(x)) + .context("recursion_leaf_level_vk_hash")?, + recursion_circuits_set_vks_hash: required(&self.recursion_circuits_set_vks_hash) + .and_then(|x| parse_h256(x)) + .context("recursion_circuits_set_vks_hash")?, + l1_multicall3_addr: required(&self.l1_multicall3_addr) + .and_then(|x| parse_h160(x)) + .context("l1_multicall3_addr")?, + fri_recursion_scheduler_level_vk_hash: required( + &self.fri_recursion_scheduler_level_vk_hash, + ) + .and_then(|x| parse_h256(x)) + .context("fri_recursion_scheduler_level_vk_hash")?, + 
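+ // Like the hashes above, the remaining FRI vk hashes are fixed-size H256 values; `parse_h256` rejects anything that is not exactly 32 bytes.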
fri_recursion_node_level_vk_hash: required(&self.fri_recursion_node_level_vk_hash) + .and_then(|x| parse_h256(x)) + .context("fri_recursion_node_level_vk_hash")?, + fri_recursion_leaf_level_vk_hash: required(&self.fri_recursion_leaf_level_vk_hash) + .and_then(|x| parse_h256(x)) + .context("fri_recursion_leaf_level_vk_hash")?, + prover_at_genesis: required(&self.prover_at_genesis) + .and_then(|x| Ok(proto::ProverAtGenesis::try_from(*x)?)) + .context("prover_at_genesis")? + .parse(), + snark_wrapper_vk_hash: required(&self.snark_wrapper_vk_hash) + .and_then(|x| parse_h256(x)) + .context("snark_wrapper_vk_hash")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + governance_addr: Some(this.governance_addr.as_bytes().into()), + mailbox_facet_addr: Some(this.mailbox_facet_addr.as_bytes().into()), + executor_facet_addr: Some(this.executor_facet_addr.as_bytes().into()), + admin_facet_addr: Some(this.admin_facet_addr.as_bytes().into()), + getters_facet_addr: Some(this.getters_facet_addr.as_bytes().into()), + verifier_addr: Some(this.verifier_addr.as_bytes().into()), + diamond_init_addr: Some(this.diamond_init_addr.as_bytes().into()), + diamond_upgrade_init_addr: Some(this.diamond_upgrade_init_addr.as_bytes().into()), + diamond_proxy_addr: Some(this.diamond_proxy_addr.as_bytes().into()), + validator_timelock_addr: Some(this.validator_timelock_addr.as_bytes().into()), + genesis_tx_hash: Some(this.genesis_tx_hash.as_bytes().into()), + l1_erc20_bridge_proxy_addr: Some(this.l1_erc20_bridge_proxy_addr.as_bytes().into()), + l1_erc20_bridge_impl_addr: Some(this.l1_erc20_bridge_impl_addr.as_bytes().into()), + l2_erc20_bridge_addr: Some(this.l2_erc20_bridge_addr.as_bytes().into()), + l1_weth_bridge_proxy_addr: this + .l1_weth_bridge_proxy_addr + .as_ref() + .map(|x| x.as_bytes().into()), + l2_weth_bridge_addr: this + .l2_weth_bridge_addr + .as_ref() + .map(|x| x.as_bytes().into()), + l1_allow_list_addr: Some(this.l1_allow_list_addr.as_bytes().into()), + l2_testnet_paymaster_addr: this + .l2_testnet_paymaster_addr + .as_ref() + .map(|x| x.as_bytes().into()), + recursion_scheduler_level_vk_hash: Some( + this.recursion_scheduler_level_vk_hash.as_bytes().into(), + ), + recursion_node_level_vk_hash: Some(this.recursion_node_level_vk_hash.as_bytes().into()), + recursion_leaf_level_vk_hash: Some(this.recursion_leaf_level_vk_hash.as_bytes().into()), + recursion_circuits_set_vks_hash: Some( + this.recursion_circuits_set_vks_hash.as_bytes().into(), + ), + l1_multicall3_addr: Some(this.l1_multicall3_addr.as_bytes().into()), + fri_recursion_scheduler_level_vk_hash: Some( + this.fri_recursion_scheduler_level_vk_hash.as_bytes().into(), + ), + fri_recursion_node_level_vk_hash: Some( + this.fri_recursion_node_level_vk_hash.as_bytes().into(), + ), + fri_recursion_leaf_level_vk_hash: Some( + this.fri_recursion_leaf_level_vk_hash.as_bytes().into(), + ), + prover_at_genesis: Some(proto::ProverAtGenesis::new(&this.prover_at_genesis).into()), + snark_wrapper_vk_hash: Some(this.snark_wrapper_vk_hash.as_bytes().into()), + } + } +} diff --git a/core/lib/protobuf_config/src/database.rs b/core/lib/protobuf_config/src/database.rs new file mode 100644 index 00000000000..90e0b2bf42e --- /dev/null +++ b/core/lib/protobuf_config/src/database.rs @@ -0,0 +1,107 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{ + proto, + repr::{read_required_repr, ProtoRepr}, +}; + +impl proto::MerkleTreeMode { + fn new(x: &configs::database::MerkleTreeMode) -> Self { + use 
configs::database::MerkleTreeMode as From; + match x { + From::Full => Self::Full, + From::Lightweight => Self::Lightweight, + } + } + + fn parse(&self) -> configs::database::MerkleTreeMode { + use configs::database::MerkleTreeMode as To; + match self { + Self::Full => To::Full, + Self::Lightweight => To::Lightweight, + } + } +} + +impl ProtoRepr for proto::MerkleTree { + type Type = configs::database::MerkleTreeConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + path: required(&self.path).context("path")?.clone(), + mode: required(&self.mode) + .and_then(|x| Ok(proto::MerkleTreeMode::try_from(*x)?)) + .context("mode")? + .parse(), + multi_get_chunk_size: required(&self.multi_get_chunk_size) + .and_then(|x| Ok((*x).try_into()?)) + .context("multi_get_chunk_size")?, + block_cache_size_mb: required(&self.block_cache_size_mb) + .and_then(|x| Ok((*x).try_into()?)) + .context("block_cache_size_mb")?, + memtable_capacity_mb: required(&self.memtable_capacity_mb) + .and_then(|x| Ok((*x).try_into()?)) + .context("memtable_capacity_mb")?, + stalled_writes_timeout_sec: *required(&self.stalled_writes_timeout_sec) + .context("stalled_writes_timeout_sec")?, + max_l1_batches_per_iter: required(&self.max_l1_batches_per_iter) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_l1_batches_per_iter")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + path: Some(this.path.clone()), + mode: Some(proto::MerkleTreeMode::new(&this.mode).into()), + multi_get_chunk_size: Some(this.multi_get_chunk_size.try_into().unwrap()), + block_cache_size_mb: Some(this.block_cache_size_mb.try_into().unwrap()), + memtable_capacity_mb: Some(this.memtable_capacity_mb.try_into().unwrap()), + stalled_writes_timeout_sec: Some(this.stalled_writes_timeout_sec), + max_l1_batches_per_iter: Some(this.max_l1_batches_per_iter.try_into().unwrap()), + } + } +} + +impl ProtoRepr for proto::Db { + type Type = configs::database::DBConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + state_keeper_db_path: required(&self.state_keeper_db_path) + .context("state_keeper_db_path")? 
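+ // Paths are carried as opaque strings; this layer does not check that they exist or are writable.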
+ .clone(), + merkle_tree: read_required_repr(&self.merkle_tree).context("merkle_tree")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + state_keeper_db_path: Some(this.state_keeper_db_path.clone()), + merkle_tree: Some(ProtoRepr::build(&this.merkle_tree)), + } + } +} + +impl ProtoRepr for proto::Postgres { + type Type = configs::database::PostgresConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + master_url: self.master_url.clone(), + replica_url: self.replica_url.clone(), + prover_url: self.prover_url.clone(), + max_connections: self.max_connections, + statement_timeout_sec: self.statement_timeout_sec, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + master_url: this.master_url.clone(), + replica_url: this.replica_url.clone(), + prover_url: this.prover_url.clone(), + max_connections: this.max_connections, + statement_timeout_sec: this.statement_timeout_sec, + } + } +} diff --git a/core/lib/protobuf_config/src/eth_client.rs b/core/lib/protobuf_config/src/eth_client.rs new file mode 100644 index 00000000000..4965b2e95a6 --- /dev/null +++ b/core/lib/protobuf_config/src/eth_client.rs @@ -0,0 +1,22 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::EthClient { + type Type = configs::ETHClientConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + chain_id: *required(&self.chain_id).context("chain_id")?, + web3_url: required(&self.web3_url).context("web3_url")?.clone(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + chain_id: Some(this.chain_id), + web3_url: Some(this.web3_url.clone()), + } + } +} diff --git a/core/lib/protobuf_config/src/eth_sender.rs b/core/lib/protobuf_config/src/eth_sender.rs new file mode 100644 index 00000000000..36810586ba3 --- /dev/null +++ b/core/lib/protobuf_config/src/eth_sender.rs @@ -0,0 +1,180 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{ + proto, + repr::{read_required_repr, ProtoRepr}, +}; + +impl proto::ProofSendingMode { + fn new(x: &configs::eth_sender::ProofSendingMode) -> Self { + use configs::eth_sender::ProofSendingMode as From; + match x { + From::OnlyRealProofs => Self::OnlyRealProofs, + From::OnlySampledProofs => Self::OnlySampledProofs, + From::SkipEveryProof => Self::SkipEveryProof, + } + } + + fn parse(&self) -> configs::eth_sender::ProofSendingMode { + use configs::eth_sender::ProofSendingMode as To; + match self { + Self::OnlyRealProofs => To::OnlyRealProofs, + Self::OnlySampledProofs => To::OnlySampledProofs, + Self::SkipEveryProof => To::SkipEveryProof, + } + } +} + +impl proto::ProofLoadingMode { + fn new(x: &configs::eth_sender::ProofLoadingMode) -> Self { + use configs::eth_sender::ProofLoadingMode as From; + match x { + From::OldProofFromDb => Self::OldProofFromDb, + From::FriProofFromGcs => Self::FriProofFromGcs, + } + } + + fn parse(&self) -> configs::eth_sender::ProofLoadingMode { + use configs::eth_sender::ProofLoadingMode as To; + match self { + Self::OldProofFromDb => To::OldProofFromDb, + Self::FriProofFromGcs => To::FriProofFromGcs, + } + } +} + +impl ProtoRepr for proto::EthSender { + type Type = configs::eth_sender::ETHSenderConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + sender: read_required_repr(&self.sender).context("sender")?, + gas_adjuster: read_required_repr(&self.gas_adjuster).context("gas_adjuster")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + sender: 
Some(ProtoRepr::build(&this.sender)), + gas_adjuster: Some(ProtoRepr::build(&this.gas_adjuster)), + } + } +} + +impl ProtoRepr for proto::Sender { + type Type = configs::eth_sender::SenderConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + aggregated_proof_sizes: self + .aggregated_proof_sizes + .iter() + .enumerate() + .map(|(i, x)| (*x).try_into().context(i)) + .collect::>() + .context("aggregated_proof_sizes")?, + wait_confirmations: self.wait_confirmations, + tx_poll_period: *required(&self.tx_poll_period).context("tx_poll_period")?, + aggregate_tx_poll_period: *required(&self.aggregate_tx_poll_period) + .context("aggregate_tx_poll_period")?, + max_txs_in_flight: *required(&self.max_txs_in_flight).context("max_txs_in_flight")?, + proof_sending_mode: required(&self.proof_sending_mode) + .and_then(|x| Ok(proto::ProofSendingMode::try_from(*x)?)) + .context("proof_sending_mode")? + .parse(), + max_aggregated_tx_gas: *required(&self.max_aggregated_tx_gas) + .context("max_aggregated_tx_gas")?, + max_eth_tx_data_size: required(&self.max_eth_tx_data_size) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_eth_tx_data_size")?, + max_aggregated_blocks_to_commit: *required(&self.max_aggregated_blocks_to_commit) + .context("max_aggregated_blocks_to_commit")?, + max_aggregated_blocks_to_execute: *required(&self.max_aggregated_blocks_to_execute) + .context("max_aggregated_blocks_to_execute")?, + aggregated_block_commit_deadline: *required(&self.aggregated_block_commit_deadline) + .context("aggregated_block_commit_deadline")?, + aggregated_block_prove_deadline: *required(&self.aggregated_block_prove_deadline) + .context("aggregated_block_prove_deadline")?, + aggregated_block_execute_deadline: *required(&self.aggregated_block_execute_deadline) + .context("aggregated_block_execute_deadline")?, + timestamp_criteria_max_allowed_lag: required(&self.timestamp_criteria_max_allowed_lag) + .and_then(|x| Ok((*x).try_into()?)) + .context("timestamp_criteria_max_allowed_lag")?, + l1_batch_min_age_before_execute_seconds: self.l1_batch_min_age_before_execute_seconds, + max_acceptable_priority_fee_in_gwei: *required( + &self.max_acceptable_priority_fee_in_gwei, + ) + .context("max_acceptable_priority_fee_in_gwei")?, + proof_loading_mode: required(&self.proof_loading_mode) + .and_then(|x| Ok(proto::ProofLoadingMode::try_from(*x)?)) + .context("proof_loading_mode")? 
+ .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + aggregated_proof_sizes: this + .aggregated_proof_sizes + .iter() + .map(|x| (*x).try_into().unwrap()) + .collect(), + wait_confirmations: this.wait_confirmations, + tx_poll_period: Some(this.tx_poll_period), + aggregate_tx_poll_period: Some(this.aggregate_tx_poll_period), + max_txs_in_flight: Some(this.max_txs_in_flight), + proof_sending_mode: Some(proto::ProofSendingMode::new(&this.proof_sending_mode).into()), + max_aggregated_tx_gas: Some(this.max_aggregated_tx_gas), + max_eth_tx_data_size: Some(this.max_eth_tx_data_size.try_into().unwrap()), + max_aggregated_blocks_to_commit: Some(this.max_aggregated_blocks_to_commit), + max_aggregated_blocks_to_execute: Some(this.max_aggregated_blocks_to_execute), + aggregated_block_commit_deadline: Some(this.aggregated_block_commit_deadline), + aggregated_block_prove_deadline: Some(this.aggregated_block_prove_deadline), + aggregated_block_execute_deadline: Some(this.aggregated_block_execute_deadline), + timestamp_criteria_max_allowed_lag: Some( + this.timestamp_criteria_max_allowed_lag.try_into().unwrap(), + ), + l1_batch_min_age_before_execute_seconds: this.l1_batch_min_age_before_execute_seconds, + max_acceptable_priority_fee_in_gwei: Some(this.max_acceptable_priority_fee_in_gwei), + proof_loading_mode: Some(proto::ProofLoadingMode::new(&this.proof_loading_mode).into()), + } + } +} + +impl ProtoRepr for proto::GasAdjuster { + type Type = configs::eth_sender::GasAdjusterConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + default_priority_fee_per_gas: *required(&self.default_priority_fee_per_gas) + .context("default_priority_fee_per_gas")?, + max_base_fee_samples: required(&self.max_base_fee_samples) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_base_fee_samples")?, + pricing_formula_parameter_a: *required(&self.pricing_formula_parameter_a) + .context("pricing_formula_parameter_a")?, + pricing_formula_parameter_b: *required(&self.pricing_formula_parameter_b) + .context("pricing_formula_parameter_b")?, + internal_l1_pricing_multiplier: *required(&self.internal_l1_pricing_multiplier) + .context("internal_l1_pricing_multiplier")?, + internal_enforced_l1_gas_price: self.internal_enforced_l1_gas_price, + poll_period: *required(&self.poll_period).context("poll_period")?, + max_l1_gas_price: self.max_l1_gas_price, + l1_gas_per_pubdata_byte: *required(&self.l1_gas_per_pubdata_byte) + .context("l1_gas_per_pubdata_byte")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + default_priority_fee_per_gas: Some(this.default_priority_fee_per_gas), + max_base_fee_samples: Some(this.max_base_fee_samples.try_into().unwrap()), + pricing_formula_parameter_a: Some(this.pricing_formula_parameter_a), + pricing_formula_parameter_b: Some(this.pricing_formula_parameter_b), + internal_l1_pricing_multiplier: Some(this.internal_l1_pricing_multiplier), + internal_enforced_l1_gas_price: this.internal_enforced_l1_gas_price, + poll_period: Some(this.poll_period), + max_l1_gas_price: this.max_l1_gas_price, + l1_gas_per_pubdata_byte: Some(this.l1_gas_per_pubdata_byte), + } + } +} diff --git a/core/lib/protobuf_config/src/eth_watch.rs b/core/lib/protobuf_config/src/eth_watch.rs new file mode 100644 index 00000000000..42c1d20f522 --- /dev/null +++ b/core/lib/protobuf_config/src/eth_watch.rs @@ -0,0 +1,23 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::EthWatch { + type Type 
= configs::ETHWatchConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + confirmations_for_eth_event: self.confirmations_for_eth_event, + eth_node_poll_interval: *required(&self.eth_node_poll_interval) + .context("eth_node_poll_interval")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + confirmations_for_eth_event: this.confirmations_for_eth_event, + eth_node_poll_interval: Some(this.eth_node_poll_interval), + } + } +} diff --git a/core/lib/protobuf_config/src/fri_proof_compressor.rs b/core/lib/protobuf_config/src/fri_proof_compressor.rs new file mode 100644 index 00000000000..5f1753c9dfa --- /dev/null +++ b/core/lib/protobuf_config/src/fri_proof_compressor.rs @@ -0,0 +1,49 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::FriProofCompressor { + type Type = configs::FriProofCompressorConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + compression_mode: required(&self.compression_mode) + .and_then(|x| Ok((*x).try_into()?)) + .context("compression_mode")?, + prometheus_listener_port: required(&self.prometheus_listener_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_listener_port")?, + prometheus_pushgateway_url: required(&self.prometheus_pushgateway_url) + .context("prometheus_pushgateway_url")? + .clone(), + prometheus_push_interval_ms: self.prometheus_push_interval_ms, + generation_timeout_in_secs: required(&self.generation_timeout_in_secs) + .and_then(|x| Ok((*x).try_into()?)) + .context("generation_timeout_in_secs")?, + max_attempts: *required(&self.max_attempts).context("max_attempts")?, + universal_setup_path: required(&self.universal_setup_path) + .context("universal_setup_path")? + .clone(), + universal_setup_download_url: required(&self.universal_setup_download_url) + .context("universal_setup_download_url")? 
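+ // The universal setup is described twice, by local path and by download URL; both strings are mandatory here.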
+ .clone(), + verify_wrapper_proof: *required(&self.verify_wrapper_proof) + .context("verify_wrapper_proof")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + compression_mode: Some(this.compression_mode.into()), + prometheus_listener_port: Some(this.prometheus_listener_port.into()), + prometheus_pushgateway_url: Some(this.prometheus_pushgateway_url.clone()), + prometheus_push_interval_ms: this.prometheus_push_interval_ms, + generation_timeout_in_secs: Some(this.generation_timeout_in_secs.into()), + max_attempts: Some(this.max_attempts), + universal_setup_path: Some(this.universal_setup_path.clone()), + universal_setup_download_url: Some(this.universal_setup_download_url.clone()), + verify_wrapper_proof: Some(this.verify_wrapper_proof), + } + } +} diff --git a/core/lib/protobuf_config/src/fri_prover.rs b/core/lib/protobuf_config/src/fri_prover.rs new file mode 100644 index 00000000000..bcd2d6a9d99 --- /dev/null +++ b/core/lib/protobuf_config/src/fri_prover.rs @@ -0,0 +1,98 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl proto::SetupLoadMode { + fn new(x: &configs::fri_prover::SetupLoadMode) -> Self { + use configs::fri_prover::SetupLoadMode as From; + match x { + From::FromDisk => Self::FromDisk, + From::FromMemory => Self::FromMemory, + } + } + + fn parse(&self) -> configs::fri_prover::SetupLoadMode { + use configs::fri_prover::SetupLoadMode as To; + match self { + Self::FromDisk => To::FromDisk, + Self::FromMemory => To::FromMemory, + } + } +} + +impl ProtoRepr for proto::FriProver { + type Type = configs::FriProverConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + setup_data_path: required(&self.setup_data_path) + .context("setup_data_path")? + .clone(), + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + max_attempts: *required(&self.max_attempts).context("max_attempts")?, + generation_timeout_in_secs: required(&self.generation_timeout_in_secs) + .and_then(|x| Ok((*x).try_into()?)) + .context("generation_timeout_in_secs")?, + base_layer_circuit_ids_to_be_verified: required( + &self.base_layer_circuit_ids_to_be_verified, + ) + .context("base_layer_circuit_ids_to_be_verified")? + .clone(), + recursive_layer_circuit_ids_to_be_verified: required( + &self.recursive_layer_circuit_ids_to_be_verified, + ) + .context("recursive_layer_circuit_ids_to_be_verified")? + .clone(), + setup_load_mode: required(&self.setup_load_mode) + .and_then(|x| Ok(proto::SetupLoadMode::try_from(*x)?)) + .context("setup_load_mode")? + .parse(), + specialized_group_id: required(&self.specialized_group_id) + .and_then(|x| Ok((*x).try_into()?)) + .context("specialized_group_id")?, + witness_vector_generator_thread_count: self + .witness_vector_generator_thread_count + .map(|x| x.try_into()) + .transpose() + .context("witness_vector_generator_thread_count")?, + queue_capacity: required(&self.queue_capacity) + .and_then(|x| Ok((*x).try_into()?)) + .context("queue_capacity")?, + witness_vector_receiver_port: required(&self.witness_vector_receiver_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("witness_vector_receiver_port")?, + zone_read_url: required(&self.zone_read_url) + .context("zone_read_url")? 
+ .clone(), + shall_save_to_public_bucket: *required(&self.shall_save_to_public_bucket) + .context("shall_save_to_public_bucket")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + setup_data_path: Some(this.setup_data_path.clone()), + prometheus_port: Some(this.prometheus_port.into()), + max_attempts: Some(this.max_attempts), + generation_timeout_in_secs: Some(this.generation_timeout_in_secs.into()), + base_layer_circuit_ids_to_be_verified: Some( + this.base_layer_circuit_ids_to_be_verified.clone(), + ), + recursive_layer_circuit_ids_to_be_verified: Some( + this.recursive_layer_circuit_ids_to_be_verified.clone(), + ), + setup_load_mode: Some(proto::SetupLoadMode::new(&this.setup_load_mode).into()), + specialized_group_id: Some(this.specialized_group_id.into()), + witness_vector_generator_thread_count: this + .witness_vector_generator_thread_count + .map(|x| x.try_into().unwrap()), + queue_capacity: Some(this.queue_capacity.try_into().unwrap()), + witness_vector_receiver_port: Some(this.witness_vector_receiver_port.into()), + zone_read_url: Some(this.zone_read_url.clone()), + shall_save_to_public_bucket: Some(this.shall_save_to_public_bucket), + } + } +} diff --git a/core/lib/protobuf_config/src/fri_prover_gateway.rs b/core/lib/protobuf_config/src/fri_prover_gateway.rs new file mode 100644 index 00000000000..5d791635762 --- /dev/null +++ b/core/lib/protobuf_config/src/fri_prover_gateway.rs @@ -0,0 +1,34 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::FriProverGateway { + type Type = configs::FriProverGatewayConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + api_url: required(&self.api_url).context("api_url")?.clone(), + api_poll_duration_secs: required(&self.api_poll_duration_secs) + .and_then(|x| Ok((*x).try_into()?)) + .context("api_poll_duration_secs")?, + prometheus_listener_port: required(&self.prometheus_listener_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_listener_port")?, + prometheus_pushgateway_url: required(&self.prometheus_pushgateway_url) + .context("prometheus_pushgateway_url")? 
+ .clone(), + prometheus_push_interval_ms: self.prometheus_push_interval_ms, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + api_url: Some(this.api_url.clone()), + api_poll_duration_secs: Some(this.api_poll_duration_secs.into()), + prometheus_listener_port: Some(this.prometheus_listener_port.into()), + prometheus_pushgateway_url: Some(this.prometheus_pushgateway_url.clone()), + prometheus_push_interval_ms: this.prometheus_push_interval_ms, + } + } +} diff --git a/core/lib/protobuf_config/src/fri_prover_group.rs b/core/lib/protobuf_config/src/fri_prover_group.rs new file mode 100644 index 00000000000..9c845465761 --- /dev/null +++ b/core/lib/protobuf_config/src/fri_prover_group.rs @@ -0,0 +1,81 @@ +use std::collections::HashSet; + +use anyhow::Context as _; +use zksync_basic_types::basic_fri_types::CircuitIdRoundTuple; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::CircuitIdRoundTuple { + type Type = CircuitIdRoundTuple; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + circuit_id: required(&self.circuit_id) + .and_then(|x| Ok((*x).try_into()?)) + .context("circuit_id")?, + aggregation_round: required(&self.aggregation_round) + .and_then(|x| Ok((*x).try_into()?)) + .context("aggregation_round")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + circuit_id: Some(this.circuit_id.into()), + aggregation_round: Some(this.aggregation_round.into()), + } + } +} + +fn read_vec(v: &[proto::CircuitIdRoundTuple]) -> anyhow::Result<HashSet<CircuitIdRoundTuple>> { + v.iter() + .enumerate() + .map(|(i, x)| x.read().context(i)) + .collect() +} + +fn build_vec(v: &HashSet<CircuitIdRoundTuple>) -> Vec<proto::CircuitIdRoundTuple> { + let mut v: Vec<_> = v.iter().cloned().collect(); + v.sort(); + v.iter().map(ProtoRepr::build).collect() +} + +impl ProtoRepr for proto::FriProverGroup { + type Type = configs::fri_prover_group::FriProverGroupConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + group_0: read_vec(&self.group_0).context("group_0")?, + group_1: read_vec(&self.group_1).context("group_1")?, + group_2: read_vec(&self.group_2).context("group_2")?, + group_3: read_vec(&self.group_3).context("group_3")?, + group_4: read_vec(&self.group_4).context("group_4")?, + group_5: read_vec(&self.group_5).context("group_5")?, + group_6: read_vec(&self.group_6).context("group_6")?, + group_7: read_vec(&self.group_7).context("group_7")?, + group_8: read_vec(&self.group_8).context("group_8")?, + group_9: read_vec(&self.group_9).context("group_9")?, + group_10: read_vec(&self.group_10).context("group_10")?, + group_11: read_vec(&self.group_11).context("group_11")?, + group_12: read_vec(&self.group_12).context("group_12")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + group_0: build_vec(&this.group_0), + group_1: build_vec(&this.group_1), + group_2: build_vec(&this.group_2), + group_3: build_vec(&this.group_3), + group_4: build_vec(&this.group_4), + group_5: build_vec(&this.group_5), + group_6: build_vec(&this.group_6), + group_7: build_vec(&this.group_7), + group_8: build_vec(&this.group_8), + group_9: build_vec(&this.group_9), + group_10: build_vec(&this.group_10), + group_11: build_vec(&this.group_11), + group_12: build_vec(&this.group_12), + } + } +} diff --git a/core/lib/protobuf_config/src/fri_witness_generator.rs b/core/lib/protobuf_config/src/fri_witness_generator.rs new file mode 100644 index 00000000000..fef20dcc389 --- /dev/null +++ b/core/lib/protobuf_config/src/fri_witness_generator.rs @@ -0,0 +1,39 @@ +use anyhow::Context as _; +use 
zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::FriWitnessGenerator { + type Type = configs::FriWitnessGeneratorConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + generation_timeout_in_secs: required(&self.generation_timeout_in_secs) + .and_then(|x| Ok((*x).try_into()?)) + .context("generation_timeout_in_secs")?, + max_attempts: *required(&self.max_attempts).context("max_attempts")?, + blocks_proving_percentage: self + .blocks_proving_percentage + .map(|x| x.try_into()) + .transpose() + .context("blocks_proving_percentage")?, + dump_arguments_for_blocks: self.dump_arguments_for_blocks.clone(), + last_l1_batch_to_process: self.last_l1_batch_to_process, + force_process_block: self.force_process_block, + shall_save_to_public_bucket: *required(&self.shall_save_to_public_bucket) + .context("shall_save_to_public_bucket")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + generation_timeout_in_secs: Some(this.generation_timeout_in_secs.into()), + max_attempts: Some(this.max_attempts), + blocks_proving_percentage: this.blocks_proving_percentage.map(|x| x.into()), + dump_arguments_for_blocks: this.dump_arguments_for_blocks.clone(), + last_l1_batch_to_process: this.last_l1_batch_to_process, + force_process_block: this.force_process_block, + shall_save_to_public_bucket: Some(this.shall_save_to_public_bucket), + } + } +} diff --git a/core/lib/protobuf_config/src/fri_witness_vector_generator.rs b/core/lib/protobuf_config/src/fri_witness_vector_generator.rs new file mode 100644 index 00000000000..9c2b432a80a --- /dev/null +++ b/core/lib/protobuf_config/src/fri_witness_vector_generator.rs @@ -0,0 +1,56 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::FriWitnessVectorGenerator { + type Type = configs::FriWitnessVectorGeneratorConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + max_prover_reservation_duration_in_secs: required( + &self.max_prover_reservation_duration_in_secs, + ) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_prover_reservation_duration_in_secs")?, + prover_instance_wait_timeout_in_secs: required( + &self.prover_instance_wait_timeout_in_secs, + ) + .and_then(|x| Ok((*x).try_into()?)) + .context("prover_instance_wait_timeout_in_secs")?, + prover_instance_poll_time_in_milli_secs: required( + &self.prover_instance_poll_time_in_milli_secs, + ) + .and_then(|x| Ok((*x).try_into()?)) + .context("prover_instance_poll_time_in_milli_secs")?, + prometheus_listener_port: required(&self.prometheus_listener_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_listener_port")?, + prometheus_pushgateway_url: required(&self.prometheus_pushgateway_url) + .context("prometheus_pushgateway_url")? 
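+ // Note the mixed units: the reservation and wait timeouts above are in seconds, while the poll time is in milliseconds, exactly as the field names spell out.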
+ .clone(), + prometheus_push_interval_ms: self.prometheus_push_interval_ms, + specialized_group_id: required(&self.specialized_group_id) + .and_then(|x| Ok((*x).try_into()?)) + .context("specialized_group_id")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + max_prover_reservation_duration_in_secs: Some( + this.max_prover_reservation_duration_in_secs.into(), + ), + prover_instance_wait_timeout_in_secs: Some( + this.prover_instance_wait_timeout_in_secs.into(), + ), + prover_instance_poll_time_in_milli_secs: Some( + this.prover_instance_poll_time_in_milli_secs.into(), + ), + prometheus_listener_port: Some(this.prometheus_listener_port.into()), + prometheus_pushgateway_url: Some(this.prometheus_pushgateway_url.clone()), + prometheus_push_interval_ms: this.prometheus_push_interval_ms, + specialized_group_id: Some(this.specialized_group_id.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/house_keeper.rs b/core/lib/protobuf_config/src/house_keeper.rs new file mode 100644 index 00000000000..00843db3a7c --- /dev/null +++ b/core/lib/protobuf_config/src/house_keeper.rs @@ -0,0 +1,87 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::HouseKeeper { + type Type = configs::house_keeper::HouseKeeperConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + l1_batch_metrics_reporting_interval_ms: *required( + &self.l1_batch_metrics_reporting_interval_ms, + ) + .context("l1_batch_metrics_reporting_interval_ms")?, + gpu_prover_queue_reporting_interval_ms: *required( + &self.gpu_prover_queue_reporting_interval_ms, + ) + .context("gpu_prover_queue_reporting_interval_ms")?, + prover_job_retrying_interval_ms: *required(&self.prover_job_retrying_interval_ms) + .context("prover_job_retrying_interval_ms")?, + prover_stats_reporting_interval_ms: *required(&self.prover_stats_reporting_interval_ms) + .context("prover_stats_reporting_interval_ms")?, + witness_job_moving_interval_ms: *required(&self.witness_job_moving_interval_ms) + .context("witness_job_moving_interval_ms")?, + witness_generator_stats_reporting_interval_ms: *required( + &self.witness_generator_stats_reporting_interval_ms, + ) + .context("witness_generator_stats_reporting_interval_ms")?, + fri_witness_job_moving_interval_ms: *required(&self.fri_witness_job_moving_interval_ms) + .context("fri_witness_job_moving_interval_ms")?, + fri_prover_job_retrying_interval_ms: *required( + &self.fri_prover_job_retrying_interval_ms, + ) + .context("fri_prover_job_retrying_interval_ms")?, + fri_witness_generator_job_retrying_interval_ms: *required( + &self.fri_witness_generator_job_retrying_interval_ms, + ) + .context("fri_witness_generator_job_retrying_interval_ms")?, + prover_db_pool_size: *required(&self.prover_db_pool_size) + .context("prover_db_pool_size")?, + fri_prover_stats_reporting_interval_ms: *required( + &self.fri_prover_stats_reporting_interval_ms, + ) + .context("fri_prover_stats_reporting_interval_ms")?, + fri_proof_compressor_job_retrying_interval_ms: *required( + &self.fri_proof_compressor_job_retrying_interval_ms, + ) + .context("fri_proof_compressor_job_retrying_interval_ms")?, + fri_proof_compressor_stats_reporting_interval_ms: *required( + &self.fri_proof_compressor_stats_reporting_interval_ms, + ) + .context("fri_proof_compressor_stats_reporting_interval_ms")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + l1_batch_metrics_reporting_interval_ms: Some( + 
this.l1_batch_metrics_reporting_interval_ms, + ), + gpu_prover_queue_reporting_interval_ms: Some( + this.gpu_prover_queue_reporting_interval_ms, + ), + prover_job_retrying_interval_ms: Some(this.prover_job_retrying_interval_ms), + prover_stats_reporting_interval_ms: Some(this.prover_stats_reporting_interval_ms), + witness_job_moving_interval_ms: Some(this.witness_job_moving_interval_ms), + witness_generator_stats_reporting_interval_ms: Some( + this.witness_generator_stats_reporting_interval_ms, + ), + fri_witness_job_moving_interval_ms: Some(this.fri_witness_job_moving_interval_ms), + fri_prover_job_retrying_interval_ms: Some(this.fri_prover_job_retrying_interval_ms), + fri_witness_generator_job_retrying_interval_ms: Some( + this.fri_witness_generator_job_retrying_interval_ms, + ), + prover_db_pool_size: Some(this.prover_db_pool_size), + fri_prover_stats_reporting_interval_ms: Some( + this.fri_prover_stats_reporting_interval_ms, + ), + fri_proof_compressor_job_retrying_interval_ms: Some( + this.fri_proof_compressor_job_retrying_interval_ms, + ), + fri_proof_compressor_stats_reporting_interval_ms: Some( + this.fri_proof_compressor_stats_reporting_interval_ms, + ), + } + } +} diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs new file mode 100644 index 00000000000..164fbfa7c55 --- /dev/null +++ b/core/lib/protobuf_config/src/lib.rs @@ -0,0 +1,43 @@ +//! Defined protobuf mapping for the config files. +//! It allows to encode the configs using: +//! * protobuf binary format +//! * protobuf text format +//! * protobuf json format + +mod alerts; +mod api; +mod chain; +mod contract_verifier; +mod contracts; +mod database; +mod eth_client; +mod eth_sender; +mod eth_watch; +mod fri_proof_compressor; +mod fri_prover; +mod fri_prover_gateway; +mod fri_prover_group; +mod fri_witness_generator; +mod fri_witness_vector_generator; +mod house_keeper; +mod object_store; +mod proof_data_handler; +mod snapshots_creator; +mod witness_generator; + +pub mod proto; +mod repr; +#[cfg(test)] +mod tests; +mod utils; + +use anyhow::Context as _; +use zksync_types::{H160, H256}; + +fn parse_h256(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) +} + +fn parse_h160(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) +} diff --git a/core/lib/protobuf_config/src/object_store.rs b/core/lib/protobuf_config/src/object_store.rs new file mode 100644 index 00000000000..b845007caa6 --- /dev/null +++ b/core/lib/protobuf_config/src/object_store.rs @@ -0,0 +1,60 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl proto::ObjectStoreMode { + fn new(x: &configs::object_store::ObjectStoreMode) -> Self { + type From = configs::object_store::ObjectStoreMode; + match x { + From::GCS => Self::Gcs, + From::GCSWithCredentialFile => Self::GcsWithCredentialFile, + From::FileBacked => Self::FileBacked, + From::GCSAnonymousReadOnly => Self::GcsAnonymousReadOnly, + } + } + fn parse(&self) -> configs::object_store::ObjectStoreMode { + type To = configs::object_store::ObjectStoreMode; + match self { + Self::Gcs => To::GCS, + Self::GcsWithCredentialFile => To::GCSWithCredentialFile, + Self::FileBacked => To::FileBacked, + Self::GcsAnonymousReadOnly => To::GCSAnonymousReadOnly, + } + } +} + +impl ProtoRepr for proto::ObjectStore { + type Type = configs::ObjectStoreConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + 
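+ // Every object-store setting is required regardless of `mode`; a file-backed store still has to supply the GCS-related values.
+ // A hypothetical round-trip through this impl (`raw` is an illustrative binding, not part of this code):
+ //     let cfg: configs::ObjectStoreConfig = raw.read()?;  // decode + validate
+ //     let raw_again = proto::ObjectStore::build(&cfg);    // re-encode
+ // `read` and `build` are intended to be mutually inverse (see the `tests` module).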
bucket_base_url: required(&self.bucket_base_url) + .context("bucket_base_url")? + .clone(), + mode: required(&self.mode) + .and_then(|x| Ok(proto::ObjectStoreMode::try_from(*x)?)) + .context("mode")? + .parse(), + file_backed_base_path: required(&self.file_backed_base_path) + .context("file_backed_base_path")? + .clone(), + gcs_credential_file_path: required(&self.gcs_credential_file_path) + .context("gcs_credential_file_path")? + .clone(), + max_retries: required(&self.max_retries) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_retries")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + bucket_base_url: Some(this.bucket_base_url.clone()), + mode: Some(proto::ObjectStoreMode::new(&this.mode).into()), + file_backed_base_path: Some(this.file_backed_base_path.clone()), + gcs_credential_file_path: Some(this.gcs_credential_file_path.clone()), + max_retries: Some(this.max_retries.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs new file mode 100644 index 00000000000..38712eccf4f --- /dev/null +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -0,0 +1,54 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl proto::ProtocolVersionLoadingMode { + fn new(x: &configs::proof_data_handler::ProtocolVersionLoadingMode) -> Self { + type From = configs::proof_data_handler::ProtocolVersionLoadingMode; + match x { + From::FromDb => Self::FromDb, + From::FromEnvVar => Self::FromEnvVar, + } + } + fn parse(&self) -> configs::proof_data_handler::ProtocolVersionLoadingMode { + type To = configs::proof_data_handler::ProtocolVersionLoadingMode; + match self { + Self::FromDb => To::FromDb, + Self::FromEnvVar => To::FromEnvVar, + } + } +} + +impl ProtoRepr for proto::ProofDataHandler { + type Type = configs::ProofDataHandlerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, + proof_generation_timeout_in_secs: required(&self.proof_generation_timeout_in_secs) + .and_then(|x| Ok((*x).try_into()?)) + .context("proof_generation_timeout_in_secs")?, + protocol_version_loading_mode: required(&self.protocol_version_loading_mode) + .and_then(|x| Ok(proto::ProtocolVersionLoadingMode::try_from(*x)?)) + .context("protocol_version_loading_mode")? 
+ .parse(), + fri_protocol_version_id: required(&self.fri_protocol_version_id) + .and_then(|x| Ok((*x).try_into()?)) + .context("fri_protocol_version_id")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + http_port: Some(this.http_port.into()), + proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), + protocol_version_loading_mode: Some( + proto::ProtocolVersionLoadingMode::new(&this.protocol_version_loading_mode).into(), + ), + fri_protocol_version_id: Some(this.fri_protocol_version_id.into()), + } + } +} diff --git a/core/lib/protobuf_config/src/proto/alerts.proto b/core/lib/protobuf_config/src/proto/alerts.proto new file mode 100644 index 00000000000..6d9905b79f7 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/alerts.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package zksync.config; + +message Alerts { + repeated string sporadic_crypto_errors_substrs = 1; +} diff --git a/core/lib/protobuf_config/src/proto/api.proto b/core/lib/protobuf_config/src/proto/api.proto new file mode 100644 index 00000000000..ce13e45d365 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/api.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package zksync.config; + +import "zksync/config/utils.proto"; + +message PrivateKeys { + repeated bytes keys = 1; // H256 +} + +message Web3JsonRpc { + optional uint32 http_port = 1; // required; u16 + optional string http_url = 2; // required + optional uint32 ws_port = 3; // required; u16 + optional string ws_url = 4; // required + optional uint32 req_entities_limit = 5; // optional + optional uint32 filters_limit = 6; // optional + optional uint32 subscriptions_limit = 7; // optional + optional uint64 pubsub_polling_interval = 8; // optional + optional uint32 max_nonce_ahead = 9; // required + optional double gas_price_scale_factor = 10; // required + optional uint64 request_timeout = 11; // seconds + optional PrivateKeys account_pks = 12; // optional + optional double estimate_gas_scale_factor = 13; // required + optional uint32 estimate_gas_acceptable_overestimation = 14; // required + optional bool l1_to_l2_transactions_compatibility_mode = 15; // required + optional uint64 max_tx_size = 16; // required; B + optional uint64 vm_execution_cache_misses_limit = 17; // optional + optional uint64 vm_concurrency_limit = 18; // optional + optional uint64 factory_deps_cache_size_mb = 19; // optional; MB + optional uint64 initial_writes_cache_size_mb = 20; // optional; MB + optional uint64 latest_values_cache_size_mb = 21; // optional; MB + optional uint64 fee_history_limit = 22; // optional + optional uint64 max_batch_request_size = 23; // optional + optional uint64 max_response_body_size_mb = 24; // optional; MB + optional uint32 websocket_requests_per_minute_limit = 25; // optional + optional string tree_api_url = 26; // optional +} + +message ContractVerificationApi { + optional uint32 port = 1; // required; u16 + optional string url = 2; // required +} + +message HealthCheck { + optional uint32 port = 1; // required; u16 +} + +message MerkleTreeApi { + optional uint32 port = 1; // required; u16 +} + +message Api { + optional Web3JsonRpc web3_json_rpc = 1; // required + optional ContractVerificationApi contract_verification = 2; // required + optional Prometheus prometheus = 3; // required + optional HealthCheck healthcheck = 4; // required + optional MerkleTreeApi merkle_tree = 5; // required +} diff --git a/core/lib/protobuf_config/src/proto/chain.proto b/core/lib/protobuf_config/src/proto/chain.proto new file mode 
100644 index 00000000000..8e08d47271b --- /dev/null +++ b/core/lib/protobuf_config/src/proto/chain.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +package zksync.config; + +enum Network { + UNKNOWN = 0; + MAINNET = 1; + RINKEBY = 2; + ROPSTEN = 3; + GOERLI = 4; + SEPOLIA = 5; + LOCALHOST = 6; + TEST = 7; +} + +enum FeeModelVersion { + V1 = 0; + V2 = 1; +} + +enum L1BatchCommitDataGeneratorMode { + ROLLUP = 0; + VALIDIUM = 1; +} + +message EthNetwork { + optional Network network = 1; // required + optional string zksync_network = 2; // required + optional uint64 zksync_network_id = 3; // required; L2ChainId +} + +message StateKeeper { + optional uint64 transaction_slots = 1; // required + optional uint64 block_commit_deadline_ms = 2; // required; ms + optional uint64 miniblock_commit_deadline_ms = 3; // required; ms + optional uint64 miniblock_seal_queue_capacity = 4; // required + optional uint32 max_single_tx_gas = 5; // required; gwei? + optional uint32 max_allowed_l2_tx_gas_limit = 6; // required; wei? + optional double reject_tx_at_geometry_percentage = 7; // required; % + optional double reject_tx_at_eth_params_percentage = 8; // required; % + optional double reject_tx_at_gas_percentage = 9; // required; % + optional double close_block_at_geometry_percentage = 10; // required; % + optional double close_block_at_eth_params_percentage = 11; // required; % + optional double close_block_at_gas_percentage = 12; // required; % + optional bytes fee_account_addr = 13; // required; H160 + optional uint64 minimal_l2_gas_price = 14; // required; wei? + optional double compute_overhead_part = 15; // required; [0,1] + optional double pubdata_overhead_part = 16; // required; [0,1] + optional uint64 batch_overhead_l1_gas = 17; // required; wei? + optional uint64 max_gas_per_batch = 18; // required; wei? + optional uint64 max_pubdata_per_batch = 19; // required; bytes? + optional FeeModelVersion fee_model_version = 20; // required + optional uint32 validation_computational_gas_limit = 21; // required; wei? + optional bool save_call_traces = 22; // required + optional uint32 virtual_blocks_interval = 23; // required + optional uint32 virtual_blocks_per_miniblock = 24; // required + optional bool upload_witness_inputs_to_gcs = 25; // required + optional uint64 enum_index_migration_chunk_size = 26; // optional + optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 27; // required +} + +message OperationsManager { + optional uint64 delay_interval = 1; // required; ms +} + +message Mempool { + optional uint64 sync_interval_ms = 1; // required; ms + optional uint64 sync_batch_size = 2; // required; ? + optional uint64 capacity = 3; // required; ? 
+ optional uint64 stuck_tx_timeout = 4; // required; s + optional bool remove_stuck_txs = 5; // required + optional uint64 delay_interval = 6; // required; ms +} + +message CircuitBreaker { + optional uint64 sync_interval_ms = 1; // required; ms + optional uint64 http_req_max_retry_number = 2; // required + optional uint32 http_req_retry_interval_sec = 3; // required; s + optional uint32 replication_lag_limit_sec = 4; // optional; s +} + + diff --git a/core/lib/protobuf_config/src/proto/contract_verifier.proto b/core/lib/protobuf_config/src/proto/contract_verifier.proto new file mode 100644 index 00000000000..f76ba2b1227 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/contract_verifier.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.config; + +message ContractVerifier { + optional uint64 compilation_timeout = 1; // required; s + optional uint64 polling_interval = 2; // optional; ms + optional uint32 prometheus_port = 3; // required; u16 +} diff --git a/core/lib/protobuf_config/src/proto/contracts.proto b/core/lib/protobuf_config/src/proto/contracts.proto new file mode 100644 index 00000000000..1acda022cd9 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/contracts.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package zksync.config; + +enum ProverAtGenesis { + FRI = 0; + OLD = 1; +} + +message Contracts { + optional bytes governance_addr = 1; // required; H160 + optional bytes mailbox_facet_addr = 2; // required; H160 + optional bytes executor_facet_addr = 3; // required; H160 + optional bytes admin_facet_addr = 4; // required; H160 + optional bytes getters_facet_addr = 5; // required; H160 + optional bytes verifier_addr = 6; // required; H160 + optional bytes diamond_init_addr = 7; // required; H160 + optional bytes diamond_upgrade_init_addr = 8; // required; H160 + optional bytes diamond_proxy_addr = 9; // required; H160 + optional bytes validator_timelock_addr = 10; // required; H160 + optional bytes genesis_tx_hash = 11; // required; H256 + optional bytes l1_erc20_bridge_proxy_addr = 12; // required; H160 + optional bytes l1_erc20_bridge_impl_addr = 13; // required; H160 + optional bytes l2_erc20_bridge_addr = 14; // required; H160 + optional bytes l1_weth_bridge_proxy_addr = 15; // optional; H160 + optional bytes l2_weth_bridge_addr = 16; // optional; H160 + optional bytes l1_allow_list_addr = 17; // required; H160 + optional bytes l2_testnet_paymaster_addr = 18; // optional; H160 + optional bytes recursion_scheduler_level_vk_hash = 19; // required; H256 + optional bytes recursion_node_level_vk_hash = 20; // required; H256 + optional bytes recursion_leaf_level_vk_hash = 21; // required; H256 + optional bytes recursion_circuits_set_vks_hash = 22; // required; H256 + optional bytes l1_multicall3_addr = 23; // required; H160 + optional bytes fri_recursion_scheduler_level_vk_hash = 24; // required; H256 + optional bytes fri_recursion_node_level_vk_hash = 25; // required; H256 + optional bytes fri_recursion_leaf_level_vk_hash = 26; // required; H256 + optional ProverAtGenesis prover_at_genesis = 27; // required + optional bytes snark_wrapper_vk_hash = 28; // required; H256 +} diff --git a/core/lib/protobuf_config/src/proto/database.proto b/core/lib/protobuf_config/src/proto/database.proto new file mode 100644 index 00000000000..64e1e7fd300 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/database.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package zksync.config; + +enum MerkleTreeMode { + FULL = 0; + LIGHTWEIGHT = 1; +} + +message MerkleTree { + optional 
string path = 1; // optional; fs path + optional MerkleTreeMode mode = 2; // optional + optional uint64 multi_get_chunk_size = 3; // optional; ? + optional uint64 block_cache_size_mb = 4; // optional; MB + optional uint64 memtable_capacity_mb = 5; // optional; MB + optional uint64 stalled_writes_timeout_sec = 6; // optional; s + optional uint64 max_l1_batches_per_iter = 7; // optional +} + +message DB { + optional string state_keeper_db_path = 1; // optional; fs path + optional MerkleTree merkle_tree = 2; // optional +} + +message Postgres { + optional string master_url = 1; // optional + optional string replica_url = 2; // optional + optional string prover_url = 3; // optional + optional uint32 max_connections = 4; // optional + optional uint64 statement_timeout_sec = 5; // optional; s +} diff --git a/core/lib/protobuf_config/src/proto/eth_client.proto b/core/lib/protobuf_config/src/proto/eth_client.proto new file mode 100644 index 00000000000..50723dc2dd1 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/eth_client.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package zksync.config; + +message ETHClient { + optional uint64 chain_id = 1; // required; TODO: shouldn't it be Network? + optional string web3_url = 2; // required +} diff --git a/core/lib/protobuf_config/src/proto/eth_sender.proto b/core/lib/protobuf_config/src/proto/eth_sender.proto new file mode 100644 index 00000000000..b5e866d0412 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/eth_sender.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package zksync.config; + +message ETHSender { + optional Sender sender = 1; // required + optional GasAdjuster gas_adjuster = 2; // required +} + +enum ProofSendingMode { + ONLY_REAL_PROOFS = 0; + ONLY_SAMPLED_PROOFS = 1; + SKIP_EVERY_PROOF = 2; +} + +enum ProofLoadingMode { + OLD_PROOF_FROM_DB = 0; + FRI_PROOF_FROM_GCS = 1; +} + +message Sender { + repeated uint64 aggregated_proof_sizes = 1; // ? + optional uint64 wait_confirmations = 2; // optional + optional uint64 tx_poll_period = 3; // required; s + optional uint64 aggregate_tx_poll_period = 4; // required; s + optional uint64 max_txs_in_flight = 5; // required + optional ProofSendingMode proof_sending_mode = 6; // required + optional uint32 max_aggregated_tx_gas = 7; // required; wei? + optional uint64 max_eth_tx_data_size = 8; // required; ? + optional uint32 max_aggregated_blocks_to_commit = 9; // required + optional uint32 max_aggregated_blocks_to_execute = 10; // required + optional uint64 aggregated_block_commit_deadline = 11; // required; ? + optional uint64 aggregated_block_prove_deadline = 12; // required; ? + optional uint64 aggregated_block_execute_deadline = 13; // required; ? + optional uint64 timestamp_criteria_max_allowed_lag = 14; // required; ? + optional uint64 l1_batch_min_age_before_execute_seconds = 15; // optional; s + optional uint64 max_acceptable_priority_fee_in_gwei = 16; // required; gwei + optional ProofLoadingMode proof_loading_mode = 17; // required + // operator_private_key? +} + +message GasAdjuster { + optional uint64 default_priority_fee_per_gas = 1; // required; wei? + optional uint64 max_base_fee_samples = 2; // required; wei? + optional double pricing_formula_parameter_a = 3; // required + optional double pricing_formula_parameter_b = 4; // required + optional double internal_l1_pricing_multiplier = 5; // required + optional uint64 internal_enforced_l1_gas_price = 6; // optional; wei? + optional uint64 poll_period = 7; // required; s + optional uint64 max_l1_gas_price = 8; // optional; wei? 
+} diff --git a/core/lib/protobuf_config/src/proto/eth_watch.proto b/core/lib/protobuf_config/src/proto/eth_watch.proto new file mode 100644 index 00000000000..fe78fd33238 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/eth_watch.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package zksync.config; + +message ETHWatch { + optional uint64 confirmations_for_eth_event = 1; // optional + optional uint64 eth_node_poll_interval = 2; // required; ms +} diff --git a/core/lib/protobuf_config/src/proto/fri_proof_compressor.proto b/core/lib/protobuf_config/src/proto/fri_proof_compressor.proto new file mode 100644 index 00000000000..b8880936c5e --- /dev/null +++ b/core/lib/protobuf_config/src/proto/fri_proof_compressor.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package zksync.config; + +message FriProofCompressor { + optional uint32 compression_mode = 1; // required; u8 + optional uint32 prometheus_listener_port = 2; // required; u16 + optional string prometheus_pushgateway_url = 3; // required + optional uint64 prometheus_push_interval_ms = 4; // optional; ms + optional uint32 generation_timeout_in_secs = 5; // required; s + optional uint32 max_attempts = 6; // required + optional string universal_setup_path = 7; // required; fs path + optional string universal_setup_download_url = 8; // required + optional bool verify_wrapper_proof = 9; // required +} diff --git a/core/lib/protobuf_config/src/proto/fri_prover.proto b/core/lib/protobuf_config/src/proto/fri_prover.proto new file mode 100644 index 00000000000..5547bd9b8da --- /dev/null +++ b/core/lib/protobuf_config/src/proto/fri_prover.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package zksync.config; + +enum SetupLoadMode { + FROM_DISK = 0; + FROM_MEMORY = 1; +} + +message FriProver { + optional string setup_data_path = 1; // required; fs path? 
+ optional uint32 prometheus_port = 2; // required; u16 + optional uint32 max_attempts = 3; // required + optional uint32 generation_timeout_in_secs = 4; // required; s + optional bytes base_layer_circuit_ids_to_be_verified = 5; // required + optional bytes recursive_layer_circuit_ids_to_be_verified = 6; // required + optional SetupLoadMode setup_load_mode = 7; // required + optional uint32 specialized_group_id = 8; // required; u8 + optional uint64 witness_vector_generator_thread_count = 9; // optional + optional uint64 queue_capacity = 10; // required + optional uint32 witness_vector_receiver_port = 11; // required; u16 + optional string zone_read_url = 12; // required + optional bool shall_save_to_public_bucket = 13; // required +} diff --git a/core/lib/protobuf_config/src/proto/fri_prover_gateway.proto b/core/lib/protobuf_config/src/proto/fri_prover_gateway.proto new file mode 100644 index 00000000000..716b761607c --- /dev/null +++ b/core/lib/protobuf_config/src/proto/fri_prover_gateway.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package zksync.config; + +message FriProverGateway { + optional string api_url = 1; // required + optional uint32 api_poll_duration_secs = 2; // required; s + optional uint32 prometheus_listener_port = 3; // required; u16 + optional string prometheus_pushgateway_url = 4; // required + optional uint64 prometheus_push_interval_ms = 5; // optional; ms +} diff --git a/core/lib/protobuf_config/src/proto/fri_prover_group.proto b/core/lib/protobuf_config/src/proto/fri_prover_group.proto new file mode 100644 index 00000000000..8a4dd9511b4 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/fri_prover_group.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package zksync.config; + +message CircuitIdRoundTuple { + optional uint32 circuit_id = 1; // required; u8 + optional uint32 aggregation_round = 2; // required; u8 +} + +message FriProverGroup { + repeated CircuitIdRoundTuple group_0 = 1; + repeated CircuitIdRoundTuple group_1 = 2; + repeated CircuitIdRoundTuple group_2 = 3; + repeated CircuitIdRoundTuple group_3 = 4; + repeated CircuitIdRoundTuple group_4 = 5; + repeated CircuitIdRoundTuple group_5 = 6; + repeated CircuitIdRoundTuple group_6 = 7; + repeated CircuitIdRoundTuple group_7 = 8; + repeated CircuitIdRoundTuple group_8 = 9; + repeated CircuitIdRoundTuple group_9 = 10; + repeated CircuitIdRoundTuple group_10 = 11; + repeated CircuitIdRoundTuple group_11 = 12; + repeated CircuitIdRoundTuple group_12 = 13; +} diff --git a/core/lib/protobuf_config/src/proto/fri_witness_generator.proto b/core/lib/protobuf_config/src/proto/fri_witness_generator.proto new file mode 100644 index 00000000000..dda7f1e8fef --- /dev/null +++ b/core/lib/protobuf_config/src/proto/fri_witness_generator.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package zksync.config; + +message FriWitnessGenerator { + optional uint32 generation_timeout_in_secs = 1; // required; s + optional uint32 max_attempts = 2; // required + optional uint32 blocks_proving_percentage = 3; // optional; 0-100 + repeated uint32 dump_arguments_for_blocks = 4; + optional uint32 last_l1_batch_to_process = 5; // optional + optional uint32 force_process_block = 6; // optional + optional bool shall_save_to_public_bucket = 7; // required +} diff --git a/core/lib/protobuf_config/src/proto/fri_witness_vector_generator.proto b/core/lib/protobuf_config/src/proto/fri_witness_vector_generator.proto new file mode 100644 index 00000000000..5ec4c0eca29 --- /dev/null +++ 
b/core/lib/protobuf_config/src/proto/fri_witness_vector_generator.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package zksync.config; + +message FriWitnessVectorGenerator { + optional uint32 max_prover_reservation_duration_in_secs = 1; // required; s + optional uint32 prover_instance_wait_timeout_in_secs = 2; // required; s + optional uint32 prover_instance_poll_time_in_milli_secs = 3; // required; ms + optional uint32 prometheus_listener_port = 4; // required; u16 + optional string prometheus_pushgateway_url = 5; // required + optional uint64 prometheus_push_interval_ms = 6; // optional; ms + optional uint32 specialized_group_id = 7; // required; u8 +} diff --git a/core/lib/protobuf_config/src/proto/house_keeper.proto b/core/lib/protobuf_config/src/proto/house_keeper.proto new file mode 100644 index 00000000000..d27fb6da71e --- /dev/null +++ b/core/lib/protobuf_config/src/proto/house_keeper.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package zksync.config; + +message HouseKeeper { + optional uint64 l1_batch_metrics_reporting_interval_ms = 1; // required; ms + optional uint64 gpu_prover_queue_reporting_interval_ms = 2; // required; ms + optional uint64 prover_job_retrying_interval_ms = 3; // required; ms + optional uint64 prover_stats_reporting_interval_ms = 4; // required; ms + optional uint64 witness_job_moving_interval_ms = 5; // required; ms + optional uint64 witness_generator_stats_reporting_interval_ms = 6; // required; ms + optional uint64 fri_witness_job_moving_interval_ms = 7; // required; ms + optional uint64 fri_prover_job_retrying_interval_ms = 8; // required; ms + optional uint64 fri_witness_generator_job_retrying_interval_ms = 9; // required; ms + optional uint32 prover_db_pool_size = 10; // required + optional uint64 fri_prover_stats_reporting_interval_ms = 11; // required; ms + optional uint64 fri_proof_compressor_job_retrying_interval_ms = 12; // required; ms + optional uint64 fri_proof_compressor_stats_reporting_interval_ms = 13; // required; ms +} diff --git a/core/lib/protobuf_config/src/proto/mod.rs b/core/lib/protobuf_config/src/proto/mod.rs new file mode 100644 index 00000000000..660bf4c5b4c --- /dev/null +++ b/core/lib/protobuf_config/src/proto/mod.rs @@ -0,0 +1,2 @@ +#![allow(warnings)] +include!(concat!(env!("OUT_DIR"), "/src/proto/gen.rs")); diff --git a/core/lib/protobuf_config/src/proto/object_store.proto b/core/lib/protobuf_config/src/proto/object_store.proto new file mode 100644 index 00000000000..941799c07c2 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/object_store.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package zksync.config; + +enum ObjectStoreMode { + GCS = 0; + GCS_WITH_CREDENTIAL_FILE = 1; + FILE_BACKED = 2; + GCS_ANONYMOUS_READ_ONLY = 3; +} + +message ObjectStore { + optional string bucket_base_url = 1; // required; url + optional ObjectStoreMode mode = 2; // required + optional string file_backed_base_path = 3; // required; fs path + optional string gcs_credential_file_path = 4; // required; fs path + optional uint32 max_retries = 5; // required +} diff --git a/core/lib/protobuf_config/src/proto/proof_data_handler.proto b/core/lib/protobuf_config/src/proto/proof_data_handler.proto new file mode 100644 index 00000000000..a76e15ed5ce --- /dev/null +++ b/core/lib/protobuf_config/src/proto/proof_data_handler.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package zksync.config; + +enum ProtocolVersionLoadingMode { + FROM_DB = 0; + FROM_ENV_VAR = 1; +} + +message ProofDataHandler { + optional uint32 http_port = 1; // required; u16 + 
optional uint32 proof_generation_timeout_in_secs = 2; // required; s + optional ProtocolVersionLoadingMode protocol_version_loading_mode = 3; // required + optional uint32 fri_protocol_version_id = 4; // required; u16 +} diff --git a/core/lib/protobuf_config/src/proto/snapshots_creator.proto b/core/lib/protobuf_config/src/proto/snapshots_creator.proto new file mode 100644 index 00000000000..27e4c66435d --- /dev/null +++ b/core/lib/protobuf_config/src/proto/snapshots_creator.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package zksync.config; + +message SnapshotsCreator { + optional uint64 storage_logs_chunk_size = 1; // optional + optional uint32 concurrent_queries_count = 2; // optional +} diff --git a/core/lib/protobuf_config/src/proto/utils.proto b/core/lib/protobuf_config/src/proto/utils.proto new file mode 100644 index 00000000000..40c6c10c756 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/utils.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.config; + +message Prometheus { + optional uint32 listener_port = 1; // required + optional string pushgateway_url = 2; // required + optional uint64 push_interval_ms = 3; +} diff --git a/core/lib/protobuf_config/src/proto/witness_generator.proto b/core/lib/protobuf_config/src/proto/witness_generator.proto new file mode 100644 index 00000000000..5836c5892e7 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/witness_generator.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package zksync.config; + +enum BasicWitnessGeneratorDataSource { + FROM_POSTGRES = 0; + FROM_POSTGRES_SHADOW_BLOB = 1; + FROM_BLOB = 2; +} + +message WitnessGenerator { + optional uint32 generation_timeout_in_secs = 1; // required; s + optional string initial_setup_key_path = 2; // required; fs path + optional string key_download_url = 3; // required; url + optional uint32 max_attempts = 4; // required + optional uint32 blocks_proving_percentage = 5; // optional; 0-100 (percentage) + repeated uint32 dump_arguments_for_blocks = 6; + optional uint32 last_l1_batch_to_process = 7; // optional + optional BasicWitnessGeneratorDataSource data_source = 8; // required +} diff --git a/core/lib/protobuf_config/src/repr.rs b/core/lib/protobuf_config/src/repr.rs new file mode 100644 index 00000000000..8b71ee63c84 --- /dev/null +++ b/core/lib/protobuf_config/src/repr.rs @@ -0,0 +1,15 @@ +use anyhow::Context as _; + +/// Trait reverse to `zksync_protobuf::ProtoFmt` for cases where +/// you would like to specify a custom proto encoding for an externally defined type. +pub(crate) trait ProtoRepr: + zksync_protobuf::build::prost_reflect::ReflectMessage + Default +{ + type Type; + fn read(&self) -> anyhow::Result<Self::Type>; + fn build(this: &Self::Type) -> Self; +} + +pub(crate) fn read_required_repr<P: ProtoRepr>(field: &Option<P>) -> anyhow::Result<P::Type> { + field.as_ref().context("missing field")?.read() +} diff --git a/core/lib/protobuf_config/src/snapshots_creator.rs b/core/lib/protobuf_config/src/snapshots_creator.rs new file mode 100644 index 00000000000..1ea5aada120 --- /dev/null +++ b/core/lib/protobuf_config/src/snapshots_creator.rs @@ -0,0 +1,24 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::SnapshotsCreator { + type Type = configs::SnapshotsCreatorConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + storage_logs_chunk_size: *required(&self.storage_logs_chunk_size) + .context("storage_logs_chunk_size")?, + concurrent_queries_count: *required(&self.concurrent_queries_count) + .context("concurrent_queries_count")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + storage_logs_chunk_size: Some(this.storage_logs_chunk_size), + concurrent_queries_count: Some(this.concurrent_queries_count), + } + } +} diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs new file mode 100644 index 00000000000..de4803a1008 --- /dev/null +++ b/core/lib/protobuf_config/src/tests.rs @@ -0,0 +1,92 @@ +use pretty_assertions::assert_eq; +use rand::Rng; +use zksync_config::testonly; + +use crate::{proto, repr::ProtoRepr}; + +fn encode<P: ProtoRepr>(msg: &P::Type) -> Vec<u8> { + let msg = P::build(msg); + zksync_protobuf::canonical_raw(&msg.encode_to_vec(), &msg.descriptor()).unwrap() +} + +fn decode<P: ProtoRepr>(bytes: &[u8]) -> anyhow::Result<P::Type> { + P::read(&P::decode(bytes)?) +} + +fn encode_json<P: ProtoRepr>(msg: &P::Type) -> String { + let mut s = serde_json::Serializer::pretty(vec![]); + zksync_protobuf::serde::serialize_proto(&P::build(msg), &mut s).unwrap(); + String::from_utf8(s.into_inner()).unwrap() +} + +fn decode_json<P: ProtoRepr>(json: &str) -> anyhow::Result<P::Type> { + let mut d = serde_json::Deserializer::from_str(json); + P::read(&zksync_protobuf::serde::deserialize_proto(&mut d)?) +} + +#[track_caller] +fn encode_decode<P: ProtoRepr>(rng: &mut impl Rng) +where + P::Type: PartialEq + std::fmt::Debug + testonly::RandomConfig, +{ + for required_only in [false, true] { + let want: P::Type = testonly::Gen { + rng, + required_only, + decimal_fractions: false, + } + .gen(); + let got = decode::<P>(&encode::<P>(&want)).unwrap(); + assert_eq!(&want, &got, "binary encoding"); + + let want: P::Type = testonly::Gen { + rng, + required_only, + decimal_fractions: true, + } + .gen(); + let got = decode_json::<P>(&encode_json::<P>(&want)).unwrap(); + assert_eq!(&want, &got, "json encoding"); + } +} + +/// Tests config <-> proto (boilerplate) conversions. +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); + encode_decode::(rng); +} diff --git a/core/lib/protobuf_config/src/utils.rs b/core/lib/protobuf_config/src/utils.rs new file mode 100644 index 00000000000..d4089df01df --- /dev/null +++ b/core/lib/protobuf_config/src/utils.rs @@ -0,0 +1,28 @@ +use anyhow::Context as _; +use zksync_config::configs::PrometheusConfig; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl ProtoRepr for proto::Prometheus { + type Type = PrometheusConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(PrometheusConfig { + listener_port: required(&self.listener_port) + .and_then(|p| Ok((*p).try_into()?)) + .context("listener_port")?, + pushgateway_url: required(&self.pushgateway_url) + .context("pushgateway_url")? + .clone(), + push_interval_ms: self.push_interval_ms, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + listener_port: Some(this.listener_port.into()), + pushgateway_url: Some(this.pushgateway_url.clone()), + push_interval_ms: this.push_interval_ms, + } + } +} diff --git a/core/lib/protobuf_config/src/witness_generator.rs b/core/lib/protobuf_config/src/witness_generator.rs new file mode 100644 index 00000000000..d4513ebffb5 --- /dev/null +++ b/core/lib/protobuf_config/src/witness_generator.rs @@ -0,0 +1,68 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::required; + +use crate::{proto, repr::ProtoRepr}; + +impl proto::BasicWitnessGeneratorDataSource { + fn new(x: &configs::witness_generator::BasicWitnessGeneratorDataSource) -> Self { + type From = configs::witness_generator::BasicWitnessGeneratorDataSource; + match x { + From::FromPostgres => Self::FromPostgres, + From::FromPostgresShadowBlob => Self::FromPostgresShadowBlob, + From::FromBlob => Self::FromBlob, + } + } + fn parse(&self) -> configs::witness_generator::BasicWitnessGeneratorDataSource { + type To = configs::witness_generator::BasicWitnessGeneratorDataSource; + match self { + Self::FromPostgres => To::FromPostgres, + Self::FromPostgresShadowBlob => To::FromPostgresShadowBlob, + Self::FromBlob => To::FromBlob, + } + } +} + +impl ProtoRepr for proto::WitnessGenerator { + type Type = configs::WitnessGeneratorConfig; + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(Self::Type { + generation_timeout_in_secs: required(&self.generation_timeout_in_secs) + .and_then(|x| Ok((*x).try_into()?)) + .context("generation_timeout_in_secs")?, + initial_setup_key_path: required(&self.initial_setup_key_path) + .context("initial_setup_key_path")?
+ .clone(), + key_download_url: required(&self.key_download_url) + .context("key_download_url")? + .clone(), + max_attempts: *required(&self.max_attempts).context("max_attempts")?, + blocks_proving_percentage: self + .blocks_proving_percentage + .map(|x| x.try_into()) + .transpose() + .context("blocks_proving_percentage")?, + dump_arguments_for_blocks: self.dump_arguments_for_blocks.clone(), + last_l1_batch_to_process: self.last_l1_batch_to_process, + data_source: required(&self.data_source) + .and_then(|x| Ok(proto::BasicWitnessGeneratorDataSource::try_from(*x)?)) + .context("data_source")? + .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + generation_timeout_in_secs: Some(this.generation_timeout_in_secs.into()), + initial_setup_key_path: Some(this.initial_setup_key_path.clone()), + key_download_url: Some(this.key_download_url.clone()), + max_attempts: Some(this.max_attempts), + blocks_proving_percentage: this.blocks_proving_percentage.map(|x| x.into()), + dump_arguments_for_blocks: this.dump_arguments_for_blocks.clone(), + last_l1_batch_to_process: this.last_l1_batch_to_process, + data_source: Some( + proto::BasicWitnessGeneratorDataSource::new(&this.data_source).into(), + ), + } + } +} diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml new file mode 100644 index 00000000000..3bb9e65fe80 --- /dev/null +++ b/core/lib/prover_interface/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "zksync_prover_interface" +version = "0.1.0" +edition = "2018" +authors = ["The Matter Labs Team <hello@matterlabs.dev>"] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +readme = "README.md" + +[dependencies] +zksync_types = { path = "../types" } +zksync_object_store = { path = "../object_store" } + +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } + +serde = "1.0.90" +strum = { version = "0.24", features = ["derive"] } +serde_with = { version = "1", features = ["base64"] } +chrono = { version = "0.4", features = ["serde"] } + +[dev-dependencies] +tokio = { version = "1.21.2", features = ["full"] } +bincode = "1" diff --git a/core/lib/types/src/prover_server_api/mod.rs b/core/lib/prover_interface/src/api.rs similarity index 79% rename from core/lib/types/src/prover_server_api/mod.rs rename to core/lib/prover_interface/src/api.rs index fdbbd57624f..85cf88c4f90 100644 --- a/core/lib/types/src/prover_server_api/mod.rs +++ b/core/lib/prover_interface/src/api.rs @@ -1,12 +1,14 @@ -use serde::{Deserialize, Serialize}; -use zksync_basic_types::L1BatchNumber; +//! Prover and server subsystems communicate via the API. +//! This module defines the types used in the API.
-use crate::{ - aggregated_operations::L1BatchProofForL1, - proofs::PrepareBasicCircuitsJob, +use serde::{Deserialize, Serialize}; +use zksync_types::{ protocol_version::{FriProtocolVersionId, L1VerifierConfig}, + L1BatchNumber, +}; + +use crate::{inputs::PrepareBasicCircuitsJob, outputs::L1BatchProofForL1}; + #[derive(Debug, Serialize, Deserialize)] pub struct ProofGenerationData { pub l1_batch_number: L1BatchNumber, diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs new file mode 100644 index 00000000000..44fe60edddd --- /dev/null +++ b/core/lib/prover_interface/src/inputs.rs @@ -0,0 +1,185 @@ +use std::{convert::TryInto, fmt::Debug}; + +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, Bytes}; +use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_types::{L1BatchNumber, H256, U256}; + +const HASH_LEN: usize = H256::len_bytes(); + +/// Metadata emitted by a Merkle tree after processing a single storage log. +#[serde_as] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct StorageLogMetadata { + #[serde_as(as = "Bytes")] + pub root_hash: [u8; HASH_LEN], + pub is_write: bool, + pub first_write: bool, + #[serde_as(as = "Vec<Bytes>")] + pub merkle_paths: Vec<[u8; HASH_LEN]>, + pub leaf_hashed_key: U256, + pub leaf_enumeration_index: u64, + // **NB.** For compatibility reasons, `#[serde_as(as = "Bytes")]` attributes are not added below. + pub value_written: [u8; HASH_LEN], + pub value_read: [u8; HASH_LEN], +} + +impl StorageLogMetadata { + pub fn leaf_hashed_key_array(&self) -> [u8; 32] { + let mut result = [0_u8; 32]; + self.leaf_hashed_key.to_little_endian(&mut result); + result + } + + pub fn into_merkle_paths_array<const PATH_LEN: usize>(self) -> Box<[[u8; HASH_LEN]; PATH_LEN]> { + let actual_len = self.merkle_paths.len(); + self.merkle_paths.try_into().unwrap_or_else(|_| { + panic!( + "Unexpected length of Merkle paths in `StorageLogMetadata`: expected {}, got {}", + PATH_LEN, actual_len + ); + }) + } +} + +/// Witness data produced by the Merkle tree as a result of processing a single block. Used +/// as an input to the witness generator. +/// +/// # Stability +/// +/// This type is serialized using `bincode` to be passed from the metadata calculator +/// to the witness generator. As such, changes in its `serde` serialization +/// must be backwards-compatible. +/// +/// # Compact form +/// +/// In order to reduce storage space, this job supports a compact format. In this format, +/// only the first item in `merkle_paths` is guaranteed to have the full Merkle path (i.e., +/// 256 items with the current Merkle tree). The following items may have fewer hashes in their +/// Merkle paths; if this is the case, the starting hashes are skipped and are the same +/// as in the first path. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PrepareBasicCircuitsJob { + // Merkle paths and some auxiliary information for each read / write operation in a block. + merkle_paths: Vec<StorageLogMetadata>, + next_enumeration_index: u64, +} + +impl StoredObject for PrepareBasicCircuitsJob { + const BUCKET: Bucket = Bucket::WitnessInput; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("merkel_tree_paths_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl PrepareBasicCircuitsJob { + /// Creates a new job with the specified leaf index and no included paths.
+ pub fn new(next_enumeration_index: u64) -> Self { + Self { + merkle_paths: vec![], + next_enumeration_index, + } + } + + /// Returns the next leaf index at the beginning of the block. + pub fn next_enumeration_index(&self) -> u64 { + self.next_enumeration_index + } + + /// Reserves additional capacity for Merkle paths. + pub fn reserve(&mut self, additional_capacity: usize) { + self.merkle_paths.reserve(additional_capacity); + } + + /// Pushes an additional Merkle path. + pub fn push_merkle_path(&mut self, mut path: StorageLogMetadata) { + let Some(first_path) = self.merkle_paths.first() else { + self.merkle_paths.push(path); + return; + }; + assert_eq!(first_path.merkle_paths.len(), path.merkle_paths.len()); + + let mut hash_pairs = path.merkle_paths.iter().zip(&first_path.merkle_paths); + let first_unique_idx = + hash_pairs.position(|(hash, first_path_hash)| hash != first_path_hash); + let first_unique_idx = first_unique_idx.unwrap_or(path.merkle_paths.len()); + path.merkle_paths = path.merkle_paths.split_off(first_unique_idx); + self.merkle_paths.push(path); + } + + /// Converts this job into an iterator over the contained Merkle paths. + pub fn into_merkle_paths(self) -> impl ExactSizeIterator<Item = StorageLogMetadata> { + let mut merkle_paths = self.merkle_paths; + if let [first, rest @ ..] = merkle_paths.as_mut_slice() { + for path in rest { + assert!( + path.merkle_paths.len() <= first.merkle_paths.len(), + "Merkle paths in `PrepareBasicCircuitsJob` are malformed; the first path is not \ + the longest one" + ); + let spliced_len = first.merkle_paths.len() - path.merkle_paths.len(); + let spliced_hashes = &first.merkle_paths[0..spliced_len]; + path.merkle_paths + .splice(0..0, spliced_hashes.iter().cloned()); + debug_assert_eq!(path.merkle_paths.len(), first.merkle_paths.len()); + } + } + merkle_paths.into_iter() + } +} + +/// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table. +#[derive(Debug, Clone)] +pub struct BasicCircuitWitnessGeneratorInput { + pub block_number: L1BatchNumber, + pub previous_block_hash: H256, + pub previous_block_timestamp: u64, + pub block_timestamp: u64, + pub used_bytecodes_hashes: Vec<U256>, + pub initial_heap_content: Vec<(usize, U256)>, + pub merkle_paths_input: PrepareBasicCircuitsJob, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn prepare_basic_circuits_job_roundtrip() { + let zero_hash = [0_u8; 32]; + let logs = (0..10).map(|i| { + let mut merkle_paths = vec![zero_hash; 255]; + merkle_paths.push([i as u8; 32]); + StorageLogMetadata { + root_hash: zero_hash, + is_write: i % 2 == 0, + first_write: i % 3 == 0, + merkle_paths, + leaf_hashed_key: U256::from(i), + leaf_enumeration_index: i + 1, + value_written: [i as u8; 32], + value_read: [0; 32], + } + }); + let logs: Vec<_> = logs.collect(); + + let mut job = PrepareBasicCircuitsJob::new(4); + job.reserve(logs.len()); + for log in &logs { + job.push_merkle_path(log.clone()); + } + + // Check that Merkle paths are compacted. + for (i, log) in job.merkle_paths.iter().enumerate() { + let expected_merkle_path_len = if i == 0 { 256 } else { 1 }; + assert_eq!(log.merkle_paths.len(), expected_merkle_path_len); + } + + let logs_from_job: Vec<_> = job.into_merkle_paths().collect(); + assert_eq!(logs_from_job, logs); + } +} diff --git a/core/lib/prover_interface/src/lib.rs b/core/lib/prover_interface/src/lib.rs new file mode 100644 index 00000000000..31d66c2af1e --- /dev/null +++ b/core/lib/prover_interface/src/lib.rs @@ -0,0 +1,9 @@ +//!
Point of interaction of the core subsystem with the prover subsystem. +//! Defines the means of communication between the two subsystems without exposing the internal details of either. + +/// Types that define the API for interaction between prover and server subsystems. +pub mod api; +/// Inputs for proof generation provided by the core subsystem. +pub mod inputs; +/// Outputs of proof generation provided by the prover subsystem. +pub mod outputs; diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs new file mode 100644 index 00000000000..ebadc610146 --- /dev/null +++ b/core/lib/prover_interface/src/outputs.rs @@ -0,0 +1,38 @@ +use core::fmt; + +use serde::{Deserialize, Serialize}; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof}, + witness::oracle::VmWitnessOracle, +}; +use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_types::L1BatchNumber; + +/// The only type of proof utilized by the core subsystem: a "final" proof that can be sent +/// to the L1 contract. +#[derive(Clone, Serialize, Deserialize)] +pub struct L1BatchProofForL1 { + pub aggregation_result_coords: [[u8; 32]; 4], + pub scheduler_proof: Proof<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>, +} + +impl fmt::Debug for L1BatchProofForL1 { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("L1BatchProofForL1") + .field("aggregation_result_coords", &self.aggregation_result_coords) + .finish_non_exhaustive() + } +} + +impl StoredObject for L1BatchProofForL1 { + const BUCKET: Bucket = Bucket::ProofsFri; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("l1_batch_proof_{key}.bin") + } + + serialize_using_bincode!(); +} diff --git a/core/lib/object_store/tests/integration.rs b/core/lib/prover_interface/tests/job_serialization.rs similarity index 92% rename from core/lib/object_store/tests/integration.rs rename to core/lib/prover_interface/tests/job_serialization.rs index 9db2061f17f..a71f7ea3ae3 100644 --- a/core/lib/object_store/tests/integration.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -1,11 +1,9 @@ -//! Integration tests for object store. +//! Integration tests for object store serialization of job objects. use tokio::fs; use zksync_object_store::{Bucket, ObjectStoreFactory}; -use zksync_types::{ - proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, - L1BatchNumber, -}; +use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_types::L1BatchNumber; /// Tests compatibility of the `PrepareBasicCircuitsJob` serialization to the previously used /// one.
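The two `StoredObject` impls above pin down where each artifact lives (`Bucket::WitnessInput` for `PrepareBasicCircuitsJob`, `Bucket::ProofsFri` for `L1BatchProofForL1`), how its key is rendered, and that bincode is the wire format. A minimal usage sketch, not part of the diff, assuming the in-memory mock store returned by `ObjectStoreFactory::mock()` (the same helper the applier tests below use):

use zksync_object_store::ObjectStoreFactory;
use zksync_prover_interface::inputs::PrepareBasicCircuitsJob;
use zksync_types::L1BatchNumber;

async fn roundtrip_sketch() -> anyhow::Result<()> {
    let store = ObjectStoreFactory::mock().create_store().await;
    let job = PrepareBasicCircuitsJob::new(42);
    // Serialized with bincode and stored under the key produced by `encode_key`,
    // i.e. `merkel_tree_paths_1.bin` (the legacy spelling is kept for compatibility).
    store.put(L1BatchNumber(1), &job).await?;
    let restored: PrepareBasicCircuitsJob = store.get(L1BatchNumber(1)).await?;
    assert_eq!(restored.next_enumeration_index(), 42);
    Ok(())
}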
diff --git a/core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin b/core/lib/prover_interface/tests/snapshots/prepare-basic-circuits-job-full.bin similarity index 100% rename from core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin rename to core/lib/prover_interface/tests/snapshots/prepare-basic-circuits-job-full.bin diff --git a/core/tests/cross_external_nodes_checker/Cargo.toml b/core/lib/snapshots_applier/Cargo.toml similarity index 61% rename from core/tests/cross_external_nodes_checker/Cargo.toml rename to core/lib/snapshots_applier/Cargo.toml index 4f8285aef5a..5b89bfe8475 100644 --- a/core/tests/cross_external_nodes_checker/Cargo.toml +++ b/core/lib/snapshots_applier/Cargo.toml @@ -1,26 +1,25 @@ [package] -name = "cross_external_nodes_checker" +name = "zksync_snapshots_applier" version = "0.1.0" -edition = "2018" +edition = "2021" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] -publish = false # We don't want to publish our binaries. [dependencies] +zksync_dal = { path = "../../lib/dal" } zksync_types = { path = "../../lib/types" } +zksync_object_store = { path = "../../lib/object_store" } zksync_web3_decl = { path = "../../lib/web3_decl" } zksync_utils = { path = "../../lib/utils" } -vlog = { path = "../../lib/vlog" } -serde_json = "1.0" + +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } anyhow = "1.0" +async-trait = "0.1" tokio = { version = "1", features = ["time"] } -futures = "0.3" -envy = "0.4" -serde = { version = "1.0" } -ctrlc = { version = "3.1" } tracing = "0.1" +thiserror = "1.0" diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs new file mode 100644 index 00000000000..48fc2a813c9 --- /dev/null +++ b/core/lib/snapshots_applier/src/lib.rs @@ -0,0 +1,347 @@ +//! Logic for applying application-level snapshots to Postgres storage.
+ +use std::{collections::HashMap, fmt}; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; +use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_types::{ + api::en::SyncBlock, + snapshots::{ + SnapshotFactoryDependencies, SnapshotHeader, SnapshotRecoveryStatus, SnapshotStorageLog, + SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, + }, + MiniblockNumber, H256, +}; +use zksync_utils::bytecode::hash_bytecode; +use zksync_web3_decl::jsonrpsee::core::{client::Error, ClientError as RpcError}; + +use self::metrics::{InitialStage, StorageLogsChunksStage, METRICS}; + +mod metrics; +#[cfg(test)] +mod tests; + +#[derive(thiserror::Error, Debug)] +pub enum SnapshotsApplierError { + #[error("canceled")] + Canceled(String), + #[error(transparent)] + Fatal(#[from] anyhow::Error), + #[error(transparent)] + Retryable(anyhow::Error), +} + +impl SnapshotsApplierError { + fn canceled(message: &str) -> Self { + Self::Canceled(message.to_owned()) + } +} + +impl From<ObjectStoreError> for SnapshotsApplierError { + fn from(error: ObjectStoreError) -> Self { + match error { + ObjectStoreError::KeyNotFound(_) | ObjectStoreError::Serialization(_) => { + Self::Fatal(error.into()) + } + ObjectStoreError::Other(_) => Self::Retryable(error.into()), + } + } +} + +impl From<SqlxError> for SnapshotsApplierError { + fn from(error: SqlxError) -> Self { + match error { + SqlxError::Database(_) + | SqlxError::RowNotFound + | SqlxError::ColumnNotFound(_) + | SqlxError::Configuration(_) + | SqlxError::TypeNotFound { .. } => Self::Fatal(error.into()), + _ => Self::Retryable(error.into()), + } + } +} + +impl From<RpcError> for SnapshotsApplierError { + fn from(error: RpcError) -> Self { + match error { + Error::Transport(_) | Error::RequestTimeout | Error::RestartNeeded(_) => { + Self::Retryable(error.into()) + } + _ => Self::Fatal(error.into()), + } + } +} + +/// Main node API used by the [`SnapshotsApplier`]. +#[async_trait] +pub trait SnapshotsApplierMainNodeClient: fmt::Debug + Send + Sync { + async fn fetch_l2_block(&self, number: MiniblockNumber) -> Result<Option<SyncBlock>, RpcError>; + + async fn fetch_newest_snapshot(&self) -> Result<Option<SnapshotHeader>, RpcError>; +} + +/// Applies application-level storage snapshots to the Postgres storage. +#[derive(Debug)] +pub struct SnapshotsApplier<'a> { + connection_pool: &'a ConnectionPool, + blob_store: &'a dyn ObjectStore, + applied_snapshot_status: SnapshotRecoveryStatus, +} + +impl<'a> SnapshotsApplier<'a> { + /// Recovers [`SnapshotRecoveryStatus`] from the storage and the main node. + async fn prepare_applied_snapshot_status( + storage: &mut StorageProcessor<'_>, + main_node_client: &dyn SnapshotsApplierMainNodeClient, + ) -> Result<(SnapshotRecoveryStatus, bool), SnapshotsApplierError> { + let latency = + METRICS.initial_stage_duration[&InitialStage::FetchMetadataFromMainNode].start(); + + let applied_snapshot_status = storage + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await?; + + if let Some(applied_snapshot_status) = applied_snapshot_status { + if !applied_snapshot_status + .storage_logs_chunks_processed + .contains(&false) + { + return Err(SnapshotsApplierError::canceled( + "This node has already been initialized from a snapshot", + )); + } + + let latency = latency.observe(); + tracing::info!("Re-initialized snapshots applier after reset/failure in {latency:?}"); + + Ok((applied_snapshot_status, false)) + } else { + if !storage.blocks_dal().is_genesis_needed().await?
{ + return Err(SnapshotsApplierError::canceled( + "This node has already been initialized without a snapshot", + )); + } + + let latency = latency.observe(); + tracing::info!("Initialized fresh snapshots applier in {latency:?}"); + + Ok(( + SnapshotsApplier::create_fresh_recovery_status(main_node_client).await?, + true, + )) + } + } + + pub async fn load_snapshot( + connection_pool: &'a ConnectionPool, + main_node_client: &dyn SnapshotsApplierMainNodeClient, + blob_store: &'a dyn ObjectStore, + ) -> Result<(), SnapshotsApplierError> { + let mut storage = connection_pool + .access_storage_tagged("snapshots_applier") + .await?; + let mut storage_transaction = storage.start_transaction().await?; + + let (applied_snapshot_status, created_from_scratch) = + Self::prepare_applied_snapshot_status(&mut storage_transaction, main_node_client) + .await?; + + let mut recovery = Self { + connection_pool, + blob_store, + applied_snapshot_status, + }; + + METRICS.storage_logs_chunks_count.set( + recovery + .applied_snapshot_status + .storage_logs_chunks_processed + .len(), + ); + METRICS.storage_logs_chunks_left_to_process.set( + recovery + .applied_snapshot_status + .storage_logs_chunks_left_to_process(), + ); + + if created_from_scratch { + recovery + .recover_factory_deps(&mut storage_transaction) + .await?; + storage_transaction + .snapshot_recovery_dal() + .insert_initial_recovery_status(&recovery.applied_snapshot_status) + .await?; + } + storage_transaction.commit().await?; + drop(storage); + + recovery.recover_storage_logs().await?; + Ok(()) + } + + async fn create_fresh_recovery_status( + main_node_client: &dyn SnapshotsApplierMainNodeClient, + ) -> Result<SnapshotRecoveryStatus, SnapshotsApplierError> { + let snapshot_response = main_node_client.fetch_newest_snapshot().await?; + + let snapshot = snapshot_response.ok_or(SnapshotsApplierError::canceled( + "Main node does not have any ready snapshots, skipping initialization from snapshot!", + ))?; + + let l1_batch_number = snapshot.l1_batch_number; + let miniblock_number = snapshot.miniblock_number; + tracing::info!( + "Found snapshot with data up to L1 batch #{l1_batch_number}, storage_logs are divided into {} chunk(s)", + snapshot.storage_logs_chunks.len() + ); + + let miniblock = main_node_client + .fetch_l2_block(miniblock_number) + .await?
+ .with_context(|| format!("miniblock #{miniblock_number} is missing on main node"))?; + let miniblock_root_hash = miniblock + .hash + .context("snapshot miniblock fetched from main node doesn't have hash set")?; + + Ok(SnapshotRecoveryStatus { + l1_batch_number, + l1_batch_root_hash: snapshot.last_l1_batch_with_metadata.metadata.root_hash, + miniblock_number: snapshot.miniblock_number, + miniblock_root_hash, + storage_logs_chunks_processed: vec![false; snapshot.storage_logs_chunks.len()], + }) + } + + async fn recover_factory_deps( + &mut self, + storage: &mut StorageProcessor<'_>, + ) -> Result<(), SnapshotsApplierError> { + let latency = METRICS.initial_stage_duration[&InitialStage::ApplyFactoryDeps].start(); + + tracing::debug!("Fetching factory dependencies from object store"); + let factory_deps: SnapshotFactoryDependencies = self + .blob_store + .get(self.applied_snapshot_status.l1_batch_number) + .await?; + tracing::debug!( + "Fetched {} factory dependencies from object store", + factory_deps.factory_deps.len() + ); + + let all_deps_hashmap: HashMap<H256, Vec<u8>> = factory_deps + .factory_deps + .into_iter() + .map(|dep| (hash_bytecode(&dep.bytecode.0), dep.bytecode.0)) + .collect(); + storage + .storage_dal() + .insert_factory_deps( + self.applied_snapshot_status.miniblock_number, + &all_deps_hashmap, + ) + .await?; + + let latency = latency.observe(); + tracing::info!("Applied factory dependencies in {latency:?}"); + + Ok(()) + } + + async fn insert_initial_writes_chunk( + &mut self, + storage_logs: &[SnapshotStorageLog], + storage: &mut StorageProcessor<'_>, + ) -> Result<(), SnapshotsApplierError> { + storage + .storage_logs_dedup_dal() + .insert_initial_writes_from_snapshot(storage_logs) + .await?; + Ok(()) + } + + async fn insert_storage_logs_chunk( + &mut self, + storage_logs: &[SnapshotStorageLog], + storage: &mut StorageProcessor<'_>, + ) -> Result<(), SnapshotsApplierError> { + storage + .storage_logs_dal() + .insert_storage_logs_from_snapshot( + self.applied_snapshot_status.miniblock_number, + storage_logs, + ) + .await?; + Ok(()) + } + + #[tracing::instrument(level = "debug", err, skip(self))] + async fn recover_storage_logs_single_chunk( + &mut self, + chunk_id: u64, + ) -> Result<(), SnapshotsApplierError> { + tracing::info!("Processing storage logs chunk {chunk_id}"); + let latency = + METRICS.storage_logs_chunks_duration[&StorageLogsChunksStage::LoadFromGcs].start(); + + let storage_key = SnapshotStorageLogsStorageKey { + chunk_id, + l1_batch_number: self.applied_snapshot_status.l1_batch_number, + }; + let storage_snapshot_chunk: SnapshotStorageLogsChunk = + self.blob_store.get(storage_key).await?; + let storage_logs = &storage_snapshot_chunk.storage_logs; + let latency = latency.observe(); + tracing::info!( + "Loaded {} storage logs from GCS for chunk {chunk_id} in {latency:?}", + storage_logs.len() + ); + + let latency = + METRICS.storage_logs_chunks_duration[&StorageLogsChunksStage::SaveToPostgres].start(); + + let mut storage = self + .connection_pool + .access_storage_tagged("snapshots_applier") + .await?; + let mut storage_transaction = storage.start_transaction().await?; + + tracing::info!("Loading {} storage logs into Postgres", storage_logs.len()); + self.insert_storage_logs_chunk(storage_logs, &mut storage_transaction) + .await?; + self.insert_initial_writes_chunk(storage_logs, &mut storage_transaction) + .await?; + + self.applied_snapshot_status.storage_logs_chunks_processed[chunk_id as usize] = true; + storage_transaction + .snapshot_recovery_dal() + 
.mark_storage_logs_chunk_as_processed(chunk_id) + .await?; + storage_transaction.commit().await?; + + let chunks_left = METRICS.storage_logs_chunks_left_to_process.dec_by(1) - 1; + let latency = latency.observe(); + tracing::info!("Saved storage logs for chunk {chunk_id} in {latency:?}, there are {chunks_left} left to process"); + + Ok(()) + } + + pub async fn recover_storage_logs(mut self) -> Result<(), SnapshotsApplierError> { + for chunk_id in 0..self + .applied_snapshot_status + .storage_logs_chunks_processed + .len() + { + //TODO Add retries and parallelize this step + if !self.applied_snapshot_status.storage_logs_chunks_processed[chunk_id] { + self.recover_storage_logs_single_chunk(chunk_id as u64) + .await?; + } + } + + Ok(()) + } +} diff --git a/core/lib/snapshots_applier/src/metrics.rs b/core/lib/snapshots_applier/src/metrics.rs new file mode 100644 index 00000000000..e10270f79fe --- /dev/null +++ b/core/lib/snapshots_applier/src/metrics.rs @@ -0,0 +1,44 @@ +//! Metrics for the snapshot applier. + +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", rename_all = "snake_case")] +pub(crate) enum StorageLogsChunksStage { + LoadFromGcs, + SaveToPostgres, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", rename_all = "snake_case")] +pub(crate) enum InitialStage { + FetchMetadataFromMainNode, + ApplyFactoryDeps, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "snapshots_applier")] +pub(crate) struct SnapshotsApplierMetrics { + /// Number of chunks in the applied snapshot. Set when snapshots applier starts. + pub storage_logs_chunks_count: Gauge<usize>, + + /// Number of chunks left to apply. + pub storage_logs_chunks_left_to_process: Gauge<usize>, + + /// Total latency of applying snapshot. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub snapshot_applying_duration: Histogram<Duration>, + + /// Latency of initial recovery operation split by stage. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub initial_stage_duration: Family<InitialStage, Histogram<Duration>>, + + /// Latency of storage log chunk processing split by stage. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub storage_logs_chunks_duration: Family<StorageLogsChunksStage, Histogram<Duration>>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global<SnapshotsApplierMetrics> = vise::Global::new(); diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs new file mode 100644 index 00000000000..de750ef12f9 --- /dev/null +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -0,0 +1,130 @@ +//! Snapshot applier tests.
+ +use std::collections::HashMap; + +use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStoreFactory; +use zksync_types::{ + snapshots::{ + SnapshotFactoryDependencies, SnapshotFactoryDependency, SnapshotHeader, + SnapshotRecoveryStatus, SnapshotStorageLog, SnapshotStorageLogsChunk, + SnapshotStorageLogsChunkMetadata, SnapshotStorageLogsStorageKey, + }, + Bytes, L1BatchNumber, MiniblockNumber, H256, +}; + +use self::utils::{l1_block_metadata, miniblock_metadata, random_storage_logs, MockMainNodeClient}; +use crate::SnapshotsApplier; + +mod utils; + +#[tokio::test] +async fn snapshots_creator_can_successfully_recover_db() { + let pool = ConnectionPool::test_pool().await; + let object_store_factory = ObjectStoreFactory::mock(); + let object_store = object_store_factory.create_store().await; + let mut client = MockMainNodeClient::default(); + let miniblock_number = MiniblockNumber(1234); + let l1_batch_number = L1BatchNumber(123); + let l1_batch_root_hash = H256::random(); + let miniblock_hash = H256::random(); + let factory_dep_bytes: Vec<u8> = (0..32).collect(); + let factory_deps = SnapshotFactoryDependencies { + factory_deps: vec![SnapshotFactoryDependency { + bytecode: Bytes::from(factory_dep_bytes), + }], + }; + object_store + .put(l1_batch_number, &factory_deps) + .await + .unwrap(); + + let mut all_snapshot_storage_logs = HashMap::<H256, SnapshotStorageLog>::new(); + for chunk_id in 0..2 { + let chunk_storage_logs = SnapshotStorageLogsChunk { + storage_logs: random_storage_logs(l1_batch_number, chunk_id, 10), + }; + let chunk_key = SnapshotStorageLogsStorageKey { + l1_batch_number, + chunk_id, + }; + object_store + .put(chunk_key, &chunk_storage_logs) + .await + .unwrap(); + + all_snapshot_storage_logs.extend( + chunk_storage_logs + .storage_logs + .into_iter() + .map(|log| (log.key.hashed_key(), log)), + ); + } + + let snapshot_header = SnapshotHeader { + l1_batch_number, + miniblock_number, + last_l1_batch_with_metadata: l1_block_metadata(l1_batch_number, l1_batch_root_hash), + storage_logs_chunks: vec![ + SnapshotStorageLogsChunkMetadata { + chunk_id: 0, + filepath: "file0".to_string(), + }, + SnapshotStorageLogsChunkMetadata { + chunk_id: 1, + filepath: "file1".to_string(), + }, + ], + factory_deps_filepath: "some_filepath".to_string(), + }; + client.fetch_newest_snapshot_response = Some(snapshot_header); + client.fetch_l2_block_responses.insert( + miniblock_number, + miniblock_metadata(miniblock_number, l1_batch_number, miniblock_hash), + ); + + SnapshotsApplier::load_snapshot(&pool, &client, &object_store) + .await + .unwrap(); + + let mut storage = pool.access_storage().await.unwrap(); + let mut recovery_dal = storage.snapshot_recovery_dal(); + + let expected_status = SnapshotRecoveryStatus { + l1_batch_number, + l1_batch_root_hash, + miniblock_number, + miniblock_root_hash: miniblock_hash, + storage_logs_chunks_processed: vec![true, true], + }; + + let current_db_status = recovery_dal.get_applied_snapshot_status().await.unwrap(); + assert_eq!(current_db_status.unwrap(), expected_status); + + let all_initial_writes = storage + .storage_logs_dedup_dal() + .dump_all_initial_writes_for_tests() + .await; + assert_eq!(all_initial_writes.len(), all_snapshot_storage_logs.len()); + for initial_write in all_initial_writes { + let log = &all_snapshot_storage_logs[&initial_write.hashed_key]; + assert_eq!( + initial_write.l1_batch_number, + log.l1_batch_number_of_initial_write + ); + assert_eq!(initial_write.index, log.enumeration_index); + } + + let all_storage_logs = storage + .storage_logs_dal()
.dump_all_storage_logs_for_tests() + .await; + assert_eq!(all_storage_logs.len(), all_snapshot_storage_logs.len()); + for db_log in all_storage_logs { + let expected_log = &all_snapshot_storage_logs[&db_log.hashed_key]; + assert_eq!(db_log.address, *expected_log.key.address()); + assert_eq!(db_log.key, *expected_log.key.key()); + assert_eq!(db_log.value, expected_log.value); + assert_eq!(db_log.miniblock_number, miniblock_number); + } +} diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs new file mode 100644 index 00000000000..418518774ae --- /dev/null +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -0,0 +1,109 @@ +//! Test utils. + +use std::collections::HashMap; + +use async_trait::async_trait; +use zksync_types::{ + api::en::SyncBlock, + block::L1BatchHeader, + commitment::{L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata}, + snapshots::{SnapshotHeader, SnapshotStorageLog}, + AccountTreeId, L1BatchNumber, MiniblockNumber, ProtocolVersionId, StorageKey, StorageValue, + H160, H256, +}; +use zksync_web3_decl::jsonrpsee::core::ClientError as RpcError; + +use crate::SnapshotsApplierMainNodeClient; + +#[derive(Debug, Default)] +pub(super) struct MockMainNodeClient { + pub fetch_l2_block_responses: HashMap<MiniblockNumber, SyncBlock>, + pub fetch_newest_snapshot_response: Option<SnapshotHeader>, +} + +#[async_trait] +impl SnapshotsApplierMainNodeClient for MockMainNodeClient { + async fn fetch_l2_block(&self, number: MiniblockNumber) -> Result<Option<SyncBlock>, RpcError> { + Ok(self.fetch_l2_block_responses.get(&number).cloned()) + } + + async fn fetch_newest_snapshot(&self) -> Result<Option<SnapshotHeader>, RpcError> { + Ok(self.fetch_newest_snapshot_response.clone()) + } +} + +pub(crate) fn miniblock_metadata( + number: MiniblockNumber, + l1_batch_number: L1BatchNumber, + hash: H256, +) -> SyncBlock { + SyncBlock { + number, + l1_batch_number, + last_in_batch: true, + timestamp: 0, + l1_gas_price: 0, + l2_fair_gas_price: 0, + fair_pubdata_price: None, + base_system_contracts_hashes: Default::default(), + operator_address: Default::default(), + transactions: None, + virtual_blocks: None, + hash: Some(hash), + protocol_version: Default::default(), + } +} + +pub(crate) fn l1_block_metadata( + l1_batch_number: L1BatchNumber, + root_hash: H256, +) -> L1BatchWithMetadata { + L1BatchWithMetadata { + header: L1BatchHeader::new( + l1_batch_number, + 0, + Default::default(), + ProtocolVersionId::default(), + ), + metadata: L1BatchMetadata { + root_hash, + rollup_last_leaf_index: 0, + merkle_root_hash: H256::zero(), + initial_writes_compressed: vec![], + repeated_writes_compressed: vec![], + commitment: H256::zero(), + l2_l1_messages_compressed: vec![], + l2_l1_merkle_root: H256::zero(), + block_meta_params: L1BatchMetaParameters { + zkporter_is_available: false, + bootloader_code_hash: H256::zero(), + default_aa_code_hash: H256::zero(), + }, + aux_data_hash: H256::zero(), + meta_parameters_hash: H256::zero(), + pass_through_data_hash: H256::zero(), + events_queue_commitment: None, + bootloader_initial_content_commitment: None, + state_diffs_compressed: vec![], + }, + factory_deps: vec![], + } +} + +pub(super) fn random_storage_logs( + l1_batch_number: L1BatchNumber, + chunk_id: u64, + logs_per_chunk: u64, +) -> Vec<SnapshotStorageLog> { + (0..logs_per_chunk) + .map(|x| SnapshotStorageLog { + key: StorageKey::new( + AccountTreeId::from_fixed_bytes(H160::random().to_fixed_bytes()), + H256::random(), + ), + value: StorageValue::random(), + l1_batch_number_of_initial_write: l1_batch_number, + enumeration_index: x + chunk_id *
+                logs_per_chunk,
+        })
+        .collect()
+}
diff --git a/core/lib/state/Cargo.toml b/core/lib/state/Cargo.toml
index 87e433a4160..fed7fa0c015 100644
--- a/core/lib/state/Cargo.toml
+++ b/core/lib/state/Cargo.toml
@@ -23,5 +23,7 @@ tracing = "0.1"
 itertools = "0.10.3"
 
 [dev-dependencies]
+assert_matches = "1.5.0"
 rand = "0.8.5"
 tempfile = "3.0.2"
+test-casing = "0.1.2"
diff --git a/core/lib/state/src/in_memory.rs b/core/lib/state/src/in_memory.rs
index d6058649a45..6dfc98434eb 100644
--- a/core/lib/state/src/in_memory.rs
+++ b/core/lib/state/src/in_memory.rs
@@ -15,7 +15,7 @@ pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270;
 /// In-memory storage.
 #[derive(Debug, Default, Clone)]
 pub struct InMemoryStorage {
-    pub(crate) state: HashMap<StorageKey, (StorageValue, u64)>,
+    pub(crate) state: HashMap<H256, (StorageValue, u64)>,
     pub(crate) factory_deps: HashMap<H256, Vec<u8>>,
     last_enum_index_set: u64,
 }
@@ -68,7 +68,7 @@ impl InMemoryStorage {
         let state: HashMap<_, _> = state_without_indices
             .into_iter()
             .enumerate()
-            .map(|(idx, (key, value))| (key, (value, idx as u64 + 1)))
+            .map(|(idx, (key, value))| (key.hashed_key(), (value, idx as u64 + 1)))
             .collect();
 
         let factory_deps = contracts
@@ -86,7 +86,7 @@ impl InMemoryStorage {
 
     /// Sets the storage `value` at the specified `key`.
     pub fn set_value(&mut self, key: StorageKey, value: StorageValue) {
-        match self.state.entry(key) {
+        match self.state.entry(key.hashed_key()) {
             Entry::Occupied(mut entry) => {
                 entry.get_mut().0 = value;
             }
@@ -101,24 +101,19 @@ impl InMemoryStorage {
     pub fn store_factory_dep(&mut self, hash: H256, bytecode: Vec<u8>) {
         self.factory_deps.insert(hash, bytecode);
     }
-
-    /// Get internal state of the storage.
-    pub fn get_state(&self) -> &HashMap<StorageKey, (StorageValue, u64)> {
-        &self.state
-    }
 }
 
 impl ReadStorage for &InMemoryStorage {
     fn read_value(&mut self, key: &StorageKey) -> StorageValue {
         self.state
-            .get(key)
+            .get(&key.hashed_key())
             .map(|(value, _)| value)
             .copied()
             .unwrap_or_default()
     }
 
     fn is_write_initial(&mut self, key: &StorageKey) -> bool {
-        !self.state.contains_key(key)
+        !self.state.contains_key(&key.hashed_key())
    }
 
     fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
@@ -126,7 +121,7 @@ impl ReadStorage for &InMemoryStorage {
     }
 
     fn get_enumeration_index(&mut self, key: &StorageKey) -> Option<u64> {
-        self.state.get(key).map(|(_, idx)| *idx)
+        self.state.get(&key.hashed_key()).map(|(_, idx)| *idx)
     }
 }
diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs
index 3d54967c9ad..f76f3884656 100644
--- a/core/lib/state/src/lib.rs
+++ b/core/lib/state/src/lib.rs
@@ -30,7 +30,7 @@ mod witness;
 pub use self::{
     in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID},
     postgres::{PostgresStorage, PostgresStorageCaches},
-    rocksdb::RocksdbStorage,
+    rocksdb::{RocksbStorageBuilder, RocksdbStorage},
     shadow_storage::ShadowStorage,
     storage_view::{StorageView, StorageViewMetrics},
     witness::WitnessStorage,
diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs
index 6514da136d5..75adcbba8c6 100644
--- a/core/lib/state/src/postgres/tests.rs
+++ b/core/lib/state/src/postgres/tests.rs
@@ -221,12 +221,15 @@ fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) {
     // insert the contracts
     let mut contracts = HashMap::new();
     contracts.insert(H256::zero(), vec![1, 2, 3]);
-    storage.rt_handle.block_on(
-        storage
-            .connection
-            .storage_dal()
-            .insert_factory_deps(MiniblockNumber(0), &contracts),
-    );
+    storage
+        .rt_handle
+        .block_on(
+            storage
+                .connection
+                .storage_dal()
+                .insert_factory_deps(MiniblockNumber(0), &contracts),
+        )
+        .unwrap();
 
     // Create the storage that should have the cache filled.
     let mut storage = PostgresStorage::new(
diff --git a/core/lib/state/src/rocksdb/metrics.rs b/core/lib/state/src/rocksdb/metrics.rs
index 997f4b42ed3..912bcdba251 100644
--- a/core/lib/state/src/rocksdb/metrics.rs
+++ b/core/lib/state/src/rocksdb/metrics.rs
@@ -2,7 +2,7 @@
 
 use std::time::Duration;
 
-use vise::{Buckets, Gauge, Histogram, Metrics};
+use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit};
 
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "server_state_keeper_secondary_storage")]
@@ -18,3 +18,36 @@ pub(super) struct RocksdbStorageMetrics {
 
 #[vise::register]
 pub(super) static METRICS: vise::Global<RocksdbStorageMetrics> = vise::Global::new();
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "stage", rename_all = "snake_case")]
+pub(super) enum RecoveryStage {
+    LoadFactoryDeps,
+    SaveFactoryDeps,
+    LoadChunkStarts,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "stage", rename_all = "snake_case")]
+pub(super) enum ChunkRecoveryStage {
+    LoadEntries,
+    SaveEntries,
+}
+
+/// Recovery-related group of metrics.
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "server_state_keeper_secondary_storage_recovery")]
+pub(super) struct RocksdbRecoveryMetrics {
+    /// Number of chunks recovered.
+    pub recovered_chunk_count: Gauge<u64>,
+    /// Latency of a storage recovery stage (not related to the recovery of a particular chunk;
+    /// those metrics are tracked in the `chunk_latency` histogram).
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub latency: Family<RecoveryStage, Histogram<Duration>>,
+    /// Latency of a chunk recovery stage.
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub chunk_latency: Family<ChunkRecoveryStage, Histogram<Duration>>,
+}
+
+#[vise::register]
+pub(super) static RECOVERY_METRICS: vise::Global<RocksdbRecoveryMetrics> = vise::Global::new();
diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs
index 6e0bb7233ee..956307566af 100644
--- a/core/lib/state/src/rocksdb/mod.rs
+++ b/core/lib/state/src/rocksdb/mod.rs
@@ -19,24 +19,37 @@
 //! | Contracts | address (20 bytes) | `Vec<u8>` | Contract contents |
 //! | Factory deps | hash (32 bytes) | `Vec<u8>` | Bytecodes for new contracts that a certain contract may deploy. |
 
-use std::{collections::HashMap, convert::TryInto, mem, path::Path, time::Instant};
-
+use std::{
+    collections::HashMap,
+    convert::TryInto,
+    mem,
+    path::{Path, PathBuf},
+    time::Instant,
+};
+
+use anyhow::Context as _;
 use itertools::{Either, Itertools};
+use tokio::sync::watch;
 use zksync_dal::StorageProcessor;
 use zksync_storage::{db::NamedColumnFamily, RocksDB};
 use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256, U256};
 use zksync_utils::{h256_to_u256, u256_to_h256};
 
 use self::metrics::METRICS;
+#[cfg(test)]
+use self::tests::RocksdbStorageEventListener;
 use crate::{InMemoryStorage, ReadStorage};
 
 mod metrics;
+mod recovery;
+#[cfg(test)]
+mod tests;
 
-fn serialize_block_number(block_number: u32) -> [u8; 4] {
+fn serialize_l1_batch_number(block_number: u32) -> [u8; 4] {
     block_number.to_le_bytes()
 }
 
-fn deserialize_block_number(bytes: &[u8]) -> u32 {
+fn deserialize_l1_batch_number(bytes: &[u8]) -> u32 {
     let bytes: [u8; 4] = bytes.try_into().expect("incorrect block number format");
     u32::from_le_bytes(bytes)
 }
@@ -96,86 +109,189 @@ impl StateValue {
     }
 }
 
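An aside on the renamed `serialize_l1_batch_number` / `deserialize_l1_batch_number` helpers in the hunk above: they are a plain little-endian round-trip over the 4-byte value stored under the special key. A standalone sketch (pure `std`, mirroring the names in the diff):

```rust
fn serialize_l1_batch_number(number: u32) -> [u8; 4] {
    number.to_le_bytes()
}

fn deserialize_l1_batch_number(bytes: &[u8]) -> u32 {
    let bytes: [u8; 4] = bytes.try_into().expect("incorrect block number format");
    u32::from_le_bytes(bytes)
}

fn main() {
    // Round-trip: the serialized form is exactly 4 little-endian bytes.
    let number = 1_234_u32;
    let bytes = serialize_l1_batch_number(number);
    assert_eq!(deserialize_l1_batch_number(&bytes), number);
}
```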
+/// Error emitted when [`RocksdbStorage`] is being updated.
+#[derive(Debug)]
+enum RocksdbSyncError {
+    Internal(anyhow::Error),
+    Interrupted,
+}
+
+impl From<anyhow::Error> for RocksdbSyncError {
+    fn from(err: anyhow::Error) -> Self {
+        Self::Internal(err)
+    }
+}
+
 /// [`ReadStorage`] implementation backed by RocksDB.
 #[derive(Debug)]
 pub struct RocksdbStorage {
     db: RocksDB<StateKeeperColumnFamily>,
     pending_patch: InMemoryStorage,
     enum_index_migration_chunk_size: usize,
+    /// Test-only listeners to events produced by the storage.
+    #[cfg(test)]
+    listener: RocksdbStorageEventListener,
 }
 
-impl RocksdbStorage {
-    const BLOCK_NUMBER_KEY: &'static [u8] = b"block_number";
-    const ENUM_INDEX_MIGRATION_CURSOR: &'static [u8] = b"enum_index_migration_cursor";
+/// Builder of [`RocksdbStorage`]. The storage data is inaccessible until the storage is [`Self::synchronize()`]d
+/// with Postgres.
+#[derive(Debug)]
+pub struct RocksbStorageBuilder(RocksdbStorage);
 
-    fn is_special_key(key: &[u8]) -> bool {
-        key == Self::BLOCK_NUMBER_KEY || key == Self::ENUM_INDEX_MIGRATION_CURSOR
+impl RocksbStorageBuilder {
+    /// Enables enum indices migration.
+    pub fn enable_enum_index_migration(&mut self, chunk_size: usize) {
+        self.0.enum_index_migration_chunk_size = chunk_size;
+    }
+
+    /// Returns the last processed l1 batch number + 1.
+    ///
+    /// # Panics
+    ///
+    /// Panics on RocksDB errors.
+    pub async fn l1_batch_number(&self) -> Option<L1BatchNumber> {
+        self.0.l1_batch_number().await
     }
 
-    /// Creates a new storage with the provided RocksDB `path`.
-    pub fn new(path: &Path) -> Self {
-        let db = RocksDB::new(path);
-        Self {
-            db,
-            pending_patch: InMemoryStorage::default(),
-            enum_index_migration_chunk_size: 100,
+    /// Synchronizes this storage with Postgres using the provided connection.
+    ///
+    /// # Return value
+    ///
+    /// Returns `Ok(None)` if the update is interrupted using `stop_receiver`.
+    ///
+    /// # Errors
+    ///
+    /// - Errors if the local L1 batch number is greater than the last sealed L1 batch number
+    ///   in Postgres.
+    pub async fn synchronize(
+        self,
+        storage: &mut StorageProcessor<'_>,
+        stop_receiver: &watch::Receiver<bool>,
+    ) -> anyhow::Result<Option<RocksdbStorage>> {
+        let mut inner = self.0;
+        match inner.update_from_postgres(storage, stop_receiver).await {
+            Ok(()) => Ok(Some(inner)),
+            Err(RocksdbSyncError::Interrupted) => Ok(None),
+            Err(RocksdbSyncError::Internal(err)) => Err(err),
         }
     }
 
-    /// Enables enum indices migration.
-    pub fn enable_enum_index_migration(&mut self, chunk_size: usize) {
-        self.enum_index_migration_chunk_size = chunk_size;
+    /// Rolls back the state to a previous L1 batch number.
+    ///
+    /// # Errors
+    ///
+    /// Propagates RocksDB and Postgres errors.
+    pub async fn rollback(
+        mut self,
+        storage: &mut StorageProcessor<'_>,
+        last_l1_batch_to_keep: L1BatchNumber,
+    ) -> anyhow::Result<()> {
+        self.0.rollback(storage, last_l1_batch_to_keep).await
     }
+}
 
-    /// Synchronizes this storage with Postgres using the provided connection.
+impl RocksdbStorage {
+    const L1_BATCH_NUMBER_KEY: &'static [u8] = b"block_number";
+    const ENUM_INDEX_MIGRATION_CURSOR: &'static [u8] = b"enum_index_migration_cursor";
+
+    /// Desired size of log chunks loaded from Postgres during snapshot recovery.
+    /// This is intentionally not configurable because chunks must be the same for the entire recovery
+    /// (i.e., not changed after a node restart).
+    const DESIRED_LOG_CHUNK_SIZE: u64 = 200_000;
+
+    fn is_special_key(key: &[u8]) -> bool {
+        key == Self::L1_BATCH_NUMBER_KEY || key == Self::ENUM_INDEX_MIGRATION_CURSOR
+    }
+
+    /// Creates a new storage builder with the provided RocksDB `path`.
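The `synchronize` method above encodes graceful shutdown in its return type rather than as an error variant. A minimal self-contained sketch of the same pattern (assuming only the `anyhow` crate; `SyncError` and `Storage` are illustrative stand-ins, not names from the codebase):

```rust
use anyhow::anyhow;

#[derive(Debug)]
enum SyncError {
    Internal(anyhow::Error),
    Interrupted,
}

struct Storage; // stand-in for the synchronized storage

fn into_outcome(result: Result<Storage, SyncError>) -> anyhow::Result<Option<Storage>> {
    match result {
        // Fully synchronized: hand the storage to the caller.
        Ok(storage) => Ok(Some(storage)),
        // Stop signal received: not an error, but there is no usable storage either.
        Err(SyncError::Interrupted) => Ok(None),
        // Genuine failure: propagate it.
        Err(SyncError::Internal(err)) => Err(err),
    }
}

fn main() {
    assert!(into_outcome(Err(SyncError::Interrupted)).unwrap().is_none());
    assert!(into_outcome(Err(SyncError::Internal(anyhow!("boom")))).is_err());
}
```

This keeps callers honest: they must decide what a clean interruption means for them instead of pattern-matching error strings.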
/// - /// # Panics + /// # Errors /// - /// Panics if the local L1 batch number is greater than the last sealed L1 batch number - /// in Postgres. - pub async fn update_from_postgres(&mut self, conn: &mut StorageProcessor<'_>) { + /// Propagates RocksDB I/O errors. + pub async fn builder(path: &Path) -> anyhow::Result { + Self::new(path.to_path_buf()) + .await + .map(RocksbStorageBuilder) + } + + async fn new(path: PathBuf) -> anyhow::Result { + tokio::task::spawn_blocking(move || { + Ok(Self { + db: RocksDB::new(&path).context("failed initializing state keeper RocksDB")?, + pending_patch: InMemoryStorage::default(), + enum_index_migration_chunk_size: 100, + #[cfg(test)] + listener: RocksdbStorageEventListener::default(), + }) + }) + .await + .context("panicked initializing state keeper RocksDB")? + } + + async fn update_from_postgres( + &mut self, + storage: &mut StorageProcessor<'_>, + stop_receiver: &watch::Receiver, + ) -> Result<(), RocksdbSyncError> { + let mut current_l1_batch_number = self + .ensure_ready(storage, Self::DESIRED_LOG_CHUNK_SIZE, stop_receiver) + .await?; + let latency = METRICS.update.start(); - let Some(latest_l1_batch_number) = conn + let Some(latest_l1_batch_number) = storage .blocks_dal() .get_sealed_l1_batch_number() .await - .unwrap() + .context("failed fetching sealed L1 batch number")? else { // No L1 batches are persisted in Postgres; update is not necessary. - return; + return Ok(()); }; tracing::debug!("Loading storage for l1 batch number {latest_l1_batch_number}"); - let mut current_l1_batch_number = self.l1_batch_number().0; - assert!( - current_l1_batch_number <= latest_l1_batch_number.0 + 1, - "L1 batch number in state keeper cache ({current_l1_batch_number}) is greater than \ - the last sealed L1 batch number in Postgres ({latest_l1_batch_number})" - ); + if current_l1_batch_number > latest_l1_batch_number + 1 { + let err = anyhow::anyhow!( + "L1 batch number in state keeper cache ({current_l1_batch_number}) is greater than \ + the last sealed L1 batch number in Postgres ({latest_l1_batch_number})" + ); + return Err(err.into()); + } - while current_l1_batch_number <= latest_l1_batch_number.0 { - let current_lag = latest_l1_batch_number.0 - current_l1_batch_number + 1; + while current_l1_batch_number <= latest_l1_batch_number { + if *stop_receiver.borrow() { + return Err(RocksdbSyncError::Interrupted); + } + let current_lag = latest_l1_batch_number.0 - current_l1_batch_number.0 + 1; METRICS.lag.set(current_lag.into()); - tracing::debug!("loading state changes for l1 batch {current_l1_batch_number}"); - let storage_logs = conn + tracing::debug!("Loading state changes for l1 batch {current_l1_batch_number}"); + let storage_logs = storage .storage_logs_dal() - .get_touched_slots_for_l1_batch(L1BatchNumber(current_l1_batch_number)) - .await; - self.apply_storage_logs(storage_logs, conn).await; + .get_touched_slots_for_l1_batch(current_l1_batch_number) + .await + .with_context(|| { + format!("failed loading touched slots for L1 batch {current_l1_batch_number}") + })?; + self.apply_storage_logs(storage_logs, storage).await?; - tracing::debug!("loading factory deps for l1 batch {current_l1_batch_number}"); - let factory_deps = conn + tracing::debug!("Loading factory deps for L1 batch {current_l1_batch_number}"); + let factory_deps = storage .blocks_dal() - .get_l1_batch_factory_deps(L1BatchNumber(current_l1_batch_number)) + .get_l1_batch_factory_deps(current_l1_batch_number) .await - .unwrap(); + .with_context(|| { + format!("failed loading factory deps for 
L1 batch {current_l1_batch_number}") + })?; for (hash, bytecode) in factory_deps { self.store_factory_dep(hash, bytecode); } current_l1_batch_number += 1; - self.save(L1BatchNumber(current_l1_batch_number)).await; + self.save(Some(current_l1_batch_number)) + .await + .with_context(|| format!("failed saving L1 batch #{current_l1_batch_number}"))?; + #[cfg(test)] + (self.listener.on_l1_batch_synced)(current_l1_batch_number - 1); } latency.observe(); @@ -188,50 +304,66 @@ impl RocksdbStorage { assert!(self.enum_index_migration_chunk_size > 0); // Enum indices must be at the storage. Run migration till the end. - while self.enum_migration_start_from().is_some() { - self.save_missing_enum_indices(conn).await; + while self.enum_migration_start_from().await.is_some() { + if *stop_receiver.borrow() { + return Err(RocksdbSyncError::Interrupted); + } + self.save_missing_enum_indices(storage).await?; } + Ok(()) } async fn apply_storage_logs( &mut self, storage_logs: HashMap, - conn: &mut StorageProcessor<'_>, - ) { - let (logs_with_known_indices, logs_with_unknown_indices): (Vec<_>, Vec<_>) = self - .process_transaction_logs(storage_logs) + storage: &mut StorageProcessor<'_>, + ) -> anyhow::Result<()> { + let db = self.db.clone(); + let processed_logs = + tokio::task::spawn_blocking(move || Self::process_transaction_logs(&db, storage_logs)) + .await + .context("panicked processing storage logs")?; + + let (logs_with_known_indices, logs_with_unknown_indices): (Vec<_>, Vec<_>) = processed_logs + .into_iter() .partition_map(|(key, StateValue { value, enum_index })| match enum_index { - Some(index) => Either::Left((key, (value, index))), - None => Either::Right((key, value)), + Some(index) => Either::Left((key.hashed_key(), (value, index))), + None => Either::Right((key.hashed_key(), value)), }); let keys_with_unknown_indices: Vec<_> = logs_with_unknown_indices .iter() - .map(|(key, _)| key.hashed_key()) + .map(|&(key, _)| key) .collect(); - let enum_indices_and_batches = conn + let enum_indices_and_batches = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&keys_with_unknown_indices) - .await; - assert_eq!( - keys_with_unknown_indices.len(), - enum_indices_and_batches.len() + .await + .context("failed getting enumeration indices for storage logs")?; + anyhow::ensure!( + keys_with_unknown_indices.len() == enum_indices_and_batches.len(), + "Inconsistent Postgres data: not all new storage logs have enumeration indices" ); - self.pending_patch.state = - logs_with_known_indices - .into_iter() - .chain(logs_with_unknown_indices.into_iter().map(|(key, value)| { - (key, (value, enum_indices_and_batches[&key.hashed_key()].1)) - })) - .collect(); + self.pending_patch.state = logs_with_known_indices + .into_iter() + .chain( + logs_with_unknown_indices + .into_iter() + .map(|(key, value)| (key, (value, enum_indices_and_batches[&key].1))), + ) + .collect(); + Ok(()) } - async fn save_missing_enum_indices(&self, conn: &mut StorageProcessor<'_>) { - let (Some(start_from), true) = ( - self.enum_migration_start_from(), + async fn save_missing_enum_indices( + &self, + storage: &mut StorageProcessor<'_>, + ) -> anyhow::Result<()> { + let (true, Some(start_from)) = ( self.enum_index_migration_chunk_size > 0, + self.enum_migration_start_from().await, ) else { - return; + return Ok(()); }; let started_at = Instant::now(); @@ -239,91 +371,107 @@ impl RocksdbStorage { "RocksDB enum index migration is not finished, starting from key {start_from:0>64x}" ); - let mut write_batch = 
self.db.new_write_batch(); - let (keys, values): (Vec<_>, Vec<_>) = self - .db - .from_iterator_cf(StateKeeperColumnFamily::State, start_from.as_bytes()) - .filter_map(|(key, value)| { - if Self::is_special_key(&key) { - return None; - } - let state_value = StateValue::deserialize(&value); - (state_value.enum_index.is_none()) - .then(|| (H256::from_slice(&key), state_value.value)) - }) - .take(self.enum_index_migration_chunk_size) - .unzip(); - let enum_indices_and_batches = conn + let db = self.db.clone(); + let enum_index_migration_chunk_size = self.enum_index_migration_chunk_size; + let (keys, values): (Vec<_>, Vec<_>) = tokio::task::spawn_blocking(move || { + db.from_iterator_cf(StateKeeperColumnFamily::State, start_from.as_bytes()) + .filter_map(|(key, value)| { + if Self::is_special_key(&key) { + return None; + } + let state_value = StateValue::deserialize(&value); + state_value + .enum_index + .is_none() + .then(|| (H256::from_slice(&key), state_value.value)) + }) + .take(enum_index_migration_chunk_size) + .unzip() + }) + .await + .unwrap(); + + let enum_indices_and_batches = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&keys) - .await; + .await + .context("failed getting enumeration indices for storage logs")?; assert_eq!(keys.len(), enum_indices_and_batches.len()); + let key_count = keys.len(); - for (key, value) in keys.iter().zip(values) { - let index = enum_indices_and_batches[key].1; - write_batch.put_cf( - StateKeeperColumnFamily::State, - key.as_bytes(), - &StateValue::new(value, Some(index)).serialize(), - ); - } - - let next_key = keys - .last() - .and_then(|last_key| h256_to_u256(*last_key).checked_add(U256::one())) - .map(u256_to_h256); - match (next_key, keys.len()) { - (Some(next_key), keys_len) if keys_len == self.enum_index_migration_chunk_size => { + let db = self.db.clone(); + tokio::task::spawn_blocking(move || { + let mut write_batch = db.new_write_batch(); + for (key, value) in keys.iter().zip(values) { + let index = enum_indices_and_batches[key].1; write_batch.put_cf( StateKeeperColumnFamily::State, - Self::ENUM_INDEX_MIGRATION_CURSOR, - next_key.as_bytes(), + key.as_bytes(), + &StateValue::new(value, Some(index)).serialize(), ); } - _ => { - write_batch.put_cf( - StateKeeperColumnFamily::State, - Self::ENUM_INDEX_MIGRATION_CURSOR, - &[], - ); - tracing::info!("RocksDB enum index migration finished"); + + let next_key = keys + .last() + .and_then(|last_key| h256_to_u256(*last_key).checked_add(U256::one())) + .map(u256_to_h256); + match (next_key, keys.len()) { + (Some(next_key), keys_len) if keys_len == enum_index_migration_chunk_size => { + write_batch.put_cf( + StateKeeperColumnFamily::State, + Self::ENUM_INDEX_MIGRATION_CURSOR, + next_key.as_bytes(), + ); + } + _ => { + write_batch.put_cf( + StateKeeperColumnFamily::State, + Self::ENUM_INDEX_MIGRATION_CURSOR, + &[], + ); + tracing::info!("RocksDB enum index migration finished"); + } } - } - self.db - .write(write_batch) - .expect("failed to save state data into rocksdb"); + db.write(write_batch) + .context("failed saving enum indices to RocksDB") + }) + .await + .context("panicked while saving enum indices to RocksDB")??; + tracing::info!( - "RocksDB enum index migration chunk took {:?}, migrated {} keys", - started_at.elapsed(), - keys.len() + "RocksDB enum index migration chunk took {:?}, migrated {key_count} keys", + started_at.elapsed() ); + Ok(()) } fn read_value_inner(&self, key: &StorageKey) -> Option { - self.read_state_value(key) - .map(|state_value| state_value.value) 
+ Self::read_state_value(&self.db, key.hashed_key()).map(|state_value| state_value.value) } - fn read_state_value(&self, key: &StorageKey) -> Option { + fn read_state_value( + db: &RocksDB, + hashed_key: H256, + ) -> Option { let cf = StateKeeperColumnFamily::State; - self.db - .get_cf(cf, &Self::serialize_state_key(key)) + db.get_cf(cf, &Self::serialize_state_key(hashed_key)) .expect("failed to read rocksdb state value") .map(|value| StateValue::deserialize(&value)) } /// Returns storage logs to apply. fn process_transaction_logs( - &self, + db: &RocksDB, updates: HashMap, - ) -> impl Iterator + '_ { - updates.into_iter().filter_map(|(key, new_value)| { - if let Some(state_value) = self.read_state_value(&key) { + ) -> Vec<(StorageKey, StateValue)> { + let it = updates.into_iter().filter_map(move |(key, new_value)| { + if let Some(state_value) = Self::read_state_value(db, key.hashed_key()) { Some((key, StateValue::new(new_value, state_value.enum_index))) } else { (!new_value.is_zero()).then_some((key, StateValue::new(new_value, None))) } - }) + }); + it.collect() } /// Stores a factory dependency with the specified `hash` and `bytecode`. @@ -331,16 +479,11 @@ impl RocksdbStorage { self.pending_patch.factory_deps.insert(hash, bytecode); } - /// Rolls back the state to a previous L1 batch number. - /// - /// # Panics - /// - /// Panics on RocksDB errors. - pub async fn rollback( + async fn rollback( &mut self, connection: &mut StorageProcessor<'_>, last_l1_batch_to_keep: L1BatchNumber, - ) { + ) -> anyhow::Result<()> { tracing::info!("Rolling back state keeper storage to L1 batch #{last_l1_batch_to_keep}..."); tracing::info!("Getting logs that should be applied to rollback state..."); @@ -348,7 +491,8 @@ impl RocksdbStorage { let logs = connection .storage_logs_dal() .get_storage_logs_for_revert(last_l1_batch_to_keep) - .await; + .await + .context("failed getting logs for rollback")?; tracing::info!("Got {} logs, took {:?}", logs.len(), stage_start.elapsed()); tracing::info!("Getting number of last miniblock for L1 batch #{last_l1_batch_to_keep}..."); @@ -357,8 +501,10 @@ impl RocksdbStorage { .blocks_dal() .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) .await - .unwrap() - .expect("L1 batch should contain at least one miniblock"); + .with_context(|| { + format!("failed fetching miniblock range for L1 batch #{last_l1_batch_to_keep}") + })? + .context("L1 batch should contain at least one miniblock")?; tracing::info!( "Got miniblock number {last_miniblock_to_keep}, took {:?}", stage_start.elapsed() @@ -369,7 +515,10 @@ impl RocksdbStorage { let factory_deps = connection .storage_dal() .get_factory_deps_for_revert(last_miniblock_to_keep) - .await; + .await + .with_context(|| { + format!("failed fetching factory deps for miniblock #{last_miniblock_to_keep}") + })?; tracing::info!( "Got {} factory deps, took {:?}", factory_deps.len(), @@ -394,8 +543,8 @@ impl RocksdbStorage { } batch.put_cf( cf, - Self::BLOCK_NUMBER_KEY, - &serialize_block_number(last_l1_batch_to_keep.0 + 1), + Self::L1_BATCH_NUMBER_KEY, + &serialize_l1_batch_number(last_l1_batch_to_keep.0 + 1), ); let cf = StateKeeperColumnFamily::FactoryDeps; @@ -404,29 +553,31 @@ impl RocksdbStorage { } db.write(batch) - .expect("failed to save state data into RocksDB"); + .context("failed to save state data into RocksDB") }) .await - .unwrap(); + .context("panicked during revert")? } /// Saves the pending changes to RocksDB. Must be executed on a Tokio thread. 
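A recurring refactoring in this hunk, including the `save` method below, is wrapping blocking RocksDB I/O in `tokio::task::spawn_blocking` with a cloned DB handle, and converting a task panic into an error instead of letting it unwind. A generic sketch of that pattern (assumes the `tokio` and `anyhow` crates; the helper name is hypothetical):

```rust
use anyhow::Context as _;

/// Runs a blocking closure on Tokio's blocking thread pool, surfacing a panic
/// in the closure as an `anyhow` error rather than propagating the unwind.
async fn run_blocking<T: Send + 'static>(
    f: impl FnOnce() -> T + Send + 'static,
) -> anyhow::Result<T> {
    tokio::task::spawn_blocking(f)
        .await
        .context("panicked in blocking RocksDB task")
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // In the real code, the closure would clone the `RocksDB` handle and read/write it.
    let value = run_blocking(|| 40 + 2).await?;
    assert_eq!(value, 42);
    Ok(())
}
```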
- async fn save(&mut self, l1_batch_number: L1BatchNumber) { + async fn save(&mut self, l1_batch_number: Option) -> anyhow::Result<()> { let pending_patch = mem::take(&mut self.pending_patch); let db = self.db.clone(); let save_task = tokio::task::spawn_blocking(move || { let mut batch = db.new_write_batch(); let cf = StateKeeperColumnFamily::State; - batch.put_cf( - cf, - Self::BLOCK_NUMBER_KEY, - &serialize_block_number(l1_batch_number.0), - ); + if let Some(l1_batch_number) = l1_batch_number { + batch.put_cf( + cf, + Self::L1_BATCH_NUMBER_KEY, + &serialize_l1_batch_number(l1_batch_number.0), + ); + } for (key, (value, enum_index)) in pending_patch.state { batch.put_cf( cf, - &Self::serialize_state_key(&key), + &Self::serialize_state_key(key), &StateValue::new(value, Some(enum_index)).serialize(), ); } @@ -436,26 +587,31 @@ impl RocksdbStorage { batch.put_cf(cf, &hash.to_fixed_bytes(), value.as_ref()); } db.write(batch) - .expect("failed to save state data into rocksdb"); + .context("failed to save state data into RocksDB") }); - save_task.await.unwrap(); + save_task + .await + .context("panicked when saving state data into RocksDB")? } - /// Returns the last processed l1 batch number + 1 + /// Returns the last processed l1 batch number + 1. + /// /// # Panics + /// /// Panics on RocksDB errors. - pub fn l1_batch_number(&self) -> L1BatchNumber { + async fn l1_batch_number(&self) -> Option { let cf = StateKeeperColumnFamily::State; - let block_number = self - .db - .get_cf(cf, Self::BLOCK_NUMBER_KEY) - .expect("failed to fetch block number"); - let block_number = block_number.map_or(0, |bytes| deserialize_block_number(&bytes)); - L1BatchNumber(block_number) + let db = self.db.clone(); + let number_bytes = + tokio::task::spawn_blocking(move || db.get_cf(cf, Self::L1_BATCH_NUMBER_KEY)) + .await + .expect("failed getting L1 batch number from RocksDB") + .expect("failed getting L1 batch number from RocksDB"); + number_bytes.map(|bytes| L1BatchNumber(deserialize_l1_batch_number(&bytes))) } - fn serialize_state_key(key: &StorageKey) -> [u8; 32] { - key.hashed_key().to_fixed_bytes() + fn serialize_state_key(key: H256) -> [u8; 32] { + key.to_fixed_bytes() } /// Estimates the number of key–value entries in the VM state. @@ -464,16 +620,20 @@ impl RocksdbStorage { .estimated_number_of_entries(StateKeeperColumnFamily::State) } - fn enum_migration_start_from(&self) -> Option { - let value = self - .db - .get_cf( + async fn enum_migration_start_from(&self) -> Option { + let db = self.db.clone(); + let value = tokio::task::spawn_blocking(move || { + db.get_cf( StateKeeperColumnFamily::State, Self::ENUM_INDEX_MIGRATION_CURSOR, ) - .expect("failed to read `ENUM_INDEX_MIGRATION_CURSOR`"); + .expect("failed to read `ENUM_INDEX_MIGRATION_CURSOR`") + }) + .await + .unwrap(); + match value { - Some(v) if v.is_empty() => None, + Some(value) if value.is_empty() => None, Some(cursor) => Some(H256::from_slice(&cursor)), None => Some(H256::zero()), } @@ -499,250 +659,7 @@ impl ReadStorage for RocksdbStorage { fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { // Can safely unwrap here since it indicates that the migration has not yet ended and boojum will // only be deployed when the migration is finished. 
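The migration cursor read by `enum_migration_start_from` above distinguishes three persisted states. A standalone restatement of that decoding (pure `std`, with a 32-byte array standing in for `H256`):

```rust
/// Decodes the persisted migration cursor: a missing key means the migration
/// hasn't started, an empty value means it has finished, and any other value
/// is the next key to process.
fn enum_migration_start_from(cursor: Option<Vec<u8>>) -> Option<[u8; 32]> {
    match cursor {
        Some(value) if value.is_empty() => None, // migration finished
        Some(value) => Some(value.try_into().expect("cursor must be 32 bytes")),
        None => Some([0_u8; 32]), // not started: begin from the zero key
    }
}

fn main() {
    assert_eq!(enum_migration_start_from(None), Some([0; 32]));
    assert_eq!(enum_migration_start_from(Some(vec![])), None);
    assert_eq!(enum_migration_start_from(Some(vec![1; 32])), Some([1; 32]));
}
```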
- self.read_state_value(key) + Self::read_state_value(&self.db, key.hashed_key()) .map(|state_value| state_value.enum_index.unwrap()) } } - -#[cfg(test)] -mod tests { - use tempfile::TempDir; - use zksync_dal::ConnectionPool; - use zksync_types::{MiniblockNumber, StorageLog}; - - use super::*; - use crate::test_utils::{ - create_l1_batch, create_miniblock, gen_storage_logs, prepare_postgres, - }; - - #[tokio::test] - async fn rocksdb_storage_basics() { - let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); - let mut storage = RocksdbStorage::new(dir.path()); - let mut storage_logs: HashMap<_, _> = gen_storage_logs(0..20) - .into_iter() - .map(|log| (log.key, log.value)) - .collect(); - let changed_keys = storage.process_transaction_logs(storage_logs.clone()); - storage.pending_patch.state = changed_keys - .map(|(key, state_value)| (key, (state_value.value, 1))) // enum index doesn't matter in the test - .collect(); - storage.save(L1BatchNumber(0)).await; - { - for (key, value) in &storage_logs { - assert!(!storage.is_write_initial(key)); - assert_eq!(storage.read_value(key), *value); - } - } - - // Overwrite some of the logs. - for log in storage_logs.values_mut().step_by(2) { - *log = StorageValue::zero(); - } - let changed_keys = storage.process_transaction_logs(storage_logs.clone()); - storage.pending_patch.state = changed_keys - .map(|(key, state_value)| (key, (state_value.value, 1))) // enum index doesn't matter in the test - .collect(); - storage.save(L1BatchNumber(1)).await; - - for (key, value) in &storage_logs { - assert!(!storage.is_write_initial(key)); - assert_eq!(storage.read_value(key), *value); - } - } - - #[tokio::test] - async fn rocksdb_storage_syncing_with_postgres() { - let pool = ConnectionPool::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); - prepare_postgres(&mut conn).await; - let storage_logs = gen_storage_logs(20..40); - create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; - create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; - - let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); - let mut storage = RocksdbStorage::new(dir.path()); - storage.update_from_postgres(&mut conn).await; - - assert_eq!(storage.l1_batch_number(), L1BatchNumber(2)); - for log in &storage_logs { - assert_eq!(storage.read_value(&log.key), log.value); - } - } - - async fn insert_factory_deps( - conn: &mut StorageProcessor<'_>, - miniblock_number: MiniblockNumber, - indices: impl Iterator, - ) { - let factory_deps = indices - .map(|i| (H256::repeat_byte(i), vec![i; 64])) - .collect(); - conn.storage_dal() - .insert_factory_deps(miniblock_number, &factory_deps) - .await; - } - - #[tokio::test] - async fn rocksdb_storage_revert() { - let pool = ConnectionPool::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); - prepare_postgres(&mut conn).await; - let storage_logs = gen_storage_logs(20..40); - create_miniblock(&mut conn, MiniblockNumber(1), storage_logs[..10].to_vec()).await; - insert_factory_deps(&mut conn, MiniblockNumber(1), 0..1).await; - create_miniblock(&mut conn, MiniblockNumber(2), storage_logs[10..].to_vec()).await; - insert_factory_deps(&mut conn, MiniblockNumber(2), 1..3).await; - create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; - - let inserted_storage_logs = gen_storage_logs(50..60); - let replaced_storage_logs: Vec<_> = storage_logs - .iter() - .step_by(2) - .map(|&log| StorageLog { - value: H256::repeat_byte(0xf0), - 
..log - }) - .collect(); - - let mut new_storage_logs = inserted_storage_logs.clone(); - new_storage_logs.extend_from_slice(&replaced_storage_logs); - create_miniblock(&mut conn, MiniblockNumber(3), new_storage_logs).await; - insert_factory_deps(&mut conn, MiniblockNumber(3), 3..5).await; - create_l1_batch(&mut conn, L1BatchNumber(2), &inserted_storage_logs).await; - - let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); - let mut storage = RocksdbStorage::new(dir.path()); - storage.update_from_postgres(&mut conn).await; - - // Perform some sanity checks before the revert. - assert_eq!(storage.l1_batch_number(), L1BatchNumber(3)); - { - for log in &inserted_storage_logs { - assert_eq!(storage.read_value(&log.key), log.value); - } - for log in &replaced_storage_logs { - assert_eq!(storage.read_value(&log.key), log.value); - } - - for i in 0..5 { - assert_eq!( - storage.load_factory_dep(H256::repeat_byte(i)).unwrap(), - [i; 64] - ); - } - } - - storage.rollback(&mut conn, L1BatchNumber(1)).await; - assert_eq!(storage.l1_batch_number(), L1BatchNumber(2)); - { - for log in &inserted_storage_logs { - assert_eq!(storage.read_value(&log.key), H256::zero()); - } - for log in &replaced_storage_logs { - assert_ne!(storage.read_value(&log.key), log.value); - } - - for i in 0..3 { - assert_eq!( - storage.load_factory_dep(H256::repeat_byte(i)).unwrap(), - [i; 64] - ); - } - for i in 3..5 { - assert!(storage.load_factory_dep(H256::repeat_byte(i)).is_none()); - } - } - } - - #[tokio::test] - async fn rocksdb_enum_index_migration() { - let pool = ConnectionPool::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); - prepare_postgres(&mut conn).await; - let storage_logs = gen_storage_logs(20..40); - create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; - create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; - - let enum_indices: HashMap<_, _> = conn - .storage_logs_dedup_dal() - .initial_writes_for_batch(L1BatchNumber(1)) - .await - .into_iter() - .collect(); - - let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); - let mut storage = RocksdbStorage::new(dir.path()); - storage.update_from_postgres(&mut conn).await; - - assert_eq!(storage.l1_batch_number(), L1BatchNumber(2)); - // Check that enum indices are correct after syncing with Postgres. - for log in &storage_logs { - let expected_index = enum_indices[&log.key.hashed_key()]; - assert_eq!( - storage.read_state_value(&log.key).unwrap().enum_index, - Some(expected_index) - ); - } - - // Remove enum indices for some keys. - let mut write_batch = storage.db.new_write_batch(); - for log in &storage_logs { - write_batch.put_cf( - StateKeeperColumnFamily::State, - log.key.hashed_key().as_bytes(), - log.value.as_bytes(), - ); - write_batch.delete_cf( - StateKeeperColumnFamily::State, - RocksdbStorage::ENUM_INDEX_MIGRATION_CURSOR, - ); - } - storage.db.write(write_batch).unwrap(); - - // Check that migration works as expected. - let ordered_keys_to_migrate: Vec = storage_logs - .iter() - .map(|log| log.key) - .sorted_by_key(StorageKey::hashed_key) - .collect(); - - storage.enable_enum_index_migration(10); - let start_from = storage.enum_migration_start_from(); - assert_eq!(start_from, Some(H256::zero())); - - // Migrate the first half. 
- storage.save_missing_enum_indices(&mut conn).await; - for key in ordered_keys_to_migrate.iter().take(10) { - let expected_index = enum_indices[&key.hashed_key()]; - assert_eq!( - storage.read_state_value(key).unwrap().enum_index, - Some(expected_index) - ); - } - assert!(storage - .read_state_value(&ordered_keys_to_migrate[10]) - .unwrap() - .enum_index - .is_none()); - - // Migrate the second half. - storage.save_missing_enum_indices(&mut conn).await; - for key in ordered_keys_to_migrate.iter().skip(10) { - let expected_index = enum_indices[&key.hashed_key()]; - assert_eq!( - storage.read_state_value(key).unwrap().enum_index, - Some(expected_index) - ); - } - - // 20 keys were processed but we haven't checked that no keys to migrate are left. - let start_from = storage.enum_migration_start_from(); - assert!(start_from.is_some()); - - // Check that migration will be marked as completed after the next iteration. - storage.save_missing_enum_indices(&mut conn).await; - let start_from = storage.enum_migration_start_from(); - assert!(start_from.is_none()); - } -} diff --git a/core/lib/state/src/rocksdb/recovery.rs b/core/lib/state/src/rocksdb/recovery.rs new file mode 100644 index 00000000000..dae4ae144be --- /dev/null +++ b/core/lib/state/src/rocksdb/recovery.rs @@ -0,0 +1,254 @@ +//! Logic for [`RocksdbStorage`] related to snapshot recovery. + +use std::ops; + +use anyhow::Context as _; +use tokio::sync::watch; +use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, StorageProcessor}; +use zksync_types::{ + snapshots::{uniform_hashed_keys_chunk, SnapshotRecoveryStatus}, + L1BatchNumber, MiniblockNumber, H256, +}; + +use super::{ + metrics::{ChunkRecoveryStage, RecoveryStage, RECOVERY_METRICS}, + RocksdbStorage, RocksdbSyncError, StateValue, +}; + +#[derive(Debug)] +struct KeyChunk { + id: u64, + key_range: ops::RangeInclusive, + start_entry: Option, +} + +impl RocksdbStorage { + /// Ensures that this storage is ready for normal operation (i.e., updates by L1 batch). + /// + /// # Return value + /// + /// Returns the next L1 batch that should be fed to the storage. + pub(super) async fn ensure_ready( + &mut self, + storage: &mut StorageProcessor<'_>, + desired_log_chunk_size: u64, + stop_receiver: &watch::Receiver, + ) -> Result { + if let Some(number) = self.l1_batch_number().await { + return Ok(number); + } + + // Check whether we need to perform a snapshot migration. + let snapshot_recovery = storage + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .context("failed getting snapshot recovery info")?; + Ok(if let Some(snapshot_recovery) = snapshot_recovery { + self.recover_from_snapshot( + storage, + &snapshot_recovery, + desired_log_chunk_size, + stop_receiver, + ) + .await?; + snapshot_recovery.l1_batch_number + 1 + } else { + // No recovery snapshot; we're initializing the cache from the genesis + L1BatchNumber(0) + }) + } + + /// # Important + /// + /// `Self::L1_BATCH_NUMBER_KEY` must be set at the very end of the process. If it is set earlier, recovery is not fault-tolerant + /// (it would be considered complete even if it failed in the middle). 
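`load_key_chunks` further down relies on `uniform_hashed_keys_chunk` to split the 2^256 hashed-key space into equal inclusive ranges, so that chunk boundaries stay stable across restarts. An illustrative analogue over `u64` instead of `H256` (the exact rounding here is an assumption for the sketch, not the library's implementation):

```rust
use std::ops::RangeInclusive;

/// Splits the full `u64` key space into `chunk_count` near-uniform inclusive
/// ranges; the last chunk absorbs the rounding remainder.
fn uniform_chunk(chunk_id: u64, chunk_count: u64) -> RangeInclusive<u64> {
    assert!(chunk_id < chunk_count);
    let chunk_size = u64::MAX / chunk_count;
    let start = chunk_id * chunk_size;
    let end = if chunk_id + 1 == chunk_count {
        u64::MAX
    } else {
        start + chunk_size - 1
    };
    start..=end
}

fn main() {
    // Adjacent chunks tile the key space without gaps or overlaps.
    assert_eq!(*uniform_chunk(0, 4).end() + 1, *uniform_chunk(1, 4).start());
    assert_eq!(*uniform_chunk(3, 4).end(), u64::MAX);
}
```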
+ async fn recover_from_snapshot( + &mut self, + storage: &mut StorageProcessor<'_>, + snapshot_recovery: &SnapshotRecoveryStatus, + desired_log_chunk_size: u64, + stop_receiver: &watch::Receiver, + ) -> Result<(), RocksdbSyncError> { + if *stop_receiver.borrow() { + return Err(RocksdbSyncError::Interrupted); + } + tracing::info!("Recovering secondary storage from snapshot: {snapshot_recovery:?}"); + + self.recover_factory_deps(storage, snapshot_recovery) + .await?; + + if *stop_receiver.borrow() { + return Err(RocksdbSyncError::Interrupted); + } + let key_chunks = + Self::load_key_chunks(storage, snapshot_recovery, desired_log_chunk_size).await?; + + RECOVERY_METRICS.recovered_chunk_count.set(0); + for key_chunk in key_chunks { + if *stop_receiver.borrow() { + return Err(RocksdbSyncError::Interrupted); + } + + let chunk_id = key_chunk.id; + let Some(chunk_start) = key_chunk.start_entry else { + tracing::info!("Chunk {chunk_id} (hashed key range {key_chunk:?}) doesn't have entries in Postgres; skipping"); + RECOVERY_METRICS.recovered_chunk_count.inc_by(1); + continue; + }; + + // Check whether the chunk is already recovered. + let state_value = self.read_state_value_async(chunk_start.key).await; + if let Some(state_value) = state_value { + if state_value.value != chunk_start.value + || state_value.enum_index != Some(chunk_start.leaf_index) + { + let err = anyhow::anyhow!( + "Mismatch between entry for key {:?} in Postgres snapshot for miniblock #{} \ + ({chunk_start:?}) and RocksDB cache ({state_value:?}); the recovery procedure may be corrupted", + chunk_start.key, + snapshot_recovery.miniblock_number + ); + return Err(err.into()); + } + tracing::info!("Chunk {chunk_id} (hashed key range {key_chunk:?}) is already recovered; skipping"); + } else { + self.recover_logs_chunk( + storage, + snapshot_recovery.miniblock_number, + key_chunk.key_range.clone(), + ) + .await + .with_context(|| { + format!( + "failed recovering logs chunk {chunk_id} (hashed key range {:?})", + key_chunk.key_range + ) + })?; + + #[cfg(test)] + (self.listener.on_logs_chunk_recovered)(chunk_id); + } + RECOVERY_METRICS.recovered_chunk_count.inc_by(1); + } + + tracing::info!("All chunks recovered; finalizing recovery process"); + self.save(Some(snapshot_recovery.l1_batch_number + 1)) + .await?; + Ok(()) + } + + async fn recover_factory_deps( + &mut self, + storage: &mut StorageProcessor<'_>, + snapshot_recovery: &SnapshotRecoveryStatus, + ) -> anyhow::Result<()> { + // We don't expect that many factory deps; that's why we recover factory deps in any case. 
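The chunk-skipping logic in `recover_from_snapshot` above is what makes recovery restartable: each chunk is persisted atomically in one RocksDB write batch, so the presence (and exact contents) of a chunk's first entry tells whether the whole chunk was already written. A distilled sketch of that per-chunk decision (types are illustrative, assuming the `anyhow` crate):

```rust
#[derive(Debug, PartialEq)]
struct Entry {
    value: u64,
    enum_index: u64,
}

enum ChunkAction {
    Skip,    // chunk fully recovered on a previous run
    Recover, // chunk not present yet
}

fn chunk_action(db_entry: Option<Entry>, snapshot_entry: &Entry) -> anyhow::Result<ChunkAction> {
    match db_entry {
        // First entry matches the snapshot: the whole chunk was written atomically before.
        Some(entry) if entry == *snapshot_entry => Ok(ChunkAction::Skip),
        // A mismatching entry means the local cache diverged from the snapshot.
        Some(entry) => anyhow::bail!(
            "mismatch between snapshot entry {snapshot_entry:?} and cached entry {entry:?}; \
             the recovery procedure may be corrupted"
        ),
        None => Ok(ChunkAction::Recover),
    }
}

fn main() {
    let snapshot = Entry { value: 1, enum_index: 7 };
    assert!(matches!(chunk_action(None, &snapshot), Ok(ChunkAction::Recover)));
}
```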
+ let latency = RECOVERY_METRICS.latency[&RecoveryStage::LoadFactoryDeps].start(); + let factory_deps = storage + .snapshots_creator_dal() + .get_all_factory_deps(snapshot_recovery.miniblock_number) + .await + .context("Failed getting factory dependencies")?; + let latency = latency.observe(); + tracing::info!( + "Loaded {} factory dependencies from the snapshot in {latency:?}", + factory_deps.len() + ); + + let latency = RECOVERY_METRICS.latency[&RecoveryStage::SaveFactoryDeps].start(); + for (bytecode_hash, bytecode) in factory_deps { + self.store_factory_dep(bytecode_hash, bytecode); + } + self.save(None) + .await + .context("failed saving factory deps")?; + let latency = latency.observe(); + tracing::info!("Saved factory dependencies to RocksDB in {latency:?}"); + Ok(()) + } + + async fn load_key_chunks( + storage: &mut StorageProcessor<'_>, + snapshot_recovery: &SnapshotRecoveryStatus, + desired_log_chunk_size: u64, + ) -> anyhow::Result> { + let snapshot_miniblock = snapshot_recovery.miniblock_number; + let log_count = storage + .storage_logs_dal() + .count_miniblock_storage_logs(snapshot_miniblock) + .await + .with_context(|| { + format!("Failed getting number of logs for miniblock #{snapshot_miniblock}") + })?; + let chunk_count = log_count.div_ceil(desired_log_chunk_size); + tracing::info!( + "Estimated the number of chunks for recovery based on {log_count} logs: {chunk_count}" + ); + + let latency = RECOVERY_METRICS.latency[&RecoveryStage::LoadChunkStarts].start(); + let key_chunks: Vec<_> = (0..chunk_count) + .map(|chunk_id| uniform_hashed_keys_chunk(chunk_id, chunk_count)) + .collect(); + let chunk_starts = storage + .storage_logs_dal() + .get_chunk_starts_for_miniblock(snapshot_miniblock, &key_chunks) + .await + .context("Failed getting chunk starts")?; + let latency = latency.observe(); + tracing::info!("Loaded {chunk_count} chunk starts in {latency:?}"); + + let key_chunks = (0..chunk_count) + .zip(key_chunks) + .zip(chunk_starts) + .map(|((id, key_range), start_entry)| KeyChunk { + id, + key_range, + start_entry, + }) + .collect(); + Ok(key_chunks) + } + + async fn read_state_value_async(&self, hashed_key: H256) -> Option { + let db = self.db.clone(); + tokio::task::spawn_blocking(move || Self::read_state_value(&db, hashed_key)) + .await + .unwrap() + } + + async fn recover_logs_chunk( + &mut self, + storage: &mut StorageProcessor<'_>, + snapshot_miniblock: MiniblockNumber, + key_chunk: ops::RangeInclusive, + ) -> anyhow::Result<()> { + let latency = RECOVERY_METRICS.chunk_latency[&ChunkRecoveryStage::LoadEntries].start(); + let all_entries = storage + .storage_logs_dal() + .get_tree_entries_for_miniblock(snapshot_miniblock, key_chunk.clone()) + .await + .with_context(|| { + format!("Failed getting entries for chunk {key_chunk:?} in snapshot for miniblock #{snapshot_miniblock}") + })?; + let latency = latency.observe(); + tracing::debug!( + "Loaded {} log entries for chunk {key_chunk:?} in {latency:?}", + all_entries.len() + ); + + let latency = RECOVERY_METRICS.chunk_latency[&ChunkRecoveryStage::SaveEntries].start(); + self.pending_patch.state = all_entries + .into_iter() + .map(|entry| (entry.key, (entry.value, entry.leaf_index))) + .collect(); + self.save(None) + .await + .context("failed saving storage logs chunk")?; + let latency = latency.observe(); + tracing::debug!("Saved logs chunk {key_chunk:?} to RocksDB in {latency:?}"); + + tracing::info!("Recovered hashed key chunk {key_chunk:?}"); + Ok(()) + } +} diff --git a/core/lib/state/src/rocksdb/tests.rs 
b/core/lib/state/src/rocksdb/tests.rs
new file mode 100644
index 00000000000..38ca942e679
--- /dev/null
+++ b/core/lib/state/src/rocksdb/tests.rs
@@ -0,0 +1,495 @@
+//! Tests for [`RocksdbStorage`].
+
+use std::fmt;
+
+use assert_matches::assert_matches;
+use tempfile::TempDir;
+use test_casing::test_casing;
+use zksync_dal::ConnectionPool;
+use zksync_types::{MiniblockNumber, StorageLog};
+
+use super::*;
+use crate::test_utils::{
+    create_l1_batch, create_miniblock, gen_storage_logs, prepare_postgres,
+    prepare_postgres_for_snapshot_recovery,
+};
+
+pub(super) struct RocksdbStorageEventListener {
+    /// Called when an L1 batch is synced.
+    pub on_l1_batch_synced: Box<dyn FnMut(L1BatchNumber) + Send + Sync>,
+    /// Called when a storage logs chunk is recovered from a snapshot.
+    pub on_logs_chunk_recovered: Box<dyn FnMut(u64) + Send + Sync>,
+}
+
+impl fmt::Debug for RocksdbStorageEventListener {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        formatter
+            .debug_struct("RocksdbStorageEventListener")
+            .finish_non_exhaustive()
+    }
+}
+
+impl Default for RocksdbStorageEventListener {
+    fn default() -> Self {
+        Self {
+            on_l1_batch_synced: Box::new(|_| { /* do nothing */ }),
+            on_logs_chunk_recovered: Box::new(|_| { /* do nothing */ }),
+        }
+    }
+}
+
+#[tokio::test]
+async fn rocksdb_storage_basics() {
+    let dir = TempDir::new().expect("cannot create temporary dir for state keeper");
+    let mut storage = RocksdbStorage::new(dir.path().to_path_buf()).await.unwrap();
+    let mut storage_logs: HashMap<_, _> = gen_storage_logs(0..20)
+        .into_iter()
+        .map(|log| (log.key, log.value))
+        .collect();
+    let changed_keys = RocksdbStorage::process_transaction_logs(&storage.db, storage_logs.clone());
+    storage.pending_patch.state = changed_keys
+        .into_iter()
+        .map(|(key, state_value)| (key.hashed_key(), (state_value.value, 1))) // enum index doesn't matter in the test
+        .collect();
+    storage.save(Some(L1BatchNumber(0))).await.unwrap();
+    {
+        for (key, value) in &storage_logs {
+            assert!(!storage.is_write_initial(key));
+            assert_eq!(storage.read_value(key), *value);
+        }
+    }
+
+    // Overwrite some of the logs.
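Stepping back to the `RocksdbStorageEventListener` definition above: it is a test-only hook pattern built from boxed `FnMut` fields that default to no-ops and that individual tests overwrite to observe progress or trigger a stop. A minimal standalone version of the idea:

```rust
struct Listener {
    on_batch_synced: Box<dyn FnMut(u32) + Send>,
}

impl Default for Listener {
    fn default() -> Self {
        // No-op by default, so non-test code paths pay no attention to it.
        Self {
            on_batch_synced: Box::new(|_| {}),
        }
    }
}

fn main() {
    let mut listener = Listener::default();
    (listener.on_batch_synced)(1); // does nothing

    // A test replaces the hook to observe events as they happen.
    listener.on_batch_synced = Box::new(|number| println!("synced batch #{number}"));
    (listener.on_batch_synced)(2);
}
```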
+ for log in storage_logs.values_mut().step_by(2) { + *log = StorageValue::zero(); + } + let changed_keys = RocksdbStorage::process_transaction_logs(&storage.db, storage_logs.clone()); + storage.pending_patch.state = changed_keys + .into_iter() + .map(|(key, state_value)| (key.hashed_key(), (state_value.value, 1))) // enum index doesn't matter in the test + .collect(); + storage.save(Some(L1BatchNumber(1))).await.unwrap(); + + for (key, value) in &storage_logs { + assert!(!storage.is_write_initial(key)); + assert_eq!(storage.read_value(key), *value); + } +} + +async fn sync_test_storage(dir: &TempDir, conn: &mut StorageProcessor<'_>) -> RocksdbStorage { + let (_stop_sender, stop_receiver) = watch::channel(false); + RocksdbStorage::builder(dir.path()) + .await + .expect("Failed initializing RocksDB") + .synchronize(conn, &stop_receiver) + .await + .unwrap() + .expect("Storage synchronization unexpectedly stopped") +} + +#[tokio::test] +async fn rocksdb_storage_syncing_with_postgres() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + prepare_postgres(&mut conn).await; + let storage_logs = gen_storage_logs(20..40); + create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; + create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = sync_test_storage(&dir, &mut conn).await; + + assert_eq!(storage.l1_batch_number().await, Some(L1BatchNumber(2))); + for log in &storage_logs { + assert_eq!(storage.read_value(&log.key), log.value); + } +} + +#[tokio::test] +async fn rocksdb_storage_syncing_fault_tolerance() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + prepare_postgres(&mut conn).await; + let storage_logs = gen_storage_logs(100..200); + for (i, block_logs) in storage_logs.chunks(20).enumerate() { + let number = u32::try_from(i).unwrap() + 1; + create_miniblock(&mut conn, MiniblockNumber(number), block_logs.to_vec()).await; + create_l1_batch(&mut conn, L1BatchNumber(number), block_logs).await; + } + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let (stop_sender, stop_receiver) = watch::channel(false); + let mut storage = RocksdbStorage::builder(dir.path()) + .await + .expect("Failed initializing RocksDB"); + let mut expected_l1_batch_number = L1BatchNumber(0); + storage.0.listener.on_l1_batch_synced = Box::new(move |number| { + assert_eq!(number, expected_l1_batch_number); + expected_l1_batch_number += 1; + if number == L1BatchNumber(2) { + stop_sender.send_replace(true); + } + }); + let storage = storage + .synchronize(&mut conn, &stop_receiver) + .await + .unwrap(); + assert!(storage.is_none()); + + // Resume storage syncing and check that it completes. 
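The fault-tolerance test above drives a graceful stop through a `tokio::sync::watch` channel: the `on_l1_batch_synced` hook flips the flag mid-sync, and the storage loop checks `*stop_receiver.borrow()` between batches. The channel mechanics in isolation (assumes the `tokio` crate with its `sync`, runtime, and macro features enabled):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (stop_sender, stop_receiver) = watch::channel(false);
    assert!(!*stop_receiver.borrow());

    // Flipping the flag is what the test hook does after batch #2.
    stop_sender.send_replace(true);

    // A sync loop polling the receiver between steps would now bail out.
    assert!(*stop_receiver.borrow());
}
```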
+ let storage = RocksdbStorage::builder(dir.path()) + .await + .expect("Failed initializing RocksDB"); + assert_eq!(storage.l1_batch_number().await, Some(L1BatchNumber(3))); + + let (_stop_sender, stop_receiver) = watch::channel(false); + let mut storage = storage + .synchronize(&mut conn, &stop_receiver) + .await + .unwrap() + .expect("Storage synchronization unexpectedly stopped"); + assert_eq!(storage.l1_batch_number().await, Some(L1BatchNumber(6))); + for log in &storage_logs { + assert_eq!(storage.read_value(&log.key), log.value); + assert!(!storage.is_write_initial(&log.key)); + } +} + +async fn insert_factory_deps( + conn: &mut StorageProcessor<'_>, + miniblock_number: MiniblockNumber, + indices: impl Iterator, +) { + let factory_deps = indices + .map(|i| (H256::repeat_byte(i), vec![i; 64])) + .collect(); + conn.storage_dal() + .insert_factory_deps(miniblock_number, &factory_deps) + .await + .unwrap(); +} + +#[tokio::test] +async fn rocksdb_storage_revert() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + prepare_postgres(&mut conn).await; + let storage_logs = gen_storage_logs(20..40); + create_miniblock(&mut conn, MiniblockNumber(1), storage_logs[..10].to_vec()).await; + insert_factory_deps(&mut conn, MiniblockNumber(1), 0..1).await; + create_miniblock(&mut conn, MiniblockNumber(2), storage_logs[10..].to_vec()).await; + insert_factory_deps(&mut conn, MiniblockNumber(2), 1..3).await; + create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; + + let inserted_storage_logs = gen_storage_logs(50..60); + let replaced_storage_logs: Vec<_> = storage_logs + .iter() + .step_by(2) + .map(|&log| StorageLog { + value: H256::repeat_byte(0xf0), + ..log + }) + .collect(); + + let mut new_storage_logs = inserted_storage_logs.clone(); + new_storage_logs.extend_from_slice(&replaced_storage_logs); + create_miniblock(&mut conn, MiniblockNumber(3), new_storage_logs).await; + insert_factory_deps(&mut conn, MiniblockNumber(3), 3..5).await; + create_l1_batch(&mut conn, L1BatchNumber(2), &inserted_storage_logs).await; + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = sync_test_storage(&dir, &mut conn).await; + + // Perform some sanity checks before the revert. 
+ assert_eq!(storage.l1_batch_number().await, Some(L1BatchNumber(3))); + { + for log in &inserted_storage_logs { + assert_eq!(storage.read_value(&log.key), log.value); + } + for log in &replaced_storage_logs { + assert_eq!(storage.read_value(&log.key), log.value); + } + + for i in 0..5 { + assert_eq!( + storage.load_factory_dep(H256::repeat_byte(i)).unwrap(), + [i; 64] + ); + } + } + + storage.rollback(&mut conn, L1BatchNumber(1)).await.unwrap(); + assert_eq!(storage.l1_batch_number().await, Some(L1BatchNumber(2))); + { + for log in &inserted_storage_logs { + assert_eq!(storage.read_value(&log.key), H256::zero()); + } + for log in &replaced_storage_logs { + assert_ne!(storage.read_value(&log.key), log.value); + } + + for i in 0..3 { + assert_eq!( + storage.load_factory_dep(H256::repeat_byte(i)).unwrap(), + [i; 64] + ); + } + for i in 3..5 { + assert!(storage.load_factory_dep(H256::repeat_byte(i)).is_none()); + } + } +} + +#[tokio::test] +async fn rocksdb_enum_index_migration() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + prepare_postgres(&mut conn).await; + let storage_logs = gen_storage_logs(20..40); + create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; + create_l1_batch(&mut conn, L1BatchNumber(1), &storage_logs).await; + + let enum_indices: HashMap<_, _> = conn + .storage_logs_dedup_dal() + .initial_writes_for_batch(L1BatchNumber(1)) + .await + .into_iter() + .collect(); + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = sync_test_storage(&dir, &mut conn).await; + + assert_eq!(storage.l1_batch_number().await, Some(L1BatchNumber(2))); + // Check that enum indices are correct after syncing with Postgres. + for log in &storage_logs { + let expected_index = enum_indices[&log.key.hashed_key()]; + assert_eq!( + storage.get_enumeration_index(&log.key), + Some(expected_index) + ); + } + + // Remove enum indices for some keys. + let mut write_batch = storage.db.new_write_batch(); + for log in &storage_logs { + write_batch.put_cf( + StateKeeperColumnFamily::State, + log.key.hashed_key().as_bytes(), + log.value.as_bytes(), + ); + write_batch.delete_cf( + StateKeeperColumnFamily::State, + RocksdbStorage::ENUM_INDEX_MIGRATION_CURSOR, + ); + } + storage.db.write(write_batch).unwrap(); + + // Check that migration works as expected. + let ordered_keys_to_migrate: Vec = storage_logs + .iter() + .map(|log| log.key) + .sorted_by_key(StorageKey::hashed_key) + .collect(); + + storage.enum_index_migration_chunk_size = 10; + let start_from = storage.enum_migration_start_from().await; + assert_eq!(start_from, Some(H256::zero())); + + // Migrate the first half. + storage.save_missing_enum_indices(&mut conn).await.unwrap(); + for key in ordered_keys_to_migrate.iter().take(10) { + let expected_index = enum_indices[&key.hashed_key()]; + assert_eq!(storage.get_enumeration_index(key), Some(expected_index)); + } + let non_migrated_state_value = + RocksdbStorage::read_state_value(&storage.db, ordered_keys_to_migrate[10].hashed_key()) + .unwrap(); + assert!(non_migrated_state_value.enum_index.is_none()); + + // Migrate the second half. + storage.save_missing_enum_indices(&mut conn).await.unwrap(); + for key in ordered_keys_to_migrate.iter().skip(10) { + let expected_index = enum_indices[&key.hashed_key()]; + assert_eq!(storage.get_enumeration_index(key), Some(expected_index)); + } + + // 20 keys were processed but we haven't checked that no keys to migrate are left. 
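Why the test above needs one more pass after migrating all 20 keys: the migration only marks itself finished when a scan yields fewer keys than `enum_index_migration_chunk_size`, so two full chunks of 10 leave the cursor set, and a final empty pass is required. The termination condition, restated as a standalone predicate:

```rust
/// The migration cursor is cleared (i.e., the migration completes) only when a
/// scan returns a non-full chunk; a full chunk always schedules another pass.
fn migration_finished(keys_in_chunk: usize, chunk_size: usize) -> bool {
    keys_in_chunk < chunk_size
}

fn main() {
    assert!(!migration_finished(10, 10)); // both real chunks are full
    assert!(migration_finished(0, 10)); // the third, empty pass finishes it
}
```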
+ let start_from = storage.enum_migration_start_from().await; + assert!(start_from.is_some()); + + // Check that migration will be marked as completed after the next iteration. + storage.save_missing_enum_indices(&mut conn).await.unwrap(); + let start_from = storage.enum_migration_start_from().await; + assert!(start_from.is_none()); +} + +#[test_casing(4, [RocksdbStorage::DESIRED_LOG_CHUNK_SIZE, 20, 5, 1])] +#[tokio::test] +async fn low_level_snapshot_recovery(log_chunk_size: u64) { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + let (snapshot_recovery, mut storage_logs) = + prepare_postgres_for_snapshot_recovery(&mut conn).await; + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = RocksdbStorage::new(dir.path().to_path_buf()).await.unwrap(); + let (_stop_sender, stop_receiver) = watch::channel(false); + let next_l1_batch = storage + .ensure_ready(&mut conn, log_chunk_size, &stop_receiver) + .await + .unwrap(); + assert_eq!(next_l1_batch, snapshot_recovery.l1_batch_number + 1); + assert_eq!( + storage.l1_batch_number().await, + Some(snapshot_recovery.l1_batch_number + 1) + ); + + // Sort logs in the same order as enum indices are assigned (by full `StorageKey`). + storage_logs.sort_unstable_by_key(|log| log.key); + for (i, log) in storage_logs.iter().enumerate() { + assert_eq!(storage.read_value(&log.key), log.value); + let expected_index = i as u64 + 1; + assert_eq!( + storage.get_enumeration_index(&log.key), + Some(expected_index) + ); + } +} + +#[tokio::test] +async fn recovering_factory_deps_from_snapshot() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + let (snapshot_recovery, _) = prepare_postgres_for_snapshot_recovery(&mut conn).await; + + let mut all_factory_deps = HashMap::new(); + for number in 0..snapshot_recovery.miniblock_number.0 { + let bytecode_hash = H256::from_low_u64_be(number.into()); + let bytecode = vec![u8::try_from(number).unwrap(); 1_024]; + all_factory_deps.insert(bytecode_hash, bytecode.clone()); + + let number = MiniblockNumber(number); + // FIXME (PLA-589): don't store miniblocks once the corresponding foreign keys are removed + create_miniblock(&mut conn, number, vec![]).await; + conn.storage_dal() + .insert_factory_deps(number, &HashMap::from([(bytecode_hash, bytecode)])) + .await + .unwrap(); + } + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = sync_test_storage(&dir, &mut conn).await; + + for (bytecode_hash, bytecode) in &all_factory_deps { + assert_eq!(storage.load_factory_dep(*bytecode_hash).unwrap(), *bytecode); + } +} + +#[tokio::test] +async fn recovering_from_snapshot_and_following_logs() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + let (snapshot_recovery, mut storage_logs) = + prepare_postgres_for_snapshot_recovery(&mut conn).await; + + // Add some more storage logs. 
+ let new_storage_logs = gen_storage_logs(500..600); + create_miniblock( + &mut conn, + snapshot_recovery.miniblock_number + 1, + new_storage_logs.clone(), + ) + .await; + create_l1_batch( + &mut conn, + snapshot_recovery.l1_batch_number + 1, + &new_storage_logs, + ) + .await; + + let updated_storage_logs: Vec<_> = storage_logs + .iter() + .step_by(3) + .copied() + .map(|mut log| { + log.value = H256::repeat_byte(0xff); + log + }) + .collect(); + create_miniblock( + &mut conn, + snapshot_recovery.miniblock_number + 2, + updated_storage_logs.clone(), + ) + .await; + create_l1_batch(&mut conn, snapshot_recovery.l1_batch_number + 2, &[]).await; + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = sync_test_storage(&dir, &mut conn).await; + + for (i, log) in new_storage_logs.iter().enumerate() { + assert_eq!(storage.read_value(&log.key), log.value); + let expected_index = (i + storage_logs.len()) as u64 + 1; + assert_eq!( + storage.get_enumeration_index(&log.key), + Some(expected_index) + ); + assert!(!storage.is_write_initial(&log.key)); + } + + for log in &updated_storage_logs { + assert_eq!(storage.read_value(&log.key), log.value); + assert!(storage.get_enumeration_index(&log.key).unwrap() <= storage_logs.len() as u64); + } + storage_logs.sort_unstable_by_key(|log| log.key); + for (i, log) in storage_logs.iter().enumerate() { + let expected_index = i as u64 + 1; + assert_eq!( + storage.get_enumeration_index(&log.key), + Some(expected_index) + ); + assert!(!storage.is_write_initial(&log.key)); + } +} + +#[tokio::test] +async fn recovery_fault_tolerance() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + let (_, storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; + let log_chunk_size = storage_logs.len() as u64 / 5; + + let dir = TempDir::new().expect("cannot create temporary dir for state keeper"); + let mut storage = RocksdbStorage::new(dir.path().to_path_buf()).await.unwrap(); + let (stop_sender, stop_receiver) = watch::channel(false); + let mut synced_chunk_count = 0_u64; + storage.listener.on_logs_chunk_recovered = Box::new(move |chunk_id| { + assert_eq!(chunk_id, synced_chunk_count); + synced_chunk_count += 1; + if synced_chunk_count == 2 { + stop_sender.send_replace(true); + } + }); + + let err = storage + .ensure_ready(&mut conn, log_chunk_size, &stop_receiver) + .await + .unwrap_err(); + assert_matches!(err, RocksdbSyncError::Interrupted); + drop(storage); + + // Resume recovery and check that no chunks are recovered twice. 
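    // Editor's note: the restarted recovery below asserts `chunk_id >= 2`, i.e. the
    // two chunks completed before the interruption are skipped on resume; chunk
    // completion is presumably tracked per chunk id (cf. the
    // `storage_logs_chunks_processed: Vec<bool>` field of `SnapshotRecoveryStatus`
    // later in this diff).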
+    let (_stop_sender, stop_receiver) = watch::channel(false);
+    let mut storage = RocksdbStorage::new(dir.path().to_path_buf()).await.unwrap();
+    storage.listener.on_logs_chunk_recovered = Box::new(|chunk_id| {
+        assert!(chunk_id >= 2);
+    });
+    storage
+        .ensure_ready(&mut conn, log_chunk_size, &stop_receiver)
+        .await
+        .unwrap();
+    for log in &storage_logs {
+        assert_eq!(storage.read_value(&log.key), log.value);
+        assert!(!storage.is_write_initial(&log.key));
+    }
+}
diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs
index 340f2ea6223..9c4fca8285e 100644
--- a/core/lib/state/src/test_utils.rs
+++ b/core/lib/state/src/test_utils.rs
@@ -4,7 +4,8 @@ use std::ops;
 use zksync_dal::StorageProcessor;
 use zksync_types::{
-    block::{BlockGasCount, L1BatchHeader, MiniblockHeader},
+    block::{L1BatchHeader, MiniblockHeader},
+    snapshots::SnapshotRecoveryStatus,
     AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersion, StorageKey,
     StorageLog, H256,
 };
@@ -32,6 +33,10 @@ pub(crate) async fn prepare_postgres(conn: &mut StorageProcessor<'_>) {
         .delete_l1_batches(L1BatchNumber(0))
         .await
         .unwrap();
+    conn.blocks_dal()
+        .delete_initial_writes(L1BatchNumber(0))
+        .await
+        .unwrap();
 }
 
 pub(crate) fn gen_storage_logs(indices: ops::Range<u32>) -> Vec<StorageLog> {
@@ -72,6 +77,7 @@ pub(crate) async fn create_miniblock(
         hash: H256::from_low_u64_be(u64::from(miniblock_number.0)),
         l1_tx_count: 0,
         l2_tx_count: 0,
+        fee_account_address: Address::default(),
         base_fee_per_gas: 0,
         batch_fee_input: Default::default(),
         gas_per_pubdata_limit: 0,
@@ -96,16 +102,9 @@ pub(crate) async fn create_l1_batch(
     l1_batch_number: L1BatchNumber,
     logs_for_initial_writes: &[StorageLog],
 ) {
-    let mut header = L1BatchHeader::new(
-        l1_batch_number,
-        0,
-        Address::default(),
-        Default::default(),
-        Default::default(),
-    );
-    header.is_finished = true;
+    let header = L1BatchHeader::new(l1_batch_number, 0, Default::default(), Default::default());
     conn.blocks_dal()
-        .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0)
+        .insert_mock_l1_batch(&header)
         .await
         .unwrap();
     conn.blocks_dal()
@@ -119,3 +118,39 @@ pub(crate) async fn create_l1_batch(
         .insert_initial_writes(l1_batch_number, &written_keys)
         .await;
 }
+
+pub(crate) async fn prepare_postgres_for_snapshot_recovery(
+    conn: &mut StorageProcessor<'_>,
+) -> (SnapshotRecoveryStatus, Vec<StorageLog>) {
+    conn.protocol_versions_dal()
+        .save_protocol_version_with_tx(ProtocolVersion::default())
+        .await;
+
+    let snapshot_recovery = SnapshotRecoveryStatus {
+        l1_batch_number: L1BatchNumber(23),
+        l1_batch_root_hash: H256::zero(), // not used
+        miniblock_number: MiniblockNumber(42),
+        miniblock_root_hash: H256::zero(), // not used
+        storage_logs_chunks_processed: vec![true; 100],
+    };
+    conn.snapshot_recovery_dal()
+        .insert_initial_recovery_status(&snapshot_recovery)
+        .await
+        .unwrap();
+
+    // FIXME (PLA-589): don't store miniblock / L1 batch once the corresponding foreign keys are removed
+    let snapshot_storage_logs = gen_storage_logs(100..200);
+    create_miniblock(
+        conn,
+        snapshot_recovery.miniblock_number,
+        snapshot_storage_logs.clone(),
+    )
+    .await;
+    create_l1_batch(
+        conn,
+        snapshot_recovery.l1_batch_number,
+        &snapshot_storage_logs,
+    )
+    .await;
+    (snapshot_recovery, snapshot_storage_logs)
+}
diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs
index 24502493a60..e8402b96c62 100644
--- a/core/lib/storage/src/db.rs
+++ b/core/lib/storage/src/db.rs
@@ -298,11 +298,11 @@ pub struct RocksDB<CF> {
 }
 
 impl<CF: NamedColumnFamily> RocksDB<CF> {
-    pub fn new(path: &Path) -> Self {
+    pub fn new(path: &Path) -> Result<Self, rocksdb::Error> {
         Self::with_options(path, RocksDBOptions::default())
     }
 
-    pub fn with_options(path: &Path, options: RocksDBOptions) -> Self {
+    pub fn with_options(path: &Path, options: RocksDBOptions) -> Result<Self, rocksdb::Error> {
         let caches = RocksDBCaches::new(options.block_cache_capacity);
         let db_options = Self::rocksdb_options(None, None);
         let existing_cfs = DB::list_cf(&db_options, path).unwrap_or_else(|err| {
@@ -354,7 +354,7 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
             ColumnFamilyDescriptor::new(cf_name, cf_options)
         });
 
-        let db = DB::open_cf_descriptors(&db_options, path, cfs).expect("failed to init rocksdb");
+        let db = DB::open_cf_descriptors(&db_options, path, cfs)?;
         let inner = Arc::new(RocksDBInner {
             db,
             db_name: CF::DB_NAME,
@@ -371,12 +371,12 @@
         );
         inner.wait_for_writes_to_resume(&options.stalled_writes_retries);
 
-        Self {
+        Ok(Self {
             inner,
             sync_writes: false,
             stalled_writes_retries: options.stalled_writes_retries,
             _cf: PhantomData,
-        }
+        })
     }
 
     /// Switches on sync writes in [`Self::write()`] and [`Self::put()`]. This has a performance
@@ -665,13 +665,15 @@ mod tests {
     #[test]
     fn changing_column_families() {
         let temp_dir = TempDir::new().unwrap();
-        let db = RocksDB::<OldColumnFamilies>::new(temp_dir.path()).with_sync_writes();
+        let db = RocksDB::<OldColumnFamilies>::new(temp_dir.path())
+            .unwrap()
+            .with_sync_writes();
         let mut batch = db.new_write_batch();
         batch.put_cf(OldColumnFamilies::Default, b"test", b"value");
         db.write(batch).unwrap();
         drop(db);
 
-        let db = RocksDB::<NewColumnFamilies>::new(temp_dir.path());
+        let db = RocksDB::<NewColumnFamilies>::new(temp_dir.path()).unwrap();
         let value = db.get_cf(NewColumnFamilies::Default, b"test").unwrap();
         assert_eq!(value.unwrap(), b"value");
     }
@@ -691,13 +693,15 @@ mod tests {
     #[test]
     fn default_column_family_does_not_need_to_be_explicitly_opened() {
         let temp_dir = TempDir::new().unwrap();
-        let db = RocksDB::<OldColumnFamilies>::new(temp_dir.path()).with_sync_writes();
+        let db = RocksDB::<OldColumnFamilies>::new(temp_dir.path())
+            .unwrap()
+            .with_sync_writes();
         let mut batch = db.new_write_batch();
         batch.put_cf(OldColumnFamilies::Junk, b"test", b"value");
         db.write(batch).unwrap();
         drop(db);
 
-        let db = RocksDB::<JunkColumnFamily>::new(temp_dir.path());
+        let db = RocksDB::<JunkColumnFamily>::new(temp_dir.path()).unwrap();
         let value = db.get_cf(JunkColumnFamily, b"test").unwrap();
         assert_eq!(value.unwrap(), b"value");
     }
 
@@ -705,7 +709,9 @@ mod tests {
     #[test]
     fn write_batch_can_be_restored_from_bytes() {
         let temp_dir = TempDir::new().unwrap();
-        let db = RocksDB::<NewColumnFamilies>::new(temp_dir.path()).with_sync_writes();
+        let db = RocksDB::<NewColumnFamilies>::new(temp_dir.path())
+            .unwrap()
+            .with_sync_writes();
         let mut batch = db.new_write_batch();
         batch.put_cf(NewColumnFamilies::Default, b"test", b"value");
         batch.put_cf(NewColumnFamilies::Default, b"test2", b"value2");
diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml
index f9378fd2d66..3a2a6e4eb54 100644
--- a/core/lib/types/Cargo.toml
+++ b/core/lib/types/Cargo.toml
@@ -17,13 +17,7 @@ zksync_basic_types = { path = "../basic_types" }
 zksync_contracts = { path = "../contracts" }
 zksync_mini_merkle_tree = { path = "../mini_merkle_tree" }
 zksync_config = { path = "../config" }
-# We need this import because we wanat DAL to be responsible for (de)serialization
-codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" }
-zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" }
-zk_evm_1_4_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" }
-zk_evm_1_4_0 = { package = "zk_evm", git =
"https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0" } -zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } anyhow = "1.0.75" chrono = { version = "0.4", features = ["serde"] } @@ -48,4 +42,4 @@ tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } diff --git a/core/lib/types/src/aggregated_operations.rs b/core/lib/types/src/aggregated_operations.rs index c4ef7a432dd..dadfad265cb 100644 --- a/core/lib/types/src/aggregated_operations.rs +++ b/core/lib/types/src/aggregated_operations.rs @@ -1,165 +1,4 @@ -use std::{fmt, ops, str::FromStr, sync::Arc}; - -use codegen::serialize_proof; -use serde::{Deserialize, Serialize}; -use zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, - bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof}, - witness::oracle::VmWitnessOracle, -}; -use zksync_basic_types::{ethabi::Token, L1BatchNumber}; - -use crate::{ - commitment::L1BatchWithMetadata, l1_batch_commit_data_generator::L1BatchCommitDataGenerator, - ProtocolVersionId, U256, -}; - -fn l1_batch_range_from_batches( - batches: &[L1BatchWithMetadata], -) -> ops::RangeInclusive { - let start = batches - .first() - .map(|l1_batch| l1_batch.header.number) - .unwrap_or_default(); - let end = batches - .last() - .map(|l1_batch| l1_batch.header.number) - .unwrap_or_default(); - start..=end -} - -#[derive(Debug, Clone)] -pub struct L1BatchCommitOperation { - pub last_committed_l1_batch: L1BatchWithMetadata, - pub l1_batches: Vec, - pub l1_batch_commit_data_generator: Arc, -} - -impl L1BatchCommitOperation { - pub fn get_eth_tx_args(&self) -> Vec { - let stored_batch_info = self.last_committed_l1_batch.l1_header_data(); - let l1_batches_to_commit = self - .l1_batches - .iter() - .map(|l1_batch_with_metadata| { - self.l1_batch_commit_data_generator - .l1_commit_data(l1_batch_with_metadata) - }) - .collect(); - - vec![stored_batch_info, Token::Array(l1_batches_to_commit)] - } - - pub fn l1_batch_range(&self) -> ops::RangeInclusive { - l1_batch_range_from_batches(&self.l1_batches) - } -} - -#[derive(Debug, Clone)] -pub struct L1BatchCreateProofOperation { - pub l1_batches: Vec, - pub proofs_to_pad: usize, -} - -#[derive(Clone, Serialize, Deserialize)] -pub struct L1BatchProofForL1 { - pub aggregation_result_coords: [[u8; 32]; 4], - pub scheduler_proof: Proof>>, -} - -impl fmt::Debug for L1BatchProofForL1 { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter - .debug_struct("L1BatchProofForL1") - .field("aggregation_result_coords", &self.aggregation_result_coords) - .finish_non_exhaustive() - } -} - -#[derive(Debug, Clone)] -pub struct L1BatchProofOperation { - pub prev_l1_batch: L1BatchWithMetadata, - pub l1_batches: Vec, - pub proofs: Vec, - pub should_verify: bool, -} - -impl L1BatchProofOperation { - pub fn get_eth_tx_args(&self) -> 
Vec { - let prev_l1_batch = self.prev_l1_batch.l1_header_data(); - let batches_arg = self - .l1_batches - .iter() - .map(L1BatchWithMetadata::l1_header_data) - .collect(); - let batches_arg = Token::Array(batches_arg); - - if self.should_verify { - // currently we only support submitting a single proof - assert_eq!(self.proofs.len(), 1); - assert_eq!(self.l1_batches.len(), 1); - - let L1BatchProofForL1 { - aggregation_result_coords, - scheduler_proof, - } = self.proofs.first().unwrap(); - - let (_, proof) = serialize_proof(scheduler_proof); - - let aggregation_result_coords = if self.l1_batches[0] - .header - .protocol_version - .unwrap() - .is_pre_boojum() - { - Token::Array( - aggregation_result_coords - .iter() - .map(|bytes| Token::Uint(U256::from_big_endian(bytes))) - .collect(), - ) - } else { - Token::Array(Vec::new()) - }; - let proof_input = Token::Tuple(vec![ - aggregation_result_coords, - Token::Array(proof.into_iter().map(Token::Uint).collect()), - ]); - - vec![prev_l1_batch, batches_arg, proof_input] - } else { - vec![ - prev_l1_batch, - batches_arg, - Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]), - ] - } - } - - pub fn l1_batch_range(&self) -> ops::RangeInclusive { - l1_batch_range_from_batches(&self.l1_batches) - } -} - -#[derive(Debug, Clone)] -pub struct L1BatchExecuteOperation { - pub l1_batches: Vec, -} - -impl L1BatchExecuteOperation { - pub fn get_eth_tx_args(&self) -> Vec { - vec![Token::Array( - self.l1_batches - .iter() - .map(L1BatchWithMetadata::l1_header_data) - .collect(), - )] - } - - pub fn l1_batch_range(&self) -> ops::RangeInclusive { - l1_batch_range_from_batches(&self.l1_batches) - } -} +use std::{fmt, str::FromStr}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum AggregatedActionType { @@ -200,45 +39,3 @@ impl FromStr for AggregatedActionType { } } } - -#[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone)] -pub enum AggregatedOperation { - Commit(L1BatchCommitOperation), - PublishProofOnchain(L1BatchProofOperation), - Execute(L1BatchExecuteOperation), -} - -impl AggregatedOperation { - pub fn get_action_type(&self) -> AggregatedActionType { - match self { - Self::Commit(_) => AggregatedActionType::Commit, - Self::PublishProofOnchain(_) => AggregatedActionType::PublishProofOnchain, - Self::Execute(_) => AggregatedActionType::Execute, - } - } - - pub fn l1_batch_range(&self) -> ops::RangeInclusive { - match self { - Self::Commit(op) => op.l1_batch_range(), - Self::PublishProofOnchain(op) => op.l1_batch_range(), - Self::Execute(op) => op.l1_batch_range(), - } - } - - pub fn get_action_caption(&self) -> &'static str { - match self { - Self::Commit(_) => "commit", - Self::PublishProofOnchain(_) => "proof", - Self::Execute(_) => "execute", - } - } - - pub fn protocol_version(&self) -> ProtocolVersionId { - match self { - Self::Commit(op) => op.l1_batches[0].header.protocol_version.unwrap(), - Self::PublishProofOnchain(op) => op.l1_batches[0].header.protocol_version.unwrap(), - Self::Execute(op) => op.l1_batches[0].header.protocol_version.unwrap(), - } - } -} diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 2e7afcdfb73..6b74d6c6303 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -1,8 +1,7 @@ //! API types related to the External Node specific methods. 
 use serde::{Deserialize, Serialize};
-use zk_evm::ethereum_types::Address;
-use zksync_basic_types::{L1BatchNumber, MiniblockNumber, H256};
+use zksync_basic_types::{Address, L1BatchNumber, MiniblockNumber, H256};
 use zksync_contracts::BaseSystemContractsHashes;
 
 use crate::ProtocolVersionId;
diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs
index 48765e27e0f..950c6f4e268 100644
--- a/core/lib/types/src/block.rs
+++ b/core/lib/types/src/block.rs
@@ -1,7 +1,7 @@
 use std::{fmt, ops};
 
 use serde::{Deserialize, Serialize};
-use zksync_basic_types::{H2048, H256, U256};
+use zksync_basic_types::{Address, H2048, H256, U256};
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER;
 use zksync_utils::concat_and_hash;
@@ -11,7 +11,7 @@ use crate::{
     l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log},
     priority_op_onchain_data::PriorityOpOnchainData,
     web3::signing::keccak256,
-    AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction,
+    AccountTreeId, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction,
 };
 
 /// Represents a successfully deployed smart contract.
@@ -35,12 +35,8 @@ impl DeployedContract {
 pub struct L1BatchHeader {
     /// Numeric ID of the block. Starts from 1, 0 block is considered genesis block and has no transactions.
     pub number: L1BatchNumber,
-    /// Whether block is sealed or not (doesn't correspond to committing/verifying it on the L1).
-    pub is_finished: bool,
     /// Timestamp when block was first created.
     pub timestamp: u64,
-    /// Address of the fee account that was used when block was created
-    pub fee_account_address: Address,
     /// Total number of processed priority operations in the block
     pub l1_tx_count: u16,
     /// Total number of processed txs that was requested offchain
@@ -55,12 +51,6 @@ pub struct L1BatchHeader {
     pub bloom: H2048,
     /// Hashes of contracts used this block
     pub used_contract_hashes: Vec<U256>,
-    /// The EIP1559 base_fee used in this block.
-    pub base_fee_per_gas: u64,
-    /// The assumed L1 gas price within the block.
-    pub l1_gas_price: u64,
-    /// The L2 gas price that the operator agrees on.
-    pub l2_fair_gas_price: u64,
     pub base_system_contracts_hashes: BaseSystemContractsHashes,
     /// System logs are those emitted as part of the Vm execution.
     pub system_logs: Vec<SystemL2ToL1Log>,
@@ -77,6 +67,7 @@ pub struct MiniblockHeader {
     pub hash: H256,
     pub l1_tx_count: u16,
     pub l2_tx_count: u16,
+    pub fee_account_address: Address,
     pub base_fee_per_gas: u64, // Min wei per gas that txs in this miniblock need to have.
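    // Editor's note: `fee_account_address` moves from `L1BatchHeader` (removed
    // above) to `MiniblockHeader` here, i.e. the fee account is now recorded per
    // miniblock rather than per L1 batch; the matching DAL read is
    // `get_fee_address_for_miniblock` in the `vm_utils` hunk further below.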
pub batch_fee_input: BatchFeeInput, @@ -101,15 +92,12 @@ impl L1BatchHeader { pub fn new( number: L1BatchNumber, timestamp: u64, - fee_account_address: Address, base_system_contracts_hashes: BaseSystemContractsHashes, protocol_version: ProtocolVersionId, ) -> L1BatchHeader { Self { number, - is_finished: false, timestamp, - fee_account_address, l1_tx_count: 0, l2_tx_count: 0, priority_ops_onchain_data: vec![], @@ -117,9 +105,6 @@ impl L1BatchHeader { l2_to_l1_messages: vec![], bloom: H2048::default(), used_contract_hashes: vec![], - base_fee_per_gas: 0, - l1_gas_price: 0, - l2_fair_gas_price: 0, base_system_contracts_hashes, system_logs: vec![], protocol_version: Some(protocol_version), diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs new file mode 100644 index 00000000000..d97594913c4 --- /dev/null +++ b/core/lib/types/src/circuit.rs @@ -0,0 +1,93 @@ +use std::ops::Add; + +use serde::{Deserialize, Serialize}; + +/// Holds information about number of cycles used per circuit type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub struct CircuitCycleStatistic { + pub main_vm_cycles: u32, + pub ram_permutation_cycles: u32, + pub storage_application_cycles: u32, + pub storage_sorter_cycles: u32, + pub code_decommitter_cycles: u32, + pub code_decommitter_sorter_cycles: u32, + pub log_demuxer_cycles: u32, + pub events_sorter_cycles: u32, + pub keccak256_cycles: u32, + pub ecrecover_cycles: u32, + pub sha256_cycles: u32, +} + +impl CircuitCycleStatistic { + pub fn new() -> Self { + Self::default() + } +} + +/// Holds information about number of circuits used per circuit type. +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] +pub struct CircuitStatistic { + pub main_vm: f32, + pub ram_permutation: f32, + pub storage_application: f32, + pub storage_sorter: f32, + pub code_decommitter: f32, + pub code_decommitter_sorter: f32, + pub log_demuxer: f32, + pub events_sorter: f32, + pub keccak256: f32, + pub ecrecover: f32, + pub sha256: f32, +} + +impl CircuitStatistic { + /// Rounds up numbers and adds them. + pub fn total(&self) -> usize { + self.main_vm.ceil() as usize + + self.ram_permutation.ceil() as usize + + self.storage_application.ceil() as usize + + self.storage_sorter.ceil() as usize + + self.code_decommitter.ceil() as usize + + self.code_decommitter_sorter.ceil() as usize + + self.log_demuxer.ceil() as usize + + self.events_sorter.ceil() as usize + + self.keccak256.ceil() as usize + + self.ecrecover.ceil() as usize + + self.sha256.ceil() as usize + } + + /// Adds numbers. 
+ pub fn total_f32(&self) -> f32 { + self.main_vm + + self.ram_permutation + + self.storage_application + + self.storage_sorter + + self.code_decommitter + + self.code_decommitter_sorter + + self.log_demuxer + + self.events_sorter + + self.keccak256 + + self.ecrecover + + self.sha256 + } +} + +impl Add for CircuitStatistic { + type Output = CircuitStatistic; + + fn add(self, other: CircuitStatistic) -> CircuitStatistic { + CircuitStatistic { + main_vm: self.main_vm + other.main_vm, + ram_permutation: self.ram_permutation + other.ram_permutation, + storage_application: self.storage_application + other.storage_application, + storage_sorter: self.storage_sorter + other.storage_sorter, + code_decommitter: self.code_decommitter + other.code_decommitter, + code_decommitter_sorter: self.code_decommitter_sorter + other.code_decommitter_sorter, + log_demuxer: self.log_demuxer + other.log_demuxer, + events_sorter: self.events_sorter + other.events_sorter, + keccak256: self.keccak256 + other.keccak256, + ecrecover: self.ecrecover + other.ecrecover, + sha256: self.sha256 + other.sha256, + } + } +} diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 4670c8d223d..50508bc4b8d 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -17,14 +17,13 @@ use zksync_utils::u256_to_h256; use crate::{ block::L1BatchHeader, - ethabi::Token, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, web3::signing::keccak256, writes::{ compress_state_diffs, InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord, PADDED_ENCODED_STORAGE_DIFF_LEN_BYTES, }, - ProtocolVersionId, H256, KNOWN_CODES_STORAGE_ADDRESS, U256, + ProtocolVersionId, H256, KNOWN_CODES_STORAGE_ADDRESS, }; /// Type that can be serialized for commitment. 
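// Editor's worked example for `CircuitStatistic` above (illustration only): with
// `main_vm: 1.2`, `keccak256: 0.3`, and the nine remaining fields at 0.0,
// `total()` = ceil(1.2) + ceil(0.3) = 2 + 1 = 3 circuits, while `total_f32()` = 1.5.
// `total()` rounds each circuit type up separately, presumably so that capacity
// checks never underestimate circuit usage.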
@@ -130,33 +129,6 @@ impl L1BatchWithMetadata { } }) } - - /// Encodes L1Batch into `StorageBatchInfo` (see `IExecutor.sol`) - pub fn l1_header_data(&self) -> Token { - Token::Tuple(vec![ - // `batchNumber` - Token::Uint(U256::from(self.header.number.0)), - // `batchHash` - Token::FixedBytes(self.metadata.root_hash.as_bytes().to_vec()), - // `indexRepeatedStorageChanges` - Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)), - // `numberOfLayer1Txs` - Token::Uint(U256::from(self.header.l1_tx_count)), - // `priorityOperationsHash` - Token::FixedBytes( - self.header - .priority_ops_onchain_data_hash() - .as_bytes() - .to_vec(), - ), - // `l2LogsTreeRoot` - Token::FixedBytes(self.metadata.l2_l1_merkle_root.as_bytes().to_vec()), - // timestamp - Token::Uint(U256::from(self.header.timestamp)), - // commitment - Token::FixedBytes(self.metadata.commitment.as_bytes().to_vec()), - ]) - } } impl SerializeCommitment for L2ToL1Log { diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs index fad4d09f528..1f10d5d574b 100644 --- a/core/lib/types/src/fee.rs +++ b/core/lib/types/src/fee.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_utils::ceil_div; -use crate::U256; +use crate::{circuit::CircuitStatistic, U256}; #[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] #[serde(rename_all = "camelCase", tag = "result")] @@ -24,7 +24,7 @@ pub struct TransactionExecutionMetrics { pub computational_gas_used: u32, pub total_updated_values_size: usize, pub pubdata_published: u32, - pub estimated_circuits_used: f32, + pub circuit_statistic: CircuitStatistic, } #[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 03ac163e559..434e17e8bb2 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -1,8 +1,4 @@ use serde::{Deserialize, Serialize}; -use zk_evm::reference_impls::event_sink::EventMessage; -use zk_evm_1_4_0::reference_impls::event_sink::EventMessage as EventMessage_1_4_0; -use zk_evm_1_4_1::reference_impls::event_sink::EventMessage as EventMessage_1_4_1; -use zksync_utils::u256_to_h256; use crate::{commitment::SerializeCommitment, Address, H256}; @@ -67,45 +63,6 @@ impl L2ToL1Log { } } -impl From for L2ToL1Log { - fn from(m: EventMessage) -> Self { - Self { - shard_id: m.shard_id, - is_service: m.is_first, - tx_number_in_block: m.tx_number_in_block, - sender: m.address, - key: u256_to_h256(m.key), - value: u256_to_h256(m.value), - } - } -} - -impl From for L2ToL1Log { - fn from(m: EventMessage_1_4_0) -> Self { - Self { - shard_id: m.shard_id, - is_service: m.is_first, - tx_number_in_block: m.tx_number_in_block, - sender: m.address, - key: u256_to_h256(m.key), - value: u256_to_h256(m.value), - } - } -} - -impl From for L2ToL1Log { - fn from(m: EventMessage_1_4_1) -> Self { - Self { - shard_id: m.shard_id, - is_service: m.is_first, - tx_number_in_block: m.tx_number_in_block, - sender: m.address, - key: u256_to_h256(m.key), - value: u256_to_h256(m.value), - } - } -} - #[cfg(test)] mod tests { use zksync_basic_types::U256; diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 918be9d1081..27cffb360a3 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -21,20 +21,15 @@ pub use protocol_version::{ProtocolUpgrade, ProtocolVersion, ProtocolVersionId}; pub use storage::*; pub use tx::{primitives::*, Execute}; pub use vm_version::VmVersion; -pub use zk_evm::{ - 
aux_structures::{LogQuery, Timestamp}, - reference_impls::event_sink::EventMessage, - zkevm_opcode_defs::FarCallOpcode, -}; pub use zksync_basic_types::*; use crate::{l2::TransactionType, protocol_version::ProtocolUpgradeTxCommonData}; pub mod aggregated_operations; pub mod block; +pub mod circuit; pub mod commitment; pub mod contract_verification_api; -pub mod contracts; pub mod event; pub mod fee; pub mod fee_model; @@ -50,21 +45,16 @@ pub mod system_contracts; pub mod tokens; pub mod tx; pub mod vm_trace; +pub mod zk_evm_types; pub mod api; pub mod eth_sender; pub mod helpers; -pub mod proofs; pub mod proto; -pub mod prover_server_api; -pub mod sort_storage_access; pub mod transaction_request; pub mod utils; -pub mod vk_transform; pub mod vm_version; -pub mod l1_batch_commit_data_generator; - /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. pub const EIP_712_TX_TYPE: u8 = 0x71; diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs deleted file mode 100644 index 28f9523a89e..00000000000 --- a/core/lib/types/src/proofs.rs +++ /dev/null @@ -1,450 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - net::IpAddr, - ops::Add, - str::FromStr, -}; - -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, Bytes}; -use zksync_basic_types::{L1BatchNumber, H256, U256}; - -const HASH_LEN: usize = H256::len_bytes(); - -/// Metadata emitted by a Merkle tree after processing single storage log. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct StorageLogMetadata { - #[serde_as(as = "Bytes")] - pub root_hash: [u8; HASH_LEN], - pub is_write: bool, - pub first_write: bool, - #[serde_as(as = "Vec")] - pub merkle_paths: Vec<[u8; HASH_LEN]>, - pub leaf_hashed_key: U256, - pub leaf_enumeration_index: u64, - // **NB.** For compatibility reasons, `#[serde_as(as = "Bytes")]` attributes are not added below. - pub value_written: [u8; HASH_LEN], - pub value_read: [u8; HASH_LEN], -} - -impl StorageLogMetadata { - pub fn leaf_hashed_key_array(&self) -> [u8; 32] { - let mut result = [0_u8; 32]; - self.leaf_hashed_key.to_little_endian(&mut result); - result - } - - pub fn into_merkle_paths_array(self) -> Box<[[u8; HASH_LEN]; PATH_LEN]> { - let actual_len = self.merkle_paths.len(); - self.merkle_paths.try_into().unwrap_or_else(|_| { - panic!( - "Unexpected length of Merkle paths in `StorageLogMetadata`: expected {}, got {}", - PATH_LEN, actual_len - ); - }) - } -} - -/// Represents the sequential number of the proof aggregation round. 
-/// Mostly used to be stored in `aggregation_round` column in `prover_jobs` table
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
-pub enum AggregationRound {
-    BasicCircuits = 0,
-    LeafAggregation = 1,
-    NodeAggregation = 2,
-    Scheduler = 3,
-}
-
-impl From<u8> for AggregationRound {
-    fn from(item: u8) -> Self {
-        match item {
-            0 => AggregationRound::BasicCircuits,
-            1 => AggregationRound::LeafAggregation,
-            2 => AggregationRound::NodeAggregation,
-            3 => AggregationRound::Scheduler,
-            _ => panic!("Invalid round"),
-        }
-    }
-}
-
-impl AggregationRound {
-    pub fn next(&self) -> Option<Self> {
-        match self {
-            AggregationRound::BasicCircuits => Some(AggregationRound::LeafAggregation),
-            AggregationRound::LeafAggregation => Some(AggregationRound::NodeAggregation),
-            AggregationRound::NodeAggregation => Some(AggregationRound::Scheduler),
-            AggregationRound::Scheduler => None,
-        }
-    }
-}
-
-impl std::fmt::Display for AggregationRound {
-    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        formatter.write_str(match self {
-            Self::BasicCircuits => "basic_circuits",
-            Self::LeafAggregation => "leaf_aggregation",
-            Self::NodeAggregation => "node_aggregation",
-            Self::Scheduler => "scheduler",
-        })
-    }
-}
-
-impl FromStr for AggregationRound {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "basic_circuits" => Ok(AggregationRound::BasicCircuits),
-            "leaf_aggregation" => Ok(AggregationRound::LeafAggregation),
-            "node_aggregation" => Ok(AggregationRound::NodeAggregation),
-            "scheduler" => Ok(AggregationRound::Scheduler),
-            other => Err(format!(
-                "{} is not a valid round name for witness generation",
-                other
-            )),
-        }
-    }
-}
-
-impl TryFrom<i32> for AggregationRound {
-    type Error = ();
-
-    fn try_from(v: i32) -> Result<Self, Self::Error> {
-        match v {
-            x if x == AggregationRound::BasicCircuits as i32 => Ok(AggregationRound::BasicCircuits),
-            x if x == AggregationRound::LeafAggregation as i32 => {
-                Ok(AggregationRound::LeafAggregation)
-            }
-            x if x == AggregationRound::NodeAggregation as i32 => {
-                Ok(AggregationRound::NodeAggregation)
-            }
-            x if x == AggregationRound::Scheduler as i32 => Ok(AggregationRound::Scheduler),
-            _ => Err(()),
-        }
-    }
-}
-
-/// Witness data produced by the Merkle tree as a result of processing a single block. Used
-/// as an input to the witness generator.
-///
-/// # Stability
-///
-/// This type is serialized using `bincode` to be passed from the metadata calculator
-/// to the witness generator. As such, changes in its `serde` serialization
-/// must be backwards-compatible.
-///
-/// # Compact form
-///
-/// In order to reduce storage space, this job supports a compact format. In this format,
-/// only the first item in `merkle_paths` is guaranteed to have the full Merkle path (i.e.,
-/// 256 items with the current Merkle tree). The following items may have less hashes in their
-/// Merkle paths; if this is the case, the starting hashes are skipped and are the same
-/// as in the first path.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct PrepareBasicCircuitsJob {
-    // Merkle paths and some auxiliary information for each read / write operation in a block.
-    merkle_paths: Vec<StorageLogMetadata>,
-    next_enumeration_index: u64,
-}
-
-impl PrepareBasicCircuitsJob {
-    /// Creates a new job with the specified leaf index and no included paths.
-    pub fn new(next_enumeration_index: u64) -> Self {
-        Self {
-            merkle_paths: vec![],
-            next_enumeration_index,
-        }
-    }
-
-    /// Returns the next leaf index at the beginning of the block.
-    pub fn next_enumeration_index(&self) -> u64 {
-        self.next_enumeration_index
-    }
-
-    /// Reserves additional capacity for Merkle paths.
-    pub fn reserve(&mut self, additional_capacity: usize) {
-        self.merkle_paths.reserve(additional_capacity);
-    }
-
-    /// Pushes an additional Merkle path.
-    pub fn push_merkle_path(&mut self, mut path: StorageLogMetadata) {
-        let Some(first_path) = self.merkle_paths.first() else {
-            self.merkle_paths.push(path);
-            return;
-        };
-        assert_eq!(first_path.merkle_paths.len(), path.merkle_paths.len());
-
-        let mut hash_pairs = path.merkle_paths.iter().zip(&first_path.merkle_paths);
-        let first_unique_idx =
-            hash_pairs.position(|(hash, first_path_hash)| hash != first_path_hash);
-        let first_unique_idx = first_unique_idx.unwrap_or(path.merkle_paths.len());
-        path.merkle_paths = path.merkle_paths.split_off(first_unique_idx);
-        self.merkle_paths.push(path);
-    }
-
-    /// Converts this job into an iterator over the contained Merkle paths.
-    pub fn into_merkle_paths(self) -> impl ExactSizeIterator<Item = StorageLogMetadata> {
-        let mut merkle_paths = self.merkle_paths;
-        if let [first, rest @ ..] = merkle_paths.as_mut_slice() {
-            for path in rest {
-                assert!(
-                    path.merkle_paths.len() <= first.merkle_paths.len(),
-                    "Merkle paths in `PrepareBasicCircuitsJob` are malformed; the first path is not \
-                     the longest one"
-                );
-                let spliced_len = first.merkle_paths.len() - path.merkle_paths.len();
-                let spliced_hashes = &first.merkle_paths[0..spliced_len];
-                path.merkle_paths
-                    .splice(0..0, spliced_hashes.iter().cloned());
-                debug_assert_eq!(path.merkle_paths.len(), first.merkle_paths.len());
-            }
-        }
-        merkle_paths.into_iter()
-    }
-}
-
-/// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table.
-#[derive(Debug, Clone)] -pub struct BasicCircuitWitnessGeneratorInput { - pub block_number: L1BatchNumber, - pub previous_block_hash: H256, - pub previous_block_timestamp: u64, - pub block_timestamp: u64, - pub used_bytecodes_hashes: Vec, - pub initial_heap_content: Vec<(usize, U256)>, - pub merkle_paths_input: PrepareBasicCircuitsJob, -} - -#[derive(Debug, Clone)] -pub struct FriProverJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub aggregation_round: AggregationRound, - pub sequence_number: usize, - pub depth: u16, - pub is_node_final_proof: bool, -} - -#[derive(Debug, Clone)] -pub struct LeafAggregationJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub prover_job_ids_for_proofs: Vec, -} - -#[derive(Debug, Clone)] -pub struct NodeAggregationJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub depth: u16, - pub prover_job_ids_for_proofs: Vec, -} - -#[derive(Debug)] -pub struct JobPosition { - pub aggregation_round: AggregationRound, - pub sequence_number: usize, -} - -#[derive(Debug, Default)] -pub struct ProverJobStatusFailed { - pub started_at: DateTime, - pub error: String, -} - -#[derive(Debug)] -pub struct ProverJobStatusSuccessful { - pub started_at: DateTime, - pub time_taken: chrono::Duration, -} - -impl Default for ProverJobStatusSuccessful { - fn default() -> Self { - ProverJobStatusSuccessful { - started_at: DateTime::default(), - time_taken: chrono::Duration::zero(), - } - } -} - -#[derive(Debug, Default)] -pub struct ProverJobStatusInProgress { - pub started_at: DateTime, -} - -#[derive(Debug)] -pub struct WitnessJobStatusSuccessful { - pub started_at: DateTime, - pub time_taken: chrono::Duration, -} - -impl Default for WitnessJobStatusSuccessful { - fn default() -> Self { - WitnessJobStatusSuccessful { - started_at: DateTime::default(), - time_taken: chrono::Duration::zero(), - } - } -} - -#[derive(Debug, Default)] -pub struct WitnessJobStatusFailed { - pub started_at: DateTime, - pub error: String, -} - -#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] -pub enum ProverJobStatus { - #[strum(serialize = "queued")] - Queued, - #[strum(serialize = "in_progress")] - InProgress(ProverJobStatusInProgress), - #[strum(serialize = "successful")] - Successful(ProverJobStatusSuccessful), - #[strum(serialize = "failed")] - Failed(ProverJobStatusFailed), - #[strum(serialize = "skipped")] - Skipped, - #[strum(serialize = "ignored")] - Ignored, -} - -#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] -pub enum WitnessJobStatus { - #[strum(serialize = "failed")] - Failed(WitnessJobStatusFailed), - #[strum(serialize = "skipped")] - Skipped, - #[strum(serialize = "successful")] - Successful(WitnessJobStatusSuccessful), - #[strum(serialize = "waiting_for_artifacts")] - WaitingForArtifacts, - #[strum(serialize = "waiting_for_proofs")] - WaitingForProofs, - #[strum(serialize = "in_progress")] - InProgress, - #[strum(serialize = "queued")] - Queued, -} - -#[derive(Debug)] -pub struct WitnessJobInfo { - pub block_number: L1BatchNumber, - pub created_at: DateTime, - pub updated_at: DateTime, - pub status: WitnessJobStatus, - pub position: JobPosition, -} - -#[derive(Debug)] -pub struct ProverJobInfo { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_type: String, - pub position: JobPosition, - pub input_length: u64, - pub status: ProverJobStatus, - pub attempts: u32, - pub created_at: DateTime, - pub updated_at: DateTime, -} 
- -#[derive(Debug)] -pub struct JobExtendedStatistics { - pub successful_padding: L1BatchNumber, - pub queued_padding: L1BatchNumber, - pub queued_padding_len: u32, - pub active_area: Vec, -} - -#[derive(Debug, Clone, Copy, Default)] -pub struct JobCountStatistics { - pub queued: usize, - pub in_progress: usize, - pub failed: usize, - pub successful: usize, -} - -impl Add for JobCountStatistics { - type Output = JobCountStatistics; - - fn add(self, rhs: Self) -> Self::Output { - Self { - queued: self.queued + rhs.queued, - in_progress: self.in_progress + rhs.in_progress, - failed: self.failed + rhs.failed, - successful: self.successful + rhs.successful, - } - } -} - -#[derive(Debug)] -pub struct StuckJobs { - pub id: u64, - pub status: String, - pub attempts: u64, -} - -#[derive(Debug, Clone)] -pub struct SocketAddress { - pub host: IpAddr, - pub port: u16, -} - -#[derive(Debug, Copy, Clone)] -pub enum GpuProverInstanceStatus { - // The instance is available for processing. - Available, - // The instance is running at full capacity. - Full, - // The instance is reserved by an synthesizer. - Reserved, - // The instance is not alive anymore. - Dead, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn prepare_basic_circuits_job_roundtrip() { - let zero_hash = [0_u8; 32]; - let logs = (0..10).map(|i| { - let mut merkle_paths = vec![zero_hash; 255]; - merkle_paths.push([i as u8; 32]); - StorageLogMetadata { - root_hash: zero_hash, - is_write: i % 2 == 0, - first_write: i % 3 == 0, - merkle_paths, - leaf_hashed_key: U256::from(i), - leaf_enumeration_index: i + 1, - value_written: [i as u8; 32], - value_read: [0; 32], - } - }); - let logs: Vec<_> = logs.collect(); - - let mut job = PrepareBasicCircuitsJob::new(4); - job.reserve(logs.len()); - for log in &logs { - job.push_merkle_path(log.clone()); - } - - // Check that Merkle paths are compacted. - for (i, log) in job.merkle_paths.iter().enumerate() { - let expected_merkle_path_len = if i == 0 { 256 } else { 1 }; - assert_eq!(log.merkle_paths.len(), expected_merkle_path_len); - } - - let logs_from_job: Vec<_> = job.into_merkle_paths().collect(); - assert_eq!(logs_from_job, logs); - } -} diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs index 19f818bb5d1..2007c825902 100644 --- a/core/lib/types/src/snapshots.rs +++ b/core/lib/types/src/snapshots.rs @@ -1,11 +1,12 @@ -use std::convert::TryFrom; +use std::{convert::TryFrom, ops}; use anyhow::Context; use serde::{Deserialize, Serialize}; use zksync_basic_types::{AccountTreeId, L1BatchNumber, MiniblockNumber, H256}; use zksync_protobuf::{required, ProtoFmt}; +use zksync_utils::u256_to_h256; -use crate::{commitment::L1BatchWithMetadata, Bytes, StorageKey, StorageValue}; +use crate::{commitment::L1BatchWithMetadata, Bytes, StorageKey, StorageValue, U256}; /// Information about all snapshots persisted by the node. #[derive(Debug, Clone, Serialize, Deserialize)] @@ -48,7 +49,7 @@ pub struct SnapshotHeader { pub last_l1_batch_with_metadata: L1BatchWithMetadata, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase")] pub struct SnapshotStorageLogsChunkMetadata { pub chunk_id: u64, @@ -187,12 +188,100 @@ impl ProtoFmt for SnapshotStorageLogsChunk { } } +/// Status of snapshot recovery process stored in Postgres. 
 #[derive(Debug, PartialEq)]
 pub struct SnapshotRecoveryStatus {
     pub l1_batch_number: L1BatchNumber,
     pub l1_batch_root_hash: H256,
     pub miniblock_number: MiniblockNumber,
     pub miniblock_root_hash: H256,
-    pub last_finished_chunk_id: Option<u64>,
-    pub total_chunk_count: u64,
+    pub storage_logs_chunks_processed: Vec<bool>,
+}
+
+impl SnapshotRecoveryStatus {
+    /// Returns the number of storage log chunks left to process.
+    pub fn storage_logs_chunks_left_to_process(&self) -> usize {
+        self.storage_logs_chunks_processed
+            .iter()
+            .filter(|&&is_processed| !is_processed)
+            .count()
+    }
+}
+
+/// Returns a chunk of `hashed_keys` with 0-based index `chunk_id` among `count`. Chunks do not intersect and jointly cover
+/// the entire `hashed_key` space. If `hashed_key`s are uniformly distributed (which is the case), the returned ranges
+/// are expected to contain the same number of entries.
+///
+/// Used by multiple components during snapshot creation and recovery.
+///
+/// # Panics
+///
+/// Panics if `chunk_count == 0` or `chunk_id >= chunk_count`.
+pub fn uniform_hashed_keys_chunk(chunk_id: u64, chunk_count: u64) -> ops::RangeInclusive<H256> {
+    assert!(chunk_count > 0, "`chunk_count` must be positive");
+    assert!(
+        chunk_id < chunk_count,
+        "Chunk index {} exceeds count {}",
+        chunk_id,
+        chunk_count
+    );
+
+    let mut stride = U256::MAX / chunk_count;
+    let stride_minus_one = if stride < U256::MAX {
+        stride += U256::one();
+        stride - 1
+    } else {
+        stride // `stride` is really 1 << 256 == U256::MAX + 1
+    };
+
+    let start = stride * chunk_id;
+    let (mut end, is_overflow) = stride_minus_one.overflowing_add(start);
+    if is_overflow {
+        end = U256::MAX;
+    }
+    u256_to_h256(start)..=u256_to_h256(end)
+}
+
+#[cfg(test)]
+mod tests {
+    use zksync_utils::h256_to_u256;
+
+    use super::*;
+
+    #[test]
+    fn chunking_is_correct() {
+        for chunks_count in (2..10).chain([42, 256, 500, 1_001, 12_345]) {
+            println!("Testing chunks_count={chunks_count}");
+            let chunked_ranges: Vec<_> = (0..chunks_count)
+                .map(|chunk_id| uniform_hashed_keys_chunk(chunk_id, chunks_count))
+                .collect();
+
+            assert_eq!(*chunked_ranges[0].start(), H256::zero());
+            assert_eq!(
+                *chunked_ranges.last().unwrap().end(),
+                H256::repeat_byte(0xff)
+            );
+            for window in chunked_ranges.windows(2) {
+                let [prev_chunk, next_chunk] = window else {
+                    unreachable!();
+                };
+                assert_eq!(
+                    h256_to_u256(*prev_chunk.end()) + 1,
+                    h256_to_u256(*next_chunk.start())
+                );
+            }
+
+            let chunk_sizes: Vec<_> = chunked_ranges
+                .iter()
+                .map(|chunk| h256_to_u256(*chunk.end()) - h256_to_u256(*chunk.start()) + 1)
+                .collect();
+
+            // Check that chunk sizes are roughly equal. Due to how chunks are constructed, the sizes
+            // of all chunks except for the last one are the same, and the last chunk size may be slightly smaller;
+            // the difference in sizes is lesser than the number of chunks.
+            let min_chunk_size = chunk_sizes.iter().copied().min().unwrap();
+            let max_chunk_size = chunk_sizes.iter().copied().max().unwrap();
+            assert!(max_chunk_size - min_chunk_size < U256::from(chunks_count));
+        }
+    }
+}
diff --git a/core/lib/types/src/sort_storage_access.rs b/core/lib/types/src/sort_storage_access.rs
deleted file mode 100644
index 0f9c5524e97..00000000000
--- a/core/lib/types/src/sort_storage_access.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-/// Public reexport from `zkevm_test_harness`.
-// The aim here is to minimize the surface of public APIs.
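// Editor's illustration of `uniform_hashed_keys_chunk` above (not part of the
// patch): with four chunks, the key 0x4040..40 falls into the second quarter of
// the hashed-key space.
//
//     let ranges: Vec<_> = (0..4_u64).map(|id| uniform_hashed_keys_chunk(id, 4)).collect();
//     assert!(ranges[1].contains(&H256::repeat_byte(0x40)));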
-pub use zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries; diff --git a/core/lib/types/src/storage/log.rs b/core/lib/types/src/storage/log.rs index a64bbb50220..e5565cfb816 100644 --- a/core/lib/types/src/storage/log.rs +++ b/core/lib/types/src/storage/log.rs @@ -1,11 +1,13 @@ use std::mem; use serde::{Deserialize, Serialize}; -use zk_evm::aux_structures::{LogQuery, Timestamp}; use zksync_basic_types::AccountTreeId; use zksync_utils::u256_to_h256; -use crate::{StorageKey, StorageValue, U256}; +use crate::{ + zk_evm_types::{LogQuery, Timestamp}, + StorageKey, StorageValue, U256, +}; // TODO (SMA-1269): Refactor `StorageLog/StorageLogQuery and StorageLogKind/StorageLongQueryType`. #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] diff --git a/core/lib/types/src/storage_writes_deduplicator.rs b/core/lib/types/src/storage_writes_deduplicator.rs index 14a5413ee6a..19bf51b6eb0 100644 --- a/core/lib/types/src/storage_writes_deduplicator.rs +++ b/core/lib/types/src/storage_writes_deduplicator.rs @@ -219,10 +219,11 @@ impl StorageWritesDeduplicator { #[cfg(test)] mod tests { - use zk_evm::aux_structures::{LogQuery, Timestamp}; - use super::*; - use crate::H160; + use crate::{ + zk_evm_types::{LogQuery, Timestamp}, + H160, + }; fn storage_log_query( key: U256, diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 7fda18d70a4..f42a74bd2c5 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -182,7 +182,7 @@ pub enum SerializationTransactionError { /// OversizedData is returned if the raw tx size is greater /// than some meaningful limit a user might use. This is not a consensus error /// making the transaction invalid, rather a DOS protection. - #[error("oversized data. max: {0}; actual: {0}")] + #[error("oversized data. 
max: {0}; actual: {1}")] OversizedData(usize, usize), #[error("gas per pub data limit is zero")] GasPerPubDataLimitZero, diff --git a/core/lib/types/src/tx/tx_execution_info.rs b/core/lib/types/src/tx/tx_execution_info.rs index 968a56d6c55..7b2b0dbd27e 100644 --- a/core/lib/types/src/tx/tx_execution_info.rs +++ b/core/lib/types/src/tx/tx_execution_info.rs @@ -1,6 +1,7 @@ use std::ops::{Add, AddAssign}; use crate::{ + circuit::CircuitStatistic, commitment::SerializeCommitment, fee::TransactionExecutionMetrics, l2_to_l1_log::L2ToL1Log, @@ -69,7 +70,7 @@ pub struct ExecutionMetrics { pub cycles_used: u32, pub computational_gas_used: u32, pub pubdata_published: u32, - pub estimated_circuits_used: f32, + pub circuit_statistic: CircuitStatistic, } impl ExecutionMetrics { @@ -87,7 +88,7 @@ impl ExecutionMetrics { cycles_used: tx_metrics.cycles_used, computational_gas_used: tx_metrics.computational_gas_used, pubdata_published: tx_metrics.pubdata_published, - estimated_circuits_used: tx_metrics.estimated_circuits_used, + circuit_statistic: tx_metrics.circuit_statistic, } } @@ -121,7 +122,7 @@ impl Add for ExecutionMetrics { cycles_used: self.cycles_used + other.cycles_used, computational_gas_used: self.computational_gas_used + other.computational_gas_used, pubdata_published: self.pubdata_published + other.pubdata_published, - estimated_circuits_used: self.estimated_circuits_used + other.estimated_circuits_used, + circuit_statistic: self.circuit_statistic + other.circuit_statistic, } } } diff --git a/core/lib/types/src/vm_trace.rs b/core/lib/types/src/vm_trace.rs index d3a94d51fa5..9d36900e396 100644 --- a/core/lib/types/src/vm_trace.rs +++ b/core/lib/types/src/vm_trace.rs @@ -5,11 +5,10 @@ use std::{ }; use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use zk_evm::zkevm_opcode_defs::FarCallOpcode; use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_utils::u256_to_h256; -use crate::{Address, U256}; +use crate::{zk_evm_types::FarCallOpcode, Address, U256}; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum VmTrace { diff --git a/core/lib/types/src/zk_evm_types.rs b/core/lib/types/src/zk_evm_types.rs new file mode 100644 index 00000000000..a7973ab36fe --- /dev/null +++ b/core/lib/types/src/zk_evm_types.rs @@ -0,0 +1,30 @@ +use zksync_basic_types::{Address, U256}; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(u8)] +pub enum FarCallOpcode { + Normal = 0, + Delegate, + Mimic, +} + +/// Struct representing the VM timestamp +#[derive( + Clone, Copy, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, PartialOrd, Ord, +)] +pub struct Timestamp(pub u32); + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub struct LogQuery { + pub timestamp: Timestamp, + pub tx_number_in_block: u16, + pub aux_byte: u8, + pub shard_id: u8, + pub address: Address, + pub key: U256, + pub read_value: U256, + pub written_value: U256, + pub rw_flag: bool, + pub rollback: bool, + pub is_service: bool, +} diff --git a/core/lib/utils/src/wait_for_tasks.rs b/core/lib/utils/src/wait_for_tasks.rs index 74bb57f1fdb..eddb2db4733 100644 --- a/core/lib/utils/src/wait_for_tasks.rs +++ b/core/lib/utils/src/wait_for_tasks.rs @@ -25,7 +25,7 @@ pub async fn wait_for_tasks( } } Ok(Err(err)) => { - let err = format!("One of the tokio actors unexpectedly finished with error: {err}"); + let err = format!("One of the tokio actors unexpectedly finished with error: {err:#}"); tracing::error!("{err}"); 
                vlog::capture_message(&err, vlog::AlertLevel::Warning);
                if let Some(graceful_shutdown) = graceful_shutdown {
@@ -36,9 +36,7 @@ pub async fn wait_for_tasks(
             let is_panic = error.is_panic();
             let panic_message = try_extract_panic_message(error);
 
-            tracing::info!(
-                "One of the tokio actors unexpectedly finished with error: {panic_message}"
-            );
+            tracing::info!("One of the tokio actors panicked: {panic_message}");
 
             if is_panic {
                 if let Some(particular_alerts) = particular_crypto_alerts {
diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs
index 62bb8c2b62e..a04ad45f748 100644
--- a/core/lib/vm_utils/src/lib.rs
+++ b/core/lib/vm_utils/src/lib.rs
@@ -41,10 +41,10 @@ pub fn create_vm(
         .block_on(
             connection
                 .blocks_dal()
-                .get_fee_address_for_l1_batch(l1_batch_number),
+                .get_fee_address_for_miniblock(miniblock_number + 1),
         )?
         .with_context(|| {
-            format!("l1_batch_number {l1_batch_number:?} must have fee_address_account")
+            format!("l1_batch_number {l1_batch_number:?} must have fee_account_address")
        })?;
 
     // In the state keeper, this value is used to reject execution.
diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs
index 5ed49355fdd..8fa3b153205 100644
--- a/core/lib/web3_decl/src/namespaces/eth.rs
+++ b/core/lib/web3_decl/src/namespaces/eth.rs
@@ -3,7 +3,7 @@ use jsonrpsee::{
     proc_macros::rpc,
 };
 use zksync_types::{
-    api::{BlockIdVariant, BlockNumber, Transaction, TransactionVariant},
+    api::{BlockId, BlockIdVariant, BlockNumber, Transaction, TransactionVariant},
     transaction_request::CallRequest,
     Address, H256,
 };
@@ -86,6 +86,9 @@ pub trait EthNamespace {
         block_number: BlockNumber,
     ) -> RpcResult<Option<U256>>;
 
+    #[method(name = "getBlockReceipts")]
+    async fn get_block_receipts(&self, block_id: BlockId) -> RpcResult<Vec<TransactionReceipt>>;
+
     #[method(name = "getBlockTransactionCountByHash")]
     async fn get_block_transaction_count_by_hash(
         &self,
diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml
index ad422261334..032e58d2bcd 100644
--- a/core/lib/zksync_core/Cargo.toml
+++ b/core/lib/zksync_core/Cargo.toml
@@ -9,6 +9,8 @@ license = "MIT OR Apache-2.0"
 keywords = ["blockchain", "zksync"]
 categories = ["cryptography"]
 
+links = "zksync_core_proto"
+
 [dependencies]
 vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" }
 zksync_state = { path = "../state" }
@@ -16,12 +18,14 @@ vm_utils = { path = "../vm_utils" }
 zksync_types = { path = "../types" }
 zksync_dal = { path = "../dal" }
 zksync_config = { path = "../config" }
+zksync_env_config = { path = "../env_config" }
 zksync_utils = { path = "../utils" }
 zksync_contracts = { path = "../contracts" }
 zksync_system_constants = { path = "../../lib/constants" }
 zksync_commitment_utils = { path = "../commitment_utils" }
 zksync_eth_client = { path = "../eth_client" }
 zksync_eth_signer = { path = "../eth_signer" }
+zksync_l1_contract_interface = { path = "../l1_contract_interface" }
 zksync_mempool = { path = "../mempool" }
 zksync_queued_job_processor = { path = "../queued_job_processor" }
 zksync_circuit_breaker = { path = "../circuit_breaker" }
@@ -29,6 +33,7 @@ zksync_storage = { path = "../storage" }
 zksync_merkle_tree = { path = "../merkle_tree" }
 zksync_mini_merkle_tree = { path = "../mini_merkle_tree" }
 prometheus_exporter = { path = "../prometheus_exporter" }
+zksync_prover_interface = { path = "../prover_interface" }
 zksync_web3_decl = { path = "../web3_decl", default-features = false, features = [
     "server",
     "client",
@@ -38,15 +43,16 @@ zksync_health_check = { path = "../health_check" } vlog = { path = "../vlog" } multivm = { path = "../multivm" } + # Consensus dependenices -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } prost = "0.12.1" serde = { version = "1.0", features = ["derive"] } @@ -92,3 +98,6 @@ assert_matches = "1.5" jsonrpsee = "0.21.0" tempfile = "3.0.2" test-casing = "0.1.2" + +[build-dependencies] +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } diff --git a/core/lib/zksync_core/build.rs b/core/lib/zksync_core/build.rs new file mode 100644 index 00000000000..25b323b9ff1 --- /dev/null +++ b/core/lib/zksync_core/build.rs @@ -0,0 +1,12 @@ +//! Generates rust code from protobufs. 
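// Editor's note (assumptions about the codegen layout, not confirmed by the
// patch): `.proto` files under `src/consensus/proto` are compiled at build time
// into Rust modules placed under `OUT_DIR`, which the crate then pulls in
// (typically via an `include!`-style macro provided by `zksync_protobuf`);
// `is_public: true` should make the generated types part of the crate's public API.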
+fn main() { + zksync_protobuf_build::Config { + input_root: "src/consensus/proto".into(), + proto_root: "zksync/core/consensus".into(), + dependencies: vec![], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: true, + } + .generate() + .unwrap(); +} diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs index 82e082d4dd8..c23631ab435 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs @@ -240,6 +240,6 @@ pub(super) fn collect_tx_execution_metrics( computational_gas_used: result.statistics.computational_gas_used, total_updated_values_size: writes_metrics.total_updated_values_size, pubdata_published: result.statistics.pubdata_published, - estimated_circuits_used: result.statistics.estimated_circuits_used, + circuit_statistic: result.statistics.circuit_statistic, } } diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs index 5f3dfcd3417..17256c50fe4 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs @@ -112,6 +112,12 @@ impl EthNamespaceServer for EthNamespace { .map_err(into_jsrpc_error) } + async fn get_block_receipts(&self, block_id: BlockId) -> RpcResult> { + self.get_block_receipts_impl(block_id) + .await + .map_err(into_jsrpc_error) + } + async fn get_block_transaction_count_by_hash( &self, block_hash: H256, diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index d8a148a46aa..351ebf3dbd8 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -73,7 +73,7 @@ impl DebugNamespace { .await?; let call_traces = connection .blocks_web3_dal() - .get_trace_for_miniblock(block_number) // FIXME: is some ordering among transactions expected? 
+ .get_traces_for_miniblock(block_number) .await .map_err(|err| internal_error(METHOD_NAME, err))?; let call_trace = call_traces @@ -109,7 +109,11 @@ impl DebugNamespace { .access_storage_tagged("api") .await .map_err(|err| internal_error(METHOD_NAME, err))?; - let call_trace = connection.transactions_dal().get_call_trace(tx_hash).await; + let call_trace = connection + .transactions_dal() + .get_call_trace(tx_hash) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; Ok(call_trace.map(|call_trace| { let mut result: DebugCall = call_trace.into(); if only_top_call { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index 92781ae8f68..fdef4f4a36a 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -31,11 +31,7 @@ impl EnNamespace { .map_err(|err| internal_error(METHOD_NAME, err))?; storage .sync_dal() - .sync_block( - block_number, - self.state.tx_sender.0.sender_config.fee_account_addr, - include_transactions, - ) + .sync_block(block_number, include_transactions) .await .map_err(|err| internal_error(METHOD_NAME, err)) } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 70b445cd8fc..0098eacdbf0 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -323,6 +323,60 @@ impl EthNamespace { Ok(tx_count?.map(|(_, count)| count)) } + #[tracing::instrument(skip(self))] + pub async fn get_block_receipts_impl( + &self, + block_id: BlockId, + ) -> Result, Web3Error> { + const METHOD_NAME: &str = "get_block_receipts"; + + let method_latency = API_METRICS.start_block_call(METHOD_NAME, block_id); + + self.state.start_info.ensure_not_pruned(block_id)?; + + let block = self + .state + .connection_pool + .access_storage_tagged("api") + .await + .map_err(|err| internal_error(METHOD_NAME, err))? + .blocks_web3_dal() + .get_block_by_web3_block_id(block_id, false, self.state.api_config.l2_chain_id) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; + + let transactions: &[TransactionVariant] = + block.as_ref().map_or(&[], |block| &block.transactions); + let hashes: Vec<_> = transactions + .iter() + .map(|tx| match tx { + TransactionVariant::Full(tx) => tx.hash, + TransactionVariant::Hash(hash) => *hash, + }) + .collect(); + + let mut receipts = self + .state + .connection_pool + .access_storage_tagged("api") + .await + .map_err(|err| internal_error(METHOD_NAME, err))? 
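+            // All receipts for the block's transactions are fetched in one DAL query;
+            // ordering is restored via the `transaction_index` sort below.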
+ .transactions_web3_dal() + .get_transaction_receipts(&hashes) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; + + receipts.sort_unstable_by_key(|receipt| receipt.transaction_index); + + if let Some(block) = block { + self.report_latency_with_block_id(method_latency, block.number.as_u32().into()); + } else { + method_latency.observe_without_diff(); + } + + Ok(receipts) + } + #[tracing::instrument(skip(self))] pub async fn get_code_impl( &self, @@ -495,19 +549,20 @@ impl EthNamespace { const METHOD_NAME: &str = "get_transaction_receipt"; let method_latency = API_METRICS.start_call(METHOD_NAME); - let receipt = self + let receipts = self .state .connection_pool .access_storage_tagged("api") .await .unwrap() .transactions_web3_dal() - .get_transaction_receipt(hash) + .get_transaction_receipts(&[hash]) .await - .map_err(|err| internal_error(METHOD_NAME, err)); + .map_err(|err| internal_error(METHOD_NAME, err))?; method_latency.observe(); - receipt + + Ok(receipts.into_iter().next()) } #[tracing::instrument(skip(self))] diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 86f879a8737..269250c295d 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -444,10 +444,7 @@ impl ZksNamespace { let mut storage = self.access_storage(METHOD_NAME).await?; let block_details = storage .blocks_web3_dal() - .get_block_details( - block_number, - self.state.tx_sender.0.sender_config.fee_account_addr, - ) + .get_block_details(block_number) .await .map_err(|err| internal_error(METHOD_NAME, err)); diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs index 874cc019a3d..bf929469b44 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs @@ -5,17 +5,17 @@ use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; -fn execute_l2_transaction_with_traces() -> TransactionExecutionResult { +fn execute_l2_transaction_with_traces(index_in_block: u8) -> TransactionExecutionResult { let first_call_trace = Call { - from: Address::repeat_byte(1), - to: Address::repeat_byte(2), + from: Address::repeat_byte(index_in_block), + to: Address::repeat_byte(index_in_block + 1), gas: 100, gas_used: 42, ..Call::default() }; let second_call_trace = Call { - from: Address::repeat_byte(0xff), - to: Address::repeat_byte(0xab), + from: Address::repeat_byte(0xff - index_in_block), + to: Address::repeat_byte(0xab - index_in_block), value: 123.into(), gas: 58, gas_used: 10, @@ -35,7 +35,7 @@ struct TraceBlockTest(MiniblockNumber); #[async_trait] impl HttpTest for TraceBlockTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let tx_results = [execute_l2_transaction_with_traces()]; + let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); let mut storage = pool.access_storage().await?; let new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; drop(storage); @@ -45,11 +45,6 @@ impl HttpTest for TraceBlockTest { api::BlockId::Number(api::BlockNumber::Latest), api::BlockId::Hash(new_miniblock.hash), ]; - let expected_calls: Vec<_> = tx_results[0] - .call_traces - .iter() - .map(|call| api::DebugCall::from(call.clone())) - .collect(); for block_id in block_ids { let block_traces = match block_id { @@ -57,12 +52,19 @@ impl HttpTest for 
TraceBlockTest { api::BlockId::Hash(hash) => client.trace_block_by_hash(hash, None).await?, }; - assert_eq!(block_traces.len(), 1); // equals to the number of transactions in the block - let api::ResultDebugCall { result } = &block_traces[0]; - assert_eq!(result.from, Address::zero()); - assert_eq!(result.to, BOOTLOADER_ADDRESS); - assert_eq!(result.gas, tx_results[0].transaction.gas_limit()); - assert_eq!(result.calls, expected_calls); + assert_eq!(block_traces.len(), tx_results.len()); // equals to the number of transactions in the block + for (trace, tx_result) in block_traces.iter().zip(&tx_results) { + let api::ResultDebugCall { result } = trace; + assert_eq!(result.from, Address::zero()); + assert_eq!(result.to, BOOTLOADER_ADDRESS); + assert_eq!(result.gas, tx_result.transaction.gas_limit()); + let expected_calls: Vec<_> = tx_result + .call_traces + .iter() + .map(|call| api::DebugCall::from(call.clone())) + .collect(); + assert_eq!(result.calls, expected_calls); + } } let missing_block_number = api::BlockNumber::from(*self.0 + 100); @@ -96,7 +98,7 @@ struct TraceTransactionTest; #[async_trait] impl HttpTest for TraceTransactionTest { async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let tx_results = [execute_l2_transaction_with_traces()]; + let tx_results = [execute_l2_transaction_with_traces(0)]; let mut storage = pool.access_storage().await?; store_miniblock(&mut storage, MiniblockNumber(1), &tx_results).await?; drop(storage); diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 9b8e3707dfd..6b5f8a2fa1b 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -13,7 +13,8 @@ use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Storage use zksync_health_check::CheckHealth; use zksync_types::{ api, - block::{BlockGasCount, MiniblockHeader}, + api::BlockId, + block::MiniblockHeader, fee::TransactionExecutionMetrics, get_nonce_key, l2::L2Tx, @@ -227,7 +228,7 @@ impl StorageInitialization { MiniblockNumber(Self::SNAPSHOT_RECOVERY_BLOCK), factory_deps, ) - .await; + .await?; } } Ok(()) @@ -331,10 +332,7 @@ async fn seal_l1_batch( number: L1BatchNumber, ) -> anyhow::Result<()> { let header = create_l1_batch(number.0); - storage - .blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0) - .await?; + storage.blocks_dal().insert_mock_l1_batch(&header).await?; storage .blocks_dal() .mark_miniblocks_as_executed_in_l1_batch(number) @@ -822,3 +820,53 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { async fn getting_transaction_count_for_account_after_snapshot_recovery() { test_http_server(TransactionCountAfterSnapshotRecoveryTest).await; } + +#[derive(Debug)] +struct TransactionReceiptsTest; + +#[async_trait] +impl HttpTest for TransactionReceiptsTest { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + let mut storage = pool.access_storage().await?; + let miniblock_number = MiniblockNumber(1); + + let tx1 = create_l2_transaction(10, 200); + let tx2 = create_l2_transaction(10, 200); + + let tx_results = vec![ + execute_l2_transaction(tx1.clone()), + execute_l2_transaction(tx2.clone()), + ]; + + store_miniblock(&mut storage, miniblock_number, &tx_results).await?; + + let mut expected_receipts = Vec::new(); + + for tx in &tx_results { + expected_receipts.push( + client + .get_transaction_receipt(tx.hash) + 
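+                    // Individually fetched receipts serve as reference values for the
+                    // `eth_getBlockReceipts` response checked below.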
.await? + .expect("Receipt found"), + ); + } + + for (tx_result, receipt) in tx_results.iter().zip(&expected_receipts) { + assert_eq!(tx_result.hash, receipt.transaction_hash); + } + + let receipts = client + .get_block_receipts(BlockId::Number(miniblock_number.0.into())) + .await?; + assert_eq!(receipts.len(), 2); + for (receipt, expected_receipt) in receipts.iter().zip(&expected_receipts) { + assert_eq!(receipt, expected_receipt); + } + Ok(()) + } +} + +#[tokio::test] +async fn transaction_receipts() { + test_http_server(TransactionReceiptsTest).await; +} diff --git a/core/lib/zksync_core/src/block_reverter/mod.rs b/core/lib/zksync_core/src/block_reverter/mod.rs index e45bef7eb21..9b3c23dfc84 100644 --- a/core/lib/zksync_core/src/block_reverter/mod.rs +++ b/core/lib/zksync_core/src/block_reverter/mod.rs @@ -189,7 +189,7 @@ impl BlockReverter { path: &Path, storage_root_hash: H256, ) { - let db = RocksDB::new(path); + let db = RocksDB::new(path).expect("Failed initializing RocksDB for Merkle tree"); let mut tree = ZkSyncTree::new_lightweight(db.into()); if tree.next_l1_batch_number() <= last_l1_batch_to_keep { @@ -207,14 +207,19 @@ impl BlockReverter { /// Reverts blocks in the state keeper cache. async fn rollback_state_keeper_cache(&self, last_l1_batch_to_keep: L1BatchNumber) { tracing::info!("opening DB with state keeper cache..."); - let mut sk_cache = RocksdbStorage::new(self.state_keeper_cache_path.as_ref()); + let sk_cache = RocksdbStorage::builder(self.state_keeper_cache_path.as_ref()) + .await + .expect("Failed initializing state keeper cache"); - if sk_cache.l1_batch_number() > last_l1_batch_to_keep + 1 { + if sk_cache.l1_batch_number().await > Some(last_l1_batch_to_keep + 1) { let mut storage = self.connection_pool.access_storage().await.unwrap(); - tracing::info!("rolling back state keeper cache..."); - sk_cache.rollback(&mut storage, last_l1_batch_to_keep).await; + tracing::info!("Rolling back state keeper cache..."); + sk_cache + .rollback(&mut storage, last_l1_batch_to_keep) + .await + .expect("Failed rolling back state keeper cache"); } else { - tracing::info!("nothing to revert in state keeper cache"); + tracing::info!("Nothing to revert in state keeper cache"); } } @@ -260,7 +265,8 @@ impl BlockReverter { transaction .storage_logs_dal() .rollback_storage(last_miniblock_to_keep) - .await; + .await + .expect("failed rolling back storage"); tracing::info!("rolling back storage logs..."); transaction .storage_logs_dal() @@ -272,6 +278,11 @@ impl BlockReverter { .delete_l1_batches(last_l1_batch_to_keep) .await .unwrap(); + transaction + .blocks_dal() + .delete_initial_writes(last_l1_batch_to_keep) + .await + .unwrap(); tracing::info!("rolling back miniblocks..."); transaction .blocks_dal() diff --git a/core/lib/zksync_core/src/consensus/config.rs b/core/lib/zksync_core/src/consensus/config.rs new file mode 100644 index 00000000000..649a90ebc76 --- /dev/null +++ b/core/lib/zksync_core/src/consensus/config.rs @@ -0,0 +1,149 @@ +//! Configuration utilities for the consensus component. +use std::collections::{BTreeMap, BTreeSet}; + +use anyhow::Context as _; +use zksync_consensus_crypto::{read_required_text, Text, TextFmt}; +use zksync_consensus_executor as executor; +use zksync_consensus_roles::{node, validator}; +use zksync_protobuf::{required, ProtoFmt}; + +use crate::consensus::proto; + +/// Decodes a proto message from json for arbitrary `ProtoFmt`. 
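+///
+/// Usage sketch (assuming `json` holds a JSON-encoded `ConsensusConfig`):
+/// `let config: Config = decode_json(&json)?;`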
+pub fn decode_json(json: &str) -> anyhow::Result { + let mut d = serde_json::Deserializer::from_str(json); + let p: T = zksync_protobuf::serde::deserialize(&mut d)?; + d.end()?; + Ok(p) +} + +/// Decodes a secret of type T from an env var with name `var_name`. +/// It makes sure that the error message doesn't contain the secret. +pub fn read_secret(var_name: &str) -> anyhow::Result { + let raw = std::env::var(var_name).map_err(|_| anyhow::anyhow!("{var_name} not set"))?; + Text::new(&raw) + .decode() + .map_err(|_| anyhow::anyhow!("{var_name} has invalid format")) +} + +/// Config (shared between main node and external node). +#[derive(Clone, Debug, PartialEq)] +pub struct Config { + /// Local socket address to listen for the incoming connections. + pub server_addr: std::net::SocketAddr, + /// Public address of this node (should forward to `server_addr`) + /// that will be advertised to peers, so that they can connect to this + /// node. + pub public_addr: std::net::SocketAddr, + + /// Validators participating in consensus. + pub validators: validator::ValidatorSet, + + /// Maximal allowed size of the payload in bytes. + pub max_payload_size: usize, + + /// Limit on the number of inbound connections outside + /// of the `static_inbound` set. + pub gossip_dynamic_inbound_limit: usize, + /// Inbound gossip connections that should be unconditionally accepted. + pub gossip_static_inbound: BTreeSet, + /// Outbound gossip connections that the node should actively try to + /// establish and maintain. + pub gossip_static_outbound: BTreeMap, +} + +impl Config { + pub fn executor_config(&self, node_key: node::SecretKey) -> executor::Config { + executor::Config { + server_addr: self.server_addr, + validators: self.validators.clone(), + max_payload_size: self.max_payload_size, + node_key, + gossip_dynamic_inbound_limit: self.gossip_dynamic_inbound_limit, + gossip_static_inbound: self.gossip_static_inbound.clone().into_iter().collect(), + gossip_static_outbound: self.gossip_static_outbound.clone().into_iter().collect(), + } + } + + pub fn validator_config( + &self, + validator_key: validator::SecretKey, + ) -> executor::ValidatorConfig { + executor::ValidatorConfig { + public_addr: self.public_addr, + key: validator_key, + } + } +} + +impl ProtoFmt for Config { + type Proto = proto::ConsensusConfig; + fn read(r: &Self::Proto) -> anyhow::Result { + let validators = r + .validators + .iter() + .enumerate() + .map(|(i, v)| { + Text::new(v) + .decode() + .with_context(|| format!("validators[{i}]")) + }) + .collect::, _>>()?; + let validators = validator::ValidatorSet::new(validators).context("validators")?; + + let mut gossip_static_inbound = BTreeSet::new(); + for (i, v) in r.gossip_static_inbound.iter().enumerate() { + gossip_static_inbound.insert( + Text::new(v) + .decode() + .with_context(|| format!("gossip_static_inbound[{i}]"))?, + ); + } + let mut gossip_static_outbound = BTreeMap::new(); + for (i, e) in r.gossip_static_outbound.iter().enumerate() { + let key = read_required_text(&e.key) + .with_context(|| format!("gossip_static_outbound[{i}].key"))?; + let addr = read_required_text(&e.addr) + .with_context(|| format!("gossip_static_outbound[{i}].addr"))?; + gossip_static_outbound.insert(key, addr); + } + Ok(Self { + server_addr: read_required_text(&r.server_addr).context("server_addr")?, + public_addr: read_required_text(&r.public_addr).context("public_addr")?, + validators, + max_payload_size: required(&r.max_payload_size) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_payload_size")?, + 
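+            // Numeric proto fields are `u64`; the checked `try_into` conversions above
+            // and below surface the offending field name through `context`.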
gossip_dynamic_inbound_limit: required(&r.gossip_dynamic_inbound_limit) + .and_then(|x| Ok((*x).try_into()?)) + .context("gossip_dynamic_inbound_limit")?, + gossip_static_inbound, + gossip_static_outbound, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + server_addr: Some(self.server_addr.encode()), + public_addr: Some(self.public_addr.encode()), + validators: self.validators.iter().map(TextFmt::encode).collect(), + max_payload_size: Some(self.max_payload_size.try_into().unwrap()), + gossip_static_inbound: self + .gossip_static_inbound + .iter() + .map(TextFmt::encode) + .collect(), + gossip_static_outbound: self + .gossip_static_outbound + .iter() + .map(|(key, addr)| proto::NodeAddr { + key: Some(TextFmt::encode(key)), + addr: Some(TextFmt::encode(addr)), + }) + .collect(), + gossip_dynamic_inbound_limit: Some( + self.gossip_dynamic_inbound_limit.try_into().unwrap(), + ), + } + } +} diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs index 423ef75b7ce..e0681502503 100644 --- a/core/lib/zksync_core/src/consensus/mod.rs +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -1,124 +1,29 @@ //! Consensus-related functionality. + #![allow(clippy::redundant_locals)] -use std::collections::{HashMap, HashSet}; -use anyhow::Context as _; -use serde::de::Error; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; -use zksync_consensus_crypto::{Text, TextFmt}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor as executor; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::validator; use zksync_consensus_storage::BlockStore; use zksync_dal::ConnectionPool; -use zksync_types::Address; use self::storage::Store; -use crate::sync_layer::sync_action::ActionQueueSender; +use crate::sync_layer::{sync_action::ActionQueueSender, MainNodeClient, SyncState}; +pub mod config; +pub mod proto; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; -#[derive(PartialEq, Eq, Hash)] -pub struct SerdeText(pub T); - -impl<'de, T: TextFmt> serde::Deserialize<'de> for SerdeText { - fn deserialize>(d: D) -> Result { - Ok(Self( - T::decode(Text::new(<&str>::deserialize(d)?)).map_err(Error::custom)?, - )) - } -} - -/// Config (shared between main node and external node) which implements `serde` encoding -/// and therefore can be flattened into env vars. -#[derive(serde::Deserialize)] -pub struct SerdeConfig { - /// Local socket address to listen for the incoming connections. - pub server_addr: std::net::SocketAddr, - /// Public address of this node (should forward to `server_addr`) - /// that will be advertised to peers, so that they can connect to this - /// node. - pub public_addr: std::net::SocketAddr, - - /// Validator private key. Should be set only for the validator node. - pub validator_key: Option>, - - /// Validators participating in consensus. - pub validator_set: Vec>, - - /// Key of this node. It uniquely identifies the node. - pub node_key: SerdeText, - /// Limit on the number of inbound connections outside - /// of the `static_inbound` set. - pub gossip_dynamic_inbound_limit: u64, - /// Inbound gossip connections that should be unconditionally accepted. - pub gossip_static_inbound: HashSet>, - /// Outbound gossip connections that the node should actively try to - /// establish and maintain. - pub gossip_static_outbound: HashMap, std::net::SocketAddr>, - - pub operator_address: Option
<Address>,
-}
-
-impl SerdeConfig {
-    /// Extracts consensus executor config from the `SerdeConfig`.
-    fn executor(&self) -> anyhow::Result<executor::Config> {
-        Ok(executor::Config {
-            server_addr: self.server_addr,
-            validators: validator::ValidatorSet::new(
-                self.validator_set.iter().map(|k| k.0.clone()),
-            )
-            .context("validator_set")?,
-            node_key: self.node_key.0.clone(),
-            gossip_dynamic_inbound_limit: self.gossip_dynamic_inbound_limit,
-            gossip_static_inbound: self
-                .gossip_static_inbound
-                .iter()
-                .map(|k| k.0.clone())
-                .collect(),
-            gossip_static_outbound: self
-                .gossip_static_outbound
-                .iter()
-                .map(|(k, v)| (k.0.clone(), *v))
-                .collect(),
-        })
-    }
-
-    /// Extracts a validator config from the `SerdeConfig`.
-    pub(crate) fn validator(&self) -> anyhow::Result<executor::ValidatorConfig> {
-        let key = self
-            .validator_key
-            .as_ref()
-            .context("validator_key is required")?;
-        Ok(executor::ValidatorConfig {
-            key: key.0.clone(),
-            public_addr: self.public_addr,
-        })
-    }
-}
-
-impl TryFrom<SerdeConfig> for MainNodeConfig {
-    type Error = anyhow::Error;
-    fn try_from(cfg: SerdeConfig) -> anyhow::Result<Self> {
-        Ok(Self {
-            executor: cfg.executor()?,
-            validator: cfg.validator()?,
-            operator_address: cfg
-                .operator_address
-                .context("operator_address is required")?,
-        })
-    }
-}
-
 /// Main node consensus config.
 #[derive(Debug, Clone)]
 pub struct MainNodeConfig {
     pub executor: executor::Config,
     pub validator: executor::ValidatorConfig,
-    pub operator_address: Address,
 }
 
 impl MainNodeConfig {
@@ -131,7 +36,7 @@ impl MainNodeConfig {
             "currently only consensus with just 1 validator is supported"
         );
         scope::run!(&ctx, |ctx, s| async {
-            let store = Store::new(pool, self.operator_address);
+            let store = Store::new(pool);
             let mut block_store = store.clone().into_block_store();
             block_store
                 .try_init_genesis(ctx, &self.validator.key)
@@ -156,23 +61,33 @@ impl MainNodeConfig {
     }
 }
 
+/// Periodically fetches the head of the main node
+/// and updates `SyncState` accordingly.
+pub async fn run_main_node_state_fetcher(
+    ctx: &ctx::Ctx,
+    client: &dyn MainNodeClient,
+    sync_state: &SyncState,
+) -> ctx::OrCanceled<()> {
+    const DELAY_INTERVAL: time::Duration = time::Duration::milliseconds(500);
+    const RETRY_DELAY_INTERVAL: time::Duration = time::Duration::seconds(5);
+    loop {
+        match ctx.wait(client.fetch_l2_block_number()).await? {
+            Ok(head) => {
+                sync_state.set_main_node_block(head);
+                ctx.sleep(DELAY_INTERVAL).await?;
+            }
+            Err(err) => {
+                tracing::warn!("main_node_client.fetch_l2_block_number(): {err}");
+                ctx.sleep(RETRY_DELAY_INTERVAL).await?;
+            }
+        }
+    }
+}
+
 /// External node consensus config.
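+/// Unlike `MainNodeConfig`, this only carries the executor part: the external node
+/// does not act as a validator; it follows the gossip network and forwards fetched
+/// blocks into the sync action queue (see `FetcherConfig::run`).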
#[derive(Debug, Clone)] pub struct FetcherConfig { - executor: executor::Config, - operator_address: Address, -} - -impl TryFrom for FetcherConfig { - type Error = anyhow::Error; - fn try_from(cfg: SerdeConfig) -> anyhow::Result { - Ok(Self { - executor: cfg.executor()?, - operator_address: cfg - .operator_address - .context("operator_address is required")?, - }) - } + pub executor: executor::Config, } impl FetcherConfig { @@ -183,14 +98,8 @@ impl FetcherConfig { pool: ConnectionPool, actions: ActionQueueSender, ) -> anyhow::Result<()> { - tracing::info!( - "Starting gossip fetcher with {:?} and node key {:?}", - self.executor, - self.executor.node_key.public(), - ); - scope::run!(ctx, |ctx, s| async { - let store = Store::new(pool, self.operator_address); + let store = Store::new(pool); let mut block_store = store.clone().into_block_store(); block_store .set_actions_queue(ctx, actions) diff --git a/core/lib/zksync_core/src/consensus/proto/mod.proto b/core/lib/zksync_core/src/consensus/proto/mod.proto new file mode 100644 index 00000000000..a943f9470e9 --- /dev/null +++ b/core/lib/zksync_core/src/consensus/proto/mod.proto @@ -0,0 +1,53 @@ +// For config readability and ease of use, some of the primitive types are +// encoded as strings. Fields of these types have a comment with the name of the type. +// Here is the list of string-encoded types and their corresponding string formats: +// +// IpAddr - TCP socket address, encoded as a string of the form "IP:port". +// Both IPv4 and IPv6 are supported +// (note that opening IPv6 ports may not work depending on the VM capabilities). +// examples: "203.0.113.7:3456", "[2001:DB8::1]:4567" +// +// ValidatorPublicKey - public key of the validator (consensus participant) of the form "validator:public::" +// Currently only bn254 signature scheme is supported for validators. +// example: "validator:public:bn254:4b0c4697f0a35eab30f63684ae4611f3c1d631eecfd97237e2345a9b3d0c472dbb16c49b793beceaab0cdd89cda6ff1099bd1aaf1ad6cabde9a15793cc09b407" +// +// NodePublicKey - public key of the node (gossip network participant) of the form "node:public::" +// Currently only ed25519 signature scheme is supported for nodes. +// example: "node:public:ed25519:d36607699a0a3fbe3de16947928cf299484219ff62ca20f387795b0859dbe501" +syntax = "proto3"; + +package zksync.core.consensus; + +// (public key, ip address) of a gossip network node. +message NodeAddr { + optional string key = 1; // required; NodePublicKey + optional string addr = 2; // required; IpAddr +} + +message ConsensusConfig { + // IP:port to listen on, for incoming TCP connections. + // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). + optional string server_addr = 1; // required; IpAddr + + // Public IP:port to advertise, should forward to server_addr. + // Can be `127.0.0.1:` for local tests. + optional string public_addr = 2; // required; IpAddr + + // Public keys of all validators. + // Currently it has to be a singleton with a public key corresponding to secret key in CONSENSUS_VALIDATOR_KEY env var. + repeated string validators = 3; // required; ValidatorPublicKey + + // Maximal allowed size of the payload. + optional uint64 max_payload_size = 4; // required; bytes + + // Inbound connections that should be unconditionally accepted on the gossip network. + repeated string gossip_static_inbound = 5; // required; NodePublicKey + + // Limit on the number of gossip network inbound connections outside + // of the `gossip_static_inbound` set. 
+ optional uint64 gossip_dynamic_inbound_limit = 6; // required + + // Outbound gossip network connections that the node should actively try to + // establish and maintain. + repeated NodeAddr gossip_static_outbound = 7; +} diff --git a/core/lib/zksync_core/src/consensus/proto/mod.rs b/core/lib/zksync_core/src/consensus/proto/mod.rs new file mode 100644 index 00000000000..f5b556c455c --- /dev/null +++ b/core/lib/zksync_core/src/consensus/proto/mod.rs @@ -0,0 +1,3 @@ +#![allow(warnings)] + +include!(concat!(env!("OUT_DIR"), "/src/consensus/proto/gen.rs")); diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs index 516ba7eb19c..bac1c54f3e6 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -1,11 +1,12 @@ //! Storage implementation based on DAL. + use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; use zksync_consensus_bft::PayloadManager; use zksync_consensus_roles::validator; use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; use zksync_dal::{consensus_dal::Payload, ConnectionPool}; -use zksync_types::{Address, MiniblockNumber}; +use zksync_types::MiniblockNumber; #[cfg(test)] mod testonly; @@ -61,14 +62,9 @@ impl<'a> CtxStorage<'a> { &mut self, ctx: &ctx::Ctx, number: validator::BlockNumber, - operator_address: Address, ) -> ctx::Result> { Ok(ctx - .wait( - self.0 - .consensus_dal() - .block_payload(number, operator_address), - ) + .wait(self.0.consensus_dal().block_payload(number)) .await??) } @@ -108,14 +104,9 @@ impl<'a> CtxStorage<'a> { &mut self, ctx: &ctx::Ctx, cert: &validator::CommitQC, - operator_address: Address, ) -> ctx::Result<()> { Ok(ctx - .wait( - self.0 - .consensus_dal() - .insert_certificate(cert, operator_address), - ) + .wait(self.0.consensus_dal().insert_certificate(cert)) .await??) } @@ -192,7 +183,6 @@ impl Cursor { #[derive(Clone, Debug)] pub(super) struct Store { pool: ConnectionPool, - operator_address: Address, } /// Wrapper of `ConnectionPool` implementing `PersistentBlockStore`. @@ -205,11 +195,8 @@ pub(super) struct BlockStore { impl Store { /// Creates a `Store`. `pool` should have multiple connections to work efficiently. - pub fn new(pool: ConnectionPool, operator_address: Address) -> Self { - Self { - pool, - operator_address, - } + pub fn new(pool: ConnectionPool) -> Self { + Self { pool } } /// Converts `Store` into a `BlockStore`. @@ -251,16 +238,20 @@ impl BlockStore { return Ok(()); } let payload = txn - .payload(ctx, number, self.inner.operator_address) + .payload(ctx, number) .await .wrap("payload()")? .context("miniblock disappeared")?; - let (genesis, _) = zksync_consensus_bft::testonly::make_genesis( - &[validator_key.clone()], - payload.encode(), - number, - ); - txn.insert_certificate(ctx, &genesis.justification, self.inner.operator_address) + let mut genesis = validator::GenesisSetup { + keys: vec![validator_key.clone()], + blocks: vec![], + }; + genesis + .next_block() + .block_number(number) + .payload(payload.encode()) + .push(); + txn.insert_certificate(ctx, &genesis.blocks[0].justification) .await .wrap("insert_certificate()")?; txn.commit(ctx).await.wrap("commit()") @@ -317,7 +308,7 @@ impl PersistentBlockStore for BlockStore { .wrap("certificate()")? .context("not found")?; let payload = storage - .payload(ctx, number, self.inner.operator_address) + .payload(ctx, number) .await .wrap("payload()")? 
.context("miniblock disappeared from storage")?; @@ -356,7 +347,7 @@ impl PersistentBlockStore for BlockStore { .wrap("last_miniblock_number()")?; if number >= block.header().number { storage - .insert_certificate(ctx, &block.justification, self.inner.operator_address) + .insert_certificate(ctx, &block.justification) .await .wrap("insert_certificate()")?; return Ok(()); @@ -402,12 +393,16 @@ impl PayloadManager for Store { drop(storage); loop { let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; - if let Some(payload) = storage - .payload(ctx, block_number, self.operator_address) - .await - .wrap("payload()")? - { - return Ok(payload.encode()); + if let Some(payload) = storage.payload(ctx, block_number).await.wrap("payload()")? { + let encoded_payload = payload.encode(); + if encoded_payload.0.len() > 1 << 20 { + tracing::warn!( + "large payload ({}B) with {} transactions", + encoded_payload.0.len(), + payload.transactions.len() + ); + } + return Ok(encoded_payload); } drop(storage); ctx.sleep(POLL_INTERVAL).await?; diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index ebbd43ee920..88a23d17ba3 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -1,8 +1,11 @@ //! Utilities for testing the consensus module. use anyhow::Context as _; -use rand::Rng; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::{node, validator}; use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ @@ -12,6 +15,7 @@ use zksync_types::{ use crate::{ consensus::{ + config::Config, storage::{BlockStore, CtxStorage}, Store, }, @@ -27,6 +31,30 @@ use crate::{ utils::testonly::{create_l1_batch_metadata, create_l2_transaction}, }; +fn make_addr(rng: &mut R) -> std::net::SocketAddr { + std::net::SocketAddr::new(std::net::IpAddr::from(rng.gen::<[u8; 16]>()), rng.gen()) +} + +fn make_node_key(rng: &mut R) -> node::PublicKey { + rng.gen::().public() +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Config { + Config { + server_addr: make_addr(rng), + public_addr: make_addr(rng), + validators: rng.gen(), + max_payload_size: usize::MAX, + gossip_dynamic_inbound_limit: rng.gen(), + gossip_static_inbound: (0..3).map(|_| make_node_key(rng)).collect(), + gossip_static_outbound: (0..5) + .map(|_| (make_node_key(rng), make_addr(rng))) + .collect(), + } + } +} + #[derive(Debug, Default)] pub(crate) struct MockMainNodeClient { prev_miniblock_hash: H256, @@ -150,7 +178,6 @@ pub(super) struct StateKeeper { fee_per_gas: u64, gas_per_pubdata: u32, - operator_address: Address, pub(super) actions_sender: ActionQueueSender, pub(super) pool: ConnectionPool, @@ -159,17 +186,13 @@ pub(super) struct StateKeeper { /// Fake StateKeeper task to be executed in the background. pub(super) struct StateKeeperRunner { actions_queue: ActionQueue, - operator_address: Address, pool: ConnectionPool, } impl StateKeeper { /// Constructs and initializes a new `StateKeeper`. /// Caller has to run `StateKeeperRunner.run()` task in the background. 
- pub async fn new( - pool: ConnectionPool, - operator_address: Address, - ) -> anyhow::Result<(Self, StateKeeperRunner)> { + pub async fn new(pool: ConnectionPool) -> anyhow::Result<(Self, StateKeeperRunner)> { // ensure genesis let mut storage = pool.access_storage().await.context("access_storage()")?; if storage @@ -178,9 +201,7 @@ impl StateKeeper { .await .context("is_genesis_needed()")? { - let mut params = GenesisParams::mock(); - params.first_validator = operator_address; - ensure_genesis_state(&mut storage, L2ChainId::default(), ¶ms) + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .context("ensure_genesis_state()")?; } @@ -212,12 +233,10 @@ impl StateKeeper { batch_sealed: !pending_batch, fee_per_gas: 10, gas_per_pubdata: 100, - operator_address, actions_sender, pool: pool.clone(), }, StateKeeperRunner { - operator_address, actions_queue, pool: pool.clone(), }, @@ -236,7 +255,7 @@ impl StateKeeper { l1_gas_price: 2, l2_fair_gas_price: 3, fair_pubdata_price: Some(24), - operator_address: self.operator_address, + operator_address: GenesisParams::mock().first_validator, protocol_version: ProtocolVersionId::latest(), first_miniblock_info: (self.last_block, 1), } @@ -293,7 +312,7 @@ impl StateKeeper { /// Creates a new `BlockStore` for the underlying `ConnectionPool`. pub fn store(&self) -> BlockStore { - Store::new(self.pool.clone(), self.operator_address).into_block_store() + Store::new(self.pool.clone()).into_block_store() } // Wait for all pushed miniblocks to be produced. @@ -303,7 +322,7 @@ impl StateKeeper { loop { let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; if storage - .payload(ctx, self.last_block(), self.operator_address) + .payload(ctx, self.last_block()) .await .wrap("storage.payload()")? .is_some() @@ -362,7 +381,7 @@ impl StateKeeperRunner { self.actions_queue, SyncState::new(), Box::::default(), - self.operator_address, + Address::repeat_byte(11), u32::MAX, L2ChainId::default(), ) diff --git a/core/lib/zksync_core/src/consensus/tests.rs b/core/lib/zksync_core/src/consensus/tests.rs index dddac56f99d..7d3ed8f0df8 100644 --- a/core/lib/zksync_core/src/consensus/tests.rs +++ b/core/lib/zksync_core/src/consensus/tests.rs @@ -1,5 +1,6 @@ use std::ops::Range; +use anyhow::Context as _; use tracing::Instrument as _; use zksync_concurrency::{ctx, scope}; use zksync_consensus_executor::testonly::{connect_full_node, ValidatorNode}; @@ -7,13 +8,11 @@ use zksync_consensus_storage as storage; use zksync_consensus_storage::PersistentBlockStore as _; use zksync_consensus_utils::no_copy::NoCopy; use zksync_dal::{connection::TestTemplate, ConnectionPool}; -use zksync_types::Address; +use zksync_protobuf::testonly::test_encode_random; use super::*; use crate::consensus::storage::CtxStorage; -const OPERATOR_ADDRESS: Address = Address::repeat_byte(17); - async fn make_blocks( ctx: &ctx::Ctx, pool: &ConnectionPool, @@ -24,7 +23,7 @@ async fn make_blocks( let mut blocks: Vec = vec![]; while !range.is_empty() { let payload = storage - .payload(ctx, range.start, OPERATOR_ADDRESS) + .payload(ctx, range.start) .await .wrap(range.start)? .context("payload not found")? @@ -57,7 +56,7 @@ async fn test_validator_block_store() { // Fetch a suffix of blocks that we will generate (fake) certs for. let want = scope::run!(ctx, |ctx, s| async { // Start state keeper. 
- let (mut sk, runner) = testonly::StateKeeper::new(pool.clone(), OPERATOR_ADDRESS).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool.clone()).await?; s.spawn_bg(runner.run(ctx)); sk.push_random_blocks(rng, 10).await; sk.wait_for_miniblocks(ctx).await?; @@ -74,7 +73,7 @@ async fn test_validator_block_store() { // Insert blocks one by one and check the storage state. for (i, block) in want.iter().enumerate() { - let store = Store::new(pool.clone(), OPERATOR_ADDRESS).into_block_store(); + let store = Store::new(pool.clone()).into_block_store(); store.store_next_block(ctx, block).await.unwrap(); assert_eq!(want[..i + 1], storage::testonly::dump(ctx, &store).await); } @@ -92,7 +91,7 @@ async fn test_validator() { scope::run!(ctx, |ctx, s| async { // Start state keeper. let pool = ConnectionPool::test_pool().await; - let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool).await?; s.spawn_bg(runner.run(ctx)); // Populate storage with a bunch of blocks. @@ -101,7 +100,7 @@ async fn test_validator() { .await .context("sk.wait_for_miniblocks(<1st phase>)")?; - let cfg = ValidatorNode::for_single_validator(&mut ctx.rng()); + let cfg = ValidatorNode::new(&mut ctx.rng()); let validators = cfg.node.validators.clone(); // Restart consensus actor a couple times, making it process a bunch of blocks each time. @@ -112,7 +111,6 @@ async fn test_validator() { let cfg = MainNodeConfig { executor: cfg.node.clone(), validator: cfg.validator.clone(), - operator_address: OPERATOR_ADDRESS, }; s.spawn_bg(cfg.run(ctx, sk.pool.clone())); sk.store() @@ -164,12 +162,11 @@ async fn test_fetcher() { // topology: // validator <-> fetcher <-> fetcher <-> ... - let cfg = ValidatorNode::for_single_validator(rng); + let cfg = ValidatorNode::new(rng); let validators = cfg.node.validators.clone(); let mut cfg = MainNodeConfig { executor: cfg.node, validator: cfg.validator, - operator_address: OPERATOR_ADDRESS, }; let mut fetcher_cfgs = vec![connect_full_node(rng, &mut cfg.executor)]; while fetcher_cfgs.len() < FETCHERS { @@ -178,16 +175,13 @@ async fn test_fetcher() { } let fetcher_cfgs: Vec<_> = fetcher_cfgs .into_iter() - .map(|executor| FetcherConfig { - executor, - operator_address: OPERATOR_ADDRESS, - }) + .map(|executor| FetcherConfig { executor }) .collect(); // Create an initial database snapshot, which contains a cert for genesis block. let pool = scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test_pool().await; - let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(cfg.clone().run(ctx, sk.pool.clone())); sk.push_random_blocks(rng, 5).await; @@ -204,7 +198,7 @@ async fn test_fetcher() { scope::run!(ctx, |ctx, s| async { // Run validator. 
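+        // Each node gets its own copy of the template DB, so the validator and the
+        // fetchers all start from the same initial snapshot.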
let pool = template.create_db().await?; - let (mut validator, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (mut validator, runner) = testonly::StateKeeper::new(pool).await?; s.spawn_bg(async { runner .run(ctx) @@ -219,7 +213,7 @@ async fn test_fetcher() { for (i, cfg) in fetcher_cfgs.into_iter().enumerate() { let i = NoCopy::from(i); let pool = template.create_db().await?; - let (fetcher, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (fetcher, runner) = testonly::StateKeeper::new(pool).await?; fetchers.push(fetcher.store()); s.spawn_bg(async { let i = i; @@ -260,22 +254,20 @@ async fn test_fetcher_backfill_certs() { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); - let cfg = ValidatorNode::for_single_validator(rng); + let cfg = ValidatorNode::new(rng); let mut cfg = MainNodeConfig { executor: cfg.node, validator: cfg.validator, - operator_address: OPERATOR_ADDRESS, }; let fetcher_cfg = FetcherConfig { executor: connect_full_node(rng, &mut cfg.executor), - operator_address: OPERATOR_ADDRESS, }; // Create an initial database snapshot, which contains some blocks: some with certs, some // without. let pool = scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test_pool().await; - let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool).await?; s.spawn_bg(runner.run(ctx)); // Some blocks with certs. @@ -302,13 +294,13 @@ async fn test_fetcher_backfill_certs() { scope::run!(ctx, |ctx, s| async { // Run validator. let pool = template.create_db().await?; - let (mut validator, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (mut validator, runner) = testonly::StateKeeper::new(pool).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(cfg.run(ctx, validator.pool.clone())); // Run fetcher. 
let pool = template.create_db().await?; - let (fetcher, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let (fetcher, runner) = testonly::StateKeeper::new(pool).await?; let fetcher_store = fetcher.store(); s.spawn_bg(runner.run(ctx)); s.spawn_bg(fetcher_cfg.run(ctx, fetcher.pool, fetcher.actions_sender)); @@ -324,3 +316,10 @@ async fn test_fetcher_backfill_certs() { .await .unwrap(); } + +#[test] +fn test_schema_encoding() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + test_encode_random::(rng); +} diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs index 69e54b1141c..08d53d63fe4 100644 --- a/core/lib/zksync_core/src/consistency_checker/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/mod.rs @@ -1,13 +1,13 @@ -use std::{fmt, sync::Arc, time::Duration}; +use std::{fmt, time::Duration}; use anyhow::Context as _; use tokio::sync::watch; +use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode; use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::QueryClient, Error as L1ClientError, EthInterface}; -use zksync_types::{ - l1_batch_commit_data_generator::L1BatchCommitDataGenerator, web3::ethabi, L1BatchNumber, H256, -}; +use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable}; +use zksync_types::{web3::ethabi, L1BatchNumber, H256}; use crate::{ metrics::{CheckerComponent, EN_METRICS}, @@ -68,7 +68,7 @@ impl LocalL1BatchCommitData { async fn new( storage: &mut StorageProcessor<'_>, batch_number: L1BatchNumber, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> anyhow::Result> { let Some(storage_l1_batch) = storage .blocks_dal() @@ -116,7 +116,8 @@ impl LocalL1BatchCommitData { Ok(Some(Self { is_pre_boojum, - l1_commit_data: l1_batch_commit_data_generator.l1_commit_data(&l1_batch), + l1_commit_data: CommitBatchInfo::new(&l1_batch, l1_batch_commit_data_generator) + .into_token(), commit_tx_hash, })) } @@ -253,7 +254,7 @@ impl ConsistencyChecker { pub async fn run( mut self, mut stop_receiver: watch::Receiver, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> anyhow::Result<()> { // It doesn't make sense to start the checker until we have at least one L1 batch with metadata. 
let earliest_l1_batch_number = @@ -317,6 +318,7 @@ impl ConsistencyChecker { } L1DataMismatchBehavior::Log => { tracing::warn!("L1 Batch #{batch_number} is inconsistent with L1"); + batch_number += 1; // We don't want to infinitely loop failing the check on the same batch } }, Err(CheckError::Web3(err)) => { diff --git a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs index ca997d80931..03f11815932 100644 --- a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs @@ -5,13 +5,13 @@ use std::{collections::HashMap, slice}; use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use tokio::sync::mpsc; +use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode; use zksync_dal::StorageProcessor; use zksync_eth_client::clients::MockEthereum; +use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_types::{ - aggregated_operations::AggregatedActionType, block::BlockGasCount, - commitment::L1BatchWithMetadata, - l1_batch_commit_data_generator::RollupModeL1BatchCommitDataGenerator, web3::contract::Options, - L2ChainId, ProtocolVersion, ProtocolVersionId, H256, + aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, + web3::contract::Options, L2ChainId, ProtocolVersion, ProtocolVersionId, H256, }; use super::*; @@ -45,11 +45,11 @@ fn create_pre_boojum_l1_batch_with_metadata(number: u32) -> L1BatchWithMetadata fn build_commit_tx_input_data( batches: &[L1BatchWithMetadata], - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Vec { let commit_tokens = batches .iter() - .map(|batch| l1_batch_commit_data_generator.l1_commit_data(batch)); + .map(|batch| CommitBatchInfo::new(batch, l1_batch_commit_data_generator).into_token()); let commit_tokens = ethabi::Token::Array(commit_tokens.collect()); let mut encoded = vec![]; @@ -58,7 +58,7 @@ fn build_commit_tx_input_data( // Mock an additional argument used in real `commitBlocks` / `commitBatches`. In real transactions, // it's taken from the L1 batch previous to `batches[0]`, but since this argument is not checked, // it's OK to use `batches[0]`. 
- let prev_header_tokens = batches[0].l1_header_data(); + let prev_header_tokens = StoredBatchInfo(&batches[0]).into_token(); encoded.extend_from_slice(ðabi::encode(&[prev_header_tokens, commit_tokens])); encoded } @@ -89,7 +89,7 @@ fn build_commit_tx_input_data_is_correct() { create_l1_batch_with_metadata(1), create_l1_batch_with_metadata(2), ]; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let commit_tx_input_data = build_commit_tx_input_data(&batches, l1_batch_commit_data_generator.clone()); @@ -103,7 +103,7 @@ fn build_commit_tx_input_data_is_correct() { .unwrap(); assert_eq!( commit_data, - l1_batch_commit_data_generator.l1_commit_data(batch) + CommitBatchInfo::new(batch, l1_batch_commit_data_generator).into_token() ); } } @@ -206,7 +206,7 @@ impl SaveAction<'_> { Self::InsertBatch(l1_batch) => { storage .blocks_dal() - .insert_l1_batch(&l1_batch.header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&l1_batch.header) .await .unwrap(); } @@ -309,7 +309,7 @@ async fn normal_checker_function( let mut commit_tx_hash_by_l1_batch = HashMap::with_capacity(l1_batches.len()); let client = MockEthereum::default(); - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; for (i, l1_batches) in l1_batches.chunks(batches_per_transaction).enumerate() { let input_data = build_commit_tx_input_data(l1_batches, l1_batch_commit_data_generator.clone()); @@ -390,7 +390,7 @@ async fn checker_processes_pre_boojum_batches( let mut commit_tx_hash_by_l1_batch = HashMap::with_capacity(l1_batches.len()); let client = MockEthereum::default(); - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; for (i, l1_batch) in l1_batches.iter().enumerate() { let input_data = build_commit_tx_input_data( slice::from_ref(l1_batch), @@ -452,7 +452,7 @@ async fn checker_functions_after_snapshot_recovery(delay_batch_insertion: bool) let l1_batch = create_l1_batch_with_metadata(99); - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let commit_tx_input_data = build_commit_tx_input_data( slice::from_ref(&l1_batch), @@ -535,7 +535,7 @@ impl IncorrectDataKind { self, client: &MockEthereum, l1_batch: &L1BatchWithMetadata, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> H256 { let (commit_tx_input_data, successful_status) = match self { Self::MissingStatus => { @@ -614,7 +614,7 @@ async fn checker_detects_incorrect_tx_data(kind: IncorrectDataKind, snapshot_rec } let l1_batch = create_l1_batch_with_metadata(if snapshot_recovery { 99 } else { 1 }); - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let client = MockEthereum::default(); let commit_tx_hash = kind .apply(&client, &l1_batch, l1_batch_commit_data_generator.clone()) diff --git a/core/lib/zksync_core/src/eth_sender/aggregated_operations.rs b/core/lib/zksync_core/src/eth_sender/aggregated_operations.rs new file mode 100644 index 00000000000..bb7cf75e50d --- /dev/null +++ 
b/core/lib/zksync_core/src/eth_sender/aggregated_operations.rs @@ -0,0 +1,55 @@ +use std::ops; + +use zksync_l1_contract_interface::i_executor::methods::{ + CommitBatches, ExecuteBatches, ProveBatches, +}; +use zksync_types::{aggregated_operations::AggregatedActionType, L1BatchNumber, ProtocolVersionId}; + +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Clone)] +pub enum AggregatedOperation { + Commit(CommitBatches), + PublishProofOnchain(ProveBatches), + Execute(ExecuteBatches), +} + +impl AggregatedOperation { + pub fn get_action_type(&self) -> AggregatedActionType { + match self { + Self::Commit(_) => AggregatedActionType::Commit, + Self::PublishProofOnchain(_) => AggregatedActionType::PublishProofOnchain, + Self::Execute(_) => AggregatedActionType::Execute, + } + } + + pub fn l1_batch_range(&self) -> ops::RangeInclusive { + let batches = match self { + Self::Commit(op) => &op.l1_batches, + Self::PublishProofOnchain(op) => &op.l1_batches, + Self::Execute(op) => &op.l1_batches, + }; + + if batches.is_empty() { + return L1BatchNumber(0)..=L1BatchNumber(0); + } + let first_batch = &batches[0]; + let last_batch = &batches[batches.len() - 1]; + first_batch.header.number..=last_batch.header.number + } + + pub fn get_action_caption(&self) -> &'static str { + match self { + Self::Commit(_) => "commit", + Self::PublishProofOnchain(_) => "proof", + Self::Execute(_) => "execute", + } + } + + pub fn protocol_version(&self) -> ProtocolVersionId { + match self { + Self::Commit(op) => op.l1_batches[0].header.protocol_version.unwrap(), + Self::PublishProofOnchain(op) => op.l1_batches[0].header.protocol_version.unwrap(), + Self::Execute(op) => op.l1_batches[0].header.protocol_version.unwrap(), + } + } +} diff --git a/core/lib/zksync_core/src/eth_sender/aggregator.rs b/core/lib/zksync_core/src/eth_sender/aggregator.rs index a6816384765..a7c6388fa41 100644 --- a/core/lib/zksync_core/src/eth_sender/aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/aggregator.rs @@ -1,24 +1,28 @@ use std::sync::Arc; -use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig}; +use zksync_config::configs::{ + chain::L1BatchCommitDataGeneratorMode, + eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig}, +}; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; +use zksync_l1_contract_interface::i_executor::methods::{ + CommitBatches, ExecuteBatches, ProveBatches, +}; use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_prover_interface::outputs::L1BatchProofForL1; use zksync_types::{ - aggregated_operations::{ - AggregatedActionType, AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, - L1BatchProofForL1, L1BatchProofOperation, - }, - commitment::L1BatchWithMetadata, - helpers::unix_timestamp_ms, - l1_batch_commit_data_generator::L1BatchCommitDataGenerator, - protocol_version::L1VerifierConfig, - L1BatchNumber, ProtocolVersionId, + aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, + helpers::unix_timestamp_ms, protocol_version::L1VerifierConfig, L1BatchNumber, + ProtocolVersionId, }; -use super::publish_criterion::{ - DataSizeCriterion, GasCriterion, L1BatchPublishCriterion, NumberCriterion, - TimestampDeadlineCriterion, +use super::{ + aggregated_operations::AggregatedOperation, + publish_criterion::{ + DataSizeCriterion, GasCriterion, L1BatchPublishCriterion, NumberCriterion, + TimestampDeadlineCriterion, + }, }; #[derive(Debug)] @@ -28,14 +32,14 @@ pub struct Aggregator { 
execute_criteria: Vec>, config: SenderConfig, blob_store: Arc, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, } impl Aggregator { pub fn new( config: SenderConfig, blob_store: Arc, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Self { Self { commit_criteria: vec![ @@ -149,7 +153,7 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, limit: usize, last_sealed_l1_batch: L1BatchNumber, - ) -> Option { + ) -> Option { let max_l1_batch_timestamp_millis = self .config .l1_batch_min_age_before_execute_seconds @@ -168,7 +172,7 @@ impl Aggregator { ) .await; - l1_batches.map(|l1_batches| L1BatchExecuteOperation { l1_batches }) + l1_batches.map(|l1_batches| ExecuteBatches { l1_batches }) } async fn get_commit_operation( @@ -178,7 +182,7 @@ impl Aggregator { last_sealed_batch: L1BatchNumber, base_system_contracts_hashes: BaseSystemContractsHashes, protocol_version_id: ProtocolVersionId, - ) -> Option { + ) -> Option { let mut blocks_dal = storage.blocks_dal(); let last_committed_l1_batch = blocks_dal .get_last_committed_to_eth_l1_batch() @@ -227,10 +231,10 @@ impl Aggregator { ) .await; - batches.map(|batches| L1BatchCommitOperation { + batches.map(|batches| CommitBatches { last_committed_l1_batch, l1_batches: batches, - l1_batch_commit_data_generator: self.l1_batch_commit_data_generator.clone(), + l1_batch_commit_data_generator: self.l1_batch_commit_data_generator, }) } @@ -239,7 +243,7 @@ impl Aggregator { l1_verifier_config: L1VerifierConfig, proof_loading_mode: &ProofLoadingMode, blob_store: &dyn ObjectStore, - ) -> Option { + ) -> Option { let previous_proven_batch_number = storage .blocks_dal() .get_last_l1_batch_with_prove_tx() @@ -307,7 +311,7 @@ impl Aggregator { ); }); - Some(L1BatchProofOperation { + Some(ProveBatches { prev_l1_batch: previous_proven_batch_metadata, l1_batches: vec![metadata_for_batch_being_proved], proofs, @@ -320,8 +324,8 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, ready_for_proof_l1_batches: Vec, last_sealed_l1_batch: L1BatchNumber, - l1_batch_commit_data_generator: Arc, - ) -> Option { + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, + ) -> Option { let batches = extract_ready_subrange( storage, &mut self.proof_criteria, @@ -338,7 +342,7 @@ impl Aggregator { .await .unwrap()?; - Some(L1BatchProofOperation { + Some(ProveBatches { prev_l1_batch: prev_batch, l1_batches: batches, proofs: vec![], @@ -352,7 +356,7 @@ impl Aggregator { limit: usize, last_sealed_l1_batch: L1BatchNumber, l1_verifier_config: L1VerifierConfig, - ) -> Option { + ) -> Option { match self.config.proof_sending_mode { ProofSendingMode::OnlyRealProofs => { Self::load_real_proof_operation( @@ -414,7 +418,7 @@ async fn extract_ready_subrange( publish_criteria: &mut [Box], unpublished_l1_batches: Vec, last_sealed_l1_batch: L1BatchNumber, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Option> { let mut last_l1_batch: Option = None; for criterion in publish_criteria { diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 6211b636384..70a8b3d8ae4 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -5,20 +5,20 @@ use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use 
zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs}; +use zksync_l1_contract_interface::{ + multicall3::{Multicall3Call, Multicall3Result}, + pre_boojum_verifier::old_l1_vk_commitment, + Detokenize, Tokenizable, Tokenize, +}; use zksync_types::{ - aggregated_operations::AggregatedOperation, - contracts::{Multicall3Call, Multicall3Result}, eth_sender::EthTx, ethabi::{Contract, Token}, protocol_version::{L1VerifierConfig, VerifierParams}, - vk_transform::l1_vk_commitment, - web3::contract::{ - tokens::{Detokenize, Tokenizable}, - Error, - }, + web3::contract::Error as Web3ContractError, Address, ProtocolVersionId, H256, U256, }; +use super::aggregated_operations::AggregatedOperation; use crate::{ eth_sender::{ metrics::{PubdataKind, METRICS}, @@ -189,9 +189,12 @@ impl EthTxAggregator { token: Token, ) -> Result { let parse_error = |tokens: &[Token]| { - Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!("Failed to parse multicall token: {:?}", tokens), - ))) + Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( + "Failed to parse multicall token: {:?}", + tokens + )), + )) }; if let Token::Array(call_results) = token { @@ -205,24 +208,24 @@ impl EthTxAggregator { Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_bootloader.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 bootloader hash data is not of the len of 32: {:?}", multicall3_bootloader - ), - ))); + )), + )); } let bootloader = H256::from_slice(&multicall3_bootloader); let multicall3_default_aa = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_default_aa.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 default aa hash data is not of the len of 32: {:?}", multicall3_default_aa - ), - ))); + )), + )); } let default_aa = H256::from_slice(&multicall3_default_aa); let base_system_contracts_hashes = BaseSystemContractsHashes { @@ -233,12 +236,12 @@ impl EthTxAggregator { let multicall3_verifier_params = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_params.len() != 96 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 verifier params data is not of the len of 96: {:?}", multicall3_default_aa - ), - ))); + )), + )); } let recursion_node_level_vk_hash = H256::from_slice(&multicall3_verifier_params[..32]); let recursion_leaf_level_vk_hash = @@ -254,24 +257,24 @@ impl EthTxAggregator { let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_address.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 verifier address data is not of the len of 32: {:?}", multicall3_verifier_address - ), - ))); + )), + )); } let verifier_address = Address::from_slice(&multicall3_verifier_address[12..]); let multicall3_protocol_version = 
Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_protocol_version.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 protocol version data is not of the len of 32: {:?}", multicall3_protocol_version - ), - ))); + )), + )); } let protocol_version_id = U256::from_big_endian(&multicall3_protocol_version) .try_into() @@ -310,7 +313,7 @@ impl EthTxAggregator { .for_contract(verifier_address, abi); let vk = self.eth_client.call_contract_function(args).await?; - Ok(l1_vk_commitment(Token::from_tokens(vk)?)) + Ok(old_l1_vk_commitment(Token::from_tokens(vk)?)) } else { let get_vk_hash = self.functions.verification_key_hash.as_ref(); tracing::debug!("Calling verificationKeyHash"); @@ -406,7 +409,7 @@ impl EthTxAggregator { // For "commit" and "prove" operations it's necessary that the contracts are of the same version as L1 batches are. // For "execute" it's not required, i.e. we can "execute" pre-boojum batches with post-boojum contracts. - match &op { + match op.clone() { AggregatedOperation::Commit(op) => { assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum); let f = if contracts_are_pre_boojum { @@ -417,7 +420,8 @@ impl EthTxAggregator { .as_ref() .expect("Missing ABI for commitBatches") }; - f.encode_input(&op.get_eth_tx_args()) + f.encode_input(&op.into_tokens()) + .expect("Failed to encode commit transaction data") } AggregatedOperation::PublishProofOnchain(op) => { assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum); @@ -429,7 +433,8 @@ impl EthTxAggregator { .as_ref() .expect("Missing ABI for proveBatches") }; - f.encode_input(&op.get_eth_tx_args()) + f.encode_input(&op.into_tokens()) + .expect("Failed to encode prove transaction data") } AggregatedOperation::Execute(op) => { let f = if contracts_are_pre_boojum { @@ -440,10 +445,10 @@ impl EthTxAggregator { .as_ref() .expect("Missing ABI for executeBatches") }; - f.encode_input(&op.get_eth_tx_args()) + f.encode_input(&op.into_tokens()) + .expect("Failed to encode execute transaction data") } } - .expect("Failed to encode transaction data") } pub(super) async fn save_eth_tx( diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs index 99d46e31068..be0c8b3ba09 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -5,7 +5,7 @@ use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{ - BoundEthInterface, Error, ExecutedTxStatus, RawTransactionBytes, SignedCallResult, + BoundEthInterface, Error, EthInterface, ExecutedTxStatus, RawTransactionBytes, SignedCallResult, }; use zksync_types::{ eth_sender::EthTx, @@ -37,6 +37,7 @@ struct OperatorNonce { #[derive(Debug, Clone, Copy)] pub(super) struct L1BlockNumbers { + pub safe: L1BlockNumber, pub finalized: L1BlockNumber, pub latest: L1BlockNumber, } @@ -300,7 +301,22 @@ impl EthTxManager { .await? .as_u32() .into(); - Ok(L1BlockNumbers { finalized, latest }) + + let safe = self + .ethereum_gateway + .block(BlockId::Number(BlockNumber::Safe), "eth_tx_manager") + .await? 
+ .expect("Safe block must be present on L1") + .number + .expect("Safe block must contain number") + .as_u32() + .into(); + + Ok(L1BlockNumbers { + finalized, + latest, + safe, + }) } // Monitors the in-flight transactions, marks mined ones as confirmed, @@ -310,9 +326,7 @@ impl EthTxManager { storage: &mut StorageProcessor<'_>, l1_block_numbers: L1BlockNumbers, ) -> Result, ETHSenderError> { - METRICS - .last_known_l1_block - .set(l1_block_numbers.latest.0.into()); + METRICS.track_block_numbers(&l1_block_numbers); let operator_nonce = self.get_operator_nonce(l1_block_numbers).await?; let inflight_txs = storage.eth_sender_dal().get_inflight_txs().await.unwrap(); METRICS.number_of_inflight_txs.set(inflight_txs.len()); diff --git a/core/lib/zksync_core/src/eth_sender/metrics.rs b/core/lib/zksync_core/src/eth_sender/metrics.rs index 4bce1bf1a1f..255eca2f8d7 100644 --- a/core/lib/zksync_core/src/eth_sender/metrics.rs +++ b/core/lib/zksync_core/src/eth_sender/metrics.rs @@ -7,7 +7,10 @@ use zksync_dal::StorageProcessor; use zksync_types::{aggregated_operations::AggregatedActionType, eth_sender::EthTx}; use zksync_utils::time::seconds_since_epoch; -use crate::metrics::{BlockL1Stage, BlockStage, APP_METRICS}; +use crate::{ + eth_sender::eth_tx_manager::L1BlockNumbers, + metrics::{BlockL1Stage, BlockStage, APP_METRICS}, +}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] #[metrics(label = "kind", rename_all = "snake_case")] @@ -18,6 +21,15 @@ pub(super) enum PubdataKind { RepeatedWritesCompressed, } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "block_number_variant", rename_all = "snake_case")] +#[allow(clippy::enum_variant_names)] +pub(super) enum BlockNumberVariant { + Latest, + Finalized, + Safe, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] #[metrics(label = "type")] pub(super) struct ActionTypeLabel(AggregatedActionType); @@ -76,20 +88,28 @@ pub(super) struct EthSenderMetrics { #[metrics(buckets = FEE_BUCKETS)] pub used_priority_fee_per_gas: Histogram, /// Last L1 block observed by the Ethereum sender. - pub last_known_l1_block: Gauge, + pub last_known_l1_block: Family>, /// Number of in-flight txs produced by the Ethereum sender. pub number_of_inflight_txs: Gauge, #[metrics(buckets = GAS_BUCKETS)] pub l1_gas_used: Family>, #[metrics(buckets = Buckets::LATENCIES)] pub l1_tx_mined_latency: Family>, - #[metrics(buckets = &[1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 20.0, 30.0, 50.0])] + #[metrics(buckets = & [1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 20.0, 30.0, 50.0])] pub l1_blocks_waited_in_mempool: Family>, /// Number of L1 batches aggregated for publishing with a specific reason. 
pub block_aggregation_reason: Family, } impl EthSenderMetrics { + pub fn track_block_numbers(&self, l1_block_numbers: &L1BlockNumbers) { + self.last_known_l1_block[&BlockNumberVariant::Latest] + .set(l1_block_numbers.latest.0 as usize); + self.last_known_l1_block[&BlockNumberVariant::Finalized] + .set(l1_block_numbers.finalized.0 as usize); + self.last_known_l1_block[&BlockNumberVariant::Safe].set(l1_block_numbers.safe.0 as usize); + } + pub async fn track_eth_tx_metrics( &self, connection: &mut StorageProcessor<'_>, diff --git a/core/lib/zksync_core/src/eth_sender/mod.rs b/core/lib/zksync_core/src/eth_sender/mod.rs index e5a47d3f62f..010441dc0d1 100644 --- a/core/lib/zksync_core/src/eth_sender/mod.rs +++ b/core/lib/zksync_core/src/eth_sender/mod.rs @@ -1,3 +1,4 @@ +mod aggregated_operations; mod aggregator; mod error; mod eth_tx_aggregator; diff --git a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs index 09909df4e68..c8b261cfd9c 100644 --- a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs +++ b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs @@ -1,11 +1,13 @@ -use std::{fmt, sync::Arc}; +use std::fmt; use async_trait::async_trait; use chrono::Utc; +use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode; use zksync_dal::StorageProcessor; +use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable}; use zksync_types::{ - aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, - l1_batch_commit_data_generator::L1BatchCommitDataGenerator, L1BatchNumber, + aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, ethabi, + L1BatchNumber, }; use super::metrics::METRICS; @@ -23,7 +25,7 @@ pub trait L1BatchPublishCriterion: fmt::Debug + Send + Sync { storage: &mut StorageProcessor<'_>, consecutive_l1_batches: &[L1BatchWithMetadata], last_sealed_l1_batch: L1BatchNumber, - _l1_batch_commit_data_generator: Arc, + _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Option; } @@ -45,7 +47,7 @@ impl L1BatchPublishCriterion for NumberCriterion { _storage: &mut StorageProcessor<'_>, consecutive_l1_batches: &[L1BatchWithMetadata], _last_sealed_l1_batch: L1BatchNumber, - _l1_batch_commit_data_generator: Arc, + _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Option { let mut batch_numbers = consecutive_l1_batches .iter() @@ -92,7 +94,7 @@ impl L1BatchPublishCriterion for TimestampDeadlineCriterion { _storage: &mut StorageProcessor<'_>, consecutive_l1_batches: &[L1BatchWithMetadata], last_sealed_l1_batch: L1BatchNumber, - _l1_batch_commit_data_generator: Arc, + _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Option { let first_l1_batch = consecutive_l1_batches.iter().next()?; let last_l1_batch_number = consecutive_l1_batches.iter().last()?.header.number.0; @@ -157,7 +159,7 @@ impl L1BatchPublishCriterion for GasCriterion { storage: &mut StorageProcessor<'_>, consecutive_l1_batches: &[L1BatchWithMetadata], _last_sealed_l1_batch: L1BatchNumber, - _l1_batch_commit_data_generator: Arc, + _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Option { let base_cost = agg_l1_batch_base_cost(self.op); assert!( @@ -215,13 +217,20 @@ impl L1BatchPublishCriterion for DataSizeCriterion { _storage: &mut StorageProcessor<'_>, consecutive_l1_batches: &[L1BatchWithMetadata], _last_sealed_l1_batch: L1BatchNumber, - l1_batch_commit_data_generator: Arc, + 
l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Option { const STORED_BLOCK_INFO_SIZE: usize = 96; // size of `StoredBlockInfo` solidity struct let mut data_size_left = self.data_limit - STORED_BLOCK_INFO_SIZE; for (index, l1_batch) in consecutive_l1_batches.iter().enumerate() { - let l1_commit_data_size = l1_batch_commit_data_generator.l1_commit_data_size(l1_batch); + // TODO (PLA-771): Make sure that this estimation is correct. + let l1_commit_data_size = + ethabi::encode(&[ethabi::Token::Array(vec![CommitBatchInfo::new( + l1_batch, + l1_batch_commit_data_generator, + ) + .into_token()])]) + .len(); if data_size_left < l1_commit_data_size { if index == 0 { panic!( diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index c9539dd230c..79ffed85f79 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -3,30 +3,31 @@ use std::sync::Arc; use assert_matches::assert_matches; use once_cell::sync::Lazy; use zksync_config::{ - configs::eth_sender::{ProofSendingMode, SenderConfig}, + configs::{ + chain::L1BatchCommitDataGeneratorMode, + eth_sender::{ProofSendingMode, SenderConfig}, + }, ContractsConfig, ETHSenderConfig, GasAdjusterConfig, }; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::MockEthereum, EthInterface}; +use zksync_l1_contract_interface::i_executor::methods::{ + CommitBatches, ExecuteBatches, ProveBatches, +}; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ - aggregated_operations::{ - AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, L1BatchProofOperation, - }, block::L1BatchHeader, commitment::{L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata}, ethabi::Token, helpers::unix_timestamp_ms, - l1_batch_commit_data_generator::{ - L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator, - }, web3::contract::Error, Address, L1BatchNumber, L1BlockNumber, ProtocolVersionId, H256, }; use crate::{ eth_sender::{ - eth_tx_manager::L1BlockNumbers, Aggregator, ETHSenderError, EthTxAggregator, EthTxManager, + aggregated_operations::AggregatedOperation, eth_tx_manager::L1BlockNumbers, Aggregator, + ETHSenderError, EthTxAggregator, EthTxManager, }, l1_gas_price::GasAdjuster, utils::testonly::create_l1_batch, @@ -36,7 +37,7 @@ use crate::{ type MockEthTxManager = EthTxManager; static DUMMY_OPERATION: Lazy = Lazy::new(|| { - AggregatedOperation::Execute(L1BatchExecuteOperation { + AggregatedOperation::Execute(ExecuteBatches { l1_batches: vec![L1BatchWithMetadata { header: create_l1_batch(1), metadata: default_l1_batch_metadata(), @@ -62,7 +63,7 @@ impl EthSenderTester { connection_pool: ConnectionPool, history: Vec, non_ordering_confirmations: bool, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> Self { let eth_sender_config = ETHSenderConfig::for_tests(); let contracts_config = ContractsConfig::for_tests(); @@ -139,7 +140,11 @@ impl EthSenderTester { async fn get_block_numbers(&self) -> L1BlockNumbers { let latest = self.gateway.block_number("").await.unwrap().as_u32().into(); let finalized = latest - Self::WAIT_CONFIRMATIONS as u32; - L1BlockNumbers { finalized, latest } + L1BlockNumbers { + finalized, + latest, + safe: finalized, + } } } @@ -147,7 +152,7 @@ impl EthSenderTester { #[tokio::test] async fn confirm_many() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let 
l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![10; 100], @@ -230,7 +235,7 @@ async fn confirm_many() -> anyhow::Result<()> { #[tokio::test] async fn resend_each_block() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![7, 6, 5, 5, 5, 2, 1], @@ -348,7 +353,7 @@ async fn resend_each_block() -> anyhow::Result<()> { #[tokio::test] async fn dont_resend_already_mined() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![100; 100], @@ -426,7 +431,7 @@ async fn dont_resend_already_mined() -> anyhow::Result<()> { #[tokio::test] async fn three_scenarios() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool.clone(), vec![100; 100], @@ -505,7 +510,7 @@ async fn three_scenarios() -> anyhow::Result<()> { #[tokio::test] async fn failed_eth_tx() { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool.clone(), vec![100; 100], @@ -584,7 +589,7 @@ fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { #[tokio::test] async fn correct_order_for_confirmations() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![100; 100], @@ -654,7 +659,7 @@ async fn correct_order_for_confirmations() -> anyhow::Result<()> { #[tokio::test] async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![100; 100], @@ -758,7 +763,7 @@ async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> { #[tokio::test] async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![100; 100], @@ -856,7 +861,7 @@ async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> { #[tokio::test] async fn test_parse_multicall_data() { let 
connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let tester = EthSenderTester::new( connection_pool, vec![100; 100], @@ -943,7 +948,7 @@ async fn test_parse_multicall_data() { #[tokio::test] async fn get_multicall_data() { let connection_pool = ConnectionPool::test_pool().await; - let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {}); + let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup; let mut tester = EthSenderTester::new( connection_pool, vec![100; 100], @@ -972,7 +977,7 @@ async fn insert_l1_batch(tester: &EthSenderTester, number: L1BatchNumber) -> L1B .storage() .await .blocks_dal() - .insert_l1_batch(&header, &[], Default::default(), &[], &[], 0) + .insert_mock_l1_batch(&header) .await .unwrap(); tester @@ -995,7 +1000,7 @@ async fn execute_l1_batches( l1_batches: Vec, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::Execute(L1BatchExecuteOperation { + let operation = AggregatedOperation::Execute(ExecuteBatches { l1_batches: l1_batches.into_iter().map(l1_batch_with_metadata).collect(), }); send_operation(tester, operation, confirm).await @@ -1007,7 +1012,7 @@ async fn prove_l1_batch( l1_batch: L1BatchHeader, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::PublishProofOnchain(L1BatchProofOperation { + let operation = AggregatedOperation::PublishProofOnchain(ProveBatches { prev_l1_batch: l1_batch_with_metadata(last_committed_l1_batch), l1_batches: vec![l1_batch_with_metadata(l1_batch)], proofs: vec![], @@ -1021,9 +1026,9 @@ async fn commit_l1_batch( last_committed_l1_batch: L1BatchHeader, l1_batch: L1BatchHeader, confirm: bool, - l1_batch_commit_data_generator: Arc, + l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode, ) -> H256 { - let operation = AggregatedOperation::Commit(L1BatchCommitOperation { + let operation = AggregatedOperation::Commit(CommitBatches { last_committed_l1_batch: l1_batch_with_metadata(last_committed_l1_batch), l1_batches: vec![l1_batch_with_metadata(l1_batch)], l1_batch_commit_data_generator, diff --git a/core/lib/zksync_core/src/eth_watch/client.rs b/core/lib/zksync_core/src/eth_watch/client.rs index 08e62c3f4ea..28707ce4a4c 100644 --- a/core/lib/zksync_core/src/eth_watch/client.rs +++ b/core/lib/zksync_core/src/eth_watch/client.rs @@ -2,9 +2,9 @@ use std::fmt; use zksync_contracts::verifier_contract; use zksync_eth_client::{CallFunctionArgs, Error as EthClientError, EthInterface}; +use zksync_l1_contract_interface::pre_boojum_verifier::old_l1_vk_commitment; use zksync_types::{ ethabi::{Contract, Token}, - vk_transform::l1_vk_commitment, web3::{ self, contract::tokens::Detokenize, @@ -126,7 +126,7 @@ impl EthClient for EthHttpQueryClient { let args = CallFunctionArgs::new("get_verification_key", ()) .for_contract(verifier_address, self.verifier_contract_abi.clone()); let vk = self.client.call_contract_function(args).await?; - Ok(l1_vk_commitment(Token::from_tokens(vk)?)) + Ok(old_l1_vk_commitment(Token::from_tokens(vk)?)) } } diff --git a/core/lib/zksync_core/src/fee_model.rs b/core/lib/zksync_core/src/fee_model.rs index 2fffd6fc9e5..8d43cdc99a7 100644 --- a/core/lib/zksync_core/src/fee_model.rs +++ b/core/lib/zksync_core/src/fee_model.rs @@ -64,15 +64,11 @@ impl BatchFeeModelInputProvider for MainNodeFeeInputProvider { config, l1_gas_price: self.provider.estimate_effective_gas_price(), 
}), - FeeModelConfig::V2(config) => { - let validium_mode = std::env::var("VALIDIUM_MODE") == Ok("true".to_owned()); - FeeParams::V2(FeeParamsV2 { - config, - l1_gas_price: self.provider.estimate_effective_gas_price(), - l1_pubdata_price: self.provider.estimate_effective_pubdata_price() - * !validium_mode as u64, - }) - } + FeeModelConfig::V2(config) => FeeParams::V2(FeeParamsV2 { + config, + l1_gas_price: self.provider.estimate_effective_gas_price(), + l1_pubdata_price: self.provider.estimate_effective_pubdata_price(), + }), } } } diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 8d492ab42cf..9d9021c7c15 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -3,7 +3,11 @@ //! setups the required databases, and outputs the data required to initialize a smart contract. use anyhow::Context as _; -use multivm::utils::get_max_gas_per_pubdata_byte; +use multivm::{ + utils::get_max_gas_per_pubdata_byte, + zk_evm_latest::aux_structures::{LogQuery as MultiVmLogQuery, Timestamp as MultiVMTimestamp}, + zkevm_test_harness_latest::witness::sort_storage_access::sort_storage_access_queries, +}; use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; use zksync_merkle_tree::domain::ZkSyncTree; @@ -13,10 +17,10 @@ use zksync_types::{ fee_model::BatchFeeInput, get_code_key, get_system_context_init_logs, protocol_version::{L1VerifierConfig, ProtocolVersion}, - sort_storage_access::sort_storage_access_queries, tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS}, - AccountTreeId, Address, L1BatchNumber, L2ChainId, LogQuery, MiniblockNumber, ProtocolVersionId, - StorageKey, StorageLog, StorageLogKind, Timestamp, H256, + zk_evm_types::{LogQuery, Timestamp}, + AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, + StorageKey, StorageLog, StorageLogKind, H256, }; use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; @@ -163,7 +167,8 @@ async fn insert_base_system_contracts_to_factory_deps( storage .storage_dal() .insert_factory_deps(MiniblockNumber(0), &factory_deps) - .await; + .await + .unwrap(); } async fn insert_system_contracts( @@ -197,7 +202,7 @@ async fn insert_system_contracts( // we don't produce proof for the genesis block, // but we still need to populate the table // to have the correct initial state of the merkle tree - let log_queries: Vec = storage_logs + let log_queries: Vec = storage_logs .iter() .enumerate() .flat_map(|(tx_index, (_, storage_logs))| { @@ -205,9 +210,9 @@ async fn insert_system_contracts( .iter() .enumerate() .map(move |(log_index, storage_log)| { - LogQuery { + MultiVmLogQuery { // Monotonically increasing Timestamp. Normally it's generated by the VM, but we don't have a VM in the genesis block. 
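A small sketch of the invariant behind the synthetic timestamps constructed below; it holds as long as each genesis transaction produces fewer than 2^16 storage logs:

    // (tx_index << 16) + log_index is strictly increasing across the flattened
    // log sequence, provided log_index always fits into 16 bits.
    fn synthetic_timestamp(tx_index: usize, log_index: usize) -> u32 {
        debug_assert!(log_index < (1 << 16), "log index must fit into 16 bits");
        ((tx_index << 16) + log_index) as u32
    }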
- timestamp: Timestamp(((tx_index << 16) + log_index) as u32), + timestamp: MultiVMTimestamp(((tx_index << 16) + log_index) as u32), tx_number_in_block: tx_index as u16, aux_byte: 0, shard_id: 0, @@ -220,11 +225,27 @@ async fn insert_system_contracts( is_service: false, } }) - .collect::>() + .collect::>() }) .collect(); - let (_, deduped_log_queries) = sort_storage_access_queries(&log_queries); + let deduped_log_queries: Vec = sort_storage_access_queries(&log_queries) + .1 + .into_iter() + .map(|log_query| LogQuery { + timestamp: Timestamp(log_query.timestamp.0), + tx_number_in_block: log_query.tx_number_in_block, + aux_byte: log_query.aux_byte, + shard_id: log_query.shard_id, + address: log_query.address, + key: log_query.key, + read_value: log_query.read_value, + written_value: log_query.written_value, + rw_flag: log_query.rw_flag, + rollback: log_query.rollback, + is_service: log_query.is_service, + }) + .collect(); let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries .into_iter() @@ -255,7 +276,8 @@ async fn insert_system_contracts( transaction .storage_dal() .insert_factory_deps(MiniblockNumber(0), &factory_deps) - .await; + .await + .unwrap(); transaction.commit().await.unwrap(); } @@ -280,14 +302,12 @@ pub(crate) async fn create_genesis_l1_batch( tx: None, }; - let mut genesis_l1_batch_header = L1BatchHeader::new( + let genesis_l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, - first_validator_address, base_system_contracts.hashes(), protocol_version, ); - genesis_l1_batch_header.is_finished = true; let genesis_miniblock_header = MiniblockHeader { number: MiniblockNumber(0), @@ -295,6 +315,7 @@ pub(crate) async fn create_genesis_l1_batch( hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), l1_tx_count: 0, l2_tx_count: 0, + fee_account_address: first_validator_address, base_fee_per_gas: 0, gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(protocol_version.into()), batch_fee_input: BatchFeeInput::l1_pegged(0, 0), @@ -317,7 +338,7 @@ pub(crate) async fn create_genesis_l1_batch( BlockGasCount::default(), &[], &[], - 0, + Default::default(), ) .await .unwrap(); diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs index 88e1f0f6465..0039b32bc77 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_types::proofs::JobCountStatistics; +use zksync_dal::{fri_prover_dal::types::JobCountStatistics, ConnectionPool}; use crate::house_keeper::periodic_job::PeriodicJob; diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs index f198d27d97b..cf1cdc90314 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_types::proofs::{AggregationRound, JobCountStatistics}; +use zksync_dal::{fri_prover_dal::types::JobCountStatistics, ConnectionPool}; +use zksync_types::basic_fri_types::AggregationRound; use crate::house_keeper::periodic_job::PeriodicJob; diff --git 
a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs index ac5d0413438..97be31c3a49 100644 --- a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs @@ -8,7 +8,6 @@ use std::{ use tokio::sync::watch; use zksync_config::GasAdjusterConfig; use zksync_eth_client::{Error, EthInterface}; -use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; use self::metrics::METRICS; use super::{L1GasPriceProvider, L1TxParamsProvider}; @@ -130,7 +129,7 @@ impl L1GasPriceProvider for GasAdjuster { fn estimate_effective_pubdata_price(&self) -> u64 { // For now, pubdata is only sent via calldata, so its price is pegged to the L1 gas price. - self.estimate_effective_gas_price() * L1_GAS_PER_PUBDATA_BYTE as u64 + self.estimate_effective_gas_price() * self.config.l1_gas_per_pubdata_byte } } diff --git a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs index 2feb65f1cbd..473b77da98c 100644 --- a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs @@ -44,6 +44,7 @@ async fn kept_updated() { internal_enforced_l1_gas_price: None, poll_period: 5, max_l1_gas_price: None, + l1_gas_per_pubdata_byte: 17, }, ) .await diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index c29a462f63c..cf2df943f58 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -12,12 +12,13 @@ use zksync_circuit_breaker::{ l1_txs::FailedL1TransactionChecker, replication_lag::ReplicationLagChecker, CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, }; +use zksync_concurrency::{ctx, scope}; use zksync_config::{ configs::{ api::{MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{ - CircuitBreakerConfig, L1BatchCommitDataGeneratorMode, MempoolConfig, NetworkConfig, - OperationsManagerConfig, StateKeeperConfig, + CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, + StateKeeperConfig, }, contracts::ProverAtGenesis, database::{MerkleTreeConfig, MerkleTreeMode}, @@ -36,10 +37,6 @@ use zksync_queued_job_processor::JobProcessor; use zksync_state::PostgresStorageCaches; use zksync_types::{ fee_model::FeeModelConfig, - l1_batch_commit_data_generator::{ - L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator, - ValidiumModeL1BatchCommitDataGenerator, - }, protocol_version::{L1VerifierConfig, VerifierParams}, system_contracts::get_system_smart_contracts, web3::contract::tokens::Detokenize, @@ -234,6 +231,8 @@ pub enum Component { Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. ProofDataHandler, + /// Component generating BFT consensus certificates for miniblocks. 
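With the gas adjuster change above, the effective pubdata price is `l1_gas_price * l1_gas_per_pubdata_byte`, where the multiplier now comes from configuration (the updated test uses 17). A worked example under that assumption:

    let l1_gas_price: u64 = 30_000_000_000; // 30 gwei
    let l1_gas_per_pubdata_byte: u64 = 17;
    let pubdata_price = l1_gas_price * l1_gas_per_pubdata_byte;
    assert_eq!(pubdata_price, 510_000_000_000); // 510 gwei per pubdata byte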
+ Consensus, } #[derive(Debug)] @@ -268,6 +267,7 @@ impl FromStr for Components { "eth_tx_aggregator" => Ok(Components(vec![Component::EthTxAggregator])), "eth_tx_manager" => Ok(Components(vec![Component::EthTxManager])), "proof_data_handler" => Ok(Components(vec![Component::ProofDataHandler])), + "consensus" => Ok(Components(vec![Component::Consensus])), other => Err(format!("{} is not a valid component name", other)), } } @@ -509,6 +509,39 @@ pub async fn initialize_components( tracing::info!("initialized State Keeper in {elapsed:?}"); } + if components.contains(&Component::Consensus) { + let cfg = configs + .consensus_config + .clone() + .context("consensus component's config is missing")?; + let started_at = Instant::now(); + tracing::info!("initializing Consensus"); + let pool = connection_pool.clone(); + let mut stop_receiver = stop_receiver.clone(); + task_futures.push(tokio::spawn(async move { + scope::run!(&ctx::root(), |ctx, s| async { + s.spawn_bg(async { + // Consensus is a new component. + // For now in case of error we just log it and allow the server + // to continue running. + if let Err(err) = cfg.run(ctx, pool).await { + tracing::error!(%err, "Consensus actor failed"); + } else { + tracing::info!("Consensus actor stopped"); + } + Ok(()) + }); + let _ = stop_receiver.wait_for(|stop| *stop).await?; + Ok(()) + }) + .await + })); + + let elapsed = started_at.elapsed(); + APP_METRICS.init_latency[&InitStage::Consensus].set(elapsed); + tracing::info!("initialized Consensus in {elapsed:?}"); + } + let main_zksync_contract_address = contracts_config.diamond_proxy_addr; if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); @@ -558,23 +591,12 @@ pub async fn initialize_components( .state_keeper_config .clone() .context("state_keeper_config")?; - let l1_batch_commit_data_generator: Arc = - match state_keeper_config.l1_batch_commit_data_generator_mode { - L1BatchCommitDataGeneratorMode::Rollup => { - tracing::debug!("RollupModeL1BatchCommitDataGenerator"); - Arc::new(RollupModeL1BatchCommitDataGenerator {}) - } - L1BatchCommitDataGeneratorMode::Validium => { - tracing::debug!("ValidiumModeL1BatchCommitDataGenerator"); - Arc::new(ValidiumModeL1BatchCommitDataGenerator {}) - } - }; let eth_tx_aggregator_actor = EthTxAggregator::new( eth_sender.sender.clone(), Aggregator::new( eth_sender.sender.clone(), store_factory.create_store().await, - l1_batch_commit_data_generator, + state_keeper_config.l1_batch_commit_data_generator_mode, ), Arc::new(eth_client), contracts_config.validator_timelock_addr, @@ -736,7 +758,7 @@ async fn add_state_keeper_to_task_futures for Health { + fn from(details: MerkleTreeHealth) -> Self { + Self::from(HealthStatus::Ready).with_details(details) + } +} + impl From for Health { - fn from(tree_info: MerkleTreeInfo) -> Self { - Self::from(HealthStatus::Ready).with_details(tree_info) + fn from(info: MerkleTreeInfo) -> Self { + Self::from(HealthStatus::Ready).with_details(MerkleTreeHealth::MainLoop(info)) } } @@ -45,7 +64,7 @@ pub(super) async fn create_db( memtable_capacity: usize, stalled_writes_timeout: Duration, multi_get_chunk_size: usize, -) -> RocksDBWrapper { +) -> anyhow::Result { tokio::task::spawn_blocking(move || { create_db_sync( &path, @@ -56,7 +75,7 @@ pub(super) async fn create_db( ) }) .await - .unwrap() + .context("panicked creating Merkle tree RocksDB")? 
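A minimal usage sketch for the now-fallible `create_db` introduced above, with illustrative capacity values (the real ones come from the calculator's configuration):

    use std::time::Duration;
    use anyhow::Context as _;

    let db = create_db(
        "/db/tree".into(),       // RocksDB path
        128 << 20,               // block cache capacity, bytes
        256 << 20,               // memtable capacity, bytes
        Duration::from_secs(30), // stalled writes timeout
        500,                     // multi-get chunk size
    )
    .await
    .context("failed opening Merkle tree RocksDB")?;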
} fn create_db_sync( @@ -65,7 +84,7 @@ fn create_db_sync( memtable_capacity: usize, stalled_writes_timeout: Duration, multi_get_chunk_size: usize, -) -> RocksDBWrapper { +) -> anyhow::Result { tracing::info!( "Initializing Merkle tree database at `{path}` with {multi_get_chunk_size} multi-get chunk size, \ {block_cache_capacity}B block cache, {memtable_capacity}B memtable capacity, \ @@ -80,7 +99,7 @@ fn create_db_sync( large_memtable_capacity: Some(memtable_capacity), stalled_writes_retries: StalledWritesRetries::new(stalled_writes_timeout), }, - ); + )?; if cfg!(test) { // We need sync writes for the unit tests to execute reliably. With the default config, // some writes to RocksDB may occur, but not be visible to the test code. @@ -88,7 +107,7 @@ fn create_db_sync( } let mut db = RocksDBWrapper::from(db); db.set_multi_get_chunk_size(multi_get_chunk_size); - db + Ok(db) } /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. @@ -382,7 +401,8 @@ impl L1BatchWithLogs { let mut touched_slots = storage .storage_logs_dal() .get_touched_slots_for_l1_batch(l1_batch_number) - .await; + .await + .unwrap(); touched_slots_latency.observe_with_count(touched_slots.len()); let leaf_indices_latency = METRICS.start_load_stage(LoadChangesStage::LoadLeafIndices); @@ -391,7 +411,8 @@ impl L1BatchWithLogs { let l1_batches_for_initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&hashed_keys_for_writes) - .await; + .await + .unwrap(); leaf_indices_latency.observe_with_count(hashed_keys_for_writes.len()); let mut storage_logs = BTreeMap::new(); @@ -434,7 +455,8 @@ impl L1BatchWithLogs { mod tests { use tempfile::TempDir; use zksync_dal::ConnectionPool; - use zksync_types::{proofs::PrepareBasicCircuitsJob, L2ChainId, StorageKey, StorageLog}; + use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; + use zksync_types::{L2ChainId, StorageKey, StorageLog}; use super::*; use crate::{ @@ -460,7 +482,8 @@ mod tests { let touched_slots = storage .storage_logs_dal() .get_touched_slots_for_l1_batch(l1_batch_number) - .await; + .await + .unwrap(); let mut storage_logs = BTreeMap::new(); @@ -472,11 +495,13 @@ mod tests { let previous_values = storage .storage_logs_dal() .get_previous_storage_values(&hashed_keys, l1_batch_number) - .await; + .await + .unwrap(); let l1_batches_for_initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) - .await; + .await + .unwrap(); for storage_key in protective_reads { let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); @@ -566,7 +591,8 @@ mod tests { Duration::ZERO, // writes should never be stalled in tests 500, ) - .await; + .await + .unwrap(); AsyncTree::new(db, MerkleTreeMode::Full) } diff --git a/core/lib/zksync_core/src/metadata_calculator/metrics.rs b/core/lib/zksync_core/src/metadata_calculator/metrics.rs index 87ab8fb377f..e1f63164128 100644 --- a/core/lib/zksync_core/src/metadata_calculator/metrics.rs +++ b/core/lib/zksync_core/src/metadata_calculator/metrics.rs @@ -197,7 +197,7 @@ pub(super) enum ChunkRecoveryStage { #[metrics(prefix = "server_metadata_calculator_recovery")] pub(super) struct MetadataCalculatorRecoveryMetrics { /// Number of chunks recovered. - pub recovered_chunk_count: Gauge, + pub recovered_chunk_count: Gauge, /// Latency of a tree recovery stage (not related to the recovery of a particular chunk; /// those metrics are tracked in the `chunk_latency` histogram). 
#[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index 36c8d7eb5a6..846b73c1004 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -4,9 +4,10 @@ use std::{ future::{self, Future}, sync::Arc, - time::Duration, + time::{Duration, Instant}, }; +use anyhow::Context as _; use tokio::sync::watch; use zksync_config::configs::{ chain::OperationsManagerConfig, @@ -24,7 +25,7 @@ use zksync_types::{ pub(crate) use self::helpers::{AsyncTreeReader, L1BatchWithLogs, MerkleTreeInfo}; use self::{ - helpers::{create_db, Delayer, GenericAsyncTree}, + helpers::{create_db, Delayer, GenericAsyncTree, MerkleTreeHealth}, metrics::{TreeUpdateStage, METRICS}, updater::TreeUpdater, }; @@ -80,7 +81,7 @@ impl MetadataCalculatorConfig { #[derive(Debug)] pub struct MetadataCalculator { - tree: GenericAsyncTree, + config: MetadataCalculatorConfig, tree_reader: watch::Sender>, object_store: Option>, delayer: Delayer, @@ -93,31 +94,21 @@ impl MetadataCalculator { pub async fn new( config: MetadataCalculatorConfig, object_store: Option>, - ) -> Self { - assert!( + ) -> anyhow::Result { + anyhow::ensure!( config.max_l1_batches_per_iter > 0, "Maximum L1 batches per iteration is misconfigured to be 0; please update it to positive value" ); - let db = create_db( - config.db_path.clone().into(), - config.block_cache_capacity, - config.memtable_capacity, - config.stalled_writes_timeout, - config.multi_get_chunk_size, - ) - .await; - let tree = GenericAsyncTree::new(db, config.mode).await; - let (_, health_updater) = ReactiveHealthCheck::new("tree"); - Self { - tree, + Ok(Self { tree_reader: watch::channel(None).0, object_store, delayer: Delayer::new(config.delay_interval), health_updater, max_l1_batches_per_iter: config.max_l1_batches_per_iter, - } + config, + }) } /// Returns a health check for this calculator. 
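Construction and tree opening are now decoupled: `new` validates the configuration and returns an error instead of panicking, while RocksDB is opened lazily inside `run` (see `create_tree` below). A wiring sketch under those assumptions:

    let calculator = MetadataCalculator::new(calculator_config, object_store).await?;
    // RocksDB is only opened inside `run()`, so I/O failures surface from the
    // spawned task rather than from construction.
    tokio::spawn(calculator.run(pool, stop_receiver));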
@@ -141,19 +132,52 @@ impl MetadataCalculator { } } + async fn create_tree(&self) -> anyhow::Result { + self.health_updater + .update(MerkleTreeHealth::Initialization.into()); + + let started_at = Instant::now(); + let db = create_db( + self.config.db_path.clone().into(), + self.config.block_cache_capacity, + self.config.memtable_capacity, + self.config.stalled_writes_timeout, + self.config.multi_get_chunk_size, + ) + .await + .with_context(|| { + format!( + "failed opening Merkle tree RocksDB with configuration {:?}", + self.config + ) + })?; + tracing::info!( + "Opened Merkle tree RocksDB with configuration {:?} in {:?}", + self.config, + started_at.elapsed() + ); + + Ok(GenericAsyncTree::new(db, self.config.mode).await) + } + pub async fn run( self, pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { - let tree = self - .tree + let tree = self.create_tree().await?; + let tree = tree .ensure_ready(&pool, &stop_receiver, &self.health_updater) .await?; let Some(tree) = tree else { return Ok(()); // recovery was aborted because a stop signal was received }; - self.tree_reader.send_replace(Some(tree.reader())); + let tree_reader = tree.reader(); + tracing::info!( + "Merkle tree is initialized and ready to process L1 batches: {:?}", + tree_reader.clone().info().await + ); + self.tree_reader.send_replace(Some(tree_reader)); let updater = TreeUpdater::new(tree, self.max_l1_batches_per_iter, self.object_store); updater diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs index 0d37dd02417..f6b6f74fb2b 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs @@ -27,22 +27,23 @@ use std::{ fmt, ops, - sync::atomic::{AtomicUsize, Ordering}, + sync::atomic::{AtomicU64, Ordering}, }; use anyhow::Context as _; use async_trait::async_trait; use futures::future; -use serde::{Deserialize, Serialize}; use tokio::sync::{watch, Mutex, Semaphore}; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_health_check::{Health, HealthStatus, HealthUpdater}; +use zksync_health_check::HealthUpdater; use zksync_merkle_tree::TreeEntry; -use zksync_types::{snapshots::SnapshotRecoveryStatus, MiniblockNumber, H256, U256}; -use zksync_utils::u256_to_h256; +use zksync_types::{ + snapshots::{uniform_hashed_keys_chunk, SnapshotRecoveryStatus}, + MiniblockNumber, H256, +}; use super::{ - helpers::{AsyncTree, AsyncTreeRecovery, GenericAsyncTree}, + helpers::{AsyncTree, AsyncTreeRecovery, GenericAsyncTree, MerkleTreeHealth}, metrics::{ChunkRecoveryStage, RecoveryStage, RECOVERY_METRICS}, }; @@ -53,7 +54,7 @@ mod tests; /// to control recovery behavior in tests. #[async_trait] trait HandleRecoveryEvent: fmt::Debug + Send + Sync { - fn recovery_started(&mut self, _chunk_count: usize, _recovered_chunk_count: usize) { + fn recovery_started(&mut self, _chunk_count: u64, _recovered_chunk_count: u64) { // Default implementation does nothing } @@ -66,20 +67,12 @@ trait HandleRecoveryEvent: fmt::Debug + Send + Sync { } } -/// Information about a Merkle tree during its snapshot recovery. -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -struct RecoveryMerkleTreeInfo { - mode: &'static str, // always set to "recovery" to distinguish from `MerkleTreeInfo` - chunk_count: usize, - recovered_chunk_count: usize, -} - /// [`HealthUpdater`]-based [`HandleRecoveryEvent`] implementation. 
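Recovery progress is now reported through the typed `MerkleTreeHealth` enum (whose `From` impl into `Health` marks the status as `Ready`); a condensed sketch of the update path used by the listener below:

    let health = MerkleTreeHealth::Recovery {
        chunk_count,
        recovered_chunk_count,
    };
    health_updater.update(health.into());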
#[derive(Debug)] struct RecoveryHealthUpdater<'a> { inner: &'a HealthUpdater, - chunk_count: usize, - recovered_chunk_count: AtomicUsize, + chunk_count: u64, + recovered_chunk_count: AtomicU64, } impl<'a> RecoveryHealthUpdater<'a> { @@ -87,14 +80,14 @@ impl<'a> RecoveryHealthUpdater<'a> { Self { inner, chunk_count: 0, - recovered_chunk_count: AtomicUsize::new(0), + recovered_chunk_count: AtomicU64::new(0), } } } #[async_trait] impl HandleRecoveryEvent for RecoveryHealthUpdater<'_> { - fn recovery_started(&mut self, chunk_count: usize, recovered_chunk_count: usize) { + fn recovery_started(&mut self, chunk_count: u64, recovered_chunk_count: u64) { self.chunk_count = chunk_count; *self.recovered_chunk_count.get_mut() = recovered_chunk_count; RECOVERY_METRICS @@ -107,12 +100,11 @@ impl HandleRecoveryEvent for RecoveryHealthUpdater<'_> { RECOVERY_METRICS .recovered_chunk_count .set(recovered_chunk_count); - let health = Health::from(HealthStatus::Ready).with_details(RecoveryMerkleTreeInfo { - mode: "recovery", + let health = MerkleTreeHealth::Recovery { chunk_count: self.chunk_count, recovered_chunk_count, - }); - self.inner.update(health); + }; + self.inner.update(health.into()); } } @@ -146,15 +138,15 @@ impl SnapshotParameters { }) } - fn chunk_count(&self) -> usize { - zksync_utils::ceil_div(self.log_count, Self::DESIRED_CHUNK_SIZE) as usize + fn chunk_count(&self) -> u64 { + self.log_count.div_ceil(Self::DESIRED_CHUNK_SIZE) } } /// Options for tree recovery. #[derive(Debug)] struct RecoveryOptions<'a> { - chunk_count: usize, + chunk_count: u64, concurrency_limit: usize, events: Box, } @@ -219,7 +211,9 @@ impl AsyncTreeRecovery { stop_receiver: &watch::Receiver, ) -> anyhow::Result> { let chunk_count = options.chunk_count; - let chunks: Vec<_> = Self::hashed_key_ranges(chunk_count).collect(); + let chunks: Vec<_> = (0..chunk_count) + .map(|chunk_id| uniform_hashed_keys_chunk(chunk_id, chunk_count)) + .collect(); tracing::info!( "Recovering Merkle tree from Postgres snapshot in {chunk_count} concurrent chunks" ); @@ -231,7 +225,7 @@ impl AsyncTreeRecovery { drop(storage); options .events - .recovery_started(chunk_count, chunk_count - remaining_chunks.len()); + .recovery_started(chunk_count, chunk_count - remaining_chunks.len() as u64); tracing::info!( "Filtered recovered key chunks; {} / {chunk_count} chunks remaining", remaining_chunks.len() @@ -271,26 +265,6 @@ impl AsyncTreeRecovery { Ok(Some(tree)) } - fn hashed_key_ranges(count: usize) -> impl Iterator> { - assert!(count > 0); - let mut stride = U256::MAX / count; - let stride_minus_one = if stride < U256::MAX { - stride += U256::one(); - stride - 1 - } else { - stride // `stride` is really 1 << 256 == U256::MAX + 1 - }; - - (0..count).map(move |i| { - let start = stride * i; - let (mut end, is_overflow) = stride_minus_one.overflowing_add(start); - if is_overflow { - end = U256::MAX; - } - u256_to_h256(start)..=u256_to_h256(end) - }) - } - /// Filters out `key_chunks` for which recovery was successfully performed. 
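Chunk ranges now come from the shared `uniform_hashed_keys_chunk` helper, keyed by `u64` chunk ids, replacing the local `hashed_key_ranges` iterator removed above. A condensed sketch of the call pattern from `recover`:

    let chunk_count: u64 = snapshot_parameters.chunk_count();
    let chunks: Vec<_> = (0..chunk_count)
        .map(|chunk_id| uniform_hashed_keys_chunk(chunk_id, chunk_count))
        .collect();
    // The chunks are expected to jointly cover the whole hashed-key space.
    assert_eq!(chunks.len(), chunk_count as usize);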
async fn filter_chunks( &mut self, @@ -317,7 +291,7 @@ impl AsyncTreeRecovery { .filter_map(|(i, &start)| Some((i, start?))); let start_keys = existing_starts .clone() - .map(|(_, start_entry)| start_entry.key) + .map(|(_, start_entry)| start_entry.tree_key()) .collect(); let tree_entries = self.entries(start_keys).await; @@ -389,7 +363,7 @@ impl AsyncTreeRecovery { let all_entries = all_entries .into_iter() .map(|entry| TreeEntry { - key: entry.key, + key: entry.tree_key(), value: entry.value, leaf_index: entry.leaf_index, }) diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index 180894f2fc7..5d1d37deeab 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -10,10 +10,9 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; -use zksync_health_check::{CheckHealth, ReactiveHealthCheck}; +use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; use zksync_types::{L1BatchNumber, L2ChainId, StorageLog}; -use zksync_utils::h256_to_u256; use super::*; use crate::{ @@ -29,45 +28,6 @@ use crate::{ utils::testonly::prepare_recovery_snapshot, }; -#[test] -fn calculating_hashed_key_ranges_with_single_chunk() { - let mut ranges = AsyncTreeRecovery::hashed_key_ranges(1); - let full_range = ranges.next().unwrap(); - assert_eq!(full_range, H256::zero()..=H256([0xff; 32])); -} - -#[test] -fn calculating_hashed_key_ranges_for_256_chunks() { - let ranges = AsyncTreeRecovery::hashed_key_ranges(256); - let mut start = H256::zero(); - let mut end = H256([0xff; 32]); - - for (i, range) in ranges.enumerate() { - let i = u8::try_from(i).unwrap(); - start.0[0] = i; - end.0[0] = i; - assert_eq!(range, start..=end); - } -} - -#[test_casing(5, [3, 7, 23, 100, 255])] -fn calculating_hashed_key_ranges_for_arbitrary_chunks(chunk_count: usize) { - let ranges: Vec<_> = AsyncTreeRecovery::hashed_key_ranges(chunk_count).collect(); - assert_eq!(ranges.len(), chunk_count); - - for window in ranges.windows(2) { - let [prev_range, range] = window else { - unreachable!(); - }; - assert_eq!( - h256_to_u256(*range.start()), - h256_to_u256(*prev_range.end()) + 1 - ); - } - assert_eq!(*ranges.first().unwrap().start(), H256::zero()); - assert_eq!(*ranges.last().unwrap().end(), H256([0xff; 32])); -} - #[test] fn calculating_chunk_count() { let mut snapshot = SnapshotParameters { @@ -92,7 +52,8 @@ async fn create_tree_recovery(path: PathBuf, l1_batch: L1BatchNumber) -> AsyncTr Duration::ZERO, // writes should never be stalled in tests 500, ) - .await; + .await + .unwrap(); AsyncTreeRecovery::new(db, l1_batch.0.into(), MerkleTreeMode::Full) } @@ -145,7 +106,8 @@ async fn prepare_recovery_snapshot_with_genesis( let genesis_logs = storage .storage_logs_dal() .get_touched_slots_for_l1_batch(L1BatchNumber(0)) - .await; + .await + .unwrap(); let genesis_logs = genesis_logs .into_iter() .map(|(key, value)| StorageLog::new_write_log(key, value)); @@ -162,30 +124,29 @@ async fn prepare_recovery_snapshot_with_genesis( l1_batch_root_hash, miniblock_number: MiniblockNumber(1), miniblock_root_hash: H256::zero(), // not used - last_finished_chunk_id: Some(0), - total_chunk_count: 1, + storage_logs_chunks_processed: vec![], } } #[derive(Debug)] struct TestEventListener { - expected_recovered_chunks: usize, - stop_threshold: usize, 
- processed_chunk_count: AtomicUsize, + expected_recovered_chunks: u64, + stop_threshold: u64, + processed_chunk_count: AtomicU64, stop_sender: watch::Sender, } impl TestEventListener { - fn new(stop_threshold: usize, stop_sender: watch::Sender) -> Self { + fn new(stop_threshold: u64, stop_sender: watch::Sender) -> Self { Self { expected_recovered_chunks: 0, stop_threshold, - processed_chunk_count: AtomicUsize::new(0), + processed_chunk_count: AtomicU64::new(0), stop_sender, } } - fn expect_recovered_chunks(mut self, count: usize) -> Self { + fn expect_recovered_chunks(mut self, count: u64) -> Self { self.expected_recovered_chunks = count; self } @@ -193,7 +154,7 @@ impl TestEventListener { #[async_trait] impl HandleRecoveryEvent for TestEventListener { - fn recovery_started(&mut self, _chunk_count: usize, recovered_chunk_count: usize) { + fn recovery_started(&mut self, _chunk_count: u64, recovered_chunk_count: u64) { assert_eq!(recovered_chunk_count, self.expected_recovered_chunks); } @@ -207,7 +168,7 @@ impl HandleRecoveryEvent for TestEventListener { #[test_casing(3, [5, 7, 8])] #[tokio::test] -async fn recovery_fault_tolerance(chunk_count: usize) { +async fn recovery_fault_tolerance(chunk_count: u64) { let pool = ConnectionPool::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await; @@ -251,9 +212,7 @@ async fn recovery_fault_tolerance(chunk_count: usize) { let recovery_options = RecoveryOptions { chunk_count, concurrency_limit: 1, - events: Box::new( - TestEventListener::new(usize::MAX, stop_sender).expect_recovered_chunks(3), - ), + events: Box::new(TestEventListener::new(u64::MAX, stop_sender).expect_recovered_chunks(3)), }; let tree = tree .recover(snapshot, recovery_options, &pool, &stop_receiver) @@ -291,7 +250,9 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { &merkle_tree_config, &OperationsManagerConfig { delay_interval: 50 }, ); - let mut calculator = MetadataCalculator::new(calculator_config, None).await; + let mut calculator = MetadataCalculator::new(calculator_config, None) + .await + .unwrap(); let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); calculator.delayer.delay_notifier = delay_sx; diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index da158ff11ef..c5c99db624c 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -14,11 +14,10 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_health_check::{CheckHealth, HealthStatus}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; use zksync_types::{ - block::{BlockGasCount, L1BatchHeader}, - proofs::PrepareBasicCircuitsJob, - AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageKey, StorageLog, - H256, + block::L1BatchHeader, AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, + StorageKey, StorageLog, H256, }; use zksync_utils::u32_to_h256; @@ -50,8 +49,9 @@ async fn genesis_creation() { run_calculator(calculator, pool.clone()).await; let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; - let GenericAsyncTree::Ready(tree) = &calculator.tree else { - panic!("Unexpected tree state: {:?}", calculator.tree); + let tree = 
calculator.create_tree().await.unwrap(); + let GenericAsyncTree::Ready(tree) = tree else { + panic!("Unexpected tree state: {tree:?}"); }; assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); } @@ -78,8 +78,9 @@ async fn basic_workflow() { assert!(merkle_paths.iter().all(|log| log.is_write)); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; - let GenericAsyncTree::Ready(tree) = &calculator.tree else { - panic!("Unexpected tree state: {:?}", calculator.tree); + let tree = calculator.create_tree().await.unwrap(); + let GenericAsyncTree::Ready(tree) = tree else { + panic!("Unexpected tree state: {tree:?}"); }; assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(2)); } @@ -282,14 +283,7 @@ async fn test_postgres_backup_recovery( // Re-insert the last batch without metadata immediately. storage .blocks_dal() - .insert_l1_batch( - batch_without_metadata, - &[], - BlockGasCount::default(), - &[], - &[], - 0, - ) + .insert_mock_l1_batch(batch_without_metadata) .await .unwrap(); insert_initial_writes_for_batch(&mut storage, batch_without_metadata.number).await; @@ -314,7 +308,7 @@ async fn test_postgres_backup_recovery( for batch_header in &removed_batches { let mut txn = storage.start_transaction().await.unwrap(); txn.blocks_dal() - .insert_l1_batch(batch_header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(batch_header) .await .unwrap(); insert_initial_writes_for_batch(&mut txn, batch_header.number).await; @@ -400,7 +394,9 @@ async fn setup_calculator_with_options( ) -> MetadataCalculator { let calculator_config = MetadataCalculatorConfig::for_main_node(merkle_tree_config, operation_config); - let metadata_calculator = MetadataCalculator::new(calculator_config, object_store).await; + let metadata_calculator = MetadataCalculator::new(calculator_config, object_store) + .await + .unwrap(); let mut storage = pool.access_storage().await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { @@ -456,6 +452,11 @@ pub(crate) async fn reset_db_state(pool: &ConnectionPool, num_batches: usize) { .delete_l1_batches(L1BatchNumber(0)) .await .unwrap(); + storage + .blocks_dal() + .delete_initial_writes(L1BatchNumber(0)) + .await + .unwrap(); storage .basic_witness_input_producer_dal() .delete_all_jobs() @@ -497,7 +498,7 @@ pub(super) async fn extend_db_state_from_l1_batch( storage .blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0) + .insert_mock_l1_batch(&header) .await .unwrap(); storage @@ -526,6 +527,7 @@ async fn insert_initial_writes_for_batch( .storage_logs_dal() .get_touched_slots_for_l1_batch(l1_batch_number) .await + .unwrap() .into_iter() .filter_map(|(key, value)| (!value.is_zero()).then_some(key)) .collect(); @@ -613,6 +615,11 @@ async fn remove_l1_batches( .delete_l1_batches(last_l1_batch_to_keep) .await .unwrap(); + storage + .blocks_dal() + .delete_initial_writes(last_l1_batch_to_keep) + .await + .unwrap(); batch_headers } @@ -631,7 +638,8 @@ async fn deduplication_works_as_expected() { let initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) - .await; + .await + .unwrap(); assert_eq!(initial_writes.len(), hashed_keys.len()); assert!(initial_writes .values() @@ -650,7 +658,8 @@ async fn deduplication_works_as_expected() { let initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) - .await; + .await + .unwrap(); assert_eq!(initial_writes.len(), hashed_keys.len()); assert!(initial_writes 
.values() @@ -659,7 +668,8 @@ async fn deduplication_works_as_expected() { let initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&new_hashed_keys) - .await; + .await + .unwrap(); assert_eq!(initial_writes.len(), new_hashed_keys.len()); assert!(initial_writes .values() @@ -675,7 +685,8 @@ async fn deduplication_works_as_expected() { let initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&no_op_hashed_keys) - .await; + .await + .unwrap(); assert!(initial_writes.is_empty()); let updated_logs: Vec<_> = no_op_logs @@ -692,7 +703,8 @@ async fn deduplication_works_as_expected() { let initial_writes = storage .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&no_op_hashed_keys) - .await; + .await + .unwrap(); assert_eq!(initial_writes.len(), no_op_hashed_keys.len() / 2); for key in no_op_hashed_keys.iter().step_by(2) { assert_eq!(initial_writes[key].0, L1BatchNumber(4)); diff --git a/core/lib/zksync_core/src/metrics.rs b/core/lib/zksync_core/src/metrics.rs index 2c1559aae27..56e8223b893 100644 --- a/core/lib/zksync_core/src/metrics.rs +++ b/core/lib/zksync_core/src/metrics.rs @@ -18,6 +18,7 @@ pub(crate) enum InitStage { EthTxManager, Tree, BasicWitnessInputProducer, + Consensus, } impl fmt::Display for InitStage { @@ -32,6 +33,7 @@ impl fmt::Display for InitStage { Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), Self::BasicWitnessInputProducer => formatter.write_str("basic_witness_input_producer"), + Self::Consensus => formatter.write_str("consensus"), } } } diff --git a/core/lib/zksync_core/src/proof_data_handler/mod.rs b/core/lib/zksync_core/src/proof_data_handler/mod.rs index 7a5b8bc69b3..56a48e18cd6 100644 --- a/core/lib/zksync_core/src/proof_data_handler/mod.rs +++ b/core/lib/zksync_core/src/proof_data_handler/mod.rs @@ -9,9 +9,9 @@ use zksync_config::{ }; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; +use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_types::{ protocol_version::{L1VerifierConfig, VerifierParams}, - prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}, H256, }; diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index bc9873d99ed..91b2f4124a0 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -11,13 +11,13 @@ use zksync_config::configs::{ }; use zksync_dal::{ConnectionPool, SqlxError}; use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_prover_interface::api::{ + ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, + SubmitProofRequest, SubmitProofResponse, +}; use zksync_types::{ commitment::serialize_commitments, protocol_version::{FriProtocolVersionId, L1VerifierConfig}, - prover_server_api::{ - ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, - SubmitProofRequest, SubmitProofResponse, - }, web3::signing::keccak256, L1BatchNumber, H256, }; diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index c399ed4c488..f645bfb96db 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -1,4 +1,4 @@ -use std::{fmt, future::Future, time::Duration}; +use std::{fmt, 
time::Duration};

 use anyhow::Context as _;
 use async_trait::async_trait;
@@ -15,7 +15,7 @@ use zksync_web3_decl::{

 use crate::{
     metrics::{CheckerComponent, EN_METRICS},
-    utils::wait_for_l1_batch_with_metadata,
+    utils::{binary_search_with, wait_for_l1_batch_with_metadata},
 };

 #[cfg(test)]
@@ -32,6 +32,13 @@ enum HashMatchError {
         Using an earlier snapshot could help."
     )]
     EarliestHashMismatch(L1BatchNumber),
+    #[error(
+        "Unrecoverable error: the earliest L1 batch #{0} in the local DB \
+         is truncated on the main node. Make sure you're connected to the right network; \
+         if you've recovered from a snapshot, re-check snapshot authenticity. \
+         Using an earlier snapshot could help."
+    )]
+    EarliestL1BatchTruncated(L1BatchNumber),
     #[error("Internal error")]
     Internal(#[from] anyhow::Error),
 }
@@ -48,6 +55,10 @@ fn is_transient_err(err: &RpcError) -> bool {

 #[async_trait]
 trait MainNodeClient: fmt::Debug + Send + Sync {
+    async fn sealed_miniblock_number(&self) -> Result<MiniblockNumber, RpcError>;
+
+    async fn sealed_l1_batch_number(&self) -> Result<L1BatchNumber, RpcError>;
+
     async fn miniblock_hash(&self, number: MiniblockNumber) -> Result<Option<H256>, RpcError>;

     async fn l1_batch_root_hash(&self, number: L1BatchNumber) -> Result<Option<H256>, RpcError>;
@@ -55,6 +66,18 @@ trait MainNodeClient: fmt::Debug + Send + Sync {

 #[async_trait]
 impl MainNodeClient for HttpClient {
+    async fn sealed_miniblock_number(&self) -> Result<MiniblockNumber, RpcError> {
+        let number = self.get_block_number().await?;
+        let number = u32::try_from(number).map_err(|err| RpcError::Custom(err.to_owned()))?;
+        Ok(MiniblockNumber(number))
+    }
+
+    async fn sealed_l1_batch_number(&self) -> Result<L1BatchNumber, RpcError> {
+        let number = self.get_l1_batch_number().await?;
+        let number = u32::try_from(number).map_err(|err| RpcError::Custom(err.to_owned()))?;
+        Ok(L1BatchNumber(number))
+    }
+
     async fn miniblock_hash(&self, number: MiniblockNumber) -> Result<Option<H256>, RpcError> {
         Ok(self
             .get_block_by_number(number.0.into(), false)
@@ -85,10 +108,38 @@ impl UpdateCorrectBlock for () {
         last_correct_miniblock: MiniblockNumber,
         last_correct_l1_batch: L1BatchNumber,
     ) {
-        EN_METRICS.last_correct_batch[&CheckerComponent::ReorgDetector]
-            .set(last_correct_miniblock.0.into());
-        EN_METRICS.last_correct_miniblock[&CheckerComponent::ReorgDetector]
-            .set(last_correct_l1_batch.0.into());
+        let last_correct_miniblock = last_correct_miniblock.0.into();
+        let prev_checked_miniblock = EN_METRICS.last_correct_miniblock
+            [&CheckerComponent::ReorgDetector]
+            .set(last_correct_miniblock);
+        if prev_checked_miniblock != last_correct_miniblock {
+            tracing::debug!("No reorg at miniblock #{last_correct_miniblock}");
+        }
+
+        let last_correct_l1_batch = last_correct_l1_batch.0.into();
+        let prev_checked_l1_batch = EN_METRICS.last_correct_batch[&CheckerComponent::ReorgDetector]
+            .set(last_correct_l1_batch);
+        if prev_checked_l1_batch != last_correct_l1_batch {
+            tracing::debug!("No reorg at L1 batch #{last_correct_l1_batch}");
+        }
+    }
+}
+
+/// Output of hash match methods in [`ReorgDetector`].
+#[derive(Debug)]
+enum MatchOutput {
+    Match,
+    Mismatch,
+    NoRemoteReference,
+}
+
+impl MatchOutput {
+    fn new(is_match: bool) -> Self {
+        if is_match {
+            Self::Match
+        } else {
+            Self::Mismatch
+        }
+    }
 }
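The rewritten `update_correct_block` above relies on the gauge's `set()` returning the previous value, so a "No reorg at ..." line is logged only when the checked block actually advances instead of on every detector tick. A minimal standalone sketch of that log-deduplication pattern, with a plain `AtomicU64` standing in for the `vise` gauge (all names here are illustrative, not from the codebase):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Stand-in for the metrics gauge; like the gauge's `set()` in the diff above,
// `swap` returns the previously stored value.
static LAST_CORRECT_BLOCK: AtomicU64 = AtomicU64::new(0);

/// Records a new "last correct block" and logs only when the value changes,
/// so an idle detector does not emit identical lines every iteration.
fn update_correct_block(new_value: u64) {
    let prev = LAST_CORRECT_BLOCK.swap(new_value, Ordering::Relaxed);
    if prev != new_value {
        println!("No reorg at block #{new_value}");
    }
}

fn main() {
    update_correct_block(1); // logs
    update_correct_block(1); // silent: value unchanged
    update_correct_block(2); // logs
}
```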
@@ -135,7 +186,7 @@ impl ReorgDetector {
     async fn miniblock_hashes_match(
         &self,
         miniblock_number: MiniblockNumber,
-    ) -> Result<bool, HashMatchError> {
+    ) -> Result<MatchOutput, HashMatchError> {
         let mut storage = self.pool.access_storage().await?;
         let local_hash = storage
             .blocks_dal()
@@ -151,7 +202,7 @@ impl ReorgDetector {
             // Due to reorg, locally we may be ahead of the main node.
             // Lack of the hash on the main node is treated as a hash match,
             // We need to wait for our knowledge of main node to catch up.
-            return Ok(true);
+            return Ok(MatchOutput::NoRemoteReference);
         };

         if remote_hash != local_hash {
@@ -160,14 +211,39 @@ impl ReorgDetector {
                 main node {remote_hash:?} (miniblock #{miniblock_number})"
             );
         }
-        Ok(remote_hash == local_hash)
+        Ok(MatchOutput::new(remote_hash == local_hash))
+    }
+
+    /// Checks hash correspondence for the latest miniblock sealed both locally and on the main node.
+    async fn check_sealed_miniblock_hash(
+        &self,
+        sealed_miniblock_number: MiniblockNumber,
+    ) -> Result<(MiniblockNumber, bool), HashMatchError> {
+        let mut main_node_sealed_miniblock_number = sealed_miniblock_number;
+        loop {
+            let checked_number = sealed_miniblock_number.min(main_node_sealed_miniblock_number);
+            match self.miniblock_hashes_match(checked_number).await? {
+                MatchOutput::Match => break Ok((checked_number, true)),
+                MatchOutput::Mismatch => break Ok((checked_number, false)),
+                MatchOutput::NoRemoteReference => {
+                    tracing::info!(
+                        "Main node has no miniblock #{checked_number}; will check last miniblock on the main node"
+                    );
+                    main_node_sealed_miniblock_number =
+                        self.client.sealed_miniblock_number().await?;
+                    tracing::debug!(
+                        "Fetched last miniblock on the main node: #{main_node_sealed_miniblock_number}"
+                    );
+                }
+            }
+        }
     }

     /// Compares root hashes of the latest local batch and of the same batch from the main node.
     async fn root_hashes_match(
         &self,
         l1_batch_number: L1BatchNumber,
-    ) -> Result<bool, HashMatchError> {
+    ) -> Result<MatchOutput, HashMatchError> {
         let mut storage = self.pool.access_storage().await?;
         let local_hash = storage
             .blocks_dal()
@@ -182,7 +258,7 @@ impl ReorgDetector {
             // Due to reorg, locally we may be ahead of the main node.
             // Lack of the root hash on the main node is treated as a hash match,
             // We need to wait for our knowledge of main node to catch up.
-            return Ok(true);
+            return Ok(MatchOutput::NoRemoteReference);
         };

         if remote_hash != local_hash {
@@ -191,7 +267,37 @@ impl ReorgDetector {
                 main node {remote_hash:?} (L1 batch #{l1_batch_number})"
             );
         }
-        Ok(remote_hash == local_hash)
+        Ok(MatchOutput::new(remote_hash == local_hash))
+    }
+
+    /// Checks hash correspondence for the latest L1 batch sealed and having metadata both locally and on the main node.
+    async fn check_sealed_l1_batch_root_hash(
+        &self,
+        sealed_l1_batch_number: L1BatchNumber,
+    ) -> Result<(L1BatchNumber, bool), HashMatchError> {
+        let mut main_node_sealed_l1_batch_number = sealed_l1_batch_number;
+        loop {
+            let checked_number = sealed_l1_batch_number.min(main_node_sealed_l1_batch_number);
+            match self.root_hashes_match(checked_number).await? {
+                MatchOutput::Match => break Ok((checked_number, true)),
+                MatchOutput::Mismatch => break Ok((checked_number, false)),
+                MatchOutput::NoRemoteReference => {
+                    tracing::info!(
+                        "Main node has no L1 batch #{checked_number}; will check last L1 batch on the main node"
+                    );
+                    let fetched_number = self.client.sealed_l1_batch_number().await?;
+                    tracing::debug!("Fetched last L1 batch on the main node: #{fetched_number}");
+                    let number_changed = fetched_number != main_node_sealed_l1_batch_number;
+                    main_node_sealed_l1_batch_number = fetched_number;
+
+                    if !number_changed {
+                        // May happen if the main node has an L1 batch, but its state root hash is not computed yet.
+                        tracing::debug!("Last L1 batch number on the main node has not changed; waiting until its state hash is computed");
+                        tokio::time::sleep(self.sleep_interval / 10).await;
+                    }
+                }
+            }
+        }
     }
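Both `check_sealed_*` methods above implement the same catch-up loop: clamp the checked block to whichever of the local and main-node heads is lower, and re-fetch the main-node head whenever it has no reference for the clamped block. A self-contained toy version of that loop (the `ToyRemote` type and all numbers are invented for illustration):

```rust
/// Toy stand-in for the main node: `hashes[i]` is the hash of block `i`,
/// and blocks past `sealed` are not available remotely yet.
struct ToyRemote {
    sealed: u64,
    hashes: Vec<u64>,
}

impl ToyRemote {
    fn hash(&self, number: u64) -> Option<u64> {
        (number <= self.sealed).then(|| self.hashes[number as usize])
    }
}

/// Mirrors the loop above: clamp the checked block to the remote head and
/// re-fetch the head whenever the remote has no reference for that block.
fn check_sealed_block(local_sealed: u64, local_hashes: &[u64], remote: &ToyRemote) -> (u64, bool) {
    let mut remote_sealed = local_sealed; // optimistic first guess
    loop {
        let checked = local_sealed.min(remote_sealed);
        match remote.hash(checked) {
            Some(hash) => return (checked, hash == local_hashes[checked as usize]),
            None => remote_sealed = remote.sealed, // local node is ahead; catch up
        }
    }
}

fn main() {
    // The local chain is ahead (sealed block 5); the remote sealed only up to 3,
    // so the comparison happens at block 3, the latest block both sides know.
    let remote = ToyRemote { sealed: 3, hashes: vec![10, 11, 12, 13] };
    let local_hashes = [10, 11, 12, 13, 14, 15];
    assert_eq!(check_sealed_block(5, &local_hashes, &remote), (3, true));
}
```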
+ tracing::debug!("Last L1 batch number on the main node has not changed; waiting until its state hash is computed"); + tokio::time::sleep(self.sleep_interval / 10).await; + } + } + } + } } /// Localizes a re-org: performs binary search to determine the last non-diverged block. @@ -202,9 +308,16 @@ impl ReorgDetector { ) -> Result { // TODO (BFT-176, BFT-181): We have to look through the whole history, since batch status updater may mark // a block as executed even if the state diverges for it. - binary_search_with(known_valid_l1_batch.0, diverged_l1_batch.0, |number| { - self.root_hashes_match(L1BatchNumber(number)) - }) + binary_search_with( + known_valid_l1_batch.0, + diverged_l1_batch.0, + |number| async move { + Ok(match self.root_hashes_match(L1BatchNumber(number)).await? { + MatchOutput::Match | MatchOutput::NoRemoteReference => true, + MatchOutput::Mismatch => false, + }) + }, + ) .await .map(L1BatchNumber) } @@ -238,10 +351,18 @@ impl ReorgDetector { tracing::debug!( "Checking root hash match for earliest L1 batch #{earliest_l1_batch_number}" ); - if !self.root_hashes_match(earliest_l1_batch_number).await? { - return Err(HashMatchError::EarliestHashMismatch( - earliest_l1_batch_number, - )); + match self.root_hashes_match(earliest_l1_batch_number).await? { + MatchOutput::Match => { /* we're good */ } + MatchOutput::Mismatch => { + return Err(HashMatchError::EarliestHashMismatch( + earliest_l1_batch_number, + )) + } + MatchOutput::NoRemoteReference => { + return Err(HashMatchError::EarliestL1BatchTruncated( + earliest_l1_batch_number, + )) + } } loop { @@ -266,9 +387,12 @@ impl ReorgDetector { miniblock number #{sealed_miniblock_number}" ); - let root_hashes_match = self.root_hashes_match(sealed_l1_batch_number).await?; - let miniblock_hashes_match = - self.miniblock_hashes_match(sealed_miniblock_number).await?; + let (checked_l1_batch_number, root_hashes_match) = self + .check_sealed_l1_batch_root_hash(sealed_l1_batch_number) + .await?; + let (checked_miniblock_number, miniblock_hashes_match) = self + .check_sealed_miniblock_hash(sealed_miniblock_number) + .await?; // The only event that triggers re-org detection and node rollback is if the // hash mismatch at the same block height is detected, be it miniblocks or batches. @@ -278,12 +402,12 @@ impl ReorgDetector { // a re-org taking place. if root_hashes_match && miniblock_hashes_match { self.block_updater - .update_correct_block(sealed_miniblock_number, sealed_l1_batch_number); + .update_correct_block(checked_miniblock_number, checked_l1_batch_number); } else { let diverged_l1_batch_number = if root_hashes_match { - sealed_l1_batch_number + 1 // Non-sealed L1 batch has diverged + checked_l1_batch_number + 1 // Non-sealed L1 batch has diverged } else { - sealed_l1_batch_number + checked_l1_batch_number }; tracing::info!("Searching for the first diverged L1 batch"); @@ -304,21 +428,3 @@ impl ReorgDetector { } } } - -async fn binary_search_with(mut left: u32, mut right: u32, mut f: F) -> Result -where - F: FnMut(u32) -> Fut, - Fut: Future>, -{ - while left + 1 < right { - let middle = (left + right) / 2; - assert!(middle < right); // middle <= (right - 2 + right) / 2 = right - 1 - - if f(middle).await? 
diff --git a/core/lib/zksync_core/src/reorg_detector/tests.rs b/core/lib/zksync_core/src/reorg_detector/tests.rs
index f9495286b1f..e9996b01139 100644
--- a/core/lib/zksync_core/src/reorg_detector/tests.rs
+++ b/core/lib/zksync_core/src/reorg_detector/tests.rs
@@ -10,7 +10,7 @@ use test_casing::{test_casing, Product};
 use tokio::sync::mpsc;
 use zksync_dal::StorageProcessor;
 use zksync_types::{
-    block::{BlockGasCount, MiniblockHeader},
+    block::{MiniblockHasher, MiniblockHeader},
     L2ChainId, ProtocolVersion,
 };

@@ -36,7 +36,7 @@ async fn seal_l1_batch(storage: &mut StorageProcessor<'_>, number: u32, hash: H256) {
     let header = create_l1_batch(number);
     storage
         .blocks_dal()
-        .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0)
+        .insert_mock_l1_batch(&header)
         .await
         .unwrap();
     storage
@@ -61,8 +61,6 @@ async fn binary_search_with_simple_predicate() {
     }
 }

-type ResponsesMap<K> = HashMap<K, H256>;
-
 #[derive(Debug, Clone, Copy)]
 enum RpcErrorKind {
     Transient,
@@ -80,13 +78,33 @@ impl From<RpcErrorKind> for RpcError {

 #[derive(Debug, Default)]
 struct MockMainNodeClient {
-    miniblock_hash_responses: ResponsesMap<MiniblockNumber>,
-    l1_batch_root_hash_responses: ResponsesMap<L1BatchNumber>,
+    latest_miniblock_response: Option<MiniblockNumber>,
+    latest_l1_batch_response: Option<L1BatchNumber>,
+    miniblock_hash_responses: HashMap<MiniblockNumber, H256>,
+    l1_batch_root_hash_responses: HashMap<L1BatchNumber, H256>,
     error_kind: Arc<Mutex<Option<RpcErrorKind>>>,
 }

 #[async_trait]
 impl MainNodeClient for MockMainNodeClient {
+    async fn sealed_miniblock_number(&self) -> Result<MiniblockNumber, RpcError> {
+        if let &Some(error_kind) = &*self.error_kind.lock().unwrap() {
+            return Err(error_kind.into());
+        }
+        Ok(self
+            .latest_miniblock_response
+            .expect("unexpected `sealed_miniblock_number` request"))
+    }
+
+    async fn sealed_l1_batch_number(&self) -> Result<L1BatchNumber, RpcError> {
+        if let &Some(error_kind) = &*self.error_kind.lock().unwrap() {
+            return Err(error_kind.into());
+        }
+        Ok(self
+            .latest_l1_batch_response
+            .expect("unexpected `sealed_l1_batch_number` request"))
+    }
+
     async fn miniblock_hash(&self, number: MiniblockNumber) -> Result<Option<H256>, RpcError> {
         if let &Some(error_kind) = &*self.error_kind.lock().unwrap() {
             return Err(error_kind.into());
@@ -128,18 +146,26 @@ impl UpdateCorrectBlock for mpsc::UnboundedSender<(MiniblockNumber, L1BatchNumber)> {
 async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: bool) {
     let pool = ConnectionPool::test_pool().await;
     let mut storage = pool.access_storage().await.unwrap();
+    let mut client = MockMainNodeClient::default();
     if snapshot_recovery {
         storage
             .protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
     } else {
-        ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock())
-            .await
-            .unwrap();
+        let genesis_root_hash =
ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + let mut client = MockMainNodeClient::default(); + client.miniblock_hash_responses.insert( + MiniblockNumber(0), + MiniblockHasher::legacy_hash(MiniblockNumber(0)), + ); + client + .l1_batch_root_hash_responses + .insert(L1BatchNumber(0), genesis_root_hash); let (_stop_sender, stop_receiver) = watch::channel(false); - let mut client = MockMainNodeClient::default(); let miniblock_hash = H256::from_low_u64_be(23); client .miniblock_hash_responses @@ -274,12 +308,20 @@ async fn reorg_is_detected_on_batch_hash_mismatch() { async fn reorg_is_detected_on_miniblock_hash_mismatch() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); + let mut client = MockMainNodeClient::default(); + let genesis_root_hash = + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + client.miniblock_hash_responses.insert( + MiniblockNumber(0), + MiniblockHasher::legacy_hash(MiniblockNumber(0)), + ); + client + .l1_batch_root_hash_responses + .insert(L1BatchNumber(0), genesis_root_hash); let (_stop_sender, stop_receiver) = watch::channel(false); - let mut client = MockMainNodeClient::default(); let miniblock_hash = H256::from_low_u64_be(23); client .miniblock_hash_responses @@ -352,6 +394,13 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( seal_l1_batch(&mut storage, earliest_l1_batch_number, H256::zero()).await; let mut client = MockMainNodeClient::default(); + client + .miniblock_hash_responses + .insert(MiniblockNumber(earliest_l1_batch_number), H256::zero()); + client + .l1_batch_root_hash_responses + .insert(L1BatchNumber(earliest_l1_batch_number), H256::zero()); + let miniblock_and_l1_batch_hashes = l1_batch_numbers.clone().map(|number| { let mut miniblock_hash = H256::from_low_u64_be(number.into()); client @@ -494,3 +543,54 @@ async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery( let err = detector.run_inner().await.unwrap_err(); assert_matches!(err, HashMatchError::EarliestHashMismatch(L1BatchNumber(3))); } + +#[tokio::test] +async fn reorg_is_detected_without_waiting_for_main_node_to_catch_up() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let genesis_root_hash = + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + // Fill in local storage with some data, so that it's ahead of the main node. 
+    for number in 1..5 {
+        store_miniblock(&mut storage, number, H256::zero()).await;
+        seal_l1_batch(&mut storage, number, H256::zero()).await;
+    }
+    drop(storage);
+
+    let mut client = MockMainNodeClient::default();
+    client
+        .l1_batch_root_hash_responses
+        .insert(L1BatchNumber(0), genesis_root_hash);
+    for number in 1..3 {
+        client
+            .miniblock_hash_responses
+            .insert(MiniblockNumber(number), H256::zero());
+        client
+            .l1_batch_root_hash_responses
+            .insert(L1BatchNumber(number), H256::zero());
+    }
+    client
+        .miniblock_hash_responses
+        .insert(MiniblockNumber(3), H256::zero());
+    client
+        .l1_batch_root_hash_responses
+        .insert(L1BatchNumber(3), H256::repeat_byte(0xff));
+    client.latest_l1_batch_response = Some(L1BatchNumber(3));
+    client.latest_miniblock_response = Some(MiniblockNumber(3));
+
+    let (_stop_sender, stop_receiver) = watch::channel(false);
+    let detector = ReorgDetector {
+        client: Box::new(client),
+        block_updater: Box::new(()),
+        pool,
+        stop_receiver,
+        sleep_interval: Duration::from_millis(10),
+    };
+    let detector_task = tokio::spawn(detector.run());
+
+    let task_result = detector_task.await.unwrap();
+    let last_correct_l1_batch = task_result.unwrap();
+    assert_eq!(last_correct_l1_batch, Some(L1BatchNumber(2)));
+}
diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs
index 16e1e83f677..65c71846478 100644
--- a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs
@@ -12,7 +12,7 @@ use multivm::{
 };
 use once_cell::sync::OnceCell;
 use tokio::{
-    sync::{mpsc, oneshot},
+    sync::{mpsc, oneshot, watch},
     task::JoinHandle,
 };
 use zksync_dal::ConnectionPool;
@@ -37,8 +37,8 @@ pub(crate) enum TxExecutionResult {
     /// Successful execution of the tx and the block tip dry run.
     Success {
         tx_result: Box<VmExecutionResultAndLogs>,
-        tx_metrics: ExecutionMetricsForCriteria,
-        bootloader_dry_run_metrics: ExecutionMetricsForCriteria,
+        tx_metrics: Box<ExecutionMetricsForCriteria>,
+        bootloader_dry_run_metrics: Box<ExecutionMetricsForCriteria>,
         bootloader_dry_run_result: Box<VmExecutionResultAndLogs>,
         compressed_bytecodes: Vec<CompressedBytecodeInfo>,
         call_tracer_result: Vec<Call>,
@@ -75,7 +75,8 @@ pub trait L1BatchExecutorBuilder: 'static + Send + Sync + fmt::Debug {
         &mut self,
         l1_batch_params: L1BatchEnv,
         system_env: SystemEnv,
-    ) -> BatchExecutorHandle;
+        stop_receiver: &watch::Receiver<bool>,
+    ) -> Option<BatchExecutorHandle>;
 }

 /// The default implementation of [`L1BatchExecutorBuilder`].
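The `Success` variant above now boxes both metrics fields, presumably to keep `TxExecutionResult` itself small: every enum variant pays for the largest one, and this value is moved through channels on each transaction. A quick standalone check of that effect, using an invented `BigMetrics` stand-in rather than the real `ExecutionMetricsForCriteria`:

```rust
/// Invented stand-in for a bulky metrics struct; the exact layout is irrelevant.
#[allow(dead_code)]
struct BigMetrics {
    data: [u64; 32],
}

#[allow(dead_code)]
enum InlineResult {
    Success { metrics: BigMetrics },
    Rejected,
}

#[allow(dead_code)]
enum BoxedResult {
    Success { metrics: Box<BigMetrics> },
    Rejected,
}

fn main() {
    // The inline enum is at least as large as its biggest variant; the boxed
    // one is roughly pointer-sized, so cheap variants stay cheap to move.
    println!("inline: {} bytes", std::mem::size_of::<InlineResult>());
    println!("boxed:  {} bytes", std::mem::size_of::<BoxedResult>());
    assert!(std::mem::size_of::<BoxedResult>() < std::mem::size_of::<InlineResult>());
}
```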
@@ -119,18 +120,23 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { &mut self, l1_batch_params: L1BatchEnv, system_env: SystemEnv, - ) -> BatchExecutorHandle { - let mut secondary_storage = RocksdbStorage::new(self.state_keeper_db_path.as_ref()); + stop_receiver: &watch::Receiver, + ) -> Option { + let mut secondary_storage = RocksdbStorage::builder(self.state_keeper_db_path.as_ref()) + .await + .expect("Failed initializing state keeper storage"); secondary_storage.enable_enum_index_migration(self.enum_index_migration_chunk_size); let mut conn = self .pool .access_storage_tagged("state_keeper") .await .unwrap(); - secondary_storage.update_from_postgres(&mut conn).await; - drop(conn); + let secondary_storage = secondary_storage + .synchronize(&mut conn, stop_receiver) + .await + .expect("Failed synchronizing secondary state keeper storage")?; - BatchExecutorHandle::new( + Some(BatchExecutorHandle::new( self.save_call_traces, self.max_allowed_tx_gas_limit, secondary_storage, @@ -138,7 +144,7 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { system_env, self.upload_witness_inputs_to_gcs, self.optional_bytecode_compression, - ) + )) } } @@ -209,7 +215,7 @@ impl BatchExecutorHandle { let res = response_receiver.await.unwrap(); let elapsed = latency.observe(); - if let TxExecutionResult::Success { tx_metrics, .. } = res { + if let TxExecutionResult::Success { tx_metrics, .. } = &res { let gas_per_nanosecond = tx_metrics.execution_metrics.computational_gas_used as f64 / elapsed.as_nanos() as f64; EXECUTOR_METRICS @@ -389,12 +395,16 @@ impl BatchExecutor { let tx_metrics = ExecutionMetricsForCriteria::new(Some(tx), &tx_result); + if !vm.has_enough_gas_for_batch_tip() { + return TxExecutionResult::BootloaderOutOfGasForBlockTip; + } + let (bootloader_dry_run_result, bootloader_dry_run_metrics) = self.dryrun_block_tip(vm); match &bootloader_dry_run_result.result { ExecutionResult::Success { .. } => TxExecutionResult::Success { tx_result: Box::new(tx_result), - tx_metrics, - bootloader_dry_run_metrics, + tx_metrics: Box::new(tx_metrics), + bootloader_dry_run_metrics: Box::new(bootloader_dry_run_metrics), bootloader_dry_run_result: Box::new(bootloader_dry_run_result), compressed_bytecodes, call_tracer_result, diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index 413a12bdf2e..dc7f9d0d979 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -6,10 +6,10 @@ use multivm::{ vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use tempfile::TempDir; +use tokio::sync::watch; use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_dal::ConnectionPool; -use zksync_state::RocksdbStorage; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ ethabi::Token, fee::Fee, system_contracts::get_system_smart_contracts, @@ -24,6 +24,7 @@ use crate::{ state_keeper::{ batch_executor::BatchExecutorHandle, tests::{default_l1_batch_env, default_system_env, BASE_SYSTEM_CONTRACTS}, + L1BatchExecutorBuilder, MainBatchExecutorBuilder, }, }; @@ -86,34 +87,27 @@ impl Tester { /// Creates a batch executor instance. /// This function intentionally uses sensible defaults to not introduce boilerplate. 
pub(super) async fn create_batch_executor(&self) -> BatchExecutorHandle { + let mut builder = MainBatchExecutorBuilder::new( + self.db_dir.path().to_str().unwrap().to_owned(), + self.pool.clone(), + self.config.max_allowed_tx_gas_limit.into(), + self.config.save_call_traces, + self.config.upload_witness_inputs_to_gcs, + 100, + false, + ); + // Not really important for the batch executor - it operates over a single batch. - let (l1_batch, system_env) = self.batch_params( + let (l1_batch_env, system_env) = self.batch_params( L1BatchNumber(1), 100, self.config.validation_computational_gas_limit, ); - - let mut secondary_storage = RocksdbStorage::new(self.db_dir.path()); - let mut conn = self - .pool - .access_storage_tagged("state_keeper") + let (_stop_sender, stop_receiver) = watch::channel(false); + builder + .init_batch(l1_batch_env, system_env, &stop_receiver) .await - .unwrap(); - - secondary_storage.update_from_postgres(&mut conn).await; - drop(conn); - - // We don't use the builder because it would require us to clone the `ConnectionPool`, which is forbidden - // for the test pool (see the doc-comment on `TestPool` for details). - BatchExecutorHandle::new( - self.config.save_call_traces, - self.config.max_allowed_tx_gas_limit.into(), - secondary_storage, - l1_batch, - system_env, - self.config.upload_witness_inputs_to_gcs, - false, - ) + .expect("Batch executor was interrupted") } /// Creates test batch params that can be fed into the VM. diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs new file mode 100644 index 00000000000..aa1256a70ff --- /dev/null +++ b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs @@ -0,0 +1,340 @@ +//! Temporary module for migrating fee addresses from L1 batches to miniblocks. + +// FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration + +use std::time::{Duration, Instant}; + +use anyhow::Context as _; +use tokio::sync::watch; +use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_types::MiniblockNumber; + +/// Runs the migration for pending miniblocks. +pub(crate) async fn migrate_pending_miniblocks(storage: &mut StorageProcessor<'_>) { + let started_at = Instant::now(); + tracing::info!("Started migrating `fee_account_address` for pending miniblocks"); + + #[allow(deprecated)] + let l1_batches_have_fee_account_address = storage + .blocks_dal() + .check_l1_batches_have_fee_account_address() + .await + .expect("Failed getting metadata for l1_batches table"); + if !l1_batches_have_fee_account_address { + tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete"); + return; + } + + #[allow(deprecated)] + let rows_affected = storage + .blocks_dal() + .copy_fee_account_address_for_pending_miniblocks() + .await + .expect("Failed migrating `fee_account_address` for pending miniblocks"); + let elapsed = started_at.elapsed(); + tracing::info!("Migrated `fee_account_address` for {rows_affected} miniblocks in {elapsed:?}"); +} + +/// Runs the migration for non-pending miniblocks. Should be run as a background task. 
+pub(crate) async fn migrate_miniblocks(
+    pool: ConnectionPool,
+    last_miniblock: MiniblockNumber,
+    stop_receiver: watch::Receiver<bool>,
+) -> anyhow::Result<()> {
+    let MigrationOutput {
+        miniblocks_affected,
+    } = migrate_miniblocks_inner(
+        pool,
+        last_miniblock,
+        100_000,
+        Duration::from_secs(1),
+        stop_receiver,
+    )
+    .await?;
+
+    tracing::info!("Finished fee address migration with {miniblocks_affected} affected miniblocks");
+    Ok(())
+}
+
+#[derive(Debug, Default)]
+struct MigrationOutput {
+    miniblocks_affected: u64,
+}
+
+/// It's important for the `chunk_size` to be a constant; this ensures that each chunk is migrated atomically.
+async fn migrate_miniblocks_inner(
+    pool: ConnectionPool,
+    last_miniblock: MiniblockNumber,
+    chunk_size: u32,
+    sleep_interval: Duration,
+    stop_receiver: watch::Receiver<bool>,
+) -> anyhow::Result<MigrationOutput> {
+    anyhow::ensure!(chunk_size > 0, "Chunk size must be positive");
+
+    let mut storage = pool.access_storage().await?;
+    #[allow(deprecated)]
+    let l1_batches_have_fee_account_address = storage
+        .blocks_dal()
+        .check_l1_batches_have_fee_account_address()
+        .await
+        .expect("Failed getting metadata for l1_batches table");
+    drop(storage);
+    if !l1_batches_have_fee_account_address {
+        tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete");
+        return Ok(MigrationOutput::default());
+    }
+
+    let mut chunk_start = MiniblockNumber(0);
+    let mut miniblocks_affected = 0;
+
+    tracing::info!(
+        "Migrating `fee_account_address` for miniblocks {chunk_start}..={last_miniblock} \
+         in chunks of {chunk_size} miniblocks"
+    );
+    while chunk_start <= last_miniblock {
+        let chunk_end = last_miniblock.min(chunk_start + chunk_size - 1);
+        let chunk = chunk_start..=chunk_end;
+
+        let mut storage = pool.access_storage().await?;
+        let is_chunk_migrated = is_fee_address_migrated(&mut storage, chunk_start).await?;
+
+        if is_chunk_migrated {
+            tracing::debug!("`fee_account_address` is migrated for chunk {chunk:?}");
+        } else {
+            tracing::debug!("Migrating `fee_account_address` for miniblocks chunk {chunk:?}");
+
+            #[allow(deprecated)]
+            let rows_affected = storage
+                .blocks_dal()
+                .copy_fee_account_address_for_miniblocks(chunk.clone())
+                .await
+                .with_context(|| format!("Failed migrating miniblocks chunk {chunk:?}"))?;
+            tracing::debug!("Migrated {rows_affected} miniblocks in chunk {chunk:?}");
+            miniblocks_affected += rows_affected;
+        }
+        drop(storage);
+
+        if *stop_receiver.borrow() {
+            tracing::info!("Stop signal received; fee address migration shutting down");
+            return Ok(MigrationOutput {
+                miniblocks_affected,
+            });
+        }
+        chunk_start = chunk_end + 1;
+
+        if !is_chunk_migrated {
+            tokio::time::sleep(sleep_interval).await;
+        }
+    }
+
+    Ok(MigrationOutput {
+        miniblocks_affected,
+    })
+}
+
+#[allow(deprecated)]
+async fn is_fee_address_migrated(
+    storage: &mut StorageProcessor<'_>,
+    miniblock: MiniblockNumber,
+) -> anyhow::Result<bool> {
+    storage
+        .blocks_dal()
+        .is_fee_address_migrated(miniblock)
+        .await
+        .with_context(|| format!("Failed getting fee address for miniblock #{miniblock}"))?
+        .with_context(|| format!("Miniblock #{miniblock} disappeared"))
+}
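The inclusive chunking in `migrate_miniblocks_inner` (`chunk_end = last_miniblock.min(chunk_start + chunk_size - 1)`) is the kind of arithmetic that is easy to get off by one. A tiny standalone check of the same logic, with plain `u32` in place of `MiniblockNumber` (the `chunks` helper is illustrative only):

```rust
/// Reproduces the chunk boundaries used by the migration loop above:
/// every block in `0..=last` lands in exactly one chunk of at most `chunk_size`.
fn chunks(last: u32, chunk_size: u32) -> Vec<(u32, u32)> {
    assert!(chunk_size > 0, "Chunk size must be positive");
    let mut out = Vec::new();
    let mut chunk_start = 0_u32;
    while chunk_start <= last {
        let chunk_end = last.min(chunk_start + chunk_size - 1);
        out.push((chunk_start, chunk_end));
        chunk_start = chunk_end + 1;
    }
    out
}

fn main() {
    // 5 miniblocks (0..=4) in chunks of 2: [0..=1, 2..=3, 4..=4].
    assert_eq!(chunks(4, 2), vec![(0, 1), (2, 3), (4, 4)]);
    // A chunk size larger than the range yields a single chunk.
    assert_eq!(chunks(4, 100), vec![(0, 4)]);
}
```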
+ .with_context(|| format!("Miniblock #{miniblock} disappeared")) +} + +#[cfg(test)] +mod tests { + use test_casing::test_casing; + use zksync_contracts::BaseSystemContractsHashes; + use zksync_types::{ + block::L1BatchHeader, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, + }; + + use super::*; + use crate::utils::testonly::create_miniblock; + + async fn prepare_storage(storage: &mut StorageProcessor<'_>) { + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + for number in 0..5 { + let miniblock = create_miniblock(number); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let l1_batch = L1BatchHeader::new( + L1BatchNumber(number), + number.into(), + BaseSystemContractsHashes::default(), + ProtocolVersionId::latest(), + ); + storage + .blocks_dal() + .insert_mock_l1_batch(&l1_batch) + .await + .unwrap(); + #[allow(deprecated)] + storage + .blocks_dal() + .set_l1_batch_fee_address( + l1_batch.number, + Address::from_low_u64_be(u64::from(number) + 1), + ) + .await + .unwrap(); + storage + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(l1_batch.number) + .await + .unwrap(); + } + } + + async fn assert_migration(storage: &mut StorageProcessor<'_>) { + for number in 0..5 { + assert!(is_fee_address_migrated(storage, MiniblockNumber(number)) + .await + .unwrap()); + + let fee_address = storage + .blocks_dal() + .get_fee_address_for_miniblock(MiniblockNumber(number)) + .await + .unwrap() + .expect("no fee address"); + let expected_address = Address::from_low_u64_be(u64::from(number) + 1); + assert_eq!(fee_address, expected_address); + } + } + + #[test_casing(3, [1, 2, 3])] + #[tokio::test] + async fn migration_basics(chunk_size: u32) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + prepare_storage(&mut storage).await; + drop(storage); + + let (_stop_sender, stop_receiver) = watch::channel(false); + let result = migrate_miniblocks_inner( + pool.clone(), + MiniblockNumber(4), + chunk_size, + Duration::ZERO, + stop_receiver.clone(), + ) + .await + .unwrap(); + + assert_eq!(result.miniblocks_affected, 5); + + // Check that all blocks are migrated. + let mut storage = pool.access_storage().await.unwrap(); + assert_migration(&mut storage).await; + drop(storage); + + // Check that migration can run again w/o returning an error, hanging up etc. + let result = migrate_miniblocks_inner( + pool.clone(), + MiniblockNumber(4), + chunk_size, + Duration::ZERO, + stop_receiver, + ) + .await + .unwrap(); + + assert_eq!(result.miniblocks_affected, 0); + } + + #[test_casing(3, [1, 2, 3])] + #[tokio::test] + async fn stopping_and_resuming_migration(chunk_size: u32) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + prepare_storage(&mut storage).await; + + let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away + let result = migrate_miniblocks_inner( + pool.clone(), + MiniblockNumber(4), + chunk_size, + Duration::from_secs(1_000), + stop_receiver, + ) + .await + .unwrap(); + + // Migration should stop after a single chunk. + assert_eq!(result.miniblocks_affected, u64::from(chunk_size)); + + // Check that migration resumes from the same point. 
+ let (_stop_sender, stop_receiver) = watch::channel(false); + let result = migrate_miniblocks_inner( + pool.clone(), + MiniblockNumber(4), + chunk_size, + Duration::ZERO, + stop_receiver, + ) + .await + .unwrap(); + + assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); + assert_migration(&mut storage).await; + } + + #[test_casing(3, [1, 2, 3])] + #[tokio::test] + async fn new_blocks_added_during_migration(chunk_size: u32) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + prepare_storage(&mut storage).await; + + let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away + let result = migrate_miniblocks_inner( + pool.clone(), + MiniblockNumber(4), + chunk_size, + Duration::from_secs(1_000), + stop_receiver, + ) + .await + .unwrap(); + + // Migration should stop after a single chunk. + assert_eq!(result.miniblocks_affected, u64::from(chunk_size)); + + // Insert a new miniblock to the storage with a defined fee account address. + let mut miniblock = create_miniblock(5); + miniblock.fee_account_address = Address::repeat_byte(1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + // Resume the migration. + let (_stop_sender, stop_receiver) = watch::channel(false); + let result = migrate_miniblocks_inner( + pool.clone(), + MiniblockNumber(5), + chunk_size, + Duration::ZERO, + stop_receiver, + ) + .await + .unwrap(); + + // The new miniblock should not be affected. + assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); + assert_migration(&mut storage).await; + } +} diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index df124335b4b..f2686011003 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -29,7 +29,8 @@ use crate::{ extractors, io::{ common::{l1_batch_params, load_pending_batch, poll_iters}, - MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, + fee_address_migration, MiniblockParams, MiniblockSealerHandle, PendingBatchData, + StateKeeperIO, }, mempool_actor::l2_tx_filter, metrics::KEEPER_METRICS, @@ -430,7 +431,7 @@ impl MempoolIO { .await .unwrap() .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage - + fee_address_migration::migrate_pending_miniblocks(&mut storage).await; drop(storage); Self { diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index d1366858116..16cc15e03b0 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -21,6 +21,7 @@ use super::{ }; pub(crate) mod common; +pub(crate) mod fee_address_migration; pub(crate) mod mempool; pub(crate) mod seal_logic; #[cfg(test)] diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 5197c6f8e30..974ec769325 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -6,10 +6,11 @@ use std::{ time::{Duration, Instant}, }; +use chrono::Utc; use itertools::Itertools; use multivm::{ interface::{FinishedL1Batch, L1BatchEnv}, - utils::{get_batch_base_fee, get_max_gas_per_pubdata_byte}, + utils::get_max_gas_per_pubdata_byte, }; use vm_utils::storage::wait_for_prev_l1_batch_params; use zksync_dal::StorageProcessor; @@ -21,13 +22,13 @@ use 
zksync_types::{ l2::L2Tx, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, protocol_version::ProtocolUpgradeTx, - sort_storage_access::sort_storage_access_queries, storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator}, tx::{ tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation, TransactionExecutionResult, }, - AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, LogQuery, + zk_evm_types::LogQuery, + AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, MiniblockNumber, ProtocolVersionId, StorageKey, StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, CURRENT_VIRTUAL_BLOCK_INFO_POSITION, H256, SYSTEM_CONTEXT_ADDRESS, }; @@ -38,7 +39,10 @@ use crate::{ metrics::{BlockStage, MiniblockStage, APP_METRICS}, state_keeper::{ extractors, - metrics::{L1BatchSealStage, MiniblockSealStage, L1_BATCH_METRICS, MINIBLOCK_METRICS}, + metrics::{ + L1BatchSealStage, MiniblockSealStage, KEEPER_METRICS, L1_BATCH_METRICS, + MINIBLOCK_METRICS, + }, types::ExecutionMetricsForCriteria, updates::{MiniblockSealCommand, UpdatesManager}, }, @@ -82,21 +86,24 @@ impl UpdatesManager { progress.observe(None); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::LogDeduplication); - let (_, deduped_log_queries) = sort_storage_access_queries( + + progress.observe( finished_batch .final_execution_state - .storage_log_queries - .iter() - .map(|log| &log.log_query), + .deduplicated_storage_log_queries + .len(), ); - progress.observe(deduped_log_queries.len()); let (l1_tx_count, l2_tx_count) = l1_l2_tx_count(&self.l1_batch.executed_transactions); let (writes_count, reads_count) = storage_log_query_write_read_counts( &finished_batch.final_execution_state.storage_log_queries, ); - let (dedup_writes_count, dedup_reads_count) = - log_query_write_read_counts(deduped_log_queries.iter()); + let (dedup_writes_count, dedup_reads_count) = log_query_write_read_counts( + finished_batch + .final_execution_state + .deduplicated_storage_log_queries + .iter(), + ); tracing::info!( "Sealing L1 batch {current_l1_batch_number} with {total_tx_count} \ @@ -129,9 +136,7 @@ impl UpdatesManager { let l1_batch = L1BatchHeader { number: l1_batch_env.number, - is_finished: true, timestamp: l1_batch_env.timestamp, - fee_account_address: l1_batch_env.fee_account, priority_ops_onchain_data: self.l1_batch.priority_ops_onchain_data.clone(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, @@ -139,9 +144,6 @@ impl UpdatesManager { l2_to_l1_messages, bloom: Default::default(), used_contract_hashes: finished_batch.final_execution_state.used_contract_hashes, - base_fee_per_gas: get_batch_base_fee(l1_batch_env, self.protocol_version().into()), - l1_gas_price: self.l1_gas_price(), - l2_fair_gas_price: self.fair_l2_gas_price(), base_system_contracts_hashes: self.base_system_contract_hashes(), protocol_version: Some(self.protocol_version()), system_logs: finished_batch.final_execution_state.system_logs, @@ -152,17 +154,19 @@ impl UpdatesManager { .final_execution_state .deduplicated_events_logs; + let final_bootloader_memory = finished_batch + .final_bootloader_memory + .clone() + .unwrap_or_default(); transaction .blocks_dal() .insert_l1_batch( &l1_batch, - finished_batch.final_bootloader_memory.as_ref().unwrap(), + &final_bootloader_memory, self.pending_l1_gas_count(), &events_queue, &finished_batch.final_execution_state.storage_refunds, - self.pending_execution_metrics() - .estimated_circuits_used - .ceil() as u32, + 
self.pending_execution_metrics().circuit_statistic, ) .await .unwrap(); @@ -187,7 +191,9 @@ impl UpdatesManager { progress.observe(None); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::InsertProtectiveReads); - let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries + let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = finished_batch + .final_execution_state + .deduplicated_storage_log_queries .into_iter() .partition(|log_query| log_query.rw_flag); transaction @@ -345,6 +351,7 @@ impl MiniblockSealCommand { hash: self.miniblock.get_miniblock_hash(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, + fee_account_address: self.fee_account_address, base_fee_per_gas: self.base_fee_per_gas, batch_fee_input: self.fee_input, base_system_contracts_hashes: self.base_system_contracts_hashes, @@ -399,7 +406,8 @@ impl MiniblockSealCommand { transaction .storage_dal() .insert_factory_deps(miniblock_number, new_factory_deps) - .await; + .await + .unwrap(); } progress.observe(new_factory_deps_count); @@ -470,6 +478,17 @@ impl MiniblockSealCommand { transaction.commit().await.unwrap(); progress.observe(None); + + let progress = MINIBLOCK_METRICS.start(MiniblockSealStage::ReportTxMetrics, is_fictive); + self.miniblock.executed_transactions.iter().for_each(|tx| { + KEEPER_METRICS + .transaction_inclusion_delay + .observe(Duration::from_millis( + Utc::now().timestamp_millis() as u64 - tx.transaction.received_timestamp_ms, + )) + }); + progress.observe(Some(self.miniblock.executed_transactions.len())); + self.report_miniblock_metrics(started_at, current_l2_virtual_block_number); } diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 69c68f3f5c1..0dcd0408493 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -242,6 +242,7 @@ async fn processing_storage_logs_when_sealing_miniblock() { miniblock_number: MiniblockNumber(3), miniblock, first_tx_index: 0, + fee_account_address: Address::repeat_byte(0x23), fee_input: BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { l1_gas_price: 100, fair_l2_gas_price: 100, @@ -270,7 +271,8 @@ async fn processing_storage_logs_when_sealing_miniblock() { let touched_slots = conn .storage_logs_dal() .get_touched_slots_for_l1_batch(l1_batch_number) - .await; + .await + .unwrap(); // Keys that are only read must not be written to `storage_logs`. 
let account = AccountTreeId::default(); @@ -321,6 +323,7 @@ async fn processing_events_when_sealing_miniblock() { miniblock_number, miniblock, first_tx_index: 0, + fee_account_address: Address::repeat_byte(0x23), fee_input: BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { l1_gas_price: 100, fair_l2_gas_price: 100, @@ -418,7 +421,6 @@ async fn test_miniblock_and_l1_batch_processing( .unwrap() .expect("No L1 batch #1"); assert_eq!(l1_batch_header.l2_tx_count, 1); - assert!(l1_batch_header.is_finished); } #[tokio::test] diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 27261f4e36c..d955eca5610 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -52,6 +52,7 @@ impl Tester { internal_enforced_l1_gas_price: None, poll_period: 10, max_l1_gas_price: None, + l1_gas_per_pubdata_byte: 17, }; GasAdjuster::new(eth_client, gas_adjuster_config) @@ -164,7 +165,7 @@ impl Tester { let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); storage .blocks_dal() - .insert_l1_batch(&batch_header, &[], Default::default(), &[], &[], 0) + .insert_mock_l1_batch(&batch_header) .await .unwrap(); storage diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 209809d33f9..14829c5cf61 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -1,11 +1,13 @@ use std::{ convert::Infallible, + future::{self, Future}, time::{Duration, Instant}, }; use anyhow::Context as _; use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use tokio::sync::watch; +use zksync_dal::ConnectionPool; use zksync_types::{ block::MiniblockExecutionData, l2::TransactionType, protocol_version::ProtocolUpgradeTx, storage_writes_deduplicator::StorageWritesDeduplicator, Transaction, @@ -20,7 +22,7 @@ use super::{ types::ExecutionMetricsForCriteria, updates::UpdatesManager, }; -use crate::gas_tracker::gas_count_from_writes; +use crate::{gas_tracker::gas_count_from_writes, state_keeper::io::fee_address_migration}; /// Amount of time to block on waiting for some resource. The exact value is not really important, /// we only need it to not block on waiting indefinitely and be able to process cancellation requests. @@ -76,6 +78,21 @@ impl ZkSyncStateKeeper { } } + /// Temporary method to migrate fee addresses from L1 batches to miniblocks. + pub fn run_fee_address_migration( + &self, + pool: ConnectionPool, + ) -> impl Future> { + let last_miniblock = self.io.current_miniblock_number() - 1; + let stop_receiver = self.stop_receiver.clone(); + async move { + fee_address_migration::migrate_miniblocks(pool, last_miniblock, stop_receiver).await?; + future::pending::<()>().await; + // ^ Since this is run as a task, we don't want it to exit on success (this would shut down the node). 
+ anyhow::Ok(()) + } + } + pub async fn run(mut self) -> anyhow::Result<()> { match self.run_inner().await { Ok(_) => unreachable!(), @@ -158,8 +175,13 @@ impl ZkSyncStateKeeper { let mut batch_executor = self .batch_executor_base - .init_batch(l1_batch_env.clone(), system_env.clone()) - .await; + .init_batch( + l1_batch_env.clone(), + system_env.clone(), + &self.stop_receiver, + ) + .await + .ok_or(Error::Canceled)?; self.restore_state(&batch_executor, &mut updates_manager, pending_miniblocks) .await?; @@ -210,8 +232,13 @@ impl ZkSyncStateKeeper { ); batch_executor = self .batch_executor_base - .init_batch(l1_batch_env.clone(), system_env.clone()) - .await; + .init_batch( + l1_batch_env.clone(), + system_env.clone(), + &self.stop_receiver, + ) + .await + .ok_or(Error::Canceled)?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -309,9 +336,13 @@ impl ZkSyncStateKeeper { .. } = result else { - return Err(anyhow::anyhow!( + tracing::error!( "Re-executing stored tx failed. Tx: {tx:?}. Err: {:?}", result.err() + ); + return Err(anyhow::anyhow!( + "Re-executing stored tx failed. It means that transaction was executed \ + successfully before, but failed after a restart." ) .into()); }; @@ -319,7 +350,7 @@ impl ZkSyncStateKeeper { let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - } = tx_metrics; + } = *tx_metrics; let tx_hash = tx.hash(); let is_l1 = tx.is_l1(); @@ -349,6 +380,10 @@ impl ZkSyncStateKeeper { } } + tracing::debug!( + "All the transactions from the pending state were re-executed successfully" + ); + // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. let new_miniblock_params = self .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) @@ -434,7 +469,7 @@ impl ZkSyncStateKeeper { let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - } = tx_metrics; + } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, *tx_result, @@ -504,7 +539,7 @@ impl ZkSyncStateKeeper { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, .. - } = tx_metrics; + } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, *tx_result, @@ -575,7 +610,7 @@ impl ZkSyncStateKeeper { let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - } = *tx_metrics; + } = **tx_metrics; tracing::trace!( "finished tx {:?} by {:?} (is_l1: {}) (#{} in l1 batch {}) (#{} in miniblock {}) \ @@ -598,7 +633,7 @@ impl ZkSyncStateKeeper { let ExecutionMetricsForCriteria { l1_gas: finish_block_l1_gas, execution_metrics: finish_block_execution_metrics, - } = *bootloader_dry_run_metrics; + } = **bootloader_dry_run_metrics; let encoding_len = tx.encoding_len(); diff --git a/core/lib/zksync_core/src/state_keeper/metrics.rs b/core/lib/zksync_core/src/state_keeper/metrics.rs index 8f1b3319df5..aa65c16f7de 100644 --- a/core/lib/zksync_core/src/state_keeper/metrics.rs +++ b/core/lib/zksync_core/src/state_keeper/metrics.rs @@ -52,6 +52,9 @@ pub(crate) struct StateKeeperMetrics { /// Time spent waiting for the header of a previous miniblock. #[metrics(buckets = Buckets::LATENCIES)] pub load_previous_miniblock_header: Histogram, + /// The time it takes for transactions to be included in a block. Representative of the time user must wait before their transaction is confirmed. 
+ #[metrics(buckets = Buckets::LATENCIES)] + pub transaction_inclusion_delay: Histogram, /// Time spent by the state keeper on transaction execution. #[metrics(buckets = Buckets::LATENCIES)] pub tx_execution_time: Family>, @@ -249,6 +252,7 @@ pub(super) enum MiniblockSealStage { ExtractL2ToL1Logs, InsertL2ToL1Logs, CommitMiniblock, + ReportTxMetrics, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index 7878621a729..1ae5899a9c4 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -70,12 +70,14 @@ impl MetricExtractor for CircuitsCriterion { } fn extract(metrics: &ExecutionMetrics) -> usize { - metrics.estimated_circuits_used.ceil() as usize + metrics.circuit_statistic.total() } } #[cfg(test)] mod tests { + use zksync_types::circuit::CircuitStatistic; + use super::*; fn get_config() -> StateKeeperConfig { @@ -170,56 +172,61 @@ mod tests { ); } - macro_rules! test_scenario_execution_metrics { - ($criterion: tt, $metric_name: ident, $metric_type: ty, $protocol_version: expr) => { - let config = get_config(); - let block_execution_metrics = ExecutionMetrics { - $metric_name: ($criterion::limit_per_block($protocol_version) / 2) as $metric_type, - ..ExecutionMetrics::default() - }; - test_no_seal_block_resolution(block_execution_metrics, &$criterion, $protocol_version); - - let block_execution_metrics = ExecutionMetrics { - $metric_name: ($criterion::limit_per_block($protocol_version) - 1) as $metric_type, - ..ExecutionMetrics::default() - }; - - test_include_and_seal_block_resolution( - block_execution_metrics, - &$criterion, - $protocol_version, - ); - - let block_execution_metrics = ExecutionMetrics { - $metric_name: ($criterion::limit_per_block($protocol_version)) as $metric_type, - ..ExecutionMetrics::default() - }; - - test_exclude_and_seal_block_resolution( - block_execution_metrics, - &$criterion, - $protocol_version, - ); - - let tx_execution_metrics = ExecutionMetrics { - $metric_name: ($criterion::limit_per_block($protocol_version) as f64 - * config.reject_tx_at_geometry_percentage - + 1f64) - .round() as $metric_type, - ..ExecutionMetrics::default() - }; - - test_unexecutable_tx_resolution(tx_execution_metrics, &$criterion, $protocol_version); + #[test] + fn circuits_seal_criterion() { + let config = get_config(); + let protocol_version = ProtocolVersionId::latest(); + let block_execution_metrics = ExecutionMetrics { + circuit_statistic: CircuitStatistic { + main_vm: (CircuitsCriterion::limit_per_block(protocol_version) / 2) as f32, + ..CircuitStatistic::default() + }, + ..ExecutionMetrics::default() }; - } + test_no_seal_block_resolution( + block_execution_metrics, + &CircuitsCriterion, + protocol_version, + ); - #[test] - fn computational_gas_seal_criterion() { - test_scenario_execution_metrics!( - CircuitsCriterion, - estimated_circuits_used, - f32, - ProtocolVersionId::Version17 + let block_execution_metrics = ExecutionMetrics { + circuit_statistic: CircuitStatistic { + main_vm: (CircuitsCriterion::limit_per_block(protocol_version) - 1) as f32, + ..CircuitStatistic::default() + }, + ..ExecutionMetrics::default() + }; + + test_include_and_seal_block_resolution( + block_execution_metrics, + &CircuitsCriterion, + protocol_version, ); + 
+ let block_execution_metrics = ExecutionMetrics { + circuit_statistic: CircuitStatistic { + main_vm: CircuitsCriterion::limit_per_block(protocol_version) as f32, + ..CircuitStatistic::default() + }, + ..ExecutionMetrics::default() + }; + + test_exclude_and_seal_block_resolution( + block_execution_metrics, + &CircuitsCriterion, + protocol_version, + ); + + let tx_execution_metrics = ExecutionMetrics { + circuit_statistic: CircuitStatistic { + main_vm: CircuitsCriterion::limit_per_block(protocol_version) as f32 + * config.reject_tx_at_geometry_percentage as f32 + + 1.0, + ..CircuitStatistic::default() + }, + ..ExecutionMetrics::default() + }; + + test_unexecutable_tx_resolution(tx_execution_metrics, &CircuitsCriterion, protocol_version); } } diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index 6f71dc35bd9..bfcfc524ffa 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -22,8 +22,9 @@ use zksync_types::{ block::{BlockGasCount, MiniblockExecutionData, MiniblockHasher}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, tx::tx_execution_info::ExecutionMetrics, - Address, L1BatchNumber, L2ChainId, LogQuery, MiniblockNumber, ProtocolVersionId, - StorageLogQuery, StorageLogQueryType, Timestamp, Transaction, H256, U256, + zk_evm_types::{LogQuery, Timestamp}, + Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, StorageLogQuery, + StorageLogQueryType, Transaction, H256, U256, }; mod tester; @@ -98,6 +99,7 @@ pub(super) fn default_vm_block_result() -> FinishedL1Batch { final_execution_state: CurrentExecutionState { events: vec![], storage_log_queries: vec![], + deduplicated_storage_log_queries: vec![], used_contract_hashes: vec![], user_l2_to_l1_logs: vec![], system_logs: vec![], @@ -150,7 +152,7 @@ pub(super) fn create_execution_result( computational_gas_used: 0, total_log_queries, pubdata_published: 0, - estimated_circuits_used: 0.0, + circuit_statistic: Default::default(), }, refunds: Refunds::default(), } diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index 9ac886270d3..ca65b165326 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -242,14 +242,14 @@ pub(crate) fn successful_exec() -> TxExecutionResult { statistics: Default::default(), refunds: Default::default(), }), - tx_metrics: ExecutionMetricsForCriteria { + tx_metrics: Box::new(ExecutionMetricsForCriteria { l1_gas: Default::default(), execution_metrics: Default::default(), - }, - bootloader_dry_run_metrics: ExecutionMetricsForCriteria { + }), + bootloader_dry_run_metrics: Box::new(ExecutionMetricsForCriteria { l1_gas: Default::default(), execution_metrics: Default::default(), - }, + }), bootloader_dry_run_result: Box::new(VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, logs: Default::default(), @@ -272,11 +272,11 @@ pub(crate) fn successful_exec_with_metrics( statistics: Default::default(), refunds: Default::default(), }), - tx_metrics, - bootloader_dry_run_metrics: ExecutionMetricsForCriteria { + tx_metrics: Box::new(tx_metrics), + bootloader_dry_run_metrics: Box::new(ExecutionMetricsForCriteria { l1_gas: Default::default(), execution_metrics: Default::default(), - }, + }), bootloader_dry_run_result: Box::new(VmExecutionResultAndLogs { result: ExecutionResult::Success { output: 
             logs: Default::default(),
@@ -452,7 +453,8 @@ impl L1BatchExecutorBuilder for TestBatchExecutorBuilder {
         &mut self,
         _l1batch_params: L1BatchEnv,
         _system_env: SystemEnv,
-    ) -> BatchExecutorHandle {
+        _stop_receiver: &watch::Receiver<bool>,
+    ) -> Option<BatchExecutorHandle> {
         let (commands_sender, commands_receiver) = mpsc::channel(1);
 
         let executor = TestBatchExecutor::new(
@@ -462,7 +463,7 @@
         );
 
         let handle = tokio::task::spawn_blocking(move || executor.run());
-        BatchExecutorHandle::from_raw(handle, commands_sender)
+        Some(BatchExecutorHandle::from_raw(handle, commands_sender))
     }
 }
 
@@ -780,7 +781,8 @@ impl L1BatchExecutorBuilder for MockBatchExecutorBuilder {
         &mut self,
         _l1batch_params: L1BatchEnv,
         _system_env: SystemEnv,
-    ) -> BatchExecutorHandle {
+        _stop_receiver: &watch::Receiver<bool>,
+    ) -> Option<BatchExecutorHandle> {
         let (send, recv) = mpsc::channel(1);
         let handle = tokio::task::spawn(async {
             let mut recv = recv;
@@ -797,6 +799,6 @@
                 }
             }
         });
-        BatchExecutorHandle::from_raw(handle, send)
+        Some(BatchExecutorHandle::from_raw(handle, send))
     }
 }
diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs
index 7718882af28..faee5a5fbff 100644
--- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs
@@ -26,6 +26,7 @@ pub mod miniblock_updates;
 #[derive(Debug, Clone, PartialEq)]
 pub struct UpdatesManager {
     batch_timestamp: u64,
+    fee_account_address: Address,
     batch_fee_input: BatchFeeInput,
     base_fee_per_gas: u64,
     base_system_contract_hashes: BaseSystemContractsHashes,
@@ -43,6 +44,7 @@ impl UpdatesManager {
     ) -> Self {
         Self {
             batch_timestamp: l1_batch_env.timestamp,
+            fee_account_address: l1_batch_env.fee_account,
             batch_fee_input: l1_batch_env.fee_input,
             base_fee_per_gas: get_batch_base_fee(&l1_batch_env, protocol_version.into()),
             protocol_version,
@@ -67,14 +69,6 @@ impl UpdatesManager {
         self.base_system_contract_hashes
     }
 
-    pub(crate) fn l1_gas_price(&self) -> u64 {
-        self.batch_fee_input.l1_gas_price()
-    }
-
-    pub(crate) fn fair_l2_gas_price(&self) -> u64 {
-        self.batch_fee_input.fair_l2_gas_price()
-    }
-
     pub(crate) fn seal_miniblock_command(
         &self,
         l1_batch_number: L1BatchNumber,
@@ -87,6 +81,7 @@
             miniblock_number,
             miniblock: self.miniblock.clone(),
             first_tx_index: self.l1_batch.executed_transactions.len(),
+            fee_account_address: self.fee_account_address,
             fee_input: self.batch_fee_input,
             base_fee_per_gas: self.base_fee_per_gas,
             base_system_contracts_hashes: self.base_system_contract_hashes,
@@ -172,6 +167,7 @@ pub(crate) struct MiniblockSealCommand {
     pub miniblock_number: MiniblockNumber,
     pub miniblock: MiniblockUpdates,
     pub first_tx_index: usize,
+    pub fee_account_address: Address,
     pub fee_input: BatchFeeInput,
     pub base_fee_per_gas: u64,
     pub base_system_contracts_hashes: BaseSystemContractsHashes,
diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs
index 7ca6e73c37c..f18f316a968 100644
--- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs
+++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs
@@ -6,7 +6,7 @@ use chrono::TimeZone;
 use test_casing::{test_casing, Product};
 use tokio::sync::{watch, Mutex};
 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_types::{block::BlockGasCount, Address, L2ChainId, ProtocolVersionId};
+use zksync_types::{Address, L2ChainId, ProtocolVersionId};
 
 use super::*;
 use crate::{
@@ -28,7 +28,7 @@ async fn seal_l1_batch(storage: &mut StorageProcessor<'_>, number: L1BatchNumber
     let l1_batch = create_l1_batch(number.0);
     storage
         .blocks_dal()
-        .insert_l1_batch(&l1_batch, &[], BlockGasCount::default(), &[], &[], 0)
+        .insert_mock_l1_batch(&l1_batch)
         .await
         .unwrap();
     storage
@@ -108,7 +108,7 @@ impl L1BatchStagesMap {
         for (number, stage) in self.iter() {
             let local_details = storage
                 .blocks_web3_dal()
-                .get_block_details(MiniblockNumber(number.0), Address::zero())
+                .get_block_details(MiniblockNumber(number.0))
                .await
                .unwrap()
                .unwrap_or_else(|| panic!("no details for block #{number}"));
diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs
index c6ca76026ff..c800e83703f 100644
--- a/core/lib/zksync_core/src/sync_layer/external_io.rs
+++ b/core/lib/zksync_core/src/sync_layer/external_io.rs
@@ -23,7 +23,8 @@ use crate::{
     state_keeper::{
         io::{
             common::{l1_batch_params, load_pending_batch, poll_iters},
-            MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO,
+            fee_address_migration, MiniblockParams, MiniblockSealerHandle, PendingBatchData,
+            StateKeeperIO,
         },
         metrics::KEEPER_METRICS,
         seal_criteria::IoSealCriteria,
@@ -84,6 +85,9 @@ impl ExternalIO {
             .await
             .unwrap()
             .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage
+        // We must run the migration for pending miniblocks synchronously, since we use `fee_account_address`
+        // from a pending miniblock in the `load_pending_batch()` implementation.
+        fee_address_migration::migrate_pending_miniblocks(&mut storage).await;
         drop(storage);
 
         tracing::info!(
@@ -211,7 +215,8 @@ impl ExternalIO {
                     self.current_miniblock_number,
                     &HashMap::from_iter([(contract.hash, be_words_to_bytes(&contract.code))]),
                 )
-                .await;
+                .await
+                .unwrap();
                 contract
             }
         }
@@ -244,19 +249,6 @@ impl StateKeeperIO for ExternalIO {
     async fn load_pending_batch(&mut self) -> Option<PendingBatchData> {
         let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap();
 
-        // TODO (BFT-99): Do not assume that fee account is the same as in previous batch.
-        let fee_account = storage
-            .blocks_dal()
-            .get_l1_batch_header(self.current_l1_batch_number - 1)
-            .await
-            .unwrap()
-            .unwrap_or_else(|| {
-                panic!(
-                    "No block header for batch {}",
-                    self.current_l1_batch_number - 1
-                )
-            })
-            .fee_account_address;
         let pending_miniblock_number = {
             let (_, last_miniblock_number_included_in_l1_batch) = storage
                 .blocks_dal()
@@ -271,6 +263,7 @@
             .get_miniblock_header(pending_miniblock_number)
             .await
             .unwrap()?;
+        let fee_account = pending_miniblock_header.fee_account_address;
 
         if pending_miniblock_header.protocol_version.is_none() {
             // Fetch protocol version ID for pending miniblocks to know which VM to use to re-execute them.
diff --git a/core/lib/zksync_core/src/sync_layer/sync_state.rs b/core/lib/zksync_core/src/sync_layer/sync_state.rs
index 78b78d4aa4d..32a5c29c198 100644
--- a/core/lib/zksync_core/src/sync_layer/sync_state.rs
+++ b/core/lib/zksync_core/src/sync_layer/sync_state.rs
@@ -36,7 +36,7 @@ impl SyncState {
         self.inner.read().unwrap().local_block.unwrap_or_default()
     }
 
-    pub(super) fn set_main_node_block(&self, block: MiniblockNumber) {
+    pub(crate) fn set_main_node_block(&self, block: MiniblockNumber) {
         let mut inner = self.inner.write().unwrap();
         if let Some(local_block) = inner.local_block {
             if block.0 < local_block.0 {
diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs
index 3e508accefc..2fc010f4a78 100644
--- a/core/lib/zksync_core/src/sync_layer/tests.rs
+++ b/core/lib/zksync_core/src/sync_layer/tests.rs
@@ -28,7 +28,7 @@ use crate::{
 const TEST_TIMEOUT: Duration = Duration::from_secs(10);
 const POLL_INTERVAL: Duration = Duration::from_millis(50);
-pub const OPERATOR_ADDRESS: Address = Address::repeat_byte(1);
+pub(crate) const OPERATOR_ADDRESS: Address = Address::repeat_byte(1);
 
 fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> SyncAction {
     SyncAction::OpenBatch {
@@ -68,7 +68,7 @@ impl StateKeeperHandles {
             actions,
             sync_state.clone(),
             Box::<MockMainNodeClient>::default(),
-            OPERATOR_ADDRESS,
+            Address::repeat_byte(1),
             u32::MAX,
             L2ChainId::default(),
         )
@@ -181,9 +181,11 @@ async fn external_io_basics() {
     let tx_receipt = storage
         .transactions_web3_dal()
-        .get_transaction_receipt(tx_hash)
+        .get_transaction_receipts(&[tx_hash])
         .await
         .unwrap()
+        .get(0)
+        .cloned()
         .expect("Transaction not persisted");
     assert_eq!(tx_receipt.block_number, 1.into());
     assert_eq!(tx_receipt.transaction_index, 0.into());
@@ -251,7 +253,7 @@ async fn external_io_with_multiple_miniblocks() {
         let sync_block = storage
             .sync_dal()
-            .sync_block(MiniblockNumber(number), OPERATOR_ADDRESS, true)
+            .sync_block(MiniblockNumber(number), true)
             .await
             .unwrap()
             .unwrap_or_else(|| panic!("Sync block #{} is not persisted", number));
diff --git a/core/lib/zksync_core/src/temp_config_store.rs b/core/lib/zksync_core/src/temp_config_store.rs
index cfa9ceed379..d50460a0924 100644
--- a/core/lib/zksync_core/src/temp_config_store.rs
+++ b/core/lib/zksync_core/src/temp_config_store.rs
@@ -14,6 +14,8 @@ use zksync_config::{
     GasAdjusterConfig, ObjectStoreConfig, PostgresConfig,
 };
 
+use crate::consensus;
+
 // TODO (QIT-22): This structure is going to be removed when components will be responsible for their own configs.
 /// A temporary config store allowing to pass deserialized configs from `zksync_server` to `zksync_core`.
 /// All the configs are optional, since for some component combination it is not needed to pass all the configs.
@@ -44,4 +46,5 @@ pub struct TempConfigStore {
     pub eth_watch_config: Option<ETHWatchConfig>,
     pub gas_adjuster_config: Option<GasAdjusterConfig>,
     pub object_store_config: Option<ObjectStoreConfig>,
+    pub consensus_config: Option<consensus::Config>,
 }
diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs
index 7d919d31f88..9b3fa3da799 100644
--- a/core/lib/zksync_core/src/utils/mod.rs
+++ b/core/lib/zksync_core/src/utils/mod.rs
@@ -1,8 +1,9 @@
 //! Miscellaneous utils used by multiple components.
 
-use std::time::Duration;
+use std::{future::Future, time::Duration};
 
 use anyhow::Context as _;
+use async_trait::async_trait;
 use tokio::sync::watch;
 use zksync_dal::{ConnectionPool, StorageProcessor};
 use zksync_types::L1BatchNumber;
@@ -10,6 +11,44 @@ use zksync_types::L1BatchNumber;
 #[cfg(test)]
 pub(crate) mod testonly;
 
+/// Fallible and async predicate for binary search.
+#[async_trait]
+pub(crate) trait BinarySearchPredicate: Send {
+    type Error;
+
+    async fn eval(&mut self, argument: u32) -> Result<bool, Self::Error>;
+}
+
+#[async_trait]
+impl<F, Fut, E> BinarySearchPredicate for F
+where
+    F: Send + FnMut(u32) -> Fut,
+    Fut: Send + Future<Output = Result<bool, E>>,
+{
+    type Error = E;
+
+    async fn eval(&mut self, argument: u32) -> Result<bool, Self::Error> {
+        self(argument).await
+    }
+}
+
+/// Finds the greatest `u32` value for which `f` returns `true`.
+pub(crate) async fn binary_search_with<P: BinarySearchPredicate>(
+    mut left: u32,
+    mut right: u32,
+    mut predicate: P,
+) -> Result<u32, P::Error> {
+    while left + 1 < right {
+        let middle = (left + right) / 2;
+        if predicate.eval(middle).await? {
+            left = middle;
+        } else {
+            right = middle;
+        }
+    }
+    Ok(left)
+}
+
 /// Repeatedly polls the DB until there is an L1 batch. We may not have such a batch initially
 /// if the DB is recovered from an application-level snapshot.
 ///
@@ -94,6 +133,15 @@ mod tests {
     use super::*;
     use crate::genesis::{ensure_genesis_state, GenesisParams};
 
+    #[tokio::test]
+    async fn test_binary_search() {
+        for divergence_point in [1, 50, 51, 100] {
+            let mut f = |x| async move { Ok::<_, ()>(x < divergence_point) };
+            let result = binary_search_with(0, 100, &mut f).await;
+            assert_eq!(result, Ok(divergence_point - 1));
+        }
+    }
+
     #[tokio::test]
     async fn waiting_for_l1_batch_success() {
         let pool = ConnectionPool::test_pool().await;
diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs
index e6883f2585e..3684e2af909 100644
--- a/core/lib/zksync_core/src/utils/testonly.rs
+++ b/core/lib/zksync_core/src/utils/testonly.rs
@@ -29,6 +29,7 @@ pub(crate) fn create_miniblock(number: u32) -> MiniblockHeader {
         l2_tx_count: 0,
         base_fee_per_gas: 100,
         batch_fee_input: BatchFeeInput::l1_pegged(100, 100),
+        fee_account_address: Address::zero(),
         gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(ProtocolVersionId::latest().into()),
         base_system_contracts_hashes: BaseSystemContractsHashes::default(),
         protocol_version: Some(ProtocolVersionId::latest()),
@@ -38,15 +39,12 @@
 
 /// Creates an L1 batch header with the specified number and deterministic contents.
 pub(crate) fn create_l1_batch(number: u32) -> L1BatchHeader {
-    let mut header = L1BatchHeader::new(
+    L1BatchHeader::new(
         L1BatchNumber(number),
         number.into(),
-        Address::default(),
         BaseSystemContractsHashes::default(),
         ProtocolVersionId::latest(),
-    );
-    header.is_finished = true;
-    header
+    )
 }
 
 /// Creates metadata for an L1 batch with the specified number.
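A usage note on the `binary_search_with` helper added above: thanks to the blanket `BinarySearchPredicate` impl, any `FnMut(u32)` closure returning a fallible future can serve as the predicate, under the invariant that it holds at `left` and fails at `right`. A self-contained sketch (the local-block probe is a hypothetical scenario, and the helper is inlined here in a simplified, trait-free form):

```rust
use std::{collections::HashSet, future::Future};

// Simplified, trait-free version of the helper above: finds the greatest
// `u32` for which `predicate` returns `Ok(true)`, assuming it holds at
// `left` and fails at `right`.
async fn binary_search_with<F, Fut, E>(
    mut left: u32,
    mut right: u32,
    mut predicate: F,
) -> Result<u32, E>
where
    F: FnMut(u32) -> Fut,
    Fut: Future<Output = Result<bool, E>>,
{
    while left + 1 < right {
        let middle = (left + right) / 2;
        if predicate(middle).await? {
            left = middle;
        } else {
            right = middle;
        }
    }
    Ok(left)
}

#[tokio::main]
async fn main() -> Result<(), ()> {
    // Hypothetical scenario: miniblocks 0..=41 are present locally;
    // find the last present one without scanning linearly.
    let local_blocks: HashSet<u32> = (0..=41).collect();
    let probe = |number: u32| {
        let exists = local_blocks.contains(&number);
        async move { Ok::<bool, ()>(exists) }
    };
    let last_present = binary_search_with(0, 1_000, probe).await?;
    assert_eq!(last_present, 41);
    Ok(())
}
```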
@@ -131,7 +129,7 @@
     let l1_batch = create_l1_batch(l1_batch_number);
     storage
         .blocks_dal()
-        .insert_l1_batch(&l1_batch, &[], Default::default(), &[], &[], 0)
+        .insert_mock_l1_batch(&l1_batch)
         .await
         .unwrap();
 
@@ -153,12 +151,11 @@
         l1_batch_root_hash,
         miniblock_number: miniblock.number,
         miniblock_root_hash: H256::zero(), // not used
-        last_finished_chunk_id: None,
-        total_chunk_count: 100,
+        storage_logs_chunks_processed: vec![true; 100],
     };
     storage
         .snapshot_recovery_dal()
-        .set_applied_snapshot_status(&snapshot_recovery)
+        .insert_initial_recovery_status(&snapshot_recovery)
         .await
         .unwrap();
     storage.commit().await.unwrap();
@@ -180,12 +177,11 @@ pub(crate) async fn prepare_empty_recovery_snapshot(
         l1_batch_root_hash: H256::zero(),
         miniblock_number: l1_batch_number.into(),
         miniblock_root_hash: H256::zero(), // not used
-        last_finished_chunk_id: None,
-        total_chunk_count: 100,
+        storage_logs_chunks_processed: vec![true; 100],
     };
     storage
         .snapshot_recovery_dal()
-        .set_applied_snapshot_status(&snapshot_recovery)
+        .insert_initial_recovery_status(&snapshot_recovery)
         .await
         .unwrap();
     snapshot_recovery
diff --git a/core/tests/cross_external_nodes_checker/README.md b/core/tests/cross_external_nodes_checker/README.md
deleted file mode 100644
index 78c1fe48b40..00000000000
--- a/core/tests/cross_external_nodes_checker/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# zkSync Cross External Nodes Consistency Checker
-
-This tool is used to check the consistency of external node instances against the main node. The tool has two main
-checkers:
-
-1. RPC Checker, which checks the consistency of the RPC API of the external node against the main node.
-2. PubSub Checker, which checks the consistency of the PubSub API of the external node against the main node.
-
-Without any arguments, the tool will run both checkers. The RPC Checker will run in Triggered mode, checking all
-available blocks, and the PubSub Checker will run for as long as the RPC Checker is working.
-
-Do note that for the PubSub Checker to properly check the consistency between the nodes, enough time needs to pass. That
-is because the PubSub clients may start out of sync. Minimal recommended amount of time for the PubSub Checker is 80
-seconds, which would guarantee at least 20 miniblocks checked.
-
-## Running locally
-
-Run the server
-
-```
-zk init
-zk server --components api,tree,eth,state_keeper
-```
-
-Run the EN
-
-```
-zk env ext-node
-zk clean --database
-zk db setup
-zk external-node
-```
-
-Run integration tests to populate the main node with data.
- -``` -zk test i server -``` - -Run the checker - -``` -zk run cross-en-checker -``` diff --git a/core/tests/cross_external_nodes_checker/src/checker.rs b/core/tests/cross_external_nodes_checker/src/checker.rs deleted file mode 100644 index 0ddd179c266..00000000000 --- a/core/tests/cross_external_nodes_checker/src/checker.rs +++ /dev/null @@ -1,906 +0,0 @@ -use std::{ - cmp::Ordering::{Equal, Greater, Less}, - collections::HashMap, - fmt::Debug, - time::Duration, -}; - -use serde_json::Value; -use tokio::{sync::watch::Receiver, time::sleep}; -use zksync_types::{ - api::{BlockDetails, BlockNumber, L1BatchDetails}, - web3::types::U64, - L1BatchNumber, MiniblockNumber, H256, -}; -use zksync_utils::wait_for_tasks::wait_for_tasks; -use zksync_web3_decl::{ - jsonrpsee::{ - core::ClientError, - http_client::{HttpClient, HttpClientBuilder}, - }, - namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, - types::FilterBuilder, - RpcResult, -}; - -use crate::{ - config::{CheckerConfig, RpcMode}, - divergence::{Divergence, DivergenceDetails}, - helpers::compare_json, -}; - -#[derive(Debug, Clone)] -pub struct Checker { - /// 'Triggered' to run once. 'Continuous' to run forever. - mode: RpcMode, - /// Client for interacting with the main node. - main_node_client: HttpClient, - /// Client for interacting with the instance nodes. - instance_clients: Vec, - /// Check all miniblocks starting from this. If 'None' then check from genesis. Inclusive. - start_miniblock: Option, - /// For Triggered mode. If 'None' then check all available miniblocks. Inclusive. - finish_miniblock: Option, - /// In seconds, how often to poll the instance node for new miniblocks. - instance_poll_period: u64, - /// Maps instance URL to a list of its divergences. - divergences: HashMap>, - /// How often should blocks logs be checked. - log_check_interval: u32, - /// Next batch number to check for each instance. - next_batch_to_check: HashMap, - /// The maximum number of transactions to be checked at random in each miniblock. - max_transactions_to_check: Option, -} - -#[derive(Debug, Clone)] -pub struct InstanceHttpClient { - pub url: String, - pub client: HttpClient, -} - -impl Checker { - pub fn new(config: &CheckerConfig) -> Self { - let (main_node_client, instance_clients) = Self::setup_clients( - config - .main_node_http_url - .clone() - .expect("An RPC URL for the main node has to be provided for RPC mode."), - config - .instances_http_urls - .clone() - .expect("RPC URLs for the EN instances have to be provided for RPC mode."), - ); - - let last_checked_batch = instance_clients - .iter() - .map(|instance| (instance.url.clone(), L1BatchNumber(0))) - .collect(); - - let mode = config - .rpc_mode - .expect("The RPC Checker has to be provided an RPC mode"); - - Self { - mode, - main_node_client, - instance_clients, - start_miniblock: config.start_miniblock.map(|n| n.into()), - finish_miniblock: config.finish_miniblock.map(|n| n.into()), - instance_poll_period: config.instance_poll_period.unwrap_or(10), - divergences: HashMap::new(), - log_check_interval: 1, // TODO (BFT-192): make configurable if we want to keep it. - next_batch_to_check: last_checked_batch, - max_transactions_to_check: config.max_transactions_to_check, - } - } - - // Set up clients for the main node and all EN instances we want to check. 
- fn setup_clients( - main_node_url: String, - instances_urls: Vec, - ) -> (HttpClient, Vec) { - let main_node_client = HttpClientBuilder::default() - .build(main_node_url) - .expect("Failed to create an HTTP client for the main node"); - - let mut instance_clients: Vec = Vec::new(); - for url in instances_urls { - let client = HttpClientBuilder::default() - .build(url.clone()) - .expect("Failed to create an HTTP client for an instance of the external node"); - instance_clients.push(InstanceHttpClient { url, client }); - } - - (main_node_client, instance_clients) - } - - pub async fn run(mut self, stop_receiver: Receiver) -> anyhow::Result<()> { - match self.mode { - RpcMode::Triggered => { - tracing::info!("Starting Checker in Triggered mode"); - if let Err(e) = self.run_triggered().await { - self.log_divergences(); - tracing::error!("Error running in Triggered mode: {:?}", e); - } - // Ensure CI will fail if any divergences were found. - assert!(self.divergences.is_empty(), "Divergences found"); - } - RpcMode::Continuous => { - tracing::info!("Starting Checker in Continuous mode"); - if let Err(e) = self.run_continuous(stop_receiver).await { - tracing::error!("Error running in Continuous mode: {:?}", e); - } - } - } - Ok(()) - } - - // For each instance, spawn a task that will continuously poll the instance for new miniblocks - // and compare them with corresponding main node miniblocks. - // - // Errors in task loops exist the loop, stop the tasks, and cause all other tasks to exit too. - async fn run_continuous(&mut self, mut stop_receiver: Receiver) -> RpcResult<()> { - let mut join_handles = Vec::new(); - - for instance in &self.instance_clients { - let main_node_client = self.main_node_client.clone(); - let instance_client = instance.clone(); - let instance_stop_receiver = stop_receiver.clone(); - let mut checker = self.clone(); - - let handle = tokio::spawn(async move { - tracing::info!("Started a task to check instance {}", instance_client.url); - if let Err(e) = checker.run_node_level_checkers(&instance_client).await { - tracing::error!("Error checking instance {}: {:?}", instance_client.url, e); - }; - let mut next_block_to_check = checker.start_miniblock.unwrap_or(MiniblockNumber(0)); - - // - Get the next block the instance has to be checked. - // - Get the corresponding block from the main node. - // - Run the checkers through the blocks. - // - Maybe check batches. - loop { - tracing::debug!( - "entered loop to check miniblock #({}) for instance: {}", - next_block_to_check, - instance_client.url - ); - - if *instance_stop_receiver.borrow() { - break; - } - - let instance_miniblock = match instance_client - .client - .get_block_details(next_block_to_check) - .await - { - Ok(Some(miniblock)) => miniblock, - Ok(None) => { - tracing::debug!( - "No miniblock found for miniblock #({}). Sleeping for {} seconds", - next_block_to_check, - checker.instance_poll_period - ); - // The instance doesn't have a next block to check yet. For now, we wait until it does. - // TODO(BFT-165): Implement miniblock existence divergence checker. 
- sleep(Duration::from_secs(checker.instance_poll_period)).await; - continue; - } - Err(e) => { - tracing::error!( - "Error getting miniblock #({}) from instance: {}: {:?}", - next_block_to_check, - instance_client.url, - e - ); - break; - } - }; - - let main_node_miniblock = match main_node_client - .get_block_details(next_block_to_check) - .await - { - Ok(Some(miniblock)) => miniblock, - Ok(None) => { - tracing::error!( - "Miniblock #({}), which exists in external node instance {}, was not found in the main node", - next_block_to_check, instance_client.url - ); - break; - } - Err(e) => { - tracing::error!("Error getting miniblock from main node while checking instance {}: {:?}", instance_client.url, e); - break; - } - }; - - let main_node_miniblock_txs = match checker - .create_tx_map(&main_node_client, main_node_miniblock.number) - .await - { - Ok(tx_map) => tx_map, - Err(e) => { - tracing::error!("Error creating tx map for main node miniblock while checking instance {}: {}", instance_client.url, e); - break; - } - }; - - match checker - .compare_miniblocks( - &instance_client, - &main_node_miniblock_txs, - &main_node_miniblock, - &instance_miniblock, - ) - .await - { - Ok(_) => { - tracing::info!( - "successfully checked miniblock #({}) for instance: {}", - next_block_to_check, - instance_client.url - ); - } - Err(e) => { - tracing::error!( - "Error checking miniblock #({}) for instance {}: {:?}. Skipping this miniblock", - next_block_to_check, - instance_client.url, - e - ); - } - } - next_block_to_check += 1; - - if let Err(e) = checker - .maybe_check_batches(&instance_client, instance_miniblock.l1_batch_number) - .await - { - tracing::error!( - "Error comparing batch {} for instance {}: {:?}", - instance_miniblock.l1_batch_number, - instance_client.url, - e - ); - } - } - Ok(()) - }); - join_handles.push(handle); - } - - // Wait for either all tasks to finish or a stop signal. - tokio::select! { - _ = wait_for_tasks(join_handles, None, None::>, false) => {}, - _ = stop_receiver.changed() => { - tracing::info!("Stop signal received, shutting down"); - }, - } - - Ok(()) - } - - // Iterate through all miniblocks to be checked. For each, run the checkers through every given instance. - async fn run_triggered(&mut self) -> RpcResult<()> { - let start_miniblock = self.start_miniblock.unwrap_or(MiniblockNumber(0)); - let finish_miniblock = match self.finish_miniblock { - Some(finish_miniblock) => finish_miniblock, - None => { - let highest_main_node_miniblock = self.main_node_client.get_block_number().await?; - MiniblockNumber(highest_main_node_miniblock.as_u32()) - } - }; - - for instance_client in self.instance_clients.clone() { - self.run_node_level_checkers(&instance_client).await?; - } - - for miniblock_num_to_check in start_miniblock.0..=finish_miniblock.0 { - let main_node_miniblock = match self - .main_node_client - .get_block_details(MiniblockNumber(miniblock_num_to_check)) - .await - { - Ok(Some(miniblock)) => miniblock, - Ok(None) => panic!("No miniblock found for existing miniblock number {:?}", miniblock_num_to_check), - Err(e) => panic!("Couldn't fetch existing main node miniblock header for miniblock {:?} due to error: {:?}", miniblock_num_to_check, e), - }; - - let main_node_miniblock_txs = self - .create_tx_map(&self.main_node_client, main_node_miniblock.number) - .await?; - - for instance_client in self.instance_clients.clone() { - let instance_miniblock = match instance_client - .client - .get_block_details(MiniblockNumber(miniblock_num_to_check)) - .await? 
- { - Some(miniblock) => miniblock, - None => { - // TODO(BFT-165): Implement Miniblock Existence Checker - tracing::warn!( - "No miniblock found for miniblock #({}) in instance {}. skipping checking it for now.", - miniblock_num_to_check, - instance_client.url - ); - continue; - } - }; - - self.compare_miniblocks( - &instance_client, - &main_node_miniblock_txs, - &main_node_miniblock, - &instance_miniblock, - ) - .await?; - - self.maybe_check_batches(&instance_client, main_node_miniblock.l1_batch_number) - .await?; - - tracing::info!( - "successfully checked miniblock #({}) for instance: {}", - miniblock_num_to_check, - instance_client.url - ); - } - } - - self.log_divergences(); - - Ok(()) - } - - async fn maybe_check_batches( - &mut self, - instance_client: &InstanceHttpClient, - miniblock_batch_number: L1BatchNumber, - ) -> RpcResult<()> { - let instance_batch_to_check = self - .next_batch_to_check - .get(instance_client.url.as_str()) - .expect("All instance URLs must exists in next_batch_to_check"); - tracing::debug!("Maybe checking batch {}", miniblock_batch_number); - - // We should check batches only the first time we encounter them per instance - // (i.e., `next_instance_batch_to_check == miniblock_batch_number`) - match instance_batch_to_check.cmp(&miniblock_batch_number) { - Greater => return Ok(()), // This batch has already been checked. - Less => { - // Either somehow a batch wasn't checked or a non-genesis miniblock was set as the start - // miniblock. In the latter case, update the `next_batch_to_check` map and check the batch. - if self.start_miniblock == Some(MiniblockNumber(0)) { - return Err(ClientError::Custom(format!( - "the next batch number to check (#{}) is less than current miniblock batch number (#{}) for instance {}", - instance_batch_to_check, - miniblock_batch_number, - instance_client.url - ))); - } - *self - .next_batch_to_check - .get_mut(instance_client.url.as_str()) - .unwrap() = miniblock_batch_number; - } - Equal => {} - } - - let main_node_batch = match self - .main_node_client - .get_l1_batch_details(miniblock_batch_number) - .await - { - Ok(Some(batch)) => batch, - Ok(None) => panic!( - "No batch found for existing batch with batch number {}", - miniblock_batch_number - ), - Err(e) => panic!( - "Couldn't fetch existing main node batch for batch number {} due to error: {:?}", - miniblock_batch_number, e - ), - }; - - let instance_batch = match instance_client - .client - .get_l1_batch_details(miniblock_batch_number) - .await? - { - Some(batch) => batch, - None => { - // TODO(BFT-165): Implement batch existence checker. - tracing::warn!( - "No batch found for batch #({}) in instance {}. skipping checking it for now.", - miniblock_batch_number, - instance_client.url - ); - return Ok(()); - } - }; - - self.check_batch_details(main_node_batch, instance_batch, &instance_client.url); - - *self - .next_batch_to_check - .get_mut(instance_client.url.as_str()) - .unwrap() += 1; - - Ok(()) - } - - // Check divergences using all checkers for every given pair of miniblocks. 
- async fn compare_miniblocks( - &mut self, - instance_client: &InstanceHttpClient, - main_node_tx_map: &HashMap, - main_node_miniblock: &BlockDetails, - instance_miniblock: &BlockDetails, - ) -> RpcResult<()> { - self.check_miniblock_details( - &instance_client.url, - main_node_miniblock, - instance_miniblock, - ); - - // Also checks tx receipts and tx details - self.check_transactions(main_node_tx_map, instance_miniblock, instance_client) - .await?; - - self.check_logs(instance_client, main_node_miniblock.number) - .await?; - - Ok(()) - } - - // Run all the checkers that ought to be run once per instance (the non block-dependent checkers.) - async fn run_node_level_checkers( - &mut self, - instance_client: &InstanceHttpClient, - ) -> RpcResult<()> { - self.check_chain_id(instance_client).await?; - self.check_main_contract(instance_client).await?; - self.check_bridge_contracts(instance_client).await?; - self.check_l1_chain_id(instance_client).await?; - Ok(()) - } - - // Add a divergence in Triggered mode; log it in Continuous mode. - fn communicate_divergence(&mut self, url: &str, divergence: Divergence) { - match self.mode { - RpcMode::Triggered => { - // Add a divergence to the list of divergences for the given EN instance. - let divergences = self.divergences.entry(url.to_string()).or_default(); - divergences.push(divergence.clone()); - tracing::error!("{}", divergence); - } - RpcMode::Continuous => { - // Simply log for now. TODO(BFT-177): Add grafana metrics. - tracing::error!("{}", divergence); - } - } - } - - // Create a mapping from the tx hash to a json representation of the tx. - async fn create_tx_map( - &self, - client: &HttpClient, - miniblock_num: MiniblockNumber, - ) -> RpcResult> { - let txs = client - .sync_l2_block(miniblock_num, true) - .await? - .and_then(|block| block.transactions) - .unwrap_or_default(); - - let mut tx_map = HashMap::new(); - for tx in txs { - tx_map.insert( - tx.hash(), - serde_json::to_value(tx).expect("tx serialization fail"), - ); - } - - Ok(tx_map) - } - - fn log_divergences(&mut self) { - if self.divergences.is_empty() { - tracing::info!("No divergences found"); - return; - } - for (url, divergences) in &self.divergences { - tracing::error!("Divergences found for URL: {}", url); - for divergence in divergences { - tracing::error!("{}", divergence); - } - } - } -} - -// Separate impl for the checkers. -impl Checker { - fn check_batch_details( - &mut self, - main_node_batch: L1BatchDetails, - instance_batch: L1BatchDetails, - instance_url: &str, - ) { - tracing::debug!( - "Checking batch details for batch #({})", - main_node_batch.number - ); - let batch_differences = compare_json(&main_node_batch, &instance_batch, "".to_string()); - for (key, (main_node_val, instance_val)) in batch_differences { - self.communicate_divergence( - instance_url, - Divergence::BatchDetails(DivergenceDetails { - en_instance_url: instance_url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: Some(format!("Batch Number: {}", main_node_batch.number)), - miniblock_number: None, - }), - ); - } - } - - // TODO: What if when we checked the miniblock when the status was Sealed but not Verified? 
- fn check_miniblock_details( - &mut self, - instance_url: &str, - main_node_miniblock: &BlockDetails, - instance_miniblock: &BlockDetails, - ) { - tracing::debug!( - "Checking miniblock details for miniblock #({})", - main_node_miniblock.number - ); - let details_differences = - compare_json(main_node_miniblock, instance_miniblock, "".to_string()); - for (key, (main_node_val, instance_val)) in details_differences { - self.communicate_divergence( - instance_url, - Divergence::MiniblockDetails(DivergenceDetails { - en_instance_url: instance_url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: None, - miniblock_number: Some(main_node_miniblock.number), - }), - ); - } - } - - // Looks for txs existing in one node's miniblock and not the other, for - // discrepancies in the content of txs, and runs the individual transaction checkers. - async fn check_transactions( - &mut self, - main_node_tx_map: &HashMap, - instance_miniblock: &BlockDetails, - instance_client: &InstanceHttpClient, - ) -> RpcResult<()> { - let mut instance_tx_map = self - .create_tx_map(&instance_client.client, instance_miniblock.number) - .await?; - - tracing::debug!( - "Checking transactions for miniblock #({}) that has {} transactions", - instance_miniblock.number, - instance_tx_map.len(), - ); - - for (i, (tx_hash, main_node_tx)) in main_node_tx_map.iter().enumerate() { - if let Some(max_num) = self.max_transactions_to_check { - if i >= max_num { - return Ok(()); - } - } - match instance_tx_map.remove(tx_hash) { - Some(instance_tx) => { - if *main_node_tx != instance_tx { - let tx_differences = - compare_json(main_node_tx, &instance_tx, "".to_string()); - for (key, (main_node_val, instance_val)) in tx_differences { - self.communicate_divergence( - &instance_client.url, - Divergence::Transaction(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: Some(format!("Tx Hash: {}", tx_hash)), - miniblock_number: Some(instance_miniblock.number), - }), - ); - } - } else { - self.check_transaction_receipt( - instance_client, - tx_hash, - instance_miniblock.number, - ) - .await?; - - self.check_transaction_details( - instance_client, - tx_hash, - instance_miniblock.number, - ) - .await?; - } - } - None => { - self.communicate_divergence( - &instance_client.url, - Divergence::Transaction(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(tx_hash.to_string()), - en_instance_value: None, - entity_id: Some(format!("Tx Hash: {}", tx_hash)), - miniblock_number: Some(instance_miniblock.number), - }), - ); - tracing::debug!( - "Added divergence for a tx that is in main node but not in instance: {:?}", - tx_hash - ); - } - } - } - - // If there are txs left in the instance tx map, then they don't exist in the main node. 
- for tx_hash in instance_tx_map.keys() { - self.communicate_divergence( - &instance_client.url, - Divergence::Transaction(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: None, - en_instance_value: Some(tx_hash.to_string()), - entity_id: Some(format!("Tx Hash: {}", tx_hash)), - miniblock_number: Some(instance_miniblock.number), - }), - ); - tracing::debug!( - "Added divergence for a tx that is in instance but not in main node: {:?}", - tx_hash - ); - } - - Ok(()) - } - - async fn check_transaction_receipt( - &mut self, - instance_client: &InstanceHttpClient, - tx_hash: &H256, - miniblock_number: MiniblockNumber, - ) -> RpcResult<()> { - tracing::debug!( - "Checking receipts for a tx in miniblock {}", - miniblock_number - ); - - let main_node_receipt = self - .main_node_client - .get_transaction_receipt(*tx_hash) - .await?; - let instance_receipt = instance_client - .client - .get_transaction_receipt(*tx_hash) - .await?; - - let receipt_differences = - compare_json(&main_node_receipt, &instance_receipt, "".to_string()); - for (key, (main_node_val, instance_val)) in receipt_differences { - self.communicate_divergence( - &instance_client.url, - Divergence::TransactionReceipt(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: Some(format!("Tx Hash: {}", tx_hash)), - miniblock_number: Some(miniblock_number), - }), - ); - } - - Ok(()) - } - - async fn check_transaction_details( - &mut self, - instance_client: &InstanceHttpClient, - tx_hash: &H256, - miniblock_number: MiniblockNumber, - ) -> RpcResult<()> { - tracing::debug!( - "Checking transaction details for a tx in miniblock {}", - miniblock_number - ); - - let main_node_tx_details = self - .main_node_client - .get_transaction_details(*tx_hash) - .await?; - let instance_tx_details = instance_client - .client - .get_transaction_details(*tx_hash) - .await?; - - let tx_details_differences = - compare_json(&main_node_tx_details, &instance_tx_details, "".to_string()); - for (key, (main_node_val, instance_val)) in tx_details_differences { - self.communicate_divergence( - &instance_client.url, - Divergence::TransactionDetails(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: Some(format!("Tx Hash: {}", tx_hash)), - miniblock_number: Some(miniblock_number), - }), - ); - } - - Ok(()) - } - - async fn check_logs( - &mut self, - instance_client: &InstanceHttpClient, - current_miniblock_block_num: MiniblockNumber, - ) -> RpcResult<()> { - let from_block = current_miniblock_block_num - .0 - .checked_sub(self.log_check_interval); - let to_block = current_miniblock_block_num.0; - - if from_block < Some(0) || to_block % self.log_check_interval != 0 { - tracing::debug!("Skipping log check for miniblock {}", to_block); - return Ok(()); - } - tracing::debug!( - "Checking logs for miniblocks {}-{}", - from_block.unwrap(), - to_block - 1 - ); - - let filter = FilterBuilder::default() - .set_from_block(BlockNumber::Number(U64::from(from_block.unwrap()))) - .set_to_block(BlockNumber::Number(U64::from(&to_block - 1))) - .build(); - - let main_node_logs = match self.main_node_client.get_logs(filter.clone()).await { - Ok(logs) => logs, - Err(e) => { - // TODO(BFT-192): Be more specific with 
checking logs - tracing::error!("Failed to get logs from main node: {}", e); - return Ok(()); - } - }; - let instance_logs = match instance_client.client.get_logs(filter).await { - Ok(logs) => logs, - Err(e) => { - // TODO(BFT-192): Be more specific with checking logs - tracing::error!("Failed to get logs from instance: {}", e); - return Ok(()); - } - }; - - for (main_node_log, instance_log) in main_node_logs.iter().zip(instance_logs.iter()) { - let log_differences = compare_json(&main_node_log, &instance_log, "".to_string()); - for (key, (main_node_val, instance_val)) in log_differences { - self.communicate_divergence( - &instance_client.url, - Divergence::Log(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: None, - miniblock_number: Some(MiniblockNumber( - main_node_log.block_number.unwrap().as_u32(), - )), - }), - ); - } - } - - Ok(()) - } - - async fn check_main_contract(&mut self, instance_client: &InstanceHttpClient) -> RpcResult<()> { - let main_node_main_contract = self.main_node_client.get_main_contract().await?; - let instance_main_contract = instance_client.client.get_main_contract().await?; - - let contract_differences = compare_json( - &main_node_main_contract, - &instance_main_contract, - "".to_string(), - ); - for (key, (main_node_val, instance_val)) in contract_differences { - self.communicate_divergence( - &instance_client.url, - Divergence::MainContracts(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(format!("{} {:?}", key, main_node_val)), - en_instance_value: Some(format!("{} {:?}", key, instance_val)), - entity_id: None, - miniblock_number: None, - }), - ); - } - - Ok(()) - } - - async fn check_chain_id(&mut self, instance_client: &InstanceHttpClient) -> RpcResult<()> { - let main_node_chain_id = self.main_node_client.chain_id().await?; - let instance_chain_id = instance_client.client.chain_id().await?; - - if main_node_chain_id != instance_chain_id { - self.communicate_divergence( - &instance_client.url, - Divergence::ChainID(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(main_node_chain_id), - en_instance_value: Some(instance_chain_id), - entity_id: None, - miniblock_number: None, - }), - ); - } - - Ok(()) - } - - async fn check_l1_chain_id(&mut self, instance_client: &InstanceHttpClient) -> RpcResult<()> { - let main_node_chain_id = self.main_node_client.l1_chain_id().await?; - let instance_chain_id = instance_client.client.l1_chain_id().await?; - - if main_node_chain_id != instance_chain_id { - self.communicate_divergence( - &instance_client.url, - Divergence::L1ChainID(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(main_node_chain_id), - en_instance_value: Some(instance_chain_id), - entity_id: None, - miniblock_number: None, - }), - ); - } - - Ok(()) - } - - async fn check_bridge_contracts( - &mut self, - instance_client: &InstanceHttpClient, - ) -> RpcResult<()> { - let main_node_bridge_contracts = self.main_node_client.get_bridge_contracts().await?; - let instance_bridge_contracts = instance_client.client.get_bridge_contracts().await?; - - let receipt_differences = compare_json( - &main_node_bridge_contracts, - &instance_bridge_contracts, - "".to_string(), - ); - for (key, (main_node_val, instance_val)) in receipt_differences { - 
self.communicate_divergence( - &instance_client.url, - Divergence::BridgeContracts(DivergenceDetails { - en_instance_url: instance_client.url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: None, - miniblock_number: None, - }), - ); - } - - Ok(()) - } -} diff --git a/core/tests/cross_external_nodes_checker/src/config.rs b/core/tests/cross_external_nodes_checker/src/config.rs deleted file mode 100644 index 636a4fd9ae5..00000000000 --- a/core/tests/cross_external_nodes_checker/src/config.rs +++ /dev/null @@ -1,171 +0,0 @@ -use envy::prefixed; -use serde::Deserialize; - -#[derive(Debug, Deserialize, PartialEq)] -pub struct CheckerConfig { - #[serde(default = "default_mode")] - pub mode: Mode, - - #[serde(default = "default_rpc_mode")] - pub rpc_mode: Option, - - #[serde(default = "default_start_miniblock")] - pub start_miniblock: Option, - - #[serde(default = "default_finish_miniblock")] - pub finish_miniblock: Option, - - #[serde(default = "default_main_node_http_url")] - pub main_node_http_url: Option, - - #[serde(default = "default_instances_http_urls")] - pub instances_http_urls: Option>, - - #[serde(default = "default_main_node_ws_url")] - pub main_node_ws_url: Option, - - #[serde(default = "default_instances_ws_urls")] - pub instances_ws_urls: Option>, - - #[serde(default = "default_max_transactions_to_check")] - pub max_transactions_to_check: Option, - - #[serde(default = "default_instance_poll_period")] - pub instance_poll_period: Option, - - #[serde(default = "default_subscription_duration")] - pub subscription_duration: Option, -} - -#[derive(Copy, Clone, Debug, Deserialize, PartialEq)] -pub enum Mode { - Rpc, - PubSub, - All, -} - -impl Mode { - pub fn run_rpc(&self) -> bool { - matches!(self, Mode::Rpc | Mode::All) - } - - pub fn run_pubsub(&self) -> bool { - matches!(self, Mode::PubSub | Mode::All) - } -} - -#[derive(Copy, Clone, Debug, Deserialize, PartialEq)] -pub enum RpcMode { - Triggered, - Continuous, -} - -impl CheckerConfig { - pub fn from_env() -> Self { - prefixed("CHECKER_") - .from_env() - .unwrap_or_else(|err| panic!("Failed to load the checker config with error: {}", err)) - } -} - -// Default functions for each of the fields - -fn default_mode() -> Mode { - Mode::All -} - -fn default_rpc_mode() -> Option { - Some(RpcMode::Triggered) -} - -fn default_start_miniblock() -> Option { - None -} - -fn default_finish_miniblock() -> Option { - None -} - -fn default_main_node_http_url() -> Option { - Some("http://127.0.0.1:3050".to_string()) -} - -fn default_instances_http_urls() -> Option> { - Some(vec!["http://127.0.0.1:3060".to_string()]) -} - -fn default_main_node_ws_url() -> Option { - Some("ws://127.0.0.1:3051".to_string()) -} - -fn default_instances_ws_urls() -> Option> { - Some(vec!["ws://127.0.0.1:3061".to_string()]) -} - -fn default_max_transactions_to_check() -> Option { - Some(3) -} - -fn default_instance_poll_period() -> Option { - Some(10) -} - -fn default_subscription_duration() -> Option { - None -} - -#[cfg(test)] -mod tests { - use std::env; - - use super::*; - - #[test] - fn success() { - let config = r#" - CHECKER_MODE="Rpc" - CHECKER_RPC_MODE="Continuous" - CHECKER_START_MINIBLOCK="2" - CHECKER_FINISH_MINIBLOCK="4" - CHECKER_MAIN_NODE_HTTP_URL="http://127.0.0.1:1020" - CHECKER_INSTANCES_HTTP_URLS="http://127.0.0.1:1030,http://127.0.0.1:1020" - CHECKER_INSTANCE_POLL_PERIOD="60" - CHECKER_MAX_TRANSACTIONS_TO_CHECK="10" - 
CHECKER_SUBSCRIPTION_DURATION="120" - "#; - - set_env(config); - - let actual = CheckerConfig::from_env(); - let want = CheckerConfig { - mode: Mode::Rpc, - rpc_mode: Some(RpcMode::Continuous), - start_miniblock: Some(2), - finish_miniblock: Some(4), - main_node_http_url: Some("http://127.0.0.1:1020".into()), - instances_http_urls: Some(vec![ - "http://127.0.0.1:1030".into(), - "http://127.0.0.1:1020".into(), - ]), - main_node_ws_url: Some("ws://127.0.0.1:3051".into()), - instances_ws_urls: Some(vec!["ws://127.0.0.1:3061".into()]), - instance_poll_period: Some(60), - max_transactions_to_check: Some(10), - subscription_duration: Some(120), - }; - assert_eq!(actual, want); - } - - pub fn set_env(fixture: &str) { - for line in fixture.split('\n').map(str::trim) { - if line.is_empty() { - continue; - } - let elements: Vec<_> = line.split('=').collect(); - let variable_name = elements[0]; - let variable_value = elements[1].trim_matches('"'); - - env::set_var(variable_name, variable_value); - } - } -} diff --git a/core/tests/cross_external_nodes_checker/src/divergence.rs b/core/tests/cross_external_nodes_checker/src/divergence.rs deleted file mode 100644 index 18c910349f7..00000000000 --- a/core/tests/cross_external_nodes_checker/src/divergence.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::fmt; - -use zksync_types::{web3::types::U64, MiniblockNumber}; - -#[derive(Debug, Clone)] -pub(crate) enum Divergence { - BatchDetails(DivergenceDetails>), - MiniblockDetails(DivergenceDetails>), - Transaction(DivergenceDetails>), - TransactionReceipt(DivergenceDetails>), - TransactionDetails(DivergenceDetails>), - Log(DivergenceDetails>), - MainContracts(DivergenceDetails>), - BridgeContracts(DivergenceDetails>), - ChainID(DivergenceDetails>), - L1ChainID(DivergenceDetails>), - PubSubHeader(DivergenceDetails>), -} - -impl fmt::Display for Divergence { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Divergence::BatchDetails(details) => { - write!(f, "Batch Details divergence found: {}", details) - } - Divergence::MiniblockDetails(details) => { - write!(f, "Miniblock Details divergence found: {}", details) - } - Divergence::Transaction(details) => { - write!(f, "Transaction divergence found: {}", details) - } - Divergence::TransactionReceipt(details) => { - write!(f, "TransactionReceipt divergence found: {}", details) - } - Divergence::TransactionDetails(details) => { - write!(f, "TransactionDetails divergence found: {}", details) - } - Divergence::Log(details) => write!(f, "Log divergence found: {}", details), - Divergence::MainContracts(details) => { - write!(f, "MainContracts divergence found: {}", details) - } - Divergence::BridgeContracts(details) => { - write!(f, "BridgeContracts divergence found: {}", details) - } - Divergence::ChainID(details) => write!(f, "ChainID divergence found: {}", details), - Divergence::L1ChainID(details) => { - write!(f, "L1ChainID divergence found: {}", details) - } - Divergence::PubSubHeader(details) => { - write!(f, "PubSubHeader divergence found: {}", details) - } - } - } -} - -#[derive(Debug, Clone)] -pub(crate) struct DivergenceDetails { - pub(crate) en_instance_url: String, - pub(crate) main_node_value: T, - pub(crate) en_instance_value: T, - pub(crate) entity_id: Option, - pub(crate) miniblock_number: Option, -} - -impl fmt::Display for DivergenceDetails> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let main_node_value = match &self.main_node_value { - Some(value) => format!("{}", value), - None => String::from("None"), - }; - let 
en_instance_value = match &self.en_instance_value { - Some(value) => format!("{}", value), - None => String::from("None"), - }; - let entity_info = match self.entity_id { - Some(ref entity_id) => format!(", Entity ID: {}", entity_id), - None => String::from(""), - }; - let miniblock_number = match self.miniblock_number { - Some(ref number) => format!(", Miniblock number: {}", number), - None => String::from(""), - }; - write!( - f, - "Main node value: {}, EN instance value: {}{} in EN instance: {}{}", - main_node_value, en_instance_value, miniblock_number, self.en_instance_url, entity_info - ) - } -} diff --git a/core/tests/cross_external_nodes_checker/src/helpers.rs b/core/tests/cross_external_nodes_checker/src/helpers.rs deleted file mode 100644 index 6247b5e8c8a..00000000000 --- a/core/tests/cross_external_nodes_checker/src/helpers.rs +++ /dev/null @@ -1,326 +0,0 @@ -use std::{collections::HashMap, future::Future, time::Duration}; - -use futures::channel::oneshot; -use serde_json::{Map, Value}; -use tokio::time::sleep; - -/// Sets up an interrupt handler and returns a future that resolves once an interrupt signal is received. -pub fn setup_sigint_handler() -> oneshot::Receiver<()> { - let (sigint_sender, sigint_receiver) = oneshot::channel(); - let mut sigint_sender = Some(sigint_sender); - ctrlc::set_handler(move || { - if let Some(sigint_sender) = sigint_sender.take() { - sigint_sender.send(()).ok(); - // ^ The send fails if `sigint_receiver` is dropped. We're OK with this, - // since at this point the node should be stopping anyway, or is not interested - // in listening to interrupt signals. - } - }) - .expect("Error setting Ctrl+C handler"); - - sigint_receiver -} - -pub fn compare_json( - a: &T, - b: &T, - path: String, -) -> HashMap, Option)> { - let a = serde_json::to_value(a).expect("serialization failure"); - let b = serde_json::to_value(b).expect("serialization failure"); - - if a == b { - return HashMap::new(); - } - - match (a, b) { - (Value::Object(ref a), Value::Object(ref b)) => compare_json_object(a, b, path), - (Value::Array(ref a), Value::Array(ref b)) => compare_json_array(a, b, path), - (a, b) => { - let mut res = HashMap::new(); - let a_val = if a.is_null() { None } else { Some(a) }; - let b_val = if b.is_null() { None } else { Some(b) }; - res.insert(path, (a_val, b_val)); - res - } - } -} - -fn compare_json_object( - a: &Map, - b: &Map, - path: String, -) -> HashMap, Option)> { - let mut differences = HashMap::new(); - - for (k, v) in a.iter() { - let new_path = if path.is_empty() { - k.clone() - } else { - format!("{}.{}", path, k) - }; - - differences.extend(compare_json(v, b.get(k).unwrap_or(&Value::Null), new_path)); - } - - for (k, v) in b.iter() { - if !a.contains_key(k) { - let new_path = if path.is_empty() { - k.clone() - } else { - format!("{}.{}", path, k) - }; - differences.insert(new_path, (None, Some(v.clone()))); - } - } - - differences -} - -fn compare_json_array( - a: &Vec, - b: &Vec, - path: String, -) -> HashMap, Option)> { - let mut differences = HashMap::new(); - - let len = a.len().max(b.len()); - for i in 0..len { - let new_path = format!("{}[{}]", path, i); - differences.extend(compare_json( - a.get(i).unwrap_or(&Value::Null), - b.get(i).unwrap_or(&Value::Null), - new_path, - )); - } - - differences -} - -#[derive(Debug, Clone)] -pub struct ExponentialBackoff { - pub max_retries: u32, - pub base_delay: Duration, - pub retry_message: String, -} - -impl ExponentialBackoff { - // Keep retrying until the operation returns Some or we reach the 
max number of retries. - pub async fn retry(&self, mut operation: F) -> Option - where - F: FnMut() -> Fut, - Fut: Future>, - { - for retry in 1..=self.max_retries { - if let Some(result) = operation().await { - return Some(result); - } - if retry == self.max_retries { - break; - } - let delay = self.base_delay * retry; - tracing::warn!( - "{} Retrying in {} seconds", - self.retry_message, - delay.as_secs() - ); - sleep(delay).await; - } - None - } -} - -#[cfg(test)] -mod tests { - use serde_json::json; - - use super::*; - - #[test] - fn test_same_json() { - let json1 = json!({ - "key1": "value1", - "key2": 2, - "key3": [ - "value2", - "+value3" - ] - }); - - let differences = compare_json(&json1, &json1, "".to_string()); - assert_eq!(differences.len(), 0); - } - - #[test] - fn test_deeply_nested_objects() { - let a = json!({ - "key1": { - "subkey1": { - "subsubkey1": "value1", - "subsubkey2": "value2" - }, - "subkey2": "value3" - }, - "key2": "value4" - }); - - let b = json!({ - "key1": { - "subkey1": { - "subsubkey1": "value1", - "subsubkey2": "value5" - }, - "subkey2": "value6" - }, - "key2": "value4" - }); - - let differences = compare_json(&a, &b, "".to_string()); - - assert_eq!(differences.len(), 2); - assert_eq!( - differences.get("key1.subkey1.subsubkey2"), - Some(&(Some(json!("value2")), Some(json!("value5")))) - ); - assert_eq!( - differences.get("key1.subkey2"), - Some(&(Some(json!("value3")), Some(json!("value6")))) - ); - } - - #[test] - fn test_diff_different_keys() { - let a = json!({ - "key1": "value1", - "key2": "value2" - }); - - let b = json!({ - "key1": "value1", - "key3": "value3" - }); - - let differences = compare_json(&a, &b, "".to_string()); - - assert_eq!(differences.len(), 2); - assert_eq!( - differences.get("key2"), - Some(&(Some(json!("value2")), None)) - ); - assert_eq!( - differences.get("key3"), - Some(&(None, Some(json!("value3")))) - ); - } - - #[test] - fn test_diff_different_types() { - let a = json!({ - "key1": true, - "key2": 123, - "key3": "value1" - }); - - let b = json!({ - "key1": false, - "key2": "123", - "key3": "value2" - }); - - let differences = compare_json(&a, &b, "".to_string()); - - assert_eq!(differences.len(), 3); - assert_eq!( - differences.get("key1"), - Some(&(Some(json!(true)), Some(json!(false)))) - ); - assert_eq!( - differences.get("key2"), - Some(&(Some(json!(123)), Some(json!("123")))) - ); - assert_eq!( - differences.get("key3"), - Some(&(Some(json!("value1")), Some(json!("value2")))) - ); - } - - #[test] - fn test_empty_jsons() { - let json1 = json!({}); - let json2 = json!([]); - - let differences = compare_json(&json1, &json1, "".to_string()); - assert_eq!(differences.len(), 0); - - let differences = compare_json(&json2, &json2, "".to_string()); - assert_eq!(differences.len(), 0); - - let differences = compare_json(&json1, &json2, "".to_string()); - assert_eq!(differences.len(), 1); - } - - #[test] - fn test_one_empty_json() { - let json1 = json!({}); - let json2 = json!({ - "key1": "value1", - "key2": 2, - }); - - let differences = compare_json(&json1, &json2, "".to_string()); - assert_eq!(differences.len(), 2); - - let differences = compare_json(&json2, &json1, "".to_string()); - assert_eq!(differences.len(), 2); - } - - #[test] - fn test_json_with_null() { - let a = json!({ - "key1": null, - "key2": "value2" - }); - - let b = json!({ - "key1": "value1", - "key2": null - }); - - let differences = compare_json(&a, &b, "".to_string()); - - assert_eq!(differences.len(), 2); - assert_eq!( - differences.get("key1"), - 
Some(&(None, Some(json!("value1")))) - ); - assert_eq!( - differences.get("key2"), - Some(&(Some(json!("value2")), None)) - ); - } - - #[test] - fn test_arrays_different_lengths() { - let a = json!([1, 2, 3]); - let b = json!([1, 2, 3, 4]); - - let differences = compare_json(&a, &b, "".to_string()); - - assert_eq!(differences.len(), 1); - assert_eq!(differences.get("[3]"), Some(&(None, Some(json!(4))))); - } - - #[test] - fn test_arrays_with_nested_objects() { - let a = json!([{"key1": "value1"}, {"key2": "value2"}]); - let b = json!([{"key1": "value1"}, {"key2": "value3"}]); - - let differences = compare_json(&a, &b, "".to_string()); - - assert_eq!(differences.len(), 1); - assert_eq!( - differences.get("[1].key2"), - Some(&(Some(json!("value2")), Some(json!("value3")))) - ); - } -} diff --git a/core/tests/cross_external_nodes_checker/src/main.rs b/core/tests/cross_external_nodes_checker/src/main.rs deleted file mode 100644 index 7199c1cbd32..00000000000 --- a/core/tests/cross_external_nodes_checker/src/main.rs +++ /dev/null @@ -1,65 +0,0 @@ -use tokio::sync::watch; -use zksync_utils::wait_for_tasks::wait_for_tasks; - -use self::{checker::Checker, pubsub_checker::PubSubChecker}; -use crate::{config::CheckerConfig, helpers::setup_sigint_handler}; - -mod checker; -mod config; -mod divergence; -mod helpers; -mod pubsub_checker; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. - let log_format = vlog::log_format_from_env(); - #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. - let sentry_url = vlog::sentry_url_from_env(); - #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. - let environment = vlog::environment_from_env(); - - let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); - if let Some(sentry_url) = sentry_url { - builder = builder - .with_sentry_url(&sentry_url) - .expect("Invalid Sentry URL") - .with_sentry_environment(environment); - } - let _guard = builder.build(); - - tracing::info!("Started the Cross Node Checker"); - - let config = CheckerConfig::from_env(); - tracing::info!("Loaded the checker config: {:?}", config); - - let mut join_handles = Vec::new(); - let sigint_receiver = setup_sigint_handler(); - let (stop_sender, stop_receiver) = watch::channel::(false); - - if config.mode.run_rpc() { - let cross_node_checker = Checker::new(&config); - let checker_stop_receiver = stop_receiver.clone(); - let checker_handle = - tokio::spawn(async move { cross_node_checker.run(checker_stop_receiver).await }); - join_handles.push(checker_handle); - } - - if config.mode.run_pubsub() { - let pubsub_checker = PubSubChecker::new(config).await; - let pubsub_stop_receiver = stop_receiver.clone(); - let pubsub_handle = - tokio::spawn(async move { pubsub_checker.run(pubsub_stop_receiver).await }); - join_handles.push(pubsub_handle); - } - - tokio::select! 
{ - _ = wait_for_tasks(join_handles, None, None::>, false) => {}, - _ = sigint_receiver => { - let _ = stop_sender.send(true); - tracing::info!("Stop signal received, shutting down the cross EN Checker"); - }, - } - - Ok(()) -} diff --git a/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs b/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs deleted file mode 100644 index 3e000e83d8f..00000000000 --- a/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs +++ /dev/null @@ -1,311 +0,0 @@ -use std::{ - collections::HashMap, - sync::Arc, - time::{Duration, Instant}, -}; - -use anyhow::Context as _; -use tokio::{ - select, spawn, - sync::{watch::Receiver, Mutex as TokioMutex}, - time::timeout, -}; -use zksync_types::{web3::types::U64, MiniblockNumber}; -use zksync_utils::wait_for_tasks::wait_for_tasks; -use zksync_web3_decl::{ - jsonrpsee::{ - core::{ - client::{Subscription, SubscriptionClientT}, - ClientError, - }, - rpc_params, - ws_client::{WsClient, WsClientBuilder}, - }, - types::{BlockHeader, PubSubResult}, -}; - -use crate::{ - config::CheckerConfig, - divergence::{Divergence, DivergenceDetails}, - helpers::{compare_json, ExponentialBackoff}, -}; - -const MAX_RETRIES: u32 = 6; -const GRACE_PERIOD: Duration = Duration::from_secs(60); -const SUBSCRIPTION_TIMEOUT: Duration = Duration::from_secs(120); - -#[derive(Debug, Clone)] -pub struct PubSubChecker { - main_node_url: String, - instance_urls: Vec, - /// Time in seconds for a subscription to be active. If `None`, the subscription will run forever. - subscription_duration: Option, - /// Mapping of block numbers to the block header and the number of instances that still need to - /// check the corresponding header. This Hashmap is shared between all threads. - /// The number of instances is used to determine when to remove the block from the hashmap. - pub blocks: Arc>>, -} - -impl PubSubChecker { - pub async fn new(config: CheckerConfig) -> Self { - let duration = config.subscription_duration.map(Duration::from_secs); - Self { - main_node_url: config - .main_node_ws_url - .expect("WS URL for the main node has to be provided for PubSub mode."), - instance_urls: config - .instances_ws_urls - .expect("WS URLs for the EN instances have to be provided for PubSub mode."), - subscription_duration: duration, - blocks: Arc::new(TokioMutex::new(HashMap::new())), - } - } - - pub async fn run(&self, mut stop_receiver: Receiver) -> anyhow::Result<()> { - tracing::info!("Started pubsub checker"); - - let mut join_handles = Vec::new(); - - let this = self.clone(); - let main_stop_receiver = stop_receiver.clone(); - let handle = spawn(async move { - tracing::info!("Started a task to subscribe to the main node"); - if let Err(e) = this.subscribe_main(main_stop_receiver).await { - tracing::error!("Error in main node subscription task: {}", e); - } - Ok(()) - }); - join_handles.push(handle); - - let instance_urls = self.instance_urls.clone(); - for instance_url in &instance_urls { - let this = self.clone(); - let instance_stop_receiver = stop_receiver.clone(); - let url = instance_url.clone(); - let handle = spawn(async move { - tracing::info!("Started a task to subscribe to instance {}", url); - this.subscribe_instance(&url, instance_stop_receiver) - .await - .with_context(|| format!("Error in instance {} subscription task", url)) - }); - join_handles.push(handle); - } - - select! 
{ - _ = wait_for_tasks(join_handles, None, None::>, false) => {}, - _ = stop_receiver.changed() => { - tracing::info!("Stop signal received, shutting down pubsub checker"); - }, - } - Ok(()) - } - - // Setup a client for the main node, subscribe, and insert incoming pubsub results into the shared hashmap. - async fn subscribe_main(&self, stop_receiver: Receiver) -> anyhow::Result<()> { - let client = self.setup_client(&self.main_node_url).await; - let params = rpc_params!["newHeads"]; - - let mut subscription: Subscription = client - .subscribe("eth_subscribe", params, "eth_unsubscribe") - .await?; - - let start = Instant::now(); - loop { - if self.check_if_loop_should_break(&stop_receiver, &start, &self.main_node_url) { - break; - } - - let no_res_timeout_duration = self.get_timeout_duration(&start); - let stream_res = timeout(no_res_timeout_duration, subscription.next()) - .await - .map_err(|_| - anyhow::anyhow!( - "OperationTimeout: Haven't gotten an item for over {} seconds subscribing to the main node", - no_res_timeout_duration.as_secs() - ) - )?; - let pubsub_res = stream_res.ok_or_else(|| anyhow::anyhow!("Stream has ended"))?; - - let (block_header, block_number) = self.extract_block_info(pubsub_res).await?; - - // Secure the lock for the map and insert the new header. - let mut blocks = self.blocks.lock().await; - blocks.insert(block_number, (block_header, self.instance_urls.len())); - tracing::debug!("Inserted block {} to main node map", block_number); - } - - Ok(()) - } - - // Setup a client for the instance node, subscribe, and compare incoming pubsub results to the main node's. - async fn subscribe_instance( - &self, - url: &str, - stop_receiver: Receiver, - ) -> anyhow::Result<()> { - let client = self.setup_client(url).await; - let params = rpc_params!["newHeads"]; - - let mut subscription: Subscription = client - .subscribe("eth_subscribe", params, "eth_unsubscribe") - .await?; - - let start = Instant::now(); - loop { - if self.check_if_loop_should_break(&stop_receiver, &start, url) { - break; - } - - let no_res_timeout_duration = self.get_timeout_duration(&start); - let stream_res = timeout(no_res_timeout_duration, subscription.next()) - .await - .map_err(|_| - anyhow::anyhow!( - "OperationTimeout: Haven't gotten an item for over {} seconds subscribing to instance {}", - no_res_timeout_duration.as_secs(), url - ) - )?; - let pubsub_res = stream_res.ok_or_else(|| anyhow::anyhow!("Stream has ended"))?; - let (instance_block_header, block_number) = self.extract_block_info(pubsub_res).await?; - tracing::debug!("Got block {} from instance {}", block_number, url); - - // Get the main node block header from the map and update its count. - // This should be retried because the map not having the block the instance does might - // just mean the main node subscriber is lagging. - let backoff = ExponentialBackoff { - max_retries: MAX_RETRIES, - base_delay: Duration::from_secs(1), - retry_message: format!( - "block {} is still not present in main node map for instance {}.", - block_number, url, - ), - }; - let main_node_value = backoff - // Wait for the block to appear in the main node map. 
- .retry(|| { - async move { - let mut blocks = self.blocks.lock().await; - let main_node_value = blocks.get(&block_number).cloned(); - match main_node_value { - Some((header, count)) => { - if count > 1 { - blocks.insert(block_number, (header.clone(), count - 1)); - } else { - blocks.remove(&block_number); - } - tracing::debug!("Updated blocks map: {:?}", blocks.keys()); - Some((header, count)) - } - None => None, // Retry - } - } - }) - .await; - - // If main node map contained the header, compare main & instance headers. - match main_node_value { - Some((main_node_header, _)) => { - self.check_headers(&main_node_header, &instance_block_header, url); - } - None => { - // If the main subscriber starts ahead of an instance subscriber, the map may - // start with block X while instance is looking for block X-1, which will never - // be in the map. We don't want to log an error for this case. - if start.elapsed() > GRACE_PERIOD { - tracing::error!( - "block {} has not been found in the main node map for instance {} after {} retries", - block_number, - url, - MAX_RETRIES - ); - } - } - }; - } - - Ok(()) - } - - fn get_timeout_duration(&self, start: &Instant) -> Duration { - match self.subscription_duration { - Some(duration) => std::cmp::min( - duration.checked_sub(start.elapsed()), - Some(SUBSCRIPTION_TIMEOUT), - ) - .unwrap(), - None => SUBSCRIPTION_TIMEOUT, - } - } - - fn check_if_loop_should_break( - &self, - stop_receiver: &Receiver, - start: &Instant, - url: &str, - ) -> bool { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, shutting down pubsub checker"); - return true; - } - if let Some(duration) = self.subscription_duration { - if start.elapsed() > duration { - tracing::info!("Client {} reached its subscription duration", url); - return true; - } - } - false - } - - async fn setup_client(&self, url: &str) -> WsClient { - WsClientBuilder::default() - .build(url) - .await - .expect("Failed to create a WS client") - } - - // Extract the block header and block number from the pubsub result that is expected to be a header. - async fn extract_block_info( - &self, - pubsub_res: Result, - ) -> Result<(BlockHeader, U64), anyhow::Error> { - let PubSubResult::Header(header) = pubsub_res? 
else { - return Err(anyhow::anyhow!("Received non-header pubsub result")); - }; - - let Some(block_number) = header.number else { - return Err(anyhow::anyhow!("Received header without block number.")); - }; - - Ok((header, block_number)) - } - - fn check_headers( - &self, - main_node_header: &BlockHeader, - instance_header: &BlockHeader, - instance_url: &str, - ) { - let header_differences = compare_json(&main_node_header, &instance_header, "".to_string()); - if header_differences.is_empty() { - tracing::info!( - "No divergences found in header for block {} for instance {}", - instance_header.number.unwrap().as_u64(), - instance_url - ); - } - for (key, (main_node_val, instance_val)) in header_differences { - tracing::error!( - "{}", - Divergence::PubSubHeader(DivergenceDetails { - en_instance_url: instance_url.to_string(), - main_node_value: Some(format!("{}: {:?}", key, main_node_val)), - en_instance_value: Some(format!("{}: {:?}", key, instance_val)), - entity_id: None, - miniblock_number: Some(MiniblockNumber( - main_node_header.number.unwrap().as_u32() - )), - }), - ); - } - } -} diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index bbbd5136859..9911a5b06b5 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -60,6 +60,12 @@ describe('Block reverting test', function () { let blocksCommittedBeforeRevert: number; let logs: fs.WriteStream; + let enable_consensus = process.env.ENABLE_CONSENSUS == 'true'; + let components = 'api,tree,eth,state_keeper'; + if (enable_consensus) { + components += ',consensus'; + } + before('create test wallet', async () => { tester = await Tester.init(process.env.CHAIN_ETH_NETWORK || 'localhost'); alice = tester.emptyWallet(); @@ -76,7 +82,7 @@ describe('Block reverting test', function () { process.env.DATABASE_MERKLE_TREE_MODE = 'full'; // Run server in background. - const components = 'api,tree,eth,state_keeper'; + utils.background(`zk server --components ${components}`, [null, logs, logs]); // Server may need some time to recompile if it's a cold run, so wait for it. let iter = 0; @@ -172,7 +178,7 @@ describe('Block reverting test', function () { process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; // Run server. - utils.background('zk server --components api,tree,eth,state_keeper', [null, logs, logs]); + utils.background(`zk server --components ${components}`, [null, logs, logs]); await utils.sleep(10); const balanceBefore = await alice.getBalance(); @@ -209,7 +215,7 @@ describe('Block reverting test', function () { await killServerAndWaitForShutdown(tester); // Run again. 
- utils.background(`zk server --components=api,tree,eth,state_keeper`, [null, logs, logs]); + utils.background(`zk server --components=${components}`, [null, logs, logs]); await utils.sleep(10); // Trying to send a transaction from the same address again diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index 1f376e815d7..a0e5fc9ced4 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -137,6 +137,9 @@ type L1Token = { function getTokens(network: string): L1Token[] { const configPath = `${process.env.ZKSYNC_HOME}/etc/tokens/${network}.json`; + if (!fs.existsSync(configPath)) { + return []; + } return JSON.parse( fs.readFileSync(configPath, { encoding: 'utf-8' diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index f8d0ea284df..70ea34816e2 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -39,12 +39,23 @@ describe('web3 API compatibility tests', () => { const blockWithTxsByNumber = await alice.provider.getBlockWithTransactions(blockNumber); expect(blockWithTxsByNumber.gasLimit).bnToBeGt(0); let sumTxGasUsed = ethers.BigNumber.from(0); + for (const tx of blockWithTxsByNumber.transactions) { const receipt = await alice.provider.getTransactionReceipt(tx.hash); sumTxGasUsed = sumTxGasUsed.add(receipt.gasUsed); } expect(blockWithTxsByNumber.gasUsed).bnToBeGte(sumTxGasUsed); + let expectedReceipts = []; + + for (const tx of blockWithTxsByNumber.transactions) { + const receipt = await alice.provider.send('eth_getTransactionReceipt', [tx.hash]); + expectedReceipts.push(receipt); + } + + let receipts = await alice.provider.send('eth_getBlockReceipts', [blockNumberHex]); + expect(receipts).toEqual(expectedReceipts); + // eth_getBlockByHash await alice.provider.getBlock(blockHash); const blockWithTxsByHash = await alice.provider.getBlockWithTransactions(blockHash); diff --git a/docker/build-base/Dockerfile b/docker/build-base/Dockerfile new file mode 100644 index 00000000000..1fec4cca7e0 --- /dev/null +++ b/docker/build-base/Dockerfile @@ -0,0 +1,15 @@ +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ + pkg-config build-essential libclang-dev linux-libc-dev liburing-dev && \ + rm -rf /var/lib/apt/lists/* + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 + +RUN cargo install sqlx-cli --version 0.7.3 diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 475a835e44c..6941de6321f 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,17 +1,5 @@ # syntax=docker/dockerfile:experimental -FROM debian:bookworm-slim as builder - -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 +FROM matterlabs/zksync-build-base:latest as builder WORKDIR /usr/src/zksync COPY . . 
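The deleted `helpers.rs` above centers on a small retry utility whose generic bounds were lost in this listing (`retry(&self, mut operation: F) -> Option`). From the body the pattern is clear: poll a fallible async operation up to `max_retries` times, sleeping `base_delay * retry` between attempts, a linear rather than exponential schedule despite the `ExponentialBackoff` name. A minimal self-contained sketch of that pattern, assuming `tokio`, with the struct name and the demo in `main` chosen purely for illustration:

```rust
use std::{future::Future, time::Duration};
use tokio::time::sleep;

/// Sketch of the removed checker's retry helper; names are illustrative.
struct Backoff {
    max_retries: u32,
    base_delay: Duration,
}

impl Backoff {
    /// Polls `operation` until it yields `Some`, up to `max_retries` attempts.
    /// The delay grows linearly with the attempt number, mirroring the
    /// deleted `base_delay * retry` logic.
    async fn retry<F, Fut, R>(&self, mut operation: F) -> Option<R>
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = Option<R>>,
    {
        for attempt in 1..=self.max_retries {
            if let Some(result) = operation().await {
                return Some(result);
            }
            if attempt == self.max_retries {
                break;
            }
            sleep(self.base_delay * attempt).await;
        }
        None
    }
}

#[tokio::main]
async fn main() {
    let backoff = Backoff {
        max_retries: 3,
        base_delay: Duration::from_millis(10),
    };
    let mut calls = 0;
    // Succeeds on the second attempt.
    let result = backoff
        .retry(|| {
            calls += 1;
            let done = calls >= 2;
            async move { done.then_some("ready") }
        })
        .await;
    assert_eq!(result, Some("ready"));
}
```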
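The unit tests at the head of this hunk pin down the contract of the deleted `compare_json` helper: it recursively diffs two `serde_json::Value`s and returns a map from a dotted (objects) or bracketed (arrays) path such as `key1.subkey2` or `[3]` to the pair of diverging values, with `None` standing in for an absent key and, per `test_json_with_null`, for a JSON `null`. The body of `compare_json` falls outside this excerpt; a sketch consistent with those tests, assuming only `serde_json`:

```rust
use std::collections::{BTreeSet, HashMap};

use serde_json::{json, Value};

type Diffs = HashMap<String, (Option<Value>, Option<Value>)>;

/// Treats JSON `null` like an absent value, matching `test_json_with_null`.
fn non_null(value: &Value) -> Option<Value> {
    (!value.is_null()).then(|| value.clone())
}

/// Recursively compares `a` and `b`, recording diverging leaves under a
/// dotted (object) / bracketed (array) path, as exercised by the tests above.
fn compare_json(a: &Value, b: &Value, path: String) -> Diffs {
    let mut diffs = Diffs::new();
    match (a, b) {
        (Value::Object(a_map), Value::Object(b_map)) => {
            let keys: BTreeSet<_> = a_map.keys().chain(b_map.keys()).collect();
            for key in keys {
                let sub_path = if path.is_empty() {
                    key.clone()
                } else {
                    format!("{path}.{key}")
                };
                match (a_map.get(key), b_map.get(key)) {
                    (Some(a_val), Some(b_val)) => {
                        diffs.extend(compare_json(a_val, b_val, sub_path));
                    }
                    (a_val, b_val) => {
                        diffs.insert(sub_path, (a_val.cloned(), b_val.cloned()));
                    }
                }
            }
        }
        (Value::Array(a_arr), Value::Array(b_arr)) => {
            for i in 0..a_arr.len().max(b_arr.len()) {
                let sub_path = format!("{path}[{i}]");
                match (a_arr.get(i), b_arr.get(i)) {
                    (Some(a_val), Some(b_val)) => {
                        diffs.extend(compare_json(a_val, b_val, sub_path));
                    }
                    (a_val, b_val) => {
                        diffs.insert(sub_path, (a_val.cloned(), b_val.cloned()));
                    }
                }
            }
        }
        _ if a != b => {
            diffs.insert(path, (non_null(a), non_null(b)));
        }
        _ => {}
    }
    diffs
}

fn main() {
    let diffs = compare_json(&json!({ "k": 1 }), &json!({ "k": 2 }), String::new());
    assert_eq!(diffs.get("k"), Some(&(Some(json!(1)), Some(json!(2)))));
}
```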
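The deleted `pubsub_checker.rs` cross-checks `newHeads` notifications: one task subscribes to the main node and parks each header in the shared map, while one task per external-node instance subscribes to that instance and compares what it receives against the parked header, retrying while the main-node subscriber catches up. Stripped of the map and divergence reporting, the subscription plumbing it uses (via `zksync_web3_decl`'s re-exported `jsonrpsee`) looks roughly like the sketch below; treat it as such, since crate paths and versions may have moved since this revision, and the URL is a placeholder:

```rust
use zksync_web3_decl::{
    jsonrpsee::{
        core::client::{Subscription, SubscriptionClientT},
        rpc_params,
        ws_client::WsClientBuilder,
    },
    types::PubSubResult,
};

/// Subscribes to `newHeads` on one node and logs incoming headers,
/// mirroring the client setup in the deleted `pubsub_checker.rs`.
async fn watch_new_heads(url: &str) -> anyhow::Result<()> {
    let client = WsClientBuilder::default().build(url).await?;
    let mut subscription: Subscription<PubSubResult> = client
        .subscribe("eth_subscribe", rpc_params!["newHeads"], "eth_unsubscribe")
        .await?;

    // `next()` yields `None` once the server closes the subscription.
    while let Some(item) = subscription.next().await {
        // Anything but a header is unexpected for a `newHeads` subscription.
        if let PubSubResult::Header(header) = item? {
            tracing::info!("new head: {:?}", header.number);
        }
    }
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Placeholder WS endpoint; point this at a real node.
    watch_new_heads("ws://127.0.0.1:3051").await
}
```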
diff --git a/docker/cross-external-nodes-checker/Dockerfile b/docker/cross-external-nodes-checker/Dockerfile deleted file mode 100644 index 87b5d67d719..00000000000 --- a/docker/cross-external-nodes-checker/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# syntax=docker/dockerfile:experimental -FROM debian:bookworm-slim as builder - -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - -WORKDIR /usr/src/zksync -COPY . . - -RUN cargo build --release - -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl ca-certificates && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /usr/src/zksync/target/release/cross_external_nodes_checker /usr/bin - -ENTRYPOINT ["cross_external_nodes_checker"] diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index c21f00daad2..02dca4cd50c 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,24 +1,11 @@ # Will work locally only after prior contracts build -FROM debian:bookworm-slim as builder - -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 +FROM matterlabs/zksync-build-base:latest as builder WORKDIR /usr/src/zksync COPY . . RUN cargo build --release -RUN cargo install sqlx-cli --version 0.7.3 FROM debian:bookworm-slim diff --git a/docker/proof-fri-compressor/Dockerfile b/docker/proof-fri-compressor/Dockerfile index e18c0c27f55..c6739dca827 100644 --- a/docker/proof-fri-compressor/Dockerfile +++ b/docker/proof-fri-compressor/Dockerfile @@ -1,21 +1,9 @@ # Will work locally only after prior universal setup key download -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest as builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index 256621e8df7..f381a5902d7 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,19 +1,7 @@ -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest as builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-fri/Dockerfile b/docker/prover-fri/Dockerfile index 8244aea06b2..fd85801b729 100644 --- a/docker/prover-fri/Dockerfile +++ b/docker/prover-fri/Dockerfile @@ -1,19 +1,7 @@ -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest as builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - WORKDIR /usr/src/zksync COPY . . diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index a7d8fc7487f..e5d378c3b6d 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,21 +1,10 @@ # Will work locally only after prior contracts build # syntax=docker/dockerfile:experimental -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest as builder WORKDIR /usr/src/zksync -COPY . . - -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev linux-libc-dev liburing-dev && \ - rm -rf /var/lib/apt/lists/* -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 +COPY . . RUN cargo build --release --features=rocksdb/io-uring diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index 897f28f8780..10eef06dfbb 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -1,21 +1,9 @@ # syntax=docker/dockerfile:experimental -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest as builder WORKDIR /usr/src/zksync COPY . . 
-RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev linux-libc-dev liburing-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - RUN cargo build --release --bin snapshots_creator FROM debian:bookworm-slim diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index f431339d3e9..42dee7ba5d0 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,19 +1,7 @@ -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - WORKDIR /usr/src/zksync COPY . . diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index 5861f3e5162..7d3c44f67fc 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,19 +1,7 @@ -FROM debian:bookworm-slim as builder +FROM matterlabs/zksync-build-base:latest as builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ - pkg-config build-essential libclang-dev && \ - rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-08-21 && \ - rustup default nightly-2023-08-21 - WORKDIR /usr/src/zksync COPY . . 
diff --git a/docs/guides/advanced/how_transaction_works.md b/docs/guides/advanced/how_transaction_works.md index 3ee9c30a205..83471bfd7e8 100644 --- a/docs/guides/advanced/how_transaction_works.md +++ b/docs/guides/advanced/how_transaction_works.md @@ -85,7 +85,7 @@ The transaction can have three different results in state keeper: [l1_tx]: https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/l1/mod.rs#L183 'l1 tx' [l2_tx]: https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/l2/mod.rs#L140 'l2 tx' [submit_tx]: - https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/tx_sender/mod.rs#L309 + https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/tx_sender/mod.rs#L288 'submit tx' [process_l1_batch]: https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/state_keeper/keeper.rs#L257 diff --git a/docs/specs/prover/boojum_function_check_if_satisfied.md b/docs/specs/prover/boojum_function_check_if_satisfied.md index 922889b90d4..48fa095637d 100644 --- a/docs/specs/prover/boojum_function_check_if_satisfied.md +++ b/docs/specs/prover/boojum_function_check_if_satisfied.md @@ -12,7 +12,7 @@ variables circuit columns that are under PLONK copy-permutation constraints (so in programming languages), and the witness ephemeral values that can be used to prove certain constraints, for example by providing an inverse if the variable must be non-zero. -![Check_if_satisfied.png](./img/boojum_function_check_if_satisfied/check_if_satisfied.png) +![Check_if_satisfied.png](./img/boojum_function_check_if_satisfied/Check_if_satisfied.png) Next we prepare a view. Instead of working with all of the columns at once, it can be helpful to work with only a subset. diff --git a/docs/specs/prover/overview.md b/docs/specs/prover/overview.md index a6025e23bb8..a7f814a458a 100644 --- a/docs/specs/prover/overview.md +++ b/docs/specs/prover/overview.md @@ -1,7 +1,7 @@ # Intro to zkSync’s ZK This page is specific to our cryptography. For a general introduction, please read: -[https://docs.zksync.io/userdocs/intro/#introduction](https://docs.zksync.io/userdocs/intro/#introduction) +[https://docs.zksync.io/build/developer-reference/rollups.html](https://docs.zksync.io/build/developer-reference/rollups.html) As a ZK rollup, we want everything to be verified by cryptography and secured by Ethereum. The power of ZK allows for transaction compression, reducing fees for users while inheriting the same security. diff --git a/docs/specs/zk_evm/precompiles.md b/docs/specs/zk_evm/precompiles.md index f1f812c68e1..4874bcdf940 100644 --- a/docs/specs/zk_evm/precompiles.md +++ b/docs/specs/zk_evm/precompiles.md @@ -167,7 +167,7 @@ function montgomeryMul(multiplicand, multiplier) -> ret { ```solidity /// @notice Computes the Montgomery modular inverse skipping the Montgomery reduction step. -/// @dev The Montgomery reduction step is skept because a modification in the binary extended Euclidean algorithm is used to compute the modular inverse. +/// @dev The Montgomery reduction step is skipped because a modification in the binary extended Euclidean algorithm is used to compute the modular inverse. /// @dev See the function `binaryExtendedEuclideanAlgorithm` for further details. /// @param a The field element in Montgomery form to compute the modular inverse of. /// @return invmod The result of the Montgomery modular inverse (in Montgomery form). 
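The `precompiles.md` change above only fixes a typo ("skept" to "skipped"), but it sits next to the identity every helper in that file leans on. `montgomeryMul` keeps field elements in Montgomery form (`a * R mod N`) and multiplies them through Montgomery reduction; as a reminder, the textbook REDC step (standard math, not code from this repo) with modulus $N$, radix $R$ coprime to $N$, and precomputed $N' = -N^{-1} \bmod R$ is

$$
m = (T \bmod R)\,N' \bmod R, \qquad
t = \frac{T + mN}{R}, \qquad
\operatorname{REDC}(T) =
\begin{cases}
t - N, & t \ge N,\\
t, & t < N,
\end{cases}
$$

so that $\operatorname{REDC}(T) \equiv T R^{-1} \pmod{N}$ and the product of two Montgomery-form operands lands back in Montgomery form. This final reduction is exactly the step the corrected comment says the inverse helper may skip, because the modified binary extended Euclidean algorithm already accounts for it.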
diff --git a/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol b/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol index 83321ec4727..e65f51d5652 100644 --- a/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol +++ b/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol @@ -11,18 +11,24 @@ import "./msg-sender.sol"; contract ComplexUpgrade { constructor() {} - function mimicCall( - address _address, - address _whoToMimic, - bytes memory _calldata - ) internal { + struct MimicCallInfo { + address to; + address whoToMimic; + bytes data; + } + + function _mimicCall(MimicCallInfo memory info) internal { address callAddr = MIMIC_CALL_CALL_ADDRESS; + bytes memory data = info.data; + address to = info.to; + address whoToMimic = info.whoToMimic; + uint32 dataStart; uint32 dataLength; assembly { - dataStart := add(_calldata, 0x20) - dataLength := mload(_calldata) + dataStart := add(data, 0x20) + dataLength := mload(data) } uint256 farCallAbi = SystemContractsCaller.getFarCallABI( @@ -39,7 +45,7 @@ contract ComplexUpgrade { ); assembly { - let success := call(_address, callAddr, 0, farCallAbi, _whoToMimic, 0, 0) + let success := call(to, callAddr, 0, farCallAbi, whoToMimic, 0, 0) if iszero(success) { returndatacopy(0, 0, returndatasize()) @@ -48,6 +54,14 @@ contract ComplexUpgrade { } } + function mimicCalls( + MimicCallInfo[] memory info + ) public { + for (uint256 i = 0; i < info.length; i++) { + _mimicCall(info[i]); + } + } + // This function is used to imitate some complex upgrade logic function someComplexUpgrade( address _address1, @@ -86,6 +100,13 @@ contract ComplexUpgrade { MsgSenderTest.testMsgSender.selector, toMimic ); - mimicCall(address(msgSenderTest), toMimic, _mimicCallCalldata); + + MimicCallInfo memory info = MimicCallInfo({ + to: address(msgSenderTest), + whoToMimic: toMimic, + data: _mimicCallCalldata + }); + + _mimicCall(info); } } diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index bfea2d2479b..b3cfcbd3295 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -63,3 +63,4 @@ internal_l1_pricing_multiplier=0.8 # Node polling period in seconds. poll_period=5 internal_enforced_l1_gas_price=45_000_000_000 +l1_gas_per_pubdata_byte=0 diff --git a/etc/env/base/private.toml b/etc/env/base/private.toml index 60d5bfb2d9c..0a6ff7eeb2d 100644 --- a/etc/env/base/private.toml +++ b/etc/env/base/private.toml @@ -11,6 +11,14 @@ operator_private_key="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e5 # Derived from the `OPERATOR_PRIVATE_KEY`. 
operator_commit_eth_addr="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" +[consensus] +config_path = "etc/env/consensus_config.json" +# generated with zksync_consensus_tools/src/bin/keys.rs +# node:public:ed25519:ee717abba6aec5baae5e09d457bd2ffc2f121b576cf4170ce15a68163ce4c868 +node_key="node:secret:ed25519:b6666c3be2703e15028bbebd220d2678fde7431038641f36c52f02849595a8ab" +# validator:public:bn254:8b0ff0ad1a250e64b0209277148ccee3b64534d8fa60cf25ba0bcc8b65d4d89309cdae79197c2db873d351401093fa0542a5a2071c1a247f2e1abe56d08cbabb +validator_key="validator:secret:bn254:038ec13f4dca210c9d3525204422f0584e1653a5684bff47f98316d9e64b6746" + [misc] # Private key for the fee seller account fee_account_private_key="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be" diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 0374867630d..8eef7700067 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -5,6 +5,9 @@ # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. RUST_LOG="""\ +zksync_consensus_bft=info,\ +zksync_consensus_network=info,\ +zksync_consensus_storage=info,\ zksync_core=debug,\ zksync_server=debug,\ zksync_contract_verifier=debug,\ @@ -24,7 +27,6 @@ block_sizes_test=info,\ zksync_object_store=info,\ en_playground=info,\ zksync_external_node=info,\ -cross_nodes_checker=debug,\ zksync_witness_generator=info,\ zksync_prover_fri=info,\ zksync_witness_vector_generator=info,\ diff --git a/etc/env/consensus_config.json b/etc/env/consensus_config.json new file mode 100644 index 00000000000..4dc62be7324 --- /dev/null +++ b/etc/env/consensus_config.json @@ -0,0 +1,13 @@ +{ + "server_addr": "127.0.0.1:3054", + "public_addr": "127.0.0.1:3054", + "validators": [ + "validator:public:bn254:8b0ff0ad1a250e64b0209277148ccee3b64534d8fa60cf25ba0bcc8b65d4d89309cdae79197c2db873d351401093fa0542a5a2071c1a247f2e1abe56d08cbabb" + ], + "max_payload_size": 5000000, + "gossip_static_outbound": [], + "gossip_static_inbound": [ + "node:public:ed25519:147bb71be895846e1d6f5b1c6a8be53848b82bdafcf66e9dfe6ca65581076a1d" + ], + "gossip_dynamic_inbound_limit": 0 +} diff --git a/etc/env/en_consensus_config.json b/etc/env/en_consensus_config.json new file mode 100644 index 00000000000..29ec0ca1e9a --- /dev/null +++ b/etc/env/en_consensus_config.json @@ -0,0 +1,16 @@ +{ + "server_addr": "127.0.0.1:3055", + "public_addr": "127.0.0.1:3055", + "validators": [ + "validator:public:bn254:8b0ff0ad1a250e64b0209277148ccee3b64534d8fa60cf25ba0bcc8b65d4d89309cdae79197c2db873d351401093fa0542a5a2071c1a247f2e1abe56d08cbabb" + ], + "max_payload_size": 5000000, + "gossip_static_outbound": [ + { + "key": "node:public:ed25519:ee717abba6aec5baae5e09d457bd2ffc2f121b576cf4170ce15a68163ce4c868", + "addr": "127.0.0.1:3054" + } + ], + "gossip_static_inbound": [], + "gossip_dynamic_inbound_limit": 0 +} diff --git a/etc/env/ext-node-docker.toml b/etc/env/ext-node-docker.toml index cf7332ee6e4..129b41a4181 100644 --- a/etc/env/ext-node-docker.toml +++ b/etc/env/ext-node-docker.toml @@ -36,11 +36,23 @@ api_namespaces = ["eth", "web3", "net", "pubsub", "zks", "en", "debug"] bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b594c" default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" +# Should be the same as chain.state_keeper.fee_account_addr. 
+operator_addr="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" + +[en.consensus] +config_path="etc/env/en_consensus_config.json" +# generated with zksync_consensus_tools/src/bin/keys.rs +# node:public:ed25519:147bb71be895846e1d6f5b1c6a8be53848b82bdafcf66e9dfe6ca65581076a1d +node_key="node:secret:ed25519:d56de77c738326c305c64c25bffe1cc94ea7c639cf71ca3ff94229df27f167ac" + [rust] # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. log="""\ warn,\ +zksync_consensus_bft=info,\ +zksync_consensus_network=info,\ +zksync_consensus_storage=info,\ zksync_core=debug,\ zksync_dal=info,\ zksync_eth_client=info,\ @@ -52,7 +64,6 @@ zksync_types=info,\ loadnext=info,\ vm=info,\ zksync_external_node=info,\ -cross_nodes_checker=debug,\ """ # `RUST_BACKTRACE` variable diff --git a/etc/env/ext-node.toml b/etc/env/ext-node.toml index 9feabe84247..58298a5501b 100644 --- a/etc/env/ext-node.toml +++ b/etc/env/ext-node.toml @@ -36,11 +36,23 @@ api_namespaces = ["eth", "web3", "net", "pubsub", "zks", "en", "debug"] bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b594c" default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" +# Should be the same as chain.state_keeper.fee_account_addr. +operator_addr="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" + +[en.consensus] +config_path="etc/env/en_consensus_config.json" +# generated with zksync_consensus_tools/src/bin/keys.rs +# node:public:ed25519:147bb71be895846e1d6f5b1c6a8be53848b82bdafcf66e9dfe6ca65581076a1d +node_key="node:secret:ed25519:d56de77c738326c305c64c25bffe1cc94ea7c639cf71ca3ff94229df27f167ac" + [rust] # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
log="""\ warn,\ +zksync_consensus_bft=info,\ +zksync_consensus_network=info,\ +zksync_consensus_storage=info,\ zksync_core=debug,\ zksync_dal=info,\ zksync_eth_client=info,\ @@ -52,7 +64,6 @@ zksync_types=info,\ loadnext=info,\ vm=info,\ zksync_external_node=info,\ -cross_nodes_checker=debug,\ """ # `RUST_BACKTRACE` variable diff --git a/etc/upgrades/1705556759-fee-model-and-1.4.1/common.json b/etc/upgrades/1705556759-fee-model-and-1.4.1/common.json new file mode 100644 index 00000000000..a8b657906be --- /dev/null +++ b/etc/upgrades/1705556759-fee-model-and-1.4.1/common.json @@ -0,0 +1,5 @@ +{ + "name": "fee-model-and-1.4.1", + "creationTimestamp": 1705556759, + "protocolVersion": "20" +} \ No newline at end of file diff --git a/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/crypto.json b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/crypto.json new file mode 100644 index 00000000000..83cbf12094e --- /dev/null +++ b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/crypto.json @@ -0,0 +1,11 @@ +{ + "verifier": { + "address": "0x3390051435eCB25a9610A1cF17d1BA0a228A0560", + "txHash": "0x2c3b8915e8ebc617cc13e462df334f4197d48b19b302538a519f55acbda285cb" + }, + "keys": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/facetCuts.json b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/facetCuts.json new file mode 100644 index 00000000000..67a75603652 --- /dev/null +++ b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/facetCuts.json @@ -0,0 +1,165 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0xE6426c725cB507168369c10284390E59d91eC821", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0xc4a5e861df9DD9495f8Dba1c260913d1A9b8Ec2B", + 
"selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x0f58Fd6c9Ed966e09C1dFFBc8E6FF600ec65f6eB", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0x5b86821c1B4B55deF404c9551EC2d2Cc0aE70f5C", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/facets.json b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/facets.json new file mode 100644 index 00000000000..b9f6acbb6fa --- /dev/null +++ b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + "address": "0x5b86821c1B4B55deF404c9551EC2d2Cc0aE70f5C", + "txHash": "0x27598585a0471d1d95358bb93ade96fc4729d4531b08daf47f6d09e1fa3a82a2" + }, + "AdminFacet": { + "address": "0xE6426c725cB507168369c10284390E59d91eC821", + "txHash": "0x016a39f72fdd7245ebfbc07581823756d11b0638c27f32b1bc07794928b0abbb" + }, + "GettersFacet": { + "address": "0xc4a5e861df9DD9495f8Dba1c260913d1A9b8Ec2B", + "txHash": "0xbfa6c2995d7cc5c4502a3966f6c6c133dd5b45b15bd117acf2ca8413639da671" + }, + "MailboxFacet": { + "address": "0x0f58Fd6c9Ed966e09C1dFFBc8E6FF600ec65f6eB", + "txHash": "0xce16a8f2d210ee4b57159ca3f2a760af0c249234193fcf983a5a3050b29c8a5d" + } +} \ No newline at end of file diff --git a/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/l2Upgrade.json b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/l2Upgrade.json new file mode 100644 index 00000000000..918c93b0004 --- /dev/null +++ b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/l2Upgrade.json @@ -0,0 +1,225 @@ +{ + "systemContracts": [ + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x01000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b3" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a8" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x0100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d57" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x01000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x0100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d80" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + 
"bytecodeHashes": [ + "0x01000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d9" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2EthToken", + "bytecodeHashes": [ + "0x010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x01000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x01000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + "0x0100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef" + ], + "address": "0x0000000000000000000000000000000000008010" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x01000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b3", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a8", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d57", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d80", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d9", + "newAddress": "0x0000000000000000000000000000000000008009", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + 
"callConstructor": false + }, + { + "bytecodeHash": "0x010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": "0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c0000000000000000000000000000000000000000000000000000
0000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d900000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d9000000000000000000000000000000000000000000000000000000000000800900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "20", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d9000000000000000000000000000000000000000000000000000000000000800900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + } +} \ No newline at end of file diff --git a/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/transactions.json b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/transactions.json new file mode 100644 index 00000000000..c393a582055 --- /dev/null +++ b/etc/upgrades/1705556759-fee-model-and-1.4.1/stage2/transactions.json @@ -0,0 +1,230 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "20", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d9000000000000000000000000000000000000000000000000000000000000800900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "defaultAccountHash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "verifier": "0x3390051435eCB25a9610A1cF17d1BA0a228A0560", + "verifierParams": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x65aa4820" + }, + "factoryDeps": [], + "newProtocolVersion": "20", + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000001060010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a90000000000000000000000003390051435ecb25a9610a1cf17d1ba0a228a05605a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000065aa48200000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000e400000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ea00000000000000000000000000000000000000000000000000000000000000ba4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb
93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d900000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000
000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x38283BE1B217873DDacb599e727669E88c8f36C7", + "protocolVersion": "20", + "upgradeTimestamp": "1705658400", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000002524a9f6d9410000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000038283be1b217873ddacb599e727669e88c8f36c700000000000000000000000000000000000000000000000000000000000013e00000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000007a000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a200000000000000000000000000000000000000000000000000000000000000c2000000000000000000000000000000000000000000000000000000000000010e0000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000
00000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000173389450000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000021cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed6270000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c7
00000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb6724190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d3400000000000000000000000000000000000000000000000000000000000000000000000000000000e6426c725cb507168369c10284390e59d91ec821000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b0e18b68100000000000000000000000000000000000000000000000000000000e58bb6390000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000c4a5e861df9dd9495f8dba1c260913d1a9b8ec2b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000021cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed6270000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000
00000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000f58fd6c9ed966e09c1dffbc8e6ff600ec65f6eb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb672419000000000000000000000000000000000000000000000000000000000000000000000000000000005b86821c1b4b55def404c9551ec2d2cc0ae70f5c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010e41ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000001060010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a90000000000000000000000003390051435ecb25a9610a1cf17d1ba0a228a05605a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000065aa48200000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000e400000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ea00000000000000000000000000000000000000000000000000000000000000ba4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000
00000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d900000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000002524a9f6d9410000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000038283be1b217873ddacb599e727669e88c8f36c700000000000000000000000000000000000000000000000000000000000013e00000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000007a000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a200000000000000000000000000000000000000000000000000000000000000c2000000000000000000000000000000000000000000000000000000000000010e000000000000000000000000000000000000000000000000000000000000012400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000173389450000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000021cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed6270000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a8100000000000
0000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb6724190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d3400000000000000000000000000000000000000000000000000000000000000000000000000000000e6426c725cb507168369c10284390e59d91ec821000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b0e18b68100000000000000000000000000000000000000000000000000000000e58bb6390000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f0000000000000000000000000000000000000000000000
00000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000c4a5e861df9dd9495f8dba1c260913d1a9b8ec2b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000021cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed6270000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000f58fd6c9ed966e09c1dffbc8e6ff600ec65f6eb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb672419000000000000000000000000000000000000000000000000000000000000000000000000000000005b86821c1b4b55def404c9551ec2d2cc0ae70f5c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010e41ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000001060010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a90000000000000000000000003390051435ecb25a9610a1cf17d1ba0a228a05605a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000065aa48200000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000e400000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ea00000000000000000000000000000000000000000000000000000000000000ba4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000
000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d900000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd
5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "governanceOperation": { + "calls": [ + { + "target": "0x6d6e010A2680E2E5a3b097ce411528b36d880EF6", + "value": 0, + "data": 
"0xa9f6d9410000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000038283be1b217873ddacb599e727669e88c8f36c700000000000000000000000000000000000000000000000000000000000013e00000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000007a000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a200000000000000000000000000000000000000000000000000000000000000c2000000000000000000000000000000000000000000000000000000000000010e000000000000000000000000000000000000000000000000000000000000012400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000173389450000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000021cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed6270000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000
000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb6724190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d3400000000000000000000000000000000000000000000000000000000000000000000000000000000e6426c725cb507168369c10284390e59d91ec821000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b0e18b68100000000000000000000000000000000000000000000000000000000e58bb6390000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000c4a5e861df9dd9495f8dba1c260913d1a9b8ec2b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000021cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000
0000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000f58fd6c9ed966e09c1dffbc8e6ff600ec65f6eb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb672419000000000000000000000000000000000000000000000000000000000000000000000000000000005b86821c1b4b55def404c9551ec2d2cc0ae70f5c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010e41ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000001060010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a90000000000000000000000
003390051435ecb25a9610a1cf17d1ba0a228a05605a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000065aa48200000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000e400000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ea00000000000000000000000000000000000000000000000000000000000000ba4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000
0000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d900000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd8684100000000000000000000
0000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "transparentUpgrade": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0xE6426c725cB507168369c10284390E59d91eC821", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0xc4a5e861df9DD9495f8Dba1c260913d1A9b8Ec2B", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + 
"0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x0f58Fd6c9Ed966e09C1dFFBc8E6FF600ec65f6eB", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0x5b86821c1B4B55deF404c9551EC2d2Cc0aE70f5C", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0x38283BE1B217873DDacb599e727669E88c8f36C7", + "initCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000001060010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a90000000000000000000000003390051435ecb25a9610a1cf17d1ba0a228a05605a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080062362cb3eaf1f631406cbe19bf2a2c5d0d9ea69d069309a6003addae9f387be0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000065aa48200000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000e400000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ea00000000000000000000000000000000000000000000000000000000000000ba4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e0000000
00000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa001000075bc9de2129f5d58efa04515bbf24610645546eab19192d7f94a23f83e00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e5eef000fb93f3b7f746149d0f467fe99e0f628aa76520b18321eeb7b300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007d88348c8092dd260d3ba1b90da3d693c5d416b7078b2faca348e2f3a800000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100003ddb0142c77e7e36c37910cd90b07e48bb952168e66c79519953d32d5700000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000555b2471aa863b7da5360cc0d2459a8aa5ad9feb6ad8ea5666aee0b5f4c00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100028d5519113834685985178f33d36dd855e0b0835e2dad3892ddc3244d8000000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000063cb83b923ab1e67bb7944c6493286ba7c1c5614c0cb17155c5eef82d900000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001014336cee5c792682bf2c2079807e643c491d879c07de9dea482a78e39000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000181b1c963c230c8521d78a0a650cf7c1879cc6b38e9315035c5596cd914000000000000000000000000000000000000000000000000000000000000800b00000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c96884dfd5de1a2e02616564c057e67c423d31c589df25bf25b08dd3d6000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000167b75441cbdf3edc039678e2e57bb28d87ca3b76c88ba153be0e65f366000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000553156325702c61297c4ebe6171f7d64845d548311e0fe88792cd86841000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000fb004b644efe76e9ef3ba89dfa3eaac946e3fa19f8a046ed27465eeef00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/flake.lock b/flake.lock index 3b683c15c85..44c9f48b291 100644 --- a/flake.lock +++ b/flake.lock @@ -7,16 +7,16 @@ }, "stable": { "locked": { - "lastModified": 1683478192, - "narHash": "sha256-7f7RR71w0jRABDgBwjq3vE1yY3nrVJyXk8hDzu5kl1E=", + "lastModified": 1705331948, + "narHash": "sha256-qjQXfvrAT1/RKDFAMdl8Hw3m4tLVvMCc8fMqzJv0pP4=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c568239bcc990050b7aedadb7387832440ad8fb1", + "rev": "b8dd8be3c790215716e7c12b247f45ca525867e2", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index e4fe935907e..d34ecd551ab 100644 --- a/flake.nix +++ b/flake.nix @@ -1,40 +1,43 @@ { - description = "zkSync development shell"; - inputs = { - stable.url = "github:NixOS/nixpkgs/nixos-22.11"; - }; - outputs = {self, stable}: { - packages.x86_64-linux.default = - with import stable { system = "x86_64-linux"; }; - pkgs.mkShell { - name = "zkSync"; - src = ./.; - buildInputs = [ - docker-compose - nodejs - yarn - axel - libclang - openssl - pkg-config - postgresql - python3 - solc - ]; + description = "zkSync development shell"; + inputs = { + stable.url = 
"github:NixOS/nixpkgs/nixos-23.11"; + }; + outputs = { self, stable }: { + formatter.x86_64-linux = stable.legacyPackages.x86_64-linux.nixpkgs-fmt; + devShells.x86_64-linux.default = + with import stable { system = "x86_64-linux"; }; + pkgs.mkShell.override { stdenv = pkgs.stdenvAdapters.useMoldLinker pkgs.gccStdenv; } { + name = "zkSync"; + src = ./.; + buildInputs = [ + docker-compose + nodejs + yarn + axel + libclang + openssl + pkg-config + postgresql + python3 + solc + sqlx-cli + rustup + ]; - # for RocksDB and other Rust bindgen libraries - LIBCLANG_PATH = lib.makeLibraryPath [ libclang.lib ]; - BINDGEN_EXTRA_CLANG_ARGS = ''-I"${libclang.lib}/lib/clang/${libclang.version}/include"''; + # for RocksDB and other Rust bindgen libraries + LIBCLANG_PATH = lib.makeLibraryPath [ libclang.lib ]; + BINDGEN_EXTRA_CLANG_ARGS = ''-I"${libclang.lib}/lib/clang/${builtins.elemAt (builtins.splitVersion libclang.version) 0}/include"''; - shellHook = '' - export ZKSYNC_HOME=$PWD - export PATH=$ZKSYNC_HOME/bin:$PATH - ''; + shellHook = '' + export ZKSYNC_HOME=$PWD + export PATH=$ZKSYNC_HOME/bin:$PATH + ''; - # hardhat solc requires ld-linux - # Nixos has to fake it with nix-ld - NIX_LD_LIBRARY_PATH = lib.makeLibraryPath []; - NIX_LD = builtins.readFile "${stdenv.cc}/nix-support/dynamic-linker"; - }; - }; + # hardhat solc requires ld-linux + # Nixos has to fake it with nix-ld + NIX_LD_LIBRARY_PATH = lib.makeLibraryPath []; + NIX_LD = builtins.readFile "${stdenv.cc}/nix-support/dynamic-linker"; + }; + }; } diff --git a/infrastructure/protocol-upgrade/src/crypto/crypto.ts b/infrastructure/protocol-upgrade/src/crypto/crypto.ts index df7aa6bd44b..1f87b215ab6 100644 --- a/infrastructure/protocol-upgrade/src/crypto/crypto.ts +++ b/infrastructure/protocol-upgrade/src/crypto/crypto.ts @@ -1,6 +1,6 @@ import { getCryptoFileName, getUpgradePath, VerifierParams } from '../utils'; import fs from 'fs'; -import { BytesLike } from 'ethers'; +import { BytesLike, ethers } from 'ethers'; import { Command } from 'commander'; import { deployVerifier } from './deployer'; @@ -12,8 +12,7 @@ function saveVerificationKeys( ) { recursionNodeLevelVkHash = recursionNodeLevelVkHash ?? process.env.CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH; recursionLeafLevelVkHash = recursionLeafLevelVkHash ?? process.env.CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH; - recursionCircuitsSetVksHash = - recursionCircuitsSetVksHash ?? process.env.CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH; + recursionCircuitsSetVksHash = recursionCircuitsSetVksHash ?? 
diff --git a/infrastructure/protocol-upgrade/src/crypto/crypto.ts b/infrastructure/protocol-upgrade/src/crypto/crypto.ts
index df7aa6bd44b..1f87b215ab6 100644
--- a/infrastructure/protocol-upgrade/src/crypto/crypto.ts
+++ b/infrastructure/protocol-upgrade/src/crypto/crypto.ts
@@ -1,6 +1,6 @@
 import { getCryptoFileName, getUpgradePath, VerifierParams } from '../utils';
 import fs from 'fs';
-import { BytesLike } from 'ethers';
+import { BytesLike, ethers } from 'ethers';
 import { Command } from 'commander';
 import { deployVerifier } from './deployer';
@@ -12,8 +12,7 @@ function saveVerificationKeys(
 ) {
     recursionNodeLevelVkHash = recursionNodeLevelVkHash ?? process.env.CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH;
     recursionLeafLevelVkHash = recursionLeafLevelVkHash ?? process.env.CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH;
-    recursionCircuitsSetVksHash =
-        recursionCircuitsSetVksHash ?? process.env.CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH;
+    recursionCircuitsSetVksHash = recursionCircuitsSetVksHash ?? ethers.constants.HashZero;
     const verificationParams: VerifierParams = {
         recursionNodeLevelVkHash,
         recursionLeafLevelVkHash,
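A note on the `saveVerificationKeys` hunk above: `ethers.constants.HashZero` is the 32-byte zero hash, and `??` only falls back when the left-hand side is `null` or `undefined`. A minimal standalone sketch of the resulting default (variable names are illustrative, not repo code):

    import { ethers } from 'ethers';

    // `??` keeps an empty string, unlike `||`; only null/undefined fall through.
    const fromCli: string | undefined = undefined;
    const recursionCircuitsSetVksHash = fromCli ?? ethers.constants.HashZero;
    // -> '0x0000000000000000000000000000000000000000000000000000000000000000'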
diff --git a/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts b/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts
index 95364215da3..4f89ec477a7 100644
--- a/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts
+++ b/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts
@@ -143,7 +143,7 @@ export const command = new Command('facets').description('Deploy facets and gene
 command
     .command('deploy-all')
-    .description('Deploy all facets')
+    .description('Deploy all facets and generate facet cuts')
     .option('--private-key <private-key>')
     .option('--l1rpc <l1rpc>')
     .option('--gas-price <gas-price>')
diff --git a/infrastructure/zk/src/clean.ts b/infrastructure/zk/src/clean.ts
index 11763511184..96c68d5908b 100644
--- a/infrastructure/zk/src/clean.ts
+++ b/infrastructure/zk/src/clean.ts
@@ -27,6 +27,7 @@ export const command = new Command('clean')
         const env = process.env.ZKSYNC_ENV;
         clean(`etc/env/${env}.env`);
         clean('etc/env/.init.env');
+        clean('etc/env/.current');
     }

     if (cmd.all || cmd.artifacts) {
diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts
index 7b571ebcc34..143a41bd154 100644
--- a/infrastructure/zk/src/docker.ts
+++ b/infrastructure/zk/src/docker.ts
@@ -4,7 +4,6 @@ import * as utils from './utils';
 const IMAGES = [
     'server-v2',
     'external-node',
-    'cross-external-nodes-checker',
     'contract-verifier',
     'prover-v2',
     'geth',
@@ -70,7 +69,6 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin
     const tagList = [
         'server-v2',
         'external-node',
-        'cross-external-nodes-checker',
         'prover',
         'contract-verifier',
         'prover-v2',
diff --git a/infrastructure/zk/src/format_sql.ts b/infrastructure/zk/src/format_sql.ts
index 2465ec20ba2..1e2bd2261c5 100644
--- a/infrastructure/zk/src/format_sql.ts
+++ b/infrastructure/zk/src/format_sql.ts
@@ -34,7 +34,7 @@ function formatQuery(query: string) {
     return formattedQuery;
 }

-function extractQueryFromRustString(query: string): string {
+function extractQueryFromRustString(query: string, isRaw: boolean): string {
     query = query.trim();
     if (query.endsWith(',')) {
         query = query.slice(0, query.length - 1);
@@ -46,9 +46,10 @@ function extractQueryFromRustString(query: string): string {
         query = query.slice(3, query.length - 2);
     }

-    //getting rid of all "\" characters, both from escapes and line breaks
-    query = query.replace(/\\/g, '');
-
+    // Get rid of all "\" characters, both from escapes and line breaks.
+    if (!isRaw) {
+        query = query.replace(/\\(.|\n)/g, '$1');
+    }
     return query;
 }

@@ -63,9 +64,9 @@ function addIndent(query: string, indent: number) {
         .join('\n');
 }

-function formatRustStringQuery(query: string) {
+function formatRustStringQuery(query: string, isRaw: boolean) {
     const baseIndent = query.search(/\S/);
-    const rawQuery = extractQueryFromRustString(query);
+    const rawQuery = extractQueryFromRustString(query, isRaw);
     const formattedQuery = formatQuery(rawQuery);
     const reconstructedRustString = embedTextInsideRustString(formattedQuery);

@@ -81,7 +82,7 @@ function formatOneLineQuery(line: string): string {
     const queryEnd = isRawString ? line.indexOf('"#') + 2 : line.slice(1).search(/(^|[^\\])"/) + 3;
     const suffix = line.slice(queryEnd);
     const query = line.slice(0, queryEnd);
-    let formattedQuery = formatRustStringQuery(query);
+    let formattedQuery = formatRustStringQuery(query, isRawString);
     formattedQuery = addIndent(formattedQuery, baseIndent);

     return prefix + '\n' + formattedQuery + '\n' + suffix;
@@ -124,7 +125,7 @@ async function formatFile(filePath: string, check: boolean) {
         }

         if (isInsideQuery) {
-            const queryNotEmpty = builtQuery || line.trim().length > 1;
+            const queryNotEmpty = builtQuery !== '' || line.trim().length > 1;
             const rawStringQueryEnded = line.endsWith('"#,') || line.endsWith('"#');
             const regularStringQueryEnded = (line.endsWith('",') || line.endsWith('"')) && queryNotEmpty;
             builtQuery += line + '\n';
@@ -135,7 +136,7 @@ async function formatFile(filePath: string, check: boolean) {
         ) {
             isInsideQuery = false;
             let endedWithComma = builtQuery.trimEnd().endsWith(',');
-            modifiedFile += formatRustStringQuery(builtQuery).trimEnd();
+            modifiedFile += formatRustStringQuery(builtQuery, isRawString).trimEnd();
             modifiedFile += endedWithComma ? ',' : '';
             modifiedFile += '\n';
         }
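The escape handling is the subtle part of the format_sql.ts hunk above: the old `query.replace(/\\/g, '')` deleted every backslash, which also mangled doubled backslashes, and it ran even on raw `r#"…"#` strings where backslashes are literal. The new pair-wise replacement keeps the escaped character itself. A self-contained sketch of the same idea (helper name illustrative, not repo code):

    function unescapeRustString(query: string, isRaw: boolean): string {
        if (isRaw) {
            // In r#"…"# raw strings backslashes are literal; leave them alone.
            return query;
        }
        // Replace each escape pair `\x` with `x`, so `\"` -> `"` and `\\` -> `\`;
        // a bare replace(/\\/g, '') would instead turn `\\` into nothing at all.
        return query.replace(/\\(.|\n)/g, '$1');
    }

    // unescapeRustString('SELECT \\"a\\"', false) === 'SELECT "a"'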
diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts
index 2bfaa7d5c8b..290e868439f 100644
--- a/infrastructure/zk/src/hyperchain_wizard.ts
+++ b/infrastructure/zk/src/hyperchain_wizard.ts
@@ -606,6 +606,9 @@ type L1Token = {

 export function getTokens(network: string): L1Token[] {
     const configPath = `${process.env.ZKSYNC_HOME}/etc/tokens/${network}.json`;
+    if (!fs.existsSync(configPath)) {
+        return [];
+    }
     try {
         return JSON.parse(
             fs.readFileSync(configPath, {
@@ -772,7 +775,7 @@ async function configDemoHyperchain(cmd: Command) {
     const deployerPrivateKey = process.env.DEPLOYER_PRIVATE_KEY;
     const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY;
     const deployL2Weth = Boolean(process.env.DEPLOY_L2_WETH || false);
-    const deployTestTokens = Boolean(process.env.DEPLOY_TEST_TOKENS || false);
+    const deployTestTokens = Boolean(process.env.DEPLOY_TEST_TOKENS || true);

     const initArgs: InitArgs = {
         skipSubmodulesCheckout: false,
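One wrinkle in the hyperchain_wizard.ts hunk above: `Boolean(process.env.DEPLOY_TEST_TOKENS || true)` is always `true`, since any non-empty string is truthy and an unset variable falls through to the `true` literal, so even `DEPLOY_TEST_TOKENS=false` would deploy test tokens. If an explicit opt-out were intended, a parse along these lines would do it (hypothetical helper, not repo code):

    // Read a boolean env flag with a default, treating the literal strings
    // 'false', '0', and 'no' as opt-outs instead of relying on
    // Boolean(x || true), which can never evaluate to false.
    function envFlag(name: string, defaultValue: boolean): boolean {
        const raw = process.env[name];
        if (raw === undefined || raw === '') {
            return defaultValue;
        }
        return !['false', '0', 'no'].includes(raw.toLowerCase());
    }

    // const deployTestTokens = envFlag('DEPLOY_TEST_TOKENS', true);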
diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts
index fc83655b48f..4b7ed461dc4 100644
--- a/infrastructure/zk/src/lint.ts
+++ b/infrastructure/zk/src/lint.ts
@@ -30,12 +30,12 @@ async function lintContracts(check: boolean = false) {

 async function clippy() {
     process.chdir(process.env.ZKSYNC_HOME!);
-    await utils.spawn('cargo clippy --tests -- -D warnings');
+    await utils.spawn('cargo clippy --tests --locked -- -D warnings -D unstable_features');
 }

 async function proverClippy() {
     process.chdir(process.env.ZKSYNC_HOME! + '/prover');
-    await utils.spawn('cargo clippy --tests -- -D warnings -A incomplete_features');
+    await utils.spawn('cargo clippy --tests --locked -- -D warnings -A incomplete_features');
 }

 const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts'];
diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts
index e4132e64b20..a8044e75b1e 100644
--- a/infrastructure/zk/src/run/run.ts
+++ b/infrastructure/zk/src/run/run.ts
@@ -56,7 +56,7 @@ export async function tokenInfo(address: string) {

 // installs all dependencies
 export async function yarn() {
-    await utils.spawn('yarn');
+    await utils.spawn('yarn install --frozen-lockfile');
 }

 export async function deployTestkit(genesisRoot: string) {
@@ -113,12 +113,6 @@ export async function readVariable(address: string, contractName: string, variab
     );
 }

-export async function cross_en_checker() {
-    let logLevel = 'RUST_LOG=cross_external_nodes_checker=debug';
-    let suffix = 'cargo run --release --bin cross_external_nodes_checker';
-    await utils.spawn(`${logLevel} ${suffix}`);
-}
-
 export async function snapshots_creator() {
     process.chdir(`${process.env.ZKSYNC_HOME}`);
     let logLevel = 'RUST_LOG=snapshots_creator=debug';
@@ -127,7 +121,7 @@
 export const command = new Command('run').description('run miscellaneous applications').addCommand(dataRestore.command);

 command.command('test-accounts').description('print ethereum test accounts').action(testAccounts);
-command.command('yarn').description('install all JS dependencies').action(yarn);
+command.command('yarn install --frozen-lockfile').description('install all JS dependencies').action(yarn);
 command.command('cat-logs [exit_code]').description('print server and prover logs').action(catLogs);

 command
@@ -194,107 +188,3 @@ command
     });

 command.command('snapshots-creator').action(snapshots_creator);
-
-command
-    .command('cross-en-checker')
-    .description('run the cross external nodes checker. See Checker Readme the default run mode and configuration.')
-    .option(
-        '--mode <mode>',
-        '`Rpc` to run only the RPC checker; `PubSub` to run only the PubSub checker; `All` to run both.'
-    )
-    .option(
-        '--env <env>',
-        `Provide the env the checker will test in to use the default urls for that env. 'Local', 'Stage, 'Testnet', or 'Mainnet'`
-    )
-    .option('--main_node_http_url <url>', 'Manually provide the HTTP URL of the main node')
-    .option('--instances_http_urls <urls>', 'Manually provide the HTTP URLs of the instances to check')
-    .option('--main_node_ws_url <url>', 'Manually provide the WS URL of the main node')
-    .option('--instances_ws_urls <urls>', 'Manually provide the WS URLs of the instances to check')
-    .option(
-        '--rpc_mode <rpc_mode>',
-        'The mode to run the RPC checker in. `Triggered` to run once; `Continuous` to run forever.'
-    )
-    .option(
-        '--start_miniblock <start_miniblock>',
-        'Check all miniblocks starting from this. If not set, then check from genesis. Inclusive.'
-    )
-    .option(
-        '--finish_miniblock <finish_miniblock>',
-        'For Triggered mode. If not set, then check all available miniblocks. Inclusive.'
-    )
-    .option(
-        '--max_transactions_to_check <max_transactions_to_check>',
-        'The maximum number of transactions to be checked at random in each miniblock.'
-    )
-    .option(
-        '--instance_poll_period <instance_poll_period>',
-        'For RPC mode. In seconds, how often to poll the instance node for new miniblocks.'
-    )
-    .option(
-        '--subscription_duration <subscription_duration>',
-        'For PubSub mode. Time in seconds for a subscription to be active. If not set, then the subscription will run forever.'
-    )
-    .action(async (cmd: Command) => {
-        interface Environment {
-            httpMain: string;
-            httpInstances: string;
-            wsMain: string;
-            wsInstances: string;
-        }
-
-        const nodeUrls: Record<string, Environment> = {
-            Local: {
-                httpMain: 'http://127.0.0.1:3050',
-                httpInstances: 'http://127.0.0.1:3060',
-                wsMain: 'ws://127.0.0.1:3051',
-                wsInstances: 'ws://127.0.0.1:3061'
-            },
-            Stage: {
-                httpMain: 'https://z2-dev-api.zksync.dev:443',
-                httpInstances: 'https://external-node-dev.zksync.dev:443',
-                wsMain: 'wss://z2-dev-api.zksync.dev:443/ws',
-                wsInstances: 'wss://external-node-dev.zksync.dev:443/ws'
-            },
-            Testnet: {
-                httpMain: 'https://zksync2-testnet.zksync.dev:443',
-                httpInstances: 'https://external-node-testnet.zksync.dev:443',
-                wsMain: 'wss://zksync2-testnet.zksync.dev:443/ws',
-                wsInstances: 'wss://external-node-testnet.zksync.dev:443/ws'
-            },
-            Mainnet: {
-                httpMain: 'https://zksync2-mainnet.zksync.io:443',
-                httpInstances: 'https://external-node-mainnet.zksync.dev:443',
-                wsMain: 'wss://zksync2-mainnet.zksync.io:443/ws',
-                wsInstances: 'wss://external-node-mainnet.zksync.dev:443/ws'
-            }
-        };
-
-        if (cmd.env && nodeUrls[cmd.env]) {
-            process.env.CHECKER_MAIN_NODE_HTTP_URL = nodeUrls[cmd.env].httpMain;
-            process.env.CHECKER_INSTANCES_HTTP_URLS = nodeUrls[cmd.env].httpInstances;
-            process.env.CHECKER_MAIN_NODE_WS_URL = nodeUrls[cmd.env].wsMain;
-            process.env.CHECKER_INSTANCES_WS_URLS = nodeUrls[cmd.env].wsInstances;
-        }
-
-        const envVarMap = {
-            mode: 'CHECKER_MODE',
-            rpc_mode: 'CHECKER_RPC_MODE',
-            main_node_http_url: 'CHECKER_MAIN_NODE_HTTP_URL',
-            instances_http_urls: 'CHECKER_INSTANCES_HTTP_URLS',
-            main_node_ws_url: 'CHECKER_MAIN_NODE_WS_URL',
-            instances_ws_urls: 'CHECKER_INSTANCES_WS_URLS',
-            start_miniblock: 'CHECKER_START_MINIBLOCK',
-            finish_miniblock: 'CHECKER_FINISH_MINIBLOCK',
-            max_transactions_to_check: 'CHECKER_MAX_TRANSACTIONS_TO_CHECK',
-            instance_poll_period: 'CHECKER_INSTANCE_POLL_PERIOD',
-            subscription_duration: 'CHECKER_SUBSCRIPTION_DURATION'
-        };
-
-        for (const [cmdOption, envVar] of Object.entries(envVarMap)) {
-            if (cmd[cmdOption]) {
-                process.env[envVar] = cmd[cmdOption];
-            }
-        }
-
-        await cross_en_checker();
-    });
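One catch in the run.ts hunk above: Commander parses everything after the first space in a `.command()` name string as argument placeholders, and literal tokens like `install` or `--frozen-lockfile` are not valid placeholders, so depending on the Commander version they are either silently ignored or mis-registered; either way they do nothing here, since the spawn string already carries the flags. A sketch that keeps the registration unambiguous, assuming the intent was only to harden the spawned command:

    // Keep the subcommand registered as plain `yarn`; the extra flags belong
    // in the spawned process, not in Commander's name-and-args string.
    command
        .command('yarn')
        .description('install all JS dependencies')
        .action(async () => {
            await utils.spawn('yarn install --frozen-lockfile');
        });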
diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts
index e7ae689057f..52b3e66c744 100644
--- a/infrastructure/zk/src/server.ts
+++ b/infrastructure/zk/src/server.ts
@@ -25,7 +25,7 @@ export async function server(rebuildTree: boolean, uring: boolean, components?:
     await utils.spawn(`cargo run --bin zksync_server --release ${options}`);
 }

-export async function externalNode(reinit: boolean = false) {
+export async function externalNode(reinit: boolean = false, enableConsensus: boolean = false) {
     if (process.env.ZKSYNC_ENV != 'ext-node') {
         console.warn(`WARNING: using ${process.env.ZKSYNC_ENV} environment for external node`);
         console.warn('If this is a mistake, set $ZKSYNC_ENV to "ext-node" or other environment');
@@ -45,7 +45,11 @@ export async function externalNode(reinit: boolean = false) {
         clean(path.dirname(process.env.EN_MERKLE_TREE_PATH!));
     }

-    await utils.spawn('cargo run --release --bin zksync_external_node');
+    let options = '';
+    if (enableConsensus) {
+        options += ' --enable-consensus';
+    }
+    await utils.spawn(`cargo run --release --bin zksync_external_node -- ${options}`);
 }

 async function create_genesis(cmd: string) {
@@ -135,6 +139,7 @@ export const serverCommand = new Command('server')
 export const enCommand = new Command('external-node')
     .description('start zksync external node')
     .option('--reinit', 'reset postgres and rocksdb before starting')
+    .option('--enable-consensus', 'enables consensus component')
     .action(async (cmd: Command) => {
-        await externalNode(cmd.reinit);
+        await externalNode(cmd.reinit, cmd.enableConsensus);
     });
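The server.ts hunks above route the new flag through the `--` separator, so cargo forwards `--enable-consensus` to the built binary instead of trying to parse it itself. A sketch of that pattern in isolation (function name hypothetical, not repo code):

    // Collect optional CLI flags and forward them to the binary after `--`.
    function binaryArgs(opts: { enableConsensus?: boolean }): string {
        const flags: string[] = [];
        if (opts.enableConsensus) {
            flags.push('--enable-consensus');
        }
        return flags.join(' ');
    }

    // await utils.spawn(`cargo run --release --bin zksync_external_node -- ${binaryArgs(cmd)}`);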
diff --git a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts
index 7bcdd813514..b5c7f28f5d0 100644
--- a/infrastructure/zk/src/test/test.ts
+++ b/infrastructure/zk/src/test/test.ts
@@ -12,7 +12,7 @@ export async function l1Contracts() {

 export async function prover() {
     process.chdir(process.env.ZKSYNC_HOME! + '/prover');
-    await utils.spawn('cargo test --release --workspace');
+    await utils.spawn('cargo test --release --workspace --locked');
 }

 export async function js() {
diff --git a/infrastructure/zk/src/up.ts b/infrastructure/zk/src/up.ts
index de0ef41cf8c..5cfed342669 100644
--- a/infrastructure/zk/src/up.ts
+++ b/infrastructure/zk/src/up.ts
@@ -44,7 +44,7 @@ export async function up(composeFile?: string) {

 export const command = new Command('up')
     .description('start development containers')
-    .option('--docker-file', 'path to a custom docker file')
+    .option('--docker-file <path>', 'path to a custom docker file')
     .action(async (cmd) => {
         await up(cmd.dockerFile);
     });
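For context on the up.ts fix above: in Commander, an option declared without a placeholder is a boolean flag, so `cmd.dockerFile` could only ever be `true`; the angle-bracket placeholder makes the option consume the next CLI token as its value. A minimal sketch (assuming Commander's standard argv shape for `parse`):

    import { Command } from 'commander';

    // With `<path>`, the token after --docker-file is captured as the value;
    // without it, Commander would just set the flag to true.
    const cmd = new Command('up').option('--docker-file <path>', 'path to a custom docker file');
    cmd.parse(['node', 'zk', '--docker-file', 'docker-compose-runner.yml']);
    // cmd.opts().dockerFile === 'docker-compose-runner.yml'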
"funty 1.1.0", - "radium 0.6.2", - "tap", - "wyz 0.2.0", -] - [[package]] name = "bitvec" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "funty 2.0.0", - "radium 0.7.0", + "funty", + "radium", "tap", - "wyz 0.5.1", + "wyz", ] [[package]] @@ -503,7 +439,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac", "digest 0.9.0", "opaque-debug", ] @@ -586,16 +522,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher", -] - [[package]] name = "block-padding" version = "0.2.1" @@ -628,7 +554,7 @@ dependencies = [ "crypto-bigint 0.5.5", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum?branch=main)", "derivative", - "ethereum-types 0.14.1", + "ethereum-types", "firestorm", "itertools 0.10.5", "lazy_static", @@ -862,19 +788,10 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - [[package]] name = "circuit_definitions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#fb47657ae3b6ff6e4bb5199964d3d37212978200" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#de2ecad62ac8c12777e576dca20311ad8ec770d1" dependencies = [ "crossbeam 0.8.4", "derivative", @@ -888,7 +805,7 @@ dependencies = [ [[package]] name = "circuit_definitions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#ef77c44f919ba161df5976ec3899cf57a1585e7c" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#badf56d613b77c25f79b684c161eab7d1e385176" dependencies = [ "crossbeam 0.8.4", "derivative", @@ -958,15 +875,6 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "cmake" version = "0.1.50" @@ -976,22 +884,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codegen" -version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#82f96b7156551087f1c9bfe4f0ea68845b6debfc" -dependencies = [ - "ethereum-types 0.14.1", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "handlebars", - "hex", - "paste", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon)", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "codegen" version = "0.2.0" @@ -1227,7 +1119,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - 
"autocfg 1.1.0", + "autocfg", "cfg-if 0.1.10", "crossbeam-utils 0.7.2", "lazy_static", @@ -1271,7 +1163,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 1.1.0", + "autocfg", "cfg-if 0.1.10", "lazy_static", ] @@ -1332,16 +1224,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "cs_derive" version = "0.1.0" @@ -1365,15 +1247,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher", -] - [[package]] name = "ctrlc" version = "3.4.2" @@ -1798,7 +1671,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" dependencies = [ - "ethereum-types 0.14.1", + "ethereum-types", "hex", "once_cell", "regex", @@ -1809,19 +1682,6 @@ dependencies = [ "uint", ] -[[package]] -name = "ethbloom" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" -dependencies = [ - "crunchy", - "fixed-hash 0.7.0", - "impl-rlp", - "impl-serde 0.3.2", - "tiny-keccak 2.0.2", -] - [[package]] name = "ethbloom" version = "0.13.0" @@ -1829,37 +1689,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", - "fixed-hash 0.8.0", + "fixed-hash", "impl-rlp", - "impl-serde 0.4.0", + "impl-serde", "tiny-keccak 2.0.2", ] -[[package]] -name = "ethereum-types" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" -dependencies = [ - "ethbloom 0.11.1", - "fixed-hash 0.7.0", - "impl-rlp", - "impl-serde 0.3.2", - "primitive-types 0.10.1", - "uint", -] - [[package]] name = "ethereum-types" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ - "ethbloom 0.13.0", - "fixed-hash 0.8.0", + "ethbloom", + "fixed-hash", "impl-rlp", - "impl-serde 0.4.0", - "primitive-types 0.12.2", + "impl-serde", + "primitive-types", "uint", ] @@ -1953,18 +1799,6 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c5f6c2c942da57e2aaaa84b8a521489486f14e75e7fa91dab70aba913975f98" -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - [[package]] name = "fixed-hash" version = "0.8.0" @@ -2102,12 +1936,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -[[package]] -name = "funty" -version = "1.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - [[package]] name = "funty" version = "2.0.0" @@ -2239,17 +2067,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.12" @@ -2258,7 +2075,7 @@ checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -2393,20 +2210,6 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "handlebars" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94eae21d01d20dabef65d8eda734d83df6e2dea8166788804be9bd6bc92448fa" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -2513,17 +2316,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ - "hmac 0.12.1", -] - -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", + "hmac", ] [[package]] @@ -2695,22 +2488,13 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec 2.3.1", -] - [[package]] name = "impl-codec" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.9", + "parity-scale-codec", ] [[package]] @@ -2722,15 +2506,6 @@ dependencies = [ "rlp", ] -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", -] - [[package]] name = "impl-serde" version = "0.4.0" @@ -2757,7 +2532,7 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.1.0", + "autocfg", "hashbrown 0.12.3", ] @@ -3030,7 +2805,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ - "autocfg 1.1.0", + "autocfg", "scopeguard", ] @@ -3124,7 +2899,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -3258,7 +3033,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -3373,20 +3148,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "num" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f" -dependencies = [ - "num-bigint 0.3.3", - "num-complex 0.3.1", - "num-integer", - "num-iter", - "num-rational 0.3.2", - "num-traits", -] - [[package]] name = "num" version = "0.4.1" @@ -3394,10 +3155,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" dependencies = [ "num-bigint 0.4.4", - "num-complex 0.4.4", + "num-complex", "num-integer", "num-iter", - "num-rational 0.4.1", + "num-rational", "num-traits", ] @@ -3407,7 +3168,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", ] @@ -3418,7 +3179,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", "serde", @@ -3441,15 +3202,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "num-complex" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5" -dependencies = [ - "num-traits", -] - [[package]] name = "num-complex" version = "0.4.4" @@ -3488,7 +3240,7 @@ version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-traits", ] @@ -3498,7 +3250,7 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", ] @@ -3513,25 +3265,13 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" -dependencies = [ - "autocfg 1.1.0", - "num-bigint 0.3.3", - "num-integer", - "num-traits", -] - [[package]] name = "num-rational" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-bigint 0.4.4", "num-integer", "num-traits", @@ -3544,7 +3284,7 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ - "autocfg 1.1.0", + "autocfg", "libm", ] @@ -3723,44 +3463,6 @@ dependencies = [ "serde", ] -[[package]] -name = "parity-crypto" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b92ea9ddac0d6e1db7c49991e7d397d34a9fd814b4c93cda53788e8eef94e35" -dependencies = [ - "aes", - 
"aes-ctr", - "block-modes", - "digest 0.9.0", - "ethereum-types 0.12.1", - "hmac 0.10.1", - "lazy_static", - "pbkdf2 0.7.5", - "ripemd160", - "rustc-hex", - "scrypt", - "secp256k1 0.20.3", - "sha2 0.9.9", - "subtle", - "tiny-keccak 2.0.2", - "zeroize", -] - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.4", - "bitvec 0.20.4", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive 2.3.1", - "serde", -] - [[package]] name = "parity-scale-codec" version = "3.6.9" @@ -3768,25 +3470,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" dependencies = [ "arrayvec 0.7.4", - "bitvec 1.0.1", + "bitvec", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.9", + "parity-scale-codec-derive", "serde", ] -[[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 1.0.109", -] - [[package]] name = "parity-scale-codec-derive" version = "3.6.9" @@ -3822,44 +3512,12 @@ dependencies = [ "windows-targets 0.48.5", ] -[[package]] -name = "password-hash" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54986aa4bfc9b98c6a5f40184223658d187159d7b3c6af33f2b2aa25ae1db0fa" -dependencies = [ - "base64ct", - "rand_core 0.6.4", -] - [[package]] name = "paste" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" -[[package]] -name = "pbkdf2" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b8c0d71734018084da0c0354193a5edfb81b20d2d57a92c5b154aefc554a4a" -dependencies = [ - "crypto-mac 0.10.1", -] - -[[package]] -name = "pbkdf2" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf916dd32dd26297907890d99dc2740e33f6bd9073965af4ccff2967962f5508" -dependencies = [ - "base64ct", - "crypto-mac 0.10.1", - "hmac 0.10.1", - "password-hash", - "sha2 0.9.9", -] - [[package]] name = "peeking_take_while" version = "0.1.2" @@ -3890,51 +3548,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "pest" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", -] - 
-[[package]] -name = "pest_meta" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.8", -] - [[package]] name = "petgraph" version = "0.6.4" @@ -4076,29 +3689,16 @@ dependencies = [ "syn 2.0.48", ] -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash 0.7.0", - "impl-codec 0.5.1", - "impl-rlp", - "impl-serde 0.3.2", - "uint", -] - [[package]] name = "primitive-types" version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ - "fixed-hash 0.8.0", - "impl-codec 0.6.0", + "fixed-hash", + "impl-codec", "impl-rlp", - "impl-serde 0.4.0", + "impl-serde", "uint", ] @@ -4225,8 +3825,8 @@ dependencies = [ "lazy_static", "num-traits", "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift 0.3.0", + "rand_chacha", + "rand_xorshift", "regex-syntax 0.8.2", "rusty-fork", "tempfile", @@ -4372,7 +3972,7 @@ dependencies = [ "mach2", "once_cell", "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "web-sys", "winapi", ] @@ -4416,12 +4016,6 @@ dependencies = [ "proc-macro2 1.0.76", ] -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - [[package]] name = "radium" version = "0.7.0" @@ -4441,36 +4035,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.8", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift 0.1.1", - "winapi", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - [[package]] name = "rand" version = "0.8.5" @@ -4478,30 +4042,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", + "rand_chacha", "rand_core 0.6.4", ] -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - [[package]] name = "rand_chacha" version = "0.3.1" @@ -4527,93 +4071,13 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -[[package]] -name = "rand_core" -version = "0.5.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", + "getrandom", ] [[package]] @@ -4820,7 +4284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint 0.4.9", - "hmac 0.12.1", + "hmac", "zeroize", ] @@ -4830,7 +4294,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "hmac 0.12.1", + "hmac", "subtle", ] @@ -4856,31 +4320,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "getrandom 0.2.12", + "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", "windows-sys 0.48.0", ] -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "rkyv" version = "0.7.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5" dependencies = [ - "bitvec 1.0.1", + "bitvec", "bytecheck", "bytes", 
"hashbrown 0.12.3", @@ -5054,15 +4507,6 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" -[[package]] -name = "salsa20" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" -dependencies = [ - "cipher", -] - [[package]] name = "same-file" version = "1.0.6" @@ -5087,22 +4531,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "scrypt" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da492dab03f925d977776a0b7233d7b934d6dc2b94faead48928e2e9bacedb9" -dependencies = [ - "base64 0.13.1", - "hmac 0.10.1", - "pbkdf2 0.6.0", - "rand 0.7.3", - "rand_core 0.5.1", - "salsa20", - "sha2 0.9.9", - "subtle", -] - [[package]] name = "sct" version = "0.7.1" @@ -5147,32 +4575,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" -dependencies = [ - "rand 0.6.5", - "secp256k1-sys 0.4.2", -] - [[package]] name = "secp256k1" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" dependencies = [ - "secp256k1-sys 0.8.1", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", + "secp256k1-sys", ] [[package]] @@ -5590,7 +4999,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -5806,7 +5215,7 @@ dependencies = [ "generic-array", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "itoa", "log", "md-5", @@ -5848,7 +5257,7 @@ dependencies = [ "futures-util", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "home", "ipnetwork", "itoa", @@ -6471,12 +5880,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - [[package]] name = "uint" version = "0.9.5" @@ -6751,12 +6154,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6863,7 +6260,7 @@ dependencies = [ "bytes", "derive_more", "ethabi", - "ethereum-types 0.14.1", + "ethereum-types", "futures 0.3.30", "futures-timer", "headers", @@ -6876,7 +6273,7 @@ dependencies = [ "pin-project", "reqwest", "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "tiny-keccak 2.0.2", @@ -7098,12 +6495,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "wyz" -version 
= "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - [[package]] name = "wyz" version = "0.5.1" @@ -7161,7 +6552,7 @@ dependencies = [ "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", "k256 0.11.6", "lazy_static", - "num 0.4.1", + "num", "serde", "serde_json", "sha2 0.10.6", @@ -7177,7 +6568,7 @@ source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2#fbee2 dependencies = [ "anyhow", "lazy_static", - "num 0.4.1", + "num", "serde", "serde_json", "static_assertions", @@ -7192,7 +6583,7 @@ source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#fbee20 dependencies = [ "anyhow", "lazy_static", - "num 0.4.1", + "num", "serde", "serde_json", "static_assertions", @@ -7207,7 +6598,7 @@ source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#dd76fc dependencies = [ "anyhow", "lazy_static", - "num 0.4.1", + "num", "serde", "serde_json", "static_assertions", @@ -7222,7 +6613,7 @@ source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.1#6250db dependencies = [ "anyhow", "lazy_static", - "num 0.4.1", + "num", "serde", "serde_json", "static_assertions", @@ -7340,7 +6731,7 @@ version = "1.3.1" source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.1#00d4ad2292bd55374a0fa10fe11686d7a109d8a0" dependencies = [ "bitflags 1.3.2", - "ethereum-types 0.14.1", + "ethereum-types", "lazy_static", "sha2 0.10.8", ] @@ -7352,7 +6743,7 @@ source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1 dependencies = [ "bitflags 2.4.1", "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", - "ethereum-types 0.14.1", + "ethereum-types", "k256 0.11.6", "lazy_static", "sha2 0.10.6", @@ -7366,7 +6757,7 @@ source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1 dependencies = [ "bitflags 2.4.1", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "ethereum-types 0.14.1", + "ethereum-types", "k256 0.13.3", "lazy_static", "sha2 0.10.8", @@ -7380,7 +6771,7 @@ source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v dependencies = [ "bincode", "circuit_testing", - "codegen 0.2.0", + "codegen", "crossbeam 0.8.4", "derivative", "env_logger 0.9.3", @@ -7403,11 +6794,11 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#fb47657ae3b6ff6e4bb5199964d3d37212978200" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#de2ecad62ac8c12777e576dca20311ad8ec770d1" dependencies = [ "bincode", "circuit_definitions 0.1.0 (git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0)", - "codegen 0.2.0", + "codegen", "crossbeam 0.8.4", "derivative", "env_logger 0.9.3", @@ -7426,11 +6817,11 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.4.1" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#ef77c44f919ba161df5976ec3899cf57a1585e7c" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1#badf56d613b77c25f79b684c161eab7d1e385176" dependencies = [ "bincode", "circuit_definitions 0.1.0 (git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1)", - "codegen 0.2.0", + "codegen", 
"crossbeam 0.8.4", "curl", "derivative", @@ -7464,7 +6855,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "once_cell", @@ -7484,6 +6875,7 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", + "rand 0.8.5", "serde", "zksync_basic_types", ] @@ -7491,7 +6883,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "blst", @@ -7509,7 +6901,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "bit-vec", @@ -7529,7 +6921,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "async-trait", @@ -7547,7 +6939,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "thiserror", "zksync_concurrency", @@ -7586,9 +6978,10 @@ dependencies = [ "anyhow", "bigdecimal", "bincode", + "chrono", "hex", "itertools 0.10.5", - "num 0.4.1", + "num", "once_cell", "prost", "rand 0.8.5", @@ -7689,6 +7082,7 @@ dependencies = [ "zksync_env_config", "zksync_object_store", "zksync_prover_fri_types", + "zksync_prover_interface", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -7697,7 +7091,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "bit-vec", @@ -7715,7 +7109,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=5b3d383d7a65b0fbe2a771fecf4313f5083be9ae#5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" dependencies = [ "anyhow", "heck 0.4.1", @@ -7780,6 +7174,7 @@ dependencies = [ "zksync_dal", "zksync_env_config", "zksync_object_store", + "zksync_prover_interface", "zksync_types", "zksync_utils", ] @@ -7812,6 +7207,19 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_prover_interface" +version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "serde_with", + "strum", + "zkevm_test_harness 1.3.3", + "zksync_object_store", + "zksync_types", +] + [[package]] name = "zksync_queued_job_processor" version = "0.1.0" @@ -7855,8 +7263,6 @@ dependencies = [ name = "zksync_system_constants" version = "0.1.0" dependencies = [ - "anyhow", - "num 0.3.1", "once_cell", "zksync_basic_types", "zksync_utils", @@ -7869,24 +7275,18 @@ dependencies = [ "anyhow", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono", - "codegen 0.1.0", - "ethereum-types 0.12.1", "hex", - "num 0.4.1", + "num", "num_enum", "once_cell", - "parity-crypto", "prost", "rlp", + "secp256k1", "serde", "serde_json", "serde_with", "strum", "thiserror", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", - "zk_evm 1.4.0", - "zk_evm 1.4.1", - "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -7907,7 +7307,7 @@ dependencies = [ "hex", "itertools 0.10.5", "metrics", - "num 0.4.1", + "num", "reqwest", "serde", "thiserror", @@ -7949,6 +7349,7 @@ dependencies = [ "zksync_object_store", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_interface", "zksync_queued_job_processor", "zksync_state", "zksync_system_constants", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e450fce09c0..fd0d764ea55 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -15,6 +15,7 @@ members = [ resolver = "2" + # for `perf` profiling [profile.perf] inherits = "release" diff --git a/prover/proof_fri_compressor/Cargo.toml b/prover/proof_fri_compressor/Cargo.toml index 4ac0701acd1..e0c314e6131 100644 --- a/prover/proof_fri_compressor/Cargo.toml +++ b/prover/proof_fri_compressor/Cargo.toml @@ -13,6 +13,7 @@ zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } +zksync_prover_interface = { path = "../../core/lib/prover_interface" } zksync_utils = { path = "../../core/lib/utils" } prometheus_exporter = { path = "../../core/lib/prometheus_exporter" } zksync_prover_fri_types = { path = "../prover_fri_types" } diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs index d224ca60abe..ecf26cacd4d 100644 --- a/prover/proof_fri_compressor/src/compressor.rs +++ b/prover/proof_fri_compressor/src/compressor.rs @@ -23,8 +23,9 @@ use zksync_prover_fri_types::{ }, get_current_pod_name, AuxOutputWitnessWrapper, FriProofWrapper, }; +use zksync_prover_interface::outputs::L1BatchProofForL1; use zksync_queued_job_processor::JobProcessor; -use zksync_types::{aggregated_operations::L1BatchProofForL1, L1BatchNumber}; +use zksync_types::L1BatchNumber; use zksync_vk_setup_data_server_fri::{get_recursive_layer_vk_for_circuit_type, get_snark_vk}; use crate::metrics::METRICS; diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs index 
f880a296af5..82b78024a98 100644 --- a/prover/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -6,7 +6,7 @@ pub mod gpu_prover { use shivini::{gpu_prove_from_external_witness_data, ProverContext}; use tokio::task::JoinHandle; use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; - use zksync_dal::ConnectionPool; + use zksync_dal::{fri_prover_dal::types::SocketAddress, ConnectionPool}; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ @@ -26,7 +26,7 @@ pub mod gpu_prover { CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_queued_job_processor::{async_trait, JobProcessor}; - use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::SocketAddress}; + use zksync_types::basic_fri_types::CircuitIdRoundTuple; use zksync_vk_setup_data_server_fri::{ get_setup_data_for_circuit_type, GoldilocksGpuProverSetupData, }; diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index ab2dfa30a9a..d867fd5e93c 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -11,7 +11,10 @@ use tokio::{ use zksync_config::configs::{ fri_prover_group::FriProverGroupConfig, FriProverConfig, PostgresConfig, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ + fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, + ConnectionPool, +}; use zksync_env_config::{ object_store::{ProverObjectStoreConfig, PublicObjectStoreConfig}, FromEnv, @@ -19,10 +22,7 @@ use zksync_env_config::{ use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::{ - basic_fri_types::CircuitIdRoundTuple, - proofs::{GpuProverInstanceStatus, SocketAddress}, -}; +use zksync_types::basic_fri_types::CircuitIdRoundTuple; use zksync_utils::wait_for_tasks::wait_for_tasks; mod gpu_prover_job_processor; diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs index 0f84ad9587d..8c564ea13a0 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/prover_fri/src/socket_listener.rs @@ -11,10 +11,13 @@ pub mod gpu_socket_listener { net::{TcpListener, TcpStream}, sync::watch, }; - use zksync_dal::ConnectionPool; + use zksync_dal::{ + fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, + ConnectionPool, + }; use zksync_object_store::bincode; use zksync_prover_fri_types::{CircuitWrapper, ProverServiceDataKey, WitnessVectorArtifacts}; - use zksync_types::proofs::{AggregationRound, GpuProverInstanceStatus, SocketAddress}; + use zksync_types::basic_fri_types::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_finalization_hints, get_round_for_recursive_circuit_type, }; diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs index 37f5eea645b..b111f22605c 100644 --- a/prover/prover_fri/src/utils.rs +++ b/prover/prover_fri/src/utils.rs @@ -27,7 +27,10 @@ use zksync_prover_fri_types::{ CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_prover_fri_utils::get_base_layer_circuit_id_for_recursive_layer; -use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::{AggregationRound, CircuitIdRoundTuple}, + L1BatchNumber, +}; use crate::metrics::METRICS; diff --git 
a/prover/prover_fri/tests/basic_test.rs b/prover/prover_fri/tests/basic_test.rs index 89089ac8249..ebcc43e93af 100644 --- a/prover/prover_fri/tests/basic_test.rs +++ b/prover/prover_fri/tests/basic_test.rs @@ -4,10 +4,12 @@ use anyhow::Context as _; use serde::Serialize; use zksync_config::{configs::FriProverConfig, ObjectStoreConfig}; use zksync_env_config::FromEnv; -use zksync_object_store::{bincode, FriCircuitKey, ObjectStoreFactory}; +use zksync_object_store::{bincode, ObjectStoreFactory}; use zksync_prover_fri::prover_job_processor::Prover; -use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey}; -use zksync_types::{proofs::AggregationRound, L1BatchNumber}; +use zksync_prover_fri_types::{ + keys::FriCircuitKey, CircuitWrapper, ProverJob, ProverServiceDataKey, +}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use zksync_vk_setup_data_server_fri::generate_cpu_base_layer_setup_data; fn compare_serialized<T: Serialize>(expected: &T, actual: &T) { diff --git a/prover/prover_fri_gateway/Cargo.toml b/prover/prover_fri_gateway/Cargo.toml index 3e826a2f5c5..bfe772bef64 100644 --- a/prover/prover_fri_gateway/Cargo.toml +++ b/prover/prover_fri_gateway/Cargo.toml @@ -11,6 +11,7 @@ zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } +zksync_prover_interface = { path = "../../core/lib/prover_interface" } zksync_utils = { path = "../../core/lib/utils" } prometheus_exporter = { path = "../../core/lib/prometheus_exporter" } vlog = { path = "../../core/lib/vlog" } diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/prover_fri_gateway/src/main.rs index 15329ce955a..0ab2475d419 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/prover_fri_gateway/src/main.rs @@ -6,7 +6,7 @@ use zksync_config::configs::{FriProverGatewayConfig, PostgresConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; -use zksync_types::prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}; +use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::api_data_fetcher::{PeriodicApiStruct, PROOF_GENERATION_DATA_PATH, SUBMIT_PROOF_PATH}; diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index a25d447ad22..09d322ce940 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use zksync_types::prover_server_api::{ +use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, }; diff --git a/prover/prover_fri_gateway/src/proof_submitter.rs b/prover/prover_fri_gateway/src/proof_submitter.rs index 78c7a6a6d8e..3af3e81e20f 100644 --- a/prover/prover_fri_gateway/src/proof_submitter.rs +++ b/prover/prover_fri_gateway/src/proof_submitter.rs @@ -1,9 +1,7 @@ use async_trait::async_trait; use zksync_dal::fri_proof_compressor_dal::ProofCompressionJobStatus; -use zksync_types::{ - prover_server_api::{SubmitProofRequest, SubmitProofResponse}, - L1BatchNumber, -}; +use zksync_prover_interface::api::{SubmitProofRequest, SubmitProofResponse}; +use zksync_types::L1BatchNumber; use
crate::api_data_fetcher::{PeriodicApi, PeriodicApiStruct}; diff --git a/prover/prover_fri_types/src/keys.rs b/prover/prover_fri_types/src/keys.rs new file mode 100644 index 00000000000..729db754178 --- /dev/null +++ b/prover/prover_fri_types/src/keys.rs @@ -0,0 +1,37 @@ +//! Different key types for object store. + +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +/// Storage key for an [`AggregationWrapper`]. +#[derive(Debug, Clone, Copy)] +pub struct AggregationsKey { + pub block_number: L1BatchNumber, + pub circuit_id: u8, + pub depth: u16, +} + +/// Storage key for a [`ClosedFormInputWrapper`]. +#[derive(Debug, Clone, Copy)] +pub struct ClosedFormInputKey { + pub block_number: L1BatchNumber, + pub circuit_id: u8, +} + +/// Storage key for a [`CircuitWrapper`]. +#[derive(Debug, Clone, Copy)] +pub struct FriCircuitKey { + pub block_number: L1BatchNumber, + pub sequence_number: usize, + pub circuit_id: u8, + pub aggregation_round: AggregationRound, + pub depth: u16, +} + +/// Storage key for a [`ZkSyncCircuit`]. +#[derive(Debug, Clone, Copy)] +pub struct CircuitKey<'a> { + pub block_number: L1BatchNumber, + pub sequence_number: usize, + pub circuit_type: &'a str, + pub aggregation_round: AggregationRound, +} diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs index c244cb99f5a..a1572ee2a2c 100644 --- a/prover/prover_fri_types/src/lib.rs +++ b/prover/prover_fri_types/src/lib.rs @@ -11,9 +11,12 @@ use circuit_definitions::{ zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness, ZkSyncDefaultRoundFunction, }; -use zksync_object_store::{serialize_using_bincode, Bucket, FriCircuitKey, StoredObject}; -use zksync_types::{proofs::AggregationRound, L1BatchNumber}; +use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; +use crate::keys::FriCircuitKey; + +pub mod keys; pub mod queue; #[derive(serde::Serialize, serde::Deserialize, Clone)] diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs index 991683b7f9b..39971555f93 100644 --- a/prover/prover_fri_utils/src/lib.rs +++ b/prover/prover_fri_utils/src/lib.rs @@ -1,7 +1,7 @@ use std::time::Instant; use zksync_dal::StorageProcessor; -use zksync_object_store::{FriCircuitKey, ObjectStore}; +use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ circuit_definitions::recursion_layer::{ @@ -9,10 +9,12 @@ use zksync_prover_fri_types::{ }, zkevm_circuits::scheduler::aux::BaseLayerCircuitType, }, - get_current_pod_name, CircuitWrapper, ProverJob, ProverServiceDataKey, + get_current_pod_name, + keys::FriCircuitKey, + CircuitWrapper, ProverJob, ProverServiceDataKey, }; use zksync_types::{ - basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, + basic_fri_types::{AggregationRound, CircuitIdRoundTuple}, protocol_version::L1VerifierConfig, }; diff --git a/prover/prover_fri_utils/src/metrics.rs b/prover/prover_fri_utils/src/metrics.rs index acb48bacb3e..b33bcc6d448 100644 --- a/prover/prover_fri_utils/src/metrics.rs +++ b/prover/prover_fri_utils/src/metrics.rs @@ -1,7 +1,7 @@ use std::time::Duration; use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; -use zksync_types::proofs::AggregationRound; +use zksync_types::basic_fri_types::AggregationRound; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] pub struct CircuitLabels { diff --git a/prover/prover_fri_utils/src/socket_utils.rs
b/prover/prover_fri_utils/src/socket_utils.rs index c0c5ddcbcb9..d6d7e80f8cb 100644 --- a/prover/prover_fri_utils/src/socket_utils.rs +++ b/prover/prover_fri_utils/src/socket_utils.rs @@ -4,20 +4,17 @@ use std::{ time::{Duration, Instant}, }; -use zksync_types::proofs::SocketAddress; - pub fn send_assembly( job_id: u32, mut serialized: &[u8], - address: &SocketAddress, + socket_address: &SocketAddr, ) -> Result<(Duration, u64), String> { tracing::trace!( "Sending assembly to {}:{}, job id {{{job_id}}}", - address.host, - address.port + socket_address.ip(), + socket_address.port() ); - let socket_address = SocketAddr::new(address.host, address.port); let started_at = Instant::now(); let mut error_messages = vec![]; diff --git a/prover/vk_setup_data_generator_server_fri/src/lib.rs b/prover/vk_setup_data_generator_server_fri/src/lib.rs index bd3a8cfcb6f..b3d28631458 100644 --- a/prover/vk_setup_data_generator_server_fri/src/lib.rs +++ b/prover/vk_setup_data_generator_server_fri/src/lib.rs @@ -51,7 +51,7 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; -use zksync_types::proofs::AggregationRound; +use zksync_types::basic_fri_types::AggregationRound; #[cfg(feature = "gpu")] use {shivini::cs::GpuSetup, std::alloc::Global}; diff --git a/prover/vk_setup_data_generator_server_fri/src/main.rs b/prover/vk_setup_data_generator_server_fri/src/main.rs index 158a4390a96..ec4ef461d65 100644 --- a/prover/vk_setup_data_generator_server_fri/src/main.rs +++ b/prover/vk_setup_data_generator_server_fri/src/main.rs @@ -14,7 +14,7 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; -use zksync_types::proofs::AggregationRound; +use zksync_types::basic_fri_types::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints, save_recursive_layer_vk, save_snark_vk, diff --git a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs index 5df4b75b3a6..bd36e8e2b3b 100644 --- a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs +++ b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs @@ -14,7 +14,7 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; -use zksync_types::proofs::AggregationRound; +use zksync_types::basic_fri_types::AggregationRound; use zksync_vk_setup_data_server_fri::{ generate_cpu_base_layer_setup_data, get_finalization_hints, get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type, save_setup_data, diff --git a/prover/vk_setup_data_generator_server_fri/src/tests.rs b/prover/vk_setup_data_generator_server_fri/src/tests.rs index 8c2c6fa9937..0059a646fd8 100644 --- a/prover/vk_setup_data_generator_server_fri/src/tests.rs +++ b/prover/vk_setup_data_generator_server_fri/src/tests.rs @@ -8,7 +8,7 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; -use zksync_types::proofs::AggregationRound; +use zksync_types::basic_fri_types::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_base_layer_vk_for_circuit_type, get_base_path, get_file_path, get_finalization_hints, get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type, diff --git a/prover/vk_setup_data_generator_server_fri/src/utils.rs b/prover/vk_setup_data_generator_server_fri/src/utils.rs index ee23e171d55..8278d9e9eb5 100644 --- a/prover/vk_setup_data_generator_server_fri/src/utils.rs +++ b/prover/vk_setup_data_generator_server_fri/src/utils.rs @@ -15,10 +15,6 
@@ use zkevm_test_harness::{ sha3::{Digest, Keccak256}, toolset::GeometryConfig, witness::{ - full_block_artifact::{ - BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, - BlockBasicCircuitsPublicInputs, - }, recursive_aggregation::compute_leaf_params, tree::{BinarySparseStorageTree, ZKSyncTestingTree}, }, @@ -27,10 +23,9 @@ use zksync_prover_fri_types::circuit_definitions::{ aux_definitions::witness_oracle::VmWitnessOracle, base_layer_proof_config, boojum::{ - field::goldilocks::{GoldilocksExt2, GoldilocksField}, + field::goldilocks::GoldilocksField, gadgets::{ queue::full_state_queue::FullStateCircuitQueueRawWitness, - recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, traits::allocatable::CSAllocatable, }, }, @@ -91,10 +86,92 @@ pub fn get_basic_circuits( >, > { let path = format!("{}/witness_artifacts.json", get_base_path()); - let test_artifact = read_witness_artifact(&path).context("read_witness_artifact()")?; - let (base_layer_circuit, _, _, _) = get_circuits(test_artifact, cycle_limit, geometry); - Ok(base_layer_circuit - .into_flattened_set() + let mut test_artifact = read_witness_artifact(&path).context("read_witness_artifact()")?; + + let mut storage_impl = InMemoryStorage::new(); + let mut tree = ZKSyncTestingTree::empty(); + + test_artifact.entry_point_address = + *zk_evm::zkevm_opcode_defs::system_params::BOOTLOADER_FORMAL_ADDRESS; + + let predeployed_contracts = test_artifact + .predeployed_contracts + .clone() + .into_iter() + .chain(Some(( + test_artifact.entry_point_address, + test_artifact.entry_point_code.clone(), + ))) + .collect::<HashMap<_, _>>(); + save_predeployed_contracts(&mut storage_impl, &mut tree, &predeployed_contracts); + + let used_bytecodes = HashMap::from_iter( + test_artifact + .predeployed_contracts + .values() + .map(|bytecode| { + ( + bytecode_to_code_hash(bytecode).unwrap().into(), + bytecode.clone(), + ) + }) + .chain( + Some(test_artifact.default_account_code.clone()).map(|bytecode| { + ( + bytecode_to_code_hash(&bytecode).unwrap().into(), + bytecode.clone(), + ) + }), + ), + ); + + let previous_enumeration_index = tree.next_enumeration_index(); + let previous_root = tree.root(); + // simulate content hash + + let mut hasher = Keccak256::new(); + hasher.update(previous_enumeration_index.to_be_bytes()); + hasher.update(previous_root); + hasher.update(0u64.to_be_bytes()); // porter shard + hasher.update([0u8; 32]); // porter shard + + let mut previous_data_hash = [0u8; 32]; + previous_data_hash[..].copy_from_slice(hasher.finalize().as_slice()); + + let previous_aux_hash = [0u8; 32]; + let previous_meta_hash = [0u8; 32]; + + let mut hasher = Keccak256::new(); + hasher.update(previous_data_hash); + hasher.update(previous_meta_hash); + hasher.update(previous_aux_hash); + + let mut previous_content_hash = [0u8; 32]; + previous_content_hash[..].copy_from_slice(hasher.finalize().as_slice()); + + let default_account_codehash = + bytecode_to_code_hash(&test_artifact.default_account_code).unwrap(); + let default_account_codehash = U256::from_big_endian(&default_account_codehash); + + let mut base_layer_circuits = vec![]; + let _ = run( + Address::zero(), + test_artifact.entry_point_address, + test_artifact.entry_point_code, + vec![], + false, + default_account_codehash, + used_bytecodes, + vec![], + cycle_limit, + geometry, + storage_impl, + &mut tree, + |circuit| base_layer_circuits.push(circuit), + |_, _, _| {}, + ); + + Ok(base_layer_circuits .into_iter() .dedup_by(|a, b| a.numeric_circuit_type() == b.numeric_circuit_type()) .collect())
@@ -242,115 +319,3 @@ pub fn get_leaf_vk_params( } Ok(leaf_vk_commits) } - -#[allow(clippy::type_complexity)] -fn get_circuits( - mut test_artifact: TestArtifact, - cycle_limit: usize, - geometry: GeometryConfig, -) -> ( - BlockBasicCircuits<GoldilocksField, ZkSyncDefaultRoundFunction>, - BlockBasicCircuitsPublicInputs<GoldilocksField>, - BlockBasicCircuitsPublicCompactFormsWitnesses<GoldilocksField>, - SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, -) { - let round_function = ZkSyncDefaultRoundFunction::default(); - - let mut storage_impl = InMemoryStorage::new(); - let mut tree = ZKSyncTestingTree::empty(); - - test_artifact.entry_point_address = - *zk_evm::zkevm_opcode_defs::system_params::BOOTLOADER_FORMAL_ADDRESS; - - let predeployed_contracts = test_artifact - .predeployed_contracts - .clone() - .into_iter() - .chain(Some(( - test_artifact.entry_point_address, - test_artifact.entry_point_code.clone(), - ))) - .collect::<HashMap<_, _>>(); - save_predeployed_contracts(&mut storage_impl, &mut tree, &predeployed_contracts); - - let used_bytecodes = HashMap::from_iter( - test_artifact - .predeployed_contracts - .values() - .map(|bytecode| { - ( - bytecode_to_code_hash(bytecode).unwrap().into(), - bytecode.clone(), - ) - }) - .chain( - Some(test_artifact.default_account_code.clone()).map(|bytecode| { - ( - bytecode_to_code_hash(&bytecode).unwrap().into(), - bytecode.clone(), - ) - }), - ), - ); - - let previous_enumeration_index = tree.next_enumeration_index(); - let previous_root = tree.root(); - // simulate content hash - - let mut hasher = Keccak256::new(); - hasher.update(previous_enumeration_index.to_be_bytes()); - hasher.update(previous_root); - hasher.update(0u64.to_be_bytes()); // porter shard - hasher.update([0u8; 32]); // porter shard - - let mut previous_data_hash = [0u8; 32]; - previous_data_hash[..].copy_from_slice(hasher.finalize().as_slice()); - - let previous_aux_hash = [0u8; 32]; - let previous_meta_hash = [0u8; 32]; - - let mut hasher = Keccak256::new(); - hasher.update(previous_data_hash); - hasher.update(previous_meta_hash); - hasher.update(previous_aux_hash); - - let mut previous_content_hash = [0u8; 32]; - previous_content_hash[..].copy_from_slice(hasher.finalize().as_slice()); - - let default_account_codehash = - bytecode_to_code_hash(&test_artifact.default_account_code).unwrap(); - let default_account_codehash = U256::from_big_endian(&default_account_codehash); - - let ( - basic_block_circuits, - basic_block_circuits_inputs, - closed_form_inputs, - scheduler_partial_input, - _, - ) = run( - Address::zero(), - test_artifact.entry_point_address, - test_artifact.entry_point_code, - vec![], - false, - default_account_codehash, - used_bytecodes, - vec![], - cycle_limit, - round_function, - geometry, - storage_impl, - &mut tree, - ); - - ( - basic_block_circuits, - basic_block_circuits_inputs, - closed_form_inputs, - scheduler_partial_input, - ) -} diff --git a/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs index 2b633bc6d08..ced67af82e6 100644 --- a/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs +++ b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs @@ -14,7 +14,7 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; -use zksync_types::proofs::AggregationRound; +use zksync_types::basic_fri_types::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints, save_recursive_layer_vk, diff --git
a/prover/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml index b3235744af4..515f0b0dd14 100644 --- a/prover/witness_generator/Cargo.toml +++ b/prover/witness_generator/Cargo.toml @@ -15,6 +15,7 @@ vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } +zksync_prover_interface = { path = "../../core/lib/prover_interface" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_system_constants = { path = "../../core/lib/constants" } prometheus_exporter = { path = "../../core/lib/prometheus_exporter" } diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 3835dcccdcd..6d7d15b7861 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -7,24 +7,20 @@ use std::{ use anyhow::Context as _; use async_trait::async_trait; +use circuit_definitions::{ + circuit_definitions::base_layer::ZkSyncBaseLayerStorage, + encodings::recursion_request::RecursionQueueSimulator, + zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, +}; use multivm::vm_latest::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle, }; use rand::Rng; use serde::{Deserialize, Serialize}; -use zkevm_test_harness::{ - geometry_config::get_geometry_config, - toolset::GeometryConfig, - witness::full_block_artifact::{ - BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, - BlockBasicCircuitsPublicInputs, - }, -}; +use zkevm_test_harness::{geometry_config::get_geometry_config, toolset::GeometryConfig}; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::{fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool}; -use zksync_object_store::{ - Bucket, ClosedFormInputKey, ObjectStore, ObjectStoreFactory, StoredObject, -}; +use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -34,17 +30,18 @@ use zksync_prover_fri_types::{ zkevm_circuits::scheduler::{ block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, }, - ZkSyncDefaultRoundFunction, }, - get_current_pod_name, AuxOutputWitnessWrapper, + get_current_pod_name, + keys::ClosedFormInputKey, + AuxOutputWitnessWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_prover_interface::inputs::{BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}; use zksync_queued_job_processor::JobProcessor; use zksync_state::{PostgresStorage, StorageView}; use zksync_types::{ - proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, - protocol_version::FriProtocolVersionId, - Address, L1BatchNumber, ProtocolVersionId, BOOTLOADER_ADDRESS, H256, U256, + basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, Address, + L1BatchNumber, ProtocolVersionId, BOOTLOADER_ADDRESS, H256, U256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -53,16 +50,14 @@ use crate::{ precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, storage_oracle::StorageOracle, utils::{ - expand_bootloader_contents, save_base_prover_input_artifacts, ClosedFormInputWrapper, + expand_bootloader_contents, save_circuit, ClosedFormInputWrapper, SchedulerPartialInputWrapper, }, }; pub struct BasicCircuitArtifacts { - basic_circuits: 
-    basic_circuits_inputs: BlockBasicCircuitsPublicInputs<GoldilocksField>,
-    per_circuit_closed_form_inputs: BlockBasicCircuitsPublicCompactFormsWitnesses<GoldilocksField>,
-    #[allow(dead_code)]
+    circuit_urls: Vec<(u8, String)>,
+    queue_urls: Vec<(u8, String, usize)>,
     scheduler_witness: SchedulerCircuitInstanceWitness<
         GoldilocksField,
         CircuitGoldilocksPoseidon2Sponge,
@@ -257,9 +252,10 @@ impl JobProcessor for BasicWitnessGenerator {
             None => Ok(()),
             Some(artifacts) => {
                 let blob_started_at = Instant::now();
-                let blob_urls = save_artifacts(
+                let scheduler_witness_url = save_scheduler_artifacts(
                     job_id,
-                    artifacts,
+                    artifacts.scheduler_witness,
+                    artifacts.aux_output_witness,
                     &*self.object_store,
                     self.public_blob_store.as_deref(),
                     self.config.shall_save_to_public_bucket,
@@ -269,7 +265,17 @@ impl JobProcessor for BasicWitnessGenerator {
                 WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()]
                     .observe(blob_started_at.elapsed());

-                update_database(&self.prover_connection_pool, started_at, job_id, blob_urls).await;
+                update_database(
+                    &self.prover_connection_pool,
+                    started_at,
+                    job_id,
+                    BlobUrls {
+                        circuit_ids_and_urls: artifacts.circuit_urls,
+                        closed_form_inputs_and_urls: artifacts.queue_urls,
+                        scheduler_witness_url,
+                    },
+                )
+                .await;
                 Ok(())
             }
         }
@@ -304,16 +310,16 @@ async fn process_basic_circuits_job(
 ) -> BasicCircuitArtifacts {
     let witness_gen_input =
         build_basic_circuits_witness_generator_input(&connection_pool, job, block_number).await;
-    let (
-        basic_circuits,
-        basic_circuits_inputs,
-        per_circuit_closed_form_inputs,
-        scheduler_witness,
-        aux_output_witness,
-    ) = generate_witness(object_store, config, connection_pool, witness_gen_input).await;
+    let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness(
+        block_number,
+        object_store,
+        config,
+        connection_pool,
+        witness_gen_input,
+    )
+    .await;
     WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()]
         .observe(started_at.elapsed());
-
     tracing::info!(
         "Witness generation for block {} is complete in {:?}",
         block_number.0,
@@ -321,9 +327,8 @@
     );

     BasicCircuitArtifacts {
-        basic_circuits,
-        basic_circuits_inputs,
-        per_circuit_closed_form_inputs,
+        circuit_urls,
+        queue_urls,
         scheduler_witness,
         aux_output_witness,
     }
@@ -374,44 +379,6 @@ async fn get_artifacts(
     BasicWitnessGeneratorJob { block_number, job }
 }

-async fn save_artifacts(
-    block_number: L1BatchNumber,
-    artifacts: BasicCircuitArtifacts,
-    object_store: &dyn ObjectStore,
-    public_object_store: Option<&dyn ObjectStore>,
-    shall_save_to_public_bucket: bool,
-) -> BlobUrls {
-    let circuit_ids_and_urls = save_base_prover_input_artifacts(
-        block_number,
-        artifacts.basic_circuits,
-        object_store,
-        AggregationRound::BasicCircuits,
-    )
-    .await;
-    let closed_form_inputs_and_urls = save_leaf_aggregation_artifacts(
-        block_number,
-        artifacts.basic_circuits_inputs,
-        artifacts.per_circuit_closed_form_inputs,
-        object_store,
-    )
-    .await;
-    let scheduler_witness_url = save_scheduler_artifacts(
-        block_number,
-        artifacts.scheduler_witness,
-        artifacts.aux_output_witness,
-        object_store,
-        public_object_store,
-        shall_save_to_public_bucket,
-    )
-    .await;
-
-    BlobUrls {
-        circuit_ids_and_urls,
-        closed_form_inputs_and_urls,
-        scheduler_witness_url,
-    }
-}
-
 async fn save_scheduler_artifacts(
     block_number: L1BatchNumber,
     scheduler_partial_input: SchedulerCircuitInstanceWitness<
@@ -440,28 +407,25 @@
     object_store.put(block_number, &wrapper).await.unwrap()
 }

-async fn save_leaf_aggregation_artifacts(
+async fn save_recursion_queue(
     block_number: L1BatchNumber,
-    basic_circuits_inputs: BlockBasicCircuitsPublicInputs<GoldilocksField>,
-    per_circuit_closed_form_inputs: BlockBasicCircuitsPublicCompactFormsWitnesses<GoldilocksField>,
+    circuit_id: u8,
+    recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>,
+    closed_form_inputs: &[ClosedFormInputCompactFormWitness<GoldilocksField>],
     object_store: &dyn ObjectStore,
-) -> Vec<(u8, String, usize)> {
-    let round_function = ZkSyncDefaultRoundFunction::default();
-    let queues = basic_circuits_inputs
-        .into_recursion_queues(per_circuit_closed_form_inputs, &round_function);
-    let mut circuit_id_urls_with_count = Vec::with_capacity(queues.len());
-    for (circuit_id_ref, recursion_queue_simulator, inputs) in queues {
-        let circuit_id = circuit_id_ref as u8;
-        let key = ClosedFormInputKey {
-            block_number,
-            circuit_id,
-        };
-        let basic_circuit_count = inputs.len();
-        let wrapper = ClosedFormInputWrapper(inputs, recursion_queue_simulator);
-        let blob_url = object_store.put(key, &wrapper).await.unwrap();
-        circuit_id_urls_with_count.push((circuit_id, blob_url, basic_circuit_count))
-    }
-    circuit_id_urls_with_count
+) -> (u8, String, usize) {
+    let key = ClosedFormInputKey {
+        block_number,
+        circuit_id,
+    };
+    let basic_circuit_count = closed_form_inputs.len();
+    let closed_form_inputs = closed_form_inputs
+        .iter()
+        .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone()))
+        .collect();
+    let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator);
+    let blob_url = object_store.put(key, &wrapper).await.unwrap();
+    (circuit_id, blob_url, basic_circuit_count)
 }

 // If making changes to this method, consider moving this logic to the DAL layer and make
@@ -508,14 +472,14 @@ async fn build_basic_circuits_witness_generator_input(
 }

 async fn generate_witness(
+    block_number: L1BatchNumber,
     object_store: &dyn ObjectStore,
     config: Arc<FriWitnessGeneratorConfig>,
     connection_pool: ConnectionPool,
     input: BasicCircuitWitnessGeneratorInput,
 ) -> (
-    BlockBasicCircuits<GoldilocksField, ZkSyncDefaultRoundFunction>,
-    BlockBasicCircuitsPublicInputs<GoldilocksField>,
-    BlockBasicCircuitsPublicCompactFormsWitnesses<GoldilocksField>,
+    Vec<(u8, String)>,
+    Vec<(u8, String, usize)>,
     SchedulerCircuitInstanceWitness<
         GoldilocksField,
         CircuitGoldilocksPoseidon2Sponge,
@@ -636,13 +600,10 @@ async fn generate_witness(
     // The following part is CPU-heavy, so we move it to a separate thread.
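     // In miniature, the pattern introduced below (the names here are
     // illustrative, not part of this codebase): a blocking producer streams
     // items through a bounded mpsc channel while an async consumer uploads
     // them, so only a bounded number of circuits is held in memory at once:
     //
     //     let (tx, mut rx) = tokio::sync::mpsc::channel(1);
     //     let producer = tokio::task::spawn_blocking(move || {
     //         for circuit in produce_circuits() {
     //             // `blocking_send` applies backpressure: this thread parks
     //             // until the consumer has drained the previous item.
     //             tx.blocking_send(circuit).unwrap();
     //         }
     //     });
     //     let consumer = async {
     //         while let Some(circuit) = rx.recv().await {
     //             object_store.put(circuit).await.unwrap();
     //         }
     //     };
     //     let (res, ()) = tokio::join!(producer, consumer);
     //     res.unwrap();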
     let rt_handle = tokio::runtime::Handle::current();

-    let (
-        basic_circuits,
-        basic_circuits_public_inputs,
-        basic_circuits_public_compact_witness,
-        mut scheduler_witness,
-        block_aux_witness,
-    ) = tokio::task::spawn_blocking(move || {
+    let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1);
+    let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1);
+
+    let make_circuits = tokio::task::spawn_blocking(move || {
         let connection = rt_handle
             .block_on(connection_pool.access_storage())
             .unwrap();
@@ -654,7 +615,7 @@ async fn generate_witness(
             VmStorageOracle::new(storage_view.clone());
         let storage_oracle = StorageOracle::new(vm_storage_oracle, storage_refunds);

-        zkevm_test_harness::external_calls::run_with_fixed_params(
+        let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run(
             Address::zero(),
             BOOTLOADER_ADDRESS,
             bootloader_code,
@@ -667,10 +628,37 @@ async fn generate_witness(
             geometry_config,
             storage_oracle,
             &mut tree,
-        )
-    })
-    .await
-    .unwrap();
+            |circuit| {
+                circuit_sender.blocking_send(circuit).unwrap();
+            },
+            |a, b, c| queue_sender.blocking_send((a as u8, b, c)).unwrap(),
+        );
+        (scheduler_witness, block_witness)
+    });
+
+    let mut circuit_urls = vec![];
+    let mut recursion_urls = vec![];
+
+    let save_circuits = async {
+        loop {
+            tokio::select! {
+                Some(circuit) = circuit_receiver.recv() => {
+                    circuit_urls.push(
+                        save_circuit(block_number, circuit, circuit_urls.len(), object_store).await,
+                    );
+                }
+                Some((circuit_id, queue, inputs)) = queue_receiver.recv() => recursion_urls.push(
+                    save_recursion_queue(block_number, circuit_id, queue, &inputs, object_store)
+                        .await,
+                ),
+                else => break,
+            };
+        }
+    };
+
+    let (witnesses, ()) = tokio::join!(make_circuits, save_circuits);
+
+    let (mut scheduler_witness, block_aux_witness) = witnesses.unwrap();

     scheduler_witness.previous_block_meta_hash =
         previous_batch_with_metadata.metadata.meta_parameters_hash.0;
@@ -678,9 +666,8 @@
         previous_batch_with_metadata.metadata.aux_data_hash.0;

     (
-        basic_circuits,
-        basic_circuits_public_inputs,
-        basic_circuits_public_compact_witness,
+        circuit_urls,
+        recursion_urls,
         scheduler_witness,
         block_aux_witness,
     )
diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs
index eb28936085f..dd2b5805e42 100644
--- a/prover/witness_generator/src/leaf_aggregation.rs
+++ b/prover/witness_generator/src/leaf_aggregation.rs
@@ -6,8 +6,8 @@ use zkevm_test_harness::witness::recursive_aggregation::{
     compute_leaf_params, create_leaf_witnesses,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
-use zksync_object_store::{ClosedFormInputKey, ObjectStore, ObjectStoreFactory};
+use zksync_dal::{fri_prover_dal::types::LeafAggregationJobMetadata, ConnectionPool};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::field::goldilocks::GoldilocksField,
@@ -21,14 +21,14 @@ use zksync_prover_fri_types::{
         encodings::recursion_request::RecursionQueueSimulator,
         zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness,
     },
-    get_current_pod_name, FriProofWrapper,
+    get_current_pod_name,
+    keys::ClosedFormInputKey,
+    FriProofWrapper,
 };
 use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
-    proofs::{AggregationRound, LeafAggregationJobMetadata},
-    protocol_version::FriProtocolVersionId,
-    L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 use zksync_vk_setup_data_server_fri::{
     get_base_layer_vk_for_circuit_type, get_recursive_layer_vk_for_circuit_type,
diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs
index 28f42037ca3..7e92397dd1c 100644
--- a/prover/witness_generator/src/main.rs
+++ b/prover/witness_generator/src/main.rs
@@ -15,7 +15,7 @@ use zksync_dal::ConnectionPool;
 use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv};
 use zksync_object_store::ObjectStoreFactory;
 use zksync_queued_job_processor::JobProcessor;
-use zksync_types::{proofs::AggregationRound, web3::futures::StreamExt};
+use zksync_types::{basic_fri_types::AggregationRound, web3::futures::StreamExt};
 use zksync_utils::wait_for_tasks::wait_for_tasks;
 use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments;
diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs
index 5f817dd8886..3c46ed98d50 100644
--- a/prover/witness_generator/src/node_aggregation.rs
+++ b/prover/witness_generator/src/node_aggregation.rs
@@ -6,8 +6,8 @@ use zkevm_test_harness::witness::recursive_aggregation::{
     compute_node_vk_commitment, create_node_witnesses,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
-use zksync_object_store::{AggregationsKey, ObjectStore, ObjectStoreFactory};
+use zksync_dal::{fri_prover_dal::types::NodeAggregationJobMetadata, ConnectionPool};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::field::goldilocks::GoldilocksField,
@@ -18,13 +18,13 @@ use zksync_prover_fri_types::{
         encodings::recursion_request::RecursionQueueSimulator,
         zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness,
     },
-    get_current_pod_name, FriProofWrapper,
+    get_current_pod_name,
+    keys::AggregationsKey,
+    FriProofWrapper,
 };
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
-    proofs::{AggregationRound, NodeAggregationJobMetadata},
-    protocol_version::FriProtocolVersionId,
-    L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 use zksync_vk_setup_data_server_fri::{
     get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params,
diff --git a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
index c26efa4cf39..2cfadc93fc6 100644
--- a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
+++ b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
@@ -3,7 +3,7 @@ use zk_evm::blake2::Blake2s256;
 use zkevm_test_harness::witness::tree::{
     BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf,
 };
-use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};

 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
 pub struct PrecalculatedMerklePathsProvider {
diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs
index a6aa372b41e..68e48f83289 100644
--- a/prover/witness_generator/src/scheduler.rs
+++ b/prover/witness_generator/src/scheduler.rs
@@ -4,7 +4,7 @@ use anyhow::Context as _;
 use async_trait::async_trait;
 use zksync_config::configs::FriWitnessGeneratorConfig;
 use zksync_dal::ConnectionPool;
-use zksync_object_store::{FriCircuitKey, ObjectStore, ObjectStoreFactory};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
@@ -18,11 +18,13 @@ use zksync_prover_fri_types::{
         recursion_layer_proof_config,
         zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig},
     },
-    get_current_pod_name, CircuitWrapper, FriProofWrapper,
+    get_current_pod_name,
+    keys::FriCircuitKey,
+    CircuitWrapper, FriProofWrapper,
 };
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
-    proofs::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 use zksync_vk_setup_data_server_fri::{
     get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params,
diff --git a/prover/witness_generator/src/tests.rs b/prover/witness_generator/src/tests.rs
index 7fd95a7c7d8..e167c82aba9 100644
--- a/prover/witness_generator/src/tests.rs
+++ b/prover/witness_generator/src/tests.rs
@@ -2,10 +2,8 @@ use std::iter;

 use const_decoder::Decoder::Hex;
 use zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf};
-use zksync_types::{
-    proofs::{PrepareBasicCircuitsJob, StorageLogMetadata},
-    U256,
-};
+use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_types::U256;

 use super::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider;
diff --git a/prover/witness_generator/src/utils.rs b/prover/witness_generator/src/utils.rs
index f3ae4de4993..17e6533344a 100644
--- a/prover/witness_generator/src/utils.rs
+++ b/prover/witness_generator/src/utils.rs
@@ -1,11 +1,10 @@
-use multivm::utils::get_used_bootloader_memory_bytes;
-use zkevm_test_harness::{
-    boojum::field::goldilocks::GoldilocksField, witness::full_block_artifact::BlockBasicCircuits,
-};
-use zksync_object_store::{
-    serialize_using_bincode, AggregationsKey, Bucket, ClosedFormInputKey, FriCircuitKey,
-    ObjectStore, StoredObject,
+use circuit_definitions::{
+    aux_definitions::witness_oracle::VmWitnessOracle,
+    circuit_definitions::base_layer::ZkSyncBaseLayerCircuit,
 };
+use multivm::utils::get_used_bootloader_memory_bytes;
+use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField;
+use zksync_object_store::{serialize_using_bincode, Bucket, ObjectStore, StoredObject};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
@@ -20,9 +19,10 @@ use zksync_prover_fri_types::{
         zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness,
         ZkSyncDefaultRoundFunction,
     },
+    keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey},
     CircuitWrapper, FriProofWrapper,
 };
-use zksync_types::{proofs::AggregationRound, L1BatchNumber, ProtocolVersionId, U256};
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256};

 pub fn expand_bootloader_contents(
     packed: &[(usize, U256)],
@@ -105,30 +105,29 @@ impl StoredObject for SchedulerPartialInputWrapper {
     serialize_using_bincode!();
 }

-pub async fn save_base_prover_input_artifacts(
+pub async fn save_circuit(
     block_number: L1BatchNumber,
-    circuits: BlockBasicCircuits<GoldilocksField, ZkSyncDefaultRoundFunction>,
+    circuit: ZkSyncBaseLayerCircuit<
+        GoldilocksField,
+        VmWitnessOracle<GoldilocksField>,
+        ZkSyncDefaultRoundFunction,
+    >,
+    sequence_number: usize,
     object_store: &dyn ObjectStore,
-    aggregation_round: AggregationRound,
-) -> Vec<(u8, String)> {
-    let circuits = circuits.into_flattened_set();
-    let mut ids_and_urls = Vec::with_capacity(circuits.len());
-    for (sequence_number, circuit) in circuits.into_iter().enumerate() {
-        let circuit_id = circuit.numeric_circuit_type();
-        let circuit_key = FriCircuitKey {
-            block_number,
-            sequence_number,
-            circuit_id,
-            aggregation_round,
-            depth: 0,
-        };
-        let blob_url = object_store
-            .put(circuit_key, &CircuitWrapper::Base(circuit))
-            .await
-            .unwrap();
-        ids_and_urls.push((circuit_id, blob_url));
-    }
-    ids_and_urls
+) -> (u8, String) {
+    let circuit_id = circuit.numeric_circuit_type();
+    let circuit_key = FriCircuitKey {
+        block_number,
+        sequence_number,
+        circuit_id,
+        aggregation_round: AggregationRound::BasicCircuits,
+        depth: 0,
+    };
+    let blob_url = object_store
+        .put(circuit_key, &CircuitWrapper::Base(circuit))
+        .await
+        .unwrap();
+    (circuit_id, blob_url)
 }

 pub async fn save_recursive_layer_prover_input_artifacts(
diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/witness_generator/tests/basic_test.rs
index 16cce19929d..446ee71c922 100644
--- a/prover/witness_generator/tests/basic_test.rs
+++ b/prover/witness_generator/tests/basic_test.rs
@@ -2,14 +2,15 @@ use std::time::Instant;

 use serde::Serialize;
 use zksync_config::ObjectStoreConfig;
+use zksync_dal::fri_prover_dal::types::{LeafAggregationJobMetadata, NodeAggregationJobMetadata};
 use zksync_env_config::FromEnv;
-use zksync_object_store::{AggregationsKey, FriCircuitKey, ObjectStoreFactory};
-use zksync_prover_fri_types::CircuitWrapper;
-use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
-use zksync_types::{
-    proofs::{AggregationRound, LeafAggregationJobMetadata, NodeAggregationJobMetadata},
-    L1BatchNumber,
+use zksync_object_store::ObjectStoreFactory;
+use zksync_prover_fri_types::{
+    keys::{AggregationsKey, FriCircuitKey},
+    CircuitWrapper,
+};
+use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 use zksync_witness_generator::{
     leaf_aggregation::{prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator},
     node_aggregation,
diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs
index 0c73a8df900..cbc2da1f5f3 100644
--- a/prover/witness_vector_generator/src/generator.rs
+++ b/prover/witness_vector_generator/src/generator.rs
@@ -1,4 +1,5 @@
 use std::{
+    net::SocketAddr,
     sync::Arc,
     time::{Duration, Instant},
 };
@@ -7,7 +8,7 @@ use anyhow::Context as _;
 use async_trait::async_trait;
 use tokio::{task::JoinHandle, time::sleep};
 use zksync_config::configs::FriWitnessVectorGeneratorConfig;
-use zksync_dal::ConnectionPool;
+use zksync_dal::{fri_prover_dal::types::GpuProverInstanceStatus, ConnectionPool};
 use zksync_object_store::ObjectStore;
 use zksync_prover_fri_types::{
     circuit_definitions::boojum::field::goldilocks::GoldilocksField, CircuitWrapper, ProverJob,
@@ -17,11 +18,7 @@ use zksync_prover_fri_utils::{
     fetch_next_circuit, get_numeric_circuit_id, socket_utils::send_assembly,
 };
 use zksync_queued_job_processor::JobProcessor;
-use zksync_types::{
-    basic_fri_types::CircuitIdRoundTuple,
-    proofs::{GpuProverInstanceStatus, SocketAddress},
-    protocol_version::L1VerifierConfig,
-};
+use zksync_types::{basic_fri_types::CircuitIdRoundTuple, protocol_version::L1VerifierConfig};
 use zksync_vk_setup_data_server_fri::get_finalization_hints;

 use crate::metrics::METRICS;
@@ -155,6 +152,7 @@ impl JobProcessor for WitnessVectorGenerator {
             .await;
         if let Some(address) = prover {
+            let address = SocketAddr::from(address);
             tracing::info!(
                 "Found prover after {:?}. Sending witness vector job...",
                 now.elapsed()
@@ -216,7 +214,7 @@ impl JobProcessor for WitnessVectorGenerator {
 async fn handle_send_result(
     result: &Result<(Duration, u64), String>,
     job_id: u32,
-    address: &SocketAddress,
+    address: &SocketAddr,
     pool: &ConnectionPool,
     zone: String,
 ) {
@@ -250,7 +248,11 @@ async fn handle_send_result(
                 .await
                 .unwrap()
                 .fri_gpu_prover_queue_dal()
-                .update_prover_instance_status(address.clone(), GpuProverInstanceStatus::Dead, zone)
+                .update_prover_instance_status(
+                    (*address).into(),
+                    GpuProverInstanceStatus::Dead,
+                    zone,
+                )
                 .await;

             // mark the job as failed
diff --git a/prover/witness_vector_generator/tests/basic_test.rs b/prover/witness_vector_generator/tests/basic_test.rs
index 648b1ee4d9e..54898cf94d5 100644
--- a/prover/witness_vector_generator/tests/basic_test.rs
+++ b/prover/witness_vector_generator/tests/basic_test.rs
@@ -1,7 +1,7 @@
 use std::fs;

 use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey};
-use zksync_types::{proofs::AggregationRound, L1BatchNumber};
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 use zksync_witness_vector_generator::generator::WitnessVectorGenerator;

 #[test]