diff --git a/.github/renovate.json5 b/.github/renovate.json5 new file mode 100644 index 00000000..db09c1c7 --- /dev/null +++ b/.github/renovate.json5 @@ -0,0 +1,122 @@ +{ + $schema: 'https://docs.renovatebot.com/renovate-schema.json', + extends: [ + 'config:recommended', + ':semanticCommits', + ':semanticCommitTypeAll(chore)', + 'helpers:pinGitHubActionDigests', + ], + schedule: [ + 'before 6am on Monday', + ], + configMigration: true, + rebaseWhen: 'behind-base-branch', + lockFileMaintenance: { + enabled: true, + }, + packageRules: [ + { + groupName: 'futures crates', + groupSlug: 'futures', + matchManagers: [ + 'cargo', + ], + matchPackageNames: [ + 'futures', + ], + matchPackagePrefixes: [ + 'futures-', + 'futures_', + ], + }, + { + groupName: 'serde crates', + groupSlug: 'serde', + matchManagers: [ + 'cargo', + ], + matchPackageNames: [ + 'serde', + ], + matchPackagePrefixes: [ + 'serde-', + 'serde_', + ], + }, + { + groupName: 'tonic crates', + groupSlug: 'tonic', + matchManagers: [ + 'cargo', + ], + matchSourceUrlPrefixes: [ + 'https://github.com/hyperium/tonic', + 'https://github.com/tokio-rs/prost', + ], + }, + { + groupName: 'tracing crates', + groupSlug: 'tracing', + matchManagers: [ + 'cargo', + ], + matchSourceUrlPrefixes: [ + 'https://github.com/tokio-rs/tracing', + ], + matchPackagePrefixes: [ + 'tracing-', + 'tracing_', + ], + }, + { + groupName: 'alloy-rs core types monorepo', + groupSlug: 'alloy-core', + matchManagers: [ + 'cargo', + ], + matchSourceUrlPrefixes: [ + 'https://github.com/alloy-rs/core', + ], + }, + { + groupName: 'async-graphql crates', + groupSlug: 'async-graphql', + matchManagers: [ + 'cargo', + ], + matchPackageNames: [ + 'async-graphql', + ], + matchPackagePrefixes: [ + 'async-graphql-', + ], + }, + { + groupName: 'build-info crates', + groupSlug: 'build-info', + matchManagers: [ + 'cargo', + ], + matchPackageNames: [ + 'build-info', + ], + matchPackagePrefixes: [ + 'build-info-', + ], + }, + ], + customManagers: [ + { + customType: 'regex', + fileMatch: [ + '^rust-toolchain(\\.toml)?$', + ], + matchStrings: [ + 'channel\\s*=\\s*"(?<currentValue>\\d+\\.\\d+\\.\\d+)"', + ], + depNameTemplate: 'rust', + packageNameTemplate: 'rust-lang/rust', + datasourceTemplate: 'github-releases', + }, + ], +} diff --git a/.github/workflows/containers.yml b/.github/workflows/containers.yml index 5ca53aa8..5863ddef 100644 --- a/.github/workflows/containers.yml +++ b/.github/workflows/containers.yml @@ -26,11 +26,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5 with: # list of Docker images to use as base name for tags images: | @@ -47,14 +47,14 @@ type=sha - name: Log in to the Container registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 # v6 with: context: ./ push: true diff --git a/.github/workflows/conventional_commits.yaml b/.github/workflows/conventional_commits.yaml new file mode 100644 index 00000000..59d602aa --- /dev/null +++ b/.github/workflows/conventional_commits.yaml @@ -0,0 +1,22 @@ +name: "Lint PR" + +on: +
pull_request: + +jobs: + conventional-commits-check: + name: Check conventional commits + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.sha }} + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5 + with: + python-version: "3.10" + - name: Install commitizen + run: pip install commitizen + - name: commitizen check + run: cz check --rev-range ${{ github.event.pull_request.base.sha }}..HEAD diff --git a/.github/workflows/license_headers_check.yml b/.github/workflows/license_headers_check.yml index 502f4f1e..9d8b72a3 100644 --- a/.github/workflows/license_headers_check.yml +++ b/.github/workflows/license_headers_check.yml @@ -11,7 +11,7 @@ jobs: name: License headers check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Install addlicense run: | wget https://github.com/google/addlicense/releases/download/v1.1.1/addlicense_1.1.1_Linux_x86_64.tar.gz diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4c3f3bd5..029e9c21 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ jobs: container: image: rust:1.76-bookworm steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - run: | rustup component add rustfmt cargo fmt --all -- --check @@ -38,8 +38,8 @@ jobs: env: DATABASE_URL: postgres://postgres@postgres:5432 steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: | ~/.cargo/bin/ @@ -64,8 +64,8 @@ jobs: DATABASE_URL: postgres://postgres@postgres:5432 SQLX_OFFLINE: true steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: | ~/.cargo/bin/ @@ -103,8 +103,8 @@ jobs: env: DATABASE_URL: postgres://postgres@postgres:5432 steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: | ~/.cargo/bin/ @@ -122,7 +122,7 @@ jobs: - name: Run tests and generate coverage report run: cargo llvm-cov test --all-features --workspace --lcov --output-path lcov.info - name: Upload coverage to Coveralls - uses: coverallsapp/github-action@v2.2.0 + uses: coverallsapp/github-action@643bc377ffa44ace6394b2b5d0d3950076de9f63 # v2.3.0 with: file: ./lcov.info @@ -151,8 +151,8 @@ jobs: env: DATABASE_URL: postgres://postgres@postgres:5432 steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 with: path: | ~/.cargo/bin/ diff --git a/.sqlx/query-019bf1fa6c038ad1027e2b67a949d67ae9dd5fbc3f3e0091f127c95264319e63.json b/.sqlx/query-019bf1fa6c038ad1027e2b67a949d67ae9dd5fbc3f3e0091f127c95264319e63.json new file mode 100644 index 00000000..c7feed38 --- /dev/null +++ b/.sqlx/query-019bf1fa6c038ad1027e2b67a949d67ae9dd5fbc3f3e0091f127c95264319e63.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT sender_address FROM 
scalar_tap_denylist\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sender_address", + "type_info": "Bpchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "019bf1fa6c038ad1027e2b67a949d67ae9dd5fbc3f3e0091f127c95264319e63" +} diff --git a/.sqlx/query-0393f3408c1802950e39b3ecbcfae3ac4ff99253a4973d18df83d8645ac75cfc.json b/.sqlx/query-0393f3408c1802950e39b3ecbcfae3ac4ff99253a4973d18df83d8645ac75cfc.json new file mode 100644 index 00000000..ee19733b --- /dev/null +++ b/.sqlx/query-0393f3408c1802950e39b3ecbcfae3ac4ff99253a4973d18df83d8645ac75cfc.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(id),\n SUM(value)\n FROM\n scalar_tap_receipts_invalid\n WHERE\n allocation_id = $1\n AND signer_address IN (SELECT unnest($2::text[]))\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "max", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "sum", + "type_info": "Numeric" + } + ], + "parameters": { + "Left": [ + "Bpchar", + "TextArray" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "0393f3408c1802950e39b3ecbcfae3ac4ff99253a4973d18df83d8645ac75cfc" +} diff --git a/tap-agent/.sqlx/query-2cfcdd0b2aca57b1d0b4c54aef18b200386f4eb7cf441c43d5e9f899f408cc49.json b/.sqlx/query-2cfcdd0b2aca57b1d0b4c54aef18b200386f4eb7cf441c43d5e9f899f408cc49.json similarity index 100% rename from tap-agent/.sqlx/query-2cfcdd0b2aca57b1d0b4c54aef18b200386f4eb7cf441c43d5e9f899f408cc49.json rename to .sqlx/query-2cfcdd0b2aca57b1d0b4c54aef18b200386f4eb7cf441c43d5e9f899f408cc49.json diff --git a/tap-agent/.sqlx/query-4356a61ea23649ceac4e5dd730a93abe03abdd8d1227a87285435fbeb762a345.json b/.sqlx/query-4356a61ea23649ceac4e5dd730a93abe03abdd8d1227a87285435fbeb762a345.json similarity index 100% rename from tap-agent/.sqlx/query-4356a61ea23649ceac4e5dd730a93abe03abdd8d1227a87285435fbeb762a345.json rename to .sqlx/query-4356a61ea23649ceac4e5dd730a93abe03abdd8d1227a87285435fbeb762a345.json diff --git a/tap-agent/.sqlx/query-6389d2951877d3211943268c36145f3665501acea0adadea3f09695d0503ee7b.json b/.sqlx/query-6389d2951877d3211943268c36145f3665501acea0adadea3f09695d0503ee7b.json similarity index 100% rename from tap-agent/.sqlx/query-6389d2951877d3211943268c36145f3665501acea0adadea3f09695d0503ee7b.json rename to .sqlx/query-6389d2951877d3211943268c36145f3665501acea0adadea3f09695d0503ee7b.json diff --git a/tap-agent/.sqlx/query-7487b58e603ccc4ac2e55e676295517040b78eb9bba04c9db33229fe52f85259.json b/.sqlx/query-7487b58e603ccc4ac2e55e676295517040b78eb9bba04c9db33229fe52f85259.json similarity index 100% rename from tap-agent/.sqlx/query-7487b58e603ccc4ac2e55e676295517040b78eb9bba04c9db33229fe52f85259.json rename to .sqlx/query-7487b58e603ccc4ac2e55e676295517040b78eb9bba04c9db33229fe52f85259.json diff --git a/.sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json b/.sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json new file mode 100644 index 00000000..2b0cde21 --- /dev/null +++ b/.sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT deployment, model, variables\n FROM \"CostModels\"\n WHERE deployment != 'global'\n ORDER BY deployment ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "deployment", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "model", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "variables", + "type_info": "Jsonb" 
+ } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + true + ] + }, + "hash": "842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14" +} diff --git a/tap-agent/.sqlx/query-8fe28629e2453852a41abd452ed519167d3f358d25aa02f306779270a084f8c3.json b/.sqlx/query-8fe28629e2453852a41abd452ed519167d3f358d25aa02f306779270a084f8c3.json similarity index 100% rename from tap-agent/.sqlx/query-8fe28629e2453852a41abd452ed519167d3f358d25aa02f306779270a084f8c3.json rename to .sqlx/query-8fe28629e2453852a41abd452ed519167d3f358d25aa02f306779270a084f8c3.json diff --git a/tap-agent/.sqlx/query-948ccdb443b4e9e2ae96b1f3873c7979efb384c2b79535cc46a171248986d00f.json b/.sqlx/query-948ccdb443b4e9e2ae96b1f3873c7979efb384c2b79535cc46a171248986d00f.json similarity index 100% rename from tap-agent/.sqlx/query-948ccdb443b4e9e2ae96b1f3873c7979efb384c2b79535cc46a171248986d00f.json rename to .sqlx/query-948ccdb443b4e9e2ae96b1f3873c7979efb384c2b79535cc46a171248986d00f.json diff --git a/tap-agent/.sqlx/query-a24f3bde2965abe825d896dd7fd65783fea041032e08b6c7ecd65a4b6599a81c.json b/.sqlx/query-a24f3bde2965abe825d896dd7fd65783fea041032e08b6c7ecd65a4b6599a81c.json similarity index 100% rename from tap-agent/.sqlx/query-a24f3bde2965abe825d896dd7fd65783fea041032e08b6c7ecd65a4b6599a81c.json rename to .sqlx/query-a24f3bde2965abe825d896dd7fd65783fea041032e08b6c7ecd65a4b6599a81c.json diff --git a/tap-agent/.sqlx/query-a6f746e408a9c77e6025495dbc41a5d989c8b5c5b6d364b61767a67085deaf8a.json b/.sqlx/query-a6f746e408a9c77e6025495dbc41a5d989c8b5c5b6d364b61767a67085deaf8a.json similarity index 100% rename from tap-agent/.sqlx/query-a6f746e408a9c77e6025495dbc41a5d989c8b5c5b6d364b61767a67085deaf8a.json rename to .sqlx/query-a6f746e408a9c77e6025495dbc41a5d989c8b5c5b6d364b61767a67085deaf8a.json diff --git a/tap-agent/.sqlx/query-b1d4dfcd202af310df032edc34d83dabb56d7b947b023ee3a8b32b24b07bcd18.json b/.sqlx/query-b1d4dfcd202af310df032edc34d83dabb56d7b947b023ee3a8b32b24b07bcd18.json similarity index 100% rename from tap-agent/.sqlx/query-b1d4dfcd202af310df032edc34d83dabb56d7b947b023ee3a8b32b24b07bcd18.json rename to .sqlx/query-b1d4dfcd202af310df032edc34d83dabb56d7b947b023ee3a8b32b24b07bcd18.json diff --git a/.sqlx/query-b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d.json b/.sqlx/query-b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d.json new file mode 100644 index 00000000..7dbd0863 --- /dev/null +++ b/.sqlx/query-b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT deployment, model, variables\n FROM \"CostModels\"\n WHERE deployment = $1\n AND deployment != 'global'\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "deployment", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "model", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "variables", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + true, + true + ] + }, + "hash": "b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d" +} diff --git a/tap-agent/.sqlx/query-b66975fd6a9a26ece9e9baf14975620f674d9ce80715128bb383a5a022d8a77f.json b/.sqlx/query-b66975fd6a9a26ece9e9baf14975620f674d9ce80715128bb383a5a022d8a77f.json similarity index 100% rename from tap-agent/.sqlx/query-b66975fd6a9a26ece9e9baf14975620f674d9ce80715128bb383a5a022d8a77f.json rename to .sqlx/query-b66975fd6a9a26ece9e9baf14975620f674d9ce80715128bb383a5a022d8a77f.json diff --git 
a/tap-agent/.sqlx/query-d5d3cf9d34bb31de9a1ee55e57f6c830f18f04f4c4cf59e08948c7e188a8eace.json b/.sqlx/query-d5d3cf9d34bb31de9a1ee55e57f6c830f18f04f4c4cf59e08948c7e188a8eace.json similarity index 100% rename from tap-agent/.sqlx/query-d5d3cf9d34bb31de9a1ee55e57f6c830f18f04f4c4cf59e08948c7e188a8eace.json rename to .sqlx/query-d5d3cf9d34bb31de9a1ee55e57f6c830f18f04f4c4cf59e08948c7e188a8eace.json diff --git a/.sqlx/query-d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4.json b/.sqlx/query-d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4.json new file mode 100644 index 00000000..6b2da69c --- /dev/null +++ b/.sqlx/query-d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT deployment, model, variables\n FROM \"CostModels\"\n WHERE deployment = ANY($1)\n AND deployment != 'global'\n ORDER BY deployment ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "deployment", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "model", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "variables", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "TextArray" + ] + }, + "nullable": [ + false, + true, + true + ] + }, + "hash": "d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4" +} diff --git a/tap-agent/.sqlx/query-dbdcb666214a40762607e872c680bba5c3d01bc2106abe5839f1801d1683b8f6.json b/.sqlx/query-dbdcb666214a40762607e872c680bba5c3d01bc2106abe5839f1801d1683b8f6.json similarity index 100% rename from tap-agent/.sqlx/query-dbdcb666214a40762607e872c680bba5c3d01bc2106abe5839f1801d1683b8f6.json rename to .sqlx/query-dbdcb666214a40762607e872c680bba5c3d01bc2106abe5839f1801d1683b8f6.json diff --git a/.sqlx/query-e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a.json b/.sqlx/query-e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a.json new file mode 100644 index 00000000..61996727 --- /dev/null +++ b/.sqlx/query-e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT deployment, model, variables\n FROM \"CostModels\"\n WHERE deployment = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "deployment", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "model", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "variables", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + true, + true + ] + }, + "hash": "e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a" +} diff --git a/tap-agent/.sqlx/query-eb167fed852786fdbeabb636b40ed6080ba673e4dda0ed07fe008095d43d049b.json b/.sqlx/query-eb167fed852786fdbeabb636b40ed6080ba673e4dda0ed07fe008095d43d049b.json similarity index 100% rename from tap-agent/.sqlx/query-eb167fed852786fdbeabb636b40ed6080ba673e4dda0ed07fe008095d43d049b.json rename to .sqlx/query-eb167fed852786fdbeabb636b40ed6080ba673e4dda0ed07fe008095d43d049b.json diff --git a/.sqlx/query-ed054cb84e667373b57cbb62dbd4a96ec4b8b54f04355188eb7c16703e41e7b3.json b/.sqlx/query-ed054cb84e667373b57cbb62dbd4a96ec4b8b54f04355188eb7c16703e41e7b3.json new file mode 100644 index 00000000..f67f9203 --- /dev/null +++ b/.sqlx/query-ed054cb84e667373b57cbb62dbd4a96ec4b8b54f04355188eb7c16703e41e7b3.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO scalar_tap_receipts (signer_address, signature, allocation_id, timestamp_ns, 
nonce, value)\n VALUES ($1, $2, $3, $4, $5, $6)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bpchar", + "Bytea", + "Bpchar", + "Numeric", + "Numeric", + "Numeric" + ] + }, + "nullable": [] + }, + "hash": "ed054cb84e667373b57cbb62dbd4a96ec4b8b54f04355188eb7c16703e41e7b3" +} diff --git a/tap-agent/.sqlx/query-fef5849dc64c15d1188d6398c93a59bfaafd9a38cf342739cdabf8b7bba073d3.json b/.sqlx/query-fef5849dc64c15d1188d6398c93a59bfaafd9a38cf342739cdabf8b7bba073d3.json similarity index 100% rename from tap-agent/.sqlx/query-fef5849dc64c15d1188d6398c93a59bfaafd9a38cf342739cdabf8b7bba073d3.json rename to .sqlx/query-fef5849dc64c15d1188d6398c93a59bfaafd9a38cf342739cdabf8b7bba073d3.json diff --git a/Cargo.lock b/Cargo.lock index 18c26fe2..82bdf688 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -628,17 +628,6 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "auto_impl" version = "1.2.0" @@ -913,6 +902,17 @@ dependencies = [ "serde", ] +[[package]] +name = "bip39" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +dependencies = [ + "bitcoin_hashes", + "serde", + "unicode-normalization", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -928,6 +928,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bitcoin_hashes" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" + [[package]] name = "bitflags" version = "1.3.2" @@ -1809,19 +1815,6 @@ dependencies = [ "regex", ] -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.11.5" @@ -2783,15 +2776,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.3.9" @@ -3146,7 +3130,7 @@ dependencies = [ "axum-extra", "bigdecimal 0.4.5", "build-info", - "env_logger 0.9.3", + "env_logger", "ethers", "ethers-core", "eventuals", @@ -3157,7 +3141,6 @@ dependencies = [ "reqwest 0.12.5", "secp256k1", "serde", - "serde-inline-default", "serde_json", "sqlx", "tap_core 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3174,9 +3157,27 @@ dependencies = [ "wiremock", ] +[[package]] +name = "indexer-config" +version = "1.0.0-rc.4" +dependencies = [ + "alloy-primitives 0.6.4", + "bigdecimal 0.4.5", + "bip39", + "figment", + "serde", + "serde_repr", 
+ "serde_test", + "serde_with", + "thegraph", + "toml", + "tracing", + "url", +] + [[package]] name = "indexer-tap-agent" -version = "0.1.0" +version = "1.0.0-rc.4" dependencies = [ "alloy-primitives 0.6.4", "alloy-sol-types 0.6.4", @@ -3185,14 +3186,13 @@ dependencies = [ "axum 0.7.5", "bigdecimal 0.4.5", "clap", - "env_logger 0.11.5", "ethereum-types", "ethers-signers", "eventuals", - "figment", "futures", "futures-util", "indexer-common", + "indexer-config", "jsonrpsee 0.20.3", "lazy_static", "log", @@ -3200,10 +3200,8 @@ dependencies = [ "ractor", "reqwest 0.12.5", "serde", - "serde_assert", "serde_json", "serde_yaml", - "shellexpand", "sqlx", "tap_aggregator", "tap_core 0.8.0 (git+https://github.com/semiotic-ai/timeline-aggregation-protocol.git?branch=obtain-invalid-receipts-0.3)", @@ -3211,7 +3209,6 @@ dependencies = [ "thegraph", "thiserror", "tokio", - "toml", "tracing", "tracing-subscriber", "wiremock", @@ -3805,7 +3802,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", @@ -3964,7 +3961,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", ] @@ -5568,26 +5565,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-inline-default" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980133dc534d02ab08df3b384295223a45090c40a4c46240e3eaa982b495910" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.72", -] - -[[package]] -name = "serde_assert" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b7be0ad5a7b2eefaa5418eb141838270f1ad2d2c6e88acec3795d2425ffa97" -dependencies = [ - "serde", -] - [[package]] name = "serde_derive" version = "1.0.204" @@ -5631,6 +5608,17 @@ dependencies = [ "thiserror", ] +[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", +] + [[package]] name = "serde_spanned" version = "0.6.7" @@ -5640,6 +5628,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_test" +version = "1.0.176" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a2f49ace1498612d14f7e0b8245519584db8299541dfe31a06374a828d620ab" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5697,7 +5694,7 @@ dependencies = [ [[package]] name = "service" -version = "1.0.0-alpha.0" +version = "1.0.0-rc.4" dependencies = [ "anyhow", "async-graphql", @@ -5706,21 +5703,19 @@ dependencies = [ "build-info", "build-info-build", "clap", - "figment", "graphql", "hex-literal", "indexer-common", + "indexer-config", "lazy_static", "reqwest 0.12.5", "serde", "serde_json", - "shellexpand", "sqlx", "thegraph", "thegraph-graphql-http", "thiserror", "tokio", - "toml", "tracing", "tracing-subscriber", ] @@ -5789,12 +5784,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shellexpand" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" - [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -6498,22 +6487,13 @@ dependencies = [ "winapi", ] -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - [[package]] name = "test-log" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93" dependencies = [ - "env_logger 0.11.5", + "env_logger", "test-log-macros", "tracing-subscriber", ] @@ -6548,16 +6528,15 @@ dependencies = [ [[package]] name = "thegraph-core" -version = "0.4.3" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aafbdb88afae23b3813cc2ea02340f4f628673df17ac7a44deb65f95f3f2a1a" +checksum = "2aa9a1cb7660670e72ba0e6b1cc3854e46548f690a95979f4b6d4d3f7fe4d061" dependencies = [ "alloy-primitives 0.7.7", "alloy-sol-types 0.7.7", "bs58", "ethers-core", "indoc", - "lazy_static", "reqwest 0.12.5", "serde", "serde_json", @@ -7079,9 +7058,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] diff --git a/Cargo.toml b/Cargo.toml index 008c8669..d3c1ead9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["common", "service", "tap-agent"] +members = ["common", "config", "service", "tap-agent"] resolver = "2" [profile.dev.package."*"] diff --git a/README.md b/README.md index fb8addfd..9ba009bf 100644 --- a/README.md +++ b/README.md @@ -27,8 +27,8 @@ Options: All the configuration is done through a TOML file. Please see up-to-date TOML configuration templates: -- [Minimal configuration template (recommended)](service/minimal-config-example.toml) -- [Maximal configuration template (not recommended, dangerous settings)](service/maximal-config-example.toml) +- [Minimal configuration template (recommended)](config/minimal-config-example.toml) +- [Maximal configuration template (not recommended, dangerous settings)](config/maximal-config-example.toml) ## Upgrading @@ -37,7 +37,7 @@ We follow conventional semantics for package versioning. An indexer may set a mi 1. **Review Release Notes**: Before upgrading, check the release notes for the new version to understand what changes, fixes, or new features are included. 2. **Review Documentation**: Check the up-to-date documentation for an accurate reflection of the changes made during the upgrade. 3. **Backup Configuration**: Save your current configuration files and any local modifications you've made to the existing codebase. -4. **Deploy**: Replace the old executable or docker image with the new one and restart the service to apply the upgrade. +4. **Deploy**: Replace the old executable or docker image with the new one and restart the service to apply the upgrade. 5. **Monitor and Validate**: After the upgrade, monitor system behavior and performance metrics to validate that the service is running as expected. 
These steps should ensure a smooth transition to the latest version of `indexer-service-rs`, harnessing new capabilities while maintaining system integrity. @@ -46,13 +46,11 @@ These steps should ensure a smooth transition to the latest version of `indexer- [Contributions guide](/contributing.md) - ### Supported request and response format examples - ``` ✗ curl http://localhost:7300/ -Ready to roll! +Ready to roll! ✗ curl http://localhost:7300/health {"healthy":true} @@ -74,31 +72,31 @@ Ready to roll! # Free query auth token check failed ✗ curl -X POST -H 'Content-Type: application/json' -H 'Authorization: blah' --data '{"query": "{_meta{block{number}}}"}' http://localhost:7300/subgraphs/id/0xb655ca6f49e73728a102219726ff678d61d8fb792874792e9f0d9887dc616600 -"Invalid Scalar-Receipt header provided"% +"Invalid Tap-Receipt header provided"% # Subgraph health check ✗ curl http://localhost:7300/subgraphs/health/QmVhiE4nax9i86UBnBmQCYDzvjWuwHShYh7aspGPQhU5Sj -"Subgraph deployment is up to date"% +"Subgraph deployment is up to date"% ## Unfound subgraph ✗ curl http://localhost:7300/subgraphs/health/QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB -"Invalid indexing status"% +"Invalid indexing status"% # Network queries # Checks for auth and configuration to serve-network-subgraph -✗ curl -X POST -H 'Content-Type: application/json' -H 'Authorization: token-for-network-subgraph' --data '{"query": "{_meta{block{number}}}"}' http://localhost:7300/network +✗ curl -X POST -H 'Content-Type: application/json' -H 'Authorization: token-for-network-subgraph' --data '{"query": "{_meta{block{number}}}"}' http://localhost:7300/network "Not enabled or authorized query" # Indexing status resolver - Route supported root field queries to graph node status endpoint -✗ curl -X POST -H 'Content-Type: application/json' --data '{"query": "{blockHashFromNumber(network:\"goerli\", blockNumber: 9069120)}"}' http://localhost:7300/status -{"data":{"blockHashFromNumber":"e1e5472636db73ba5496aee098dc21310683c95eb30fc46f9ba6c36d8b28d58e"}}% +✗ curl -X POST -H 'Content-Type: application/json' --data '{"query": "{blockHashFromNumber(network:\"goerli\", blockNumber: 9069120)}"}' http://localhost:7300/status +{"data":{"blockHashFromNumber":"e1e5472636db73ba5496aee098dc21310683c95eb30fc46f9ba6c36d8b28d58e"}}% -# Indexing status resolver - -✗ curl -X POST -H 'Content-Type: application/json' --data '{"query": "{indexingStatuses {subgraph health} }"}' http://localhost:7300/status +# Indexing status resolver - +✗ curl -X POST -H 'Content-Type: application/json' --data '{"query": "{indexingStatuses {subgraph health} }"}' http://localhost:7300/status {"data":{"indexingStatuses":[{"subgraph":"QmVhiE4nax9i86UBnBmQCYDzvjWuwHShYh7aspGPQhU5Sj","health":"healthy"},{"subgraph":"QmWVtsWk8Pqn3zY3czDjyoVreshRLmoz9jko3mQ4uvxQDj","health":"healthy"},{"subgraph":"QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB","health":"healthy"}]}} # Indexing status resolver - Filter out the unsupported queries -✗ curl -X POST -H 'Content-Type: application/json' --data '{"query": "{_meta{block{number}}}"}' http://localhost:7300/status -{"errors":[{"locations":[{"line":1,"column":2}],"message":"Type `Query` has no field `_meta`"}]}% +✗ curl -X POST -H 'Content-Type: application/json' --data '{"query": "{_meta{block{number}}}"}' http://localhost:7300/status +{"errors":[{"locations":[{"line":1,"column":2}],"message":"Type `Query` has no field `_meta`"}]}% ######## Cost server - read-only graphql query curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ 
costModel(deployment: \"Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss\") { deployment model variables }} "}' http://localhost:7300/cost @@ -109,8 +107,8 @@ curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ costModel(d curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ costModel(deployment: \"Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vas\") { deployment model variables }} "}' http://localhost:7300/cost {"data":{"costModel":null}}% -curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ costModel(deployment: \"Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss\") { deployment odel variables }} "}' http://localhost:7300/cost -{"errors":[{"message":"Cannot query field \"odel\" on type \"CostModel\". Did you mean \"model\"?","locations":[{"line":1,"column":88}]}]}% +curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ costModel(deployment: \"Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss\") { deployment odel variables }} "}' http://localhost:7300/cost +{"errors":[{"message":"Cannot query field \"odel\" on type \"CostModel\". Did you mean \"model\"?","locations":[{"line":1,"column":88}]}]}% curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ costModels(deployments: [\"Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss\"]) { deployment model variables }} "}' http://localhost:7300/cost {"data":{"costModels":[{"deployment":"0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea","model":"default => 0.00025;","variables":null}]}}% @@ -125,13 +123,13 @@ curl -X GET -H 'Content-Type: application/json' --data '{"query": "{ costModels( - `postgres` database connection required to indexer management server database, shared with the indexer agent - No migration in indexer service as it might introduce conflicts to the database; indexer agent is solely responsible for database management. - ### Indexer common components Temporarily live inside the indexer-service package under `src/common`. Simple indexer management client to track NetworkSubgraph and postgres connection. -- NetworkSubgraph instance track both remote API endpoint and local deployment query endpoint. + +- NetworkSubgraph instance tracks both the remote API endpoint and the local deployment query endpoint. - TODO: query indexing status of local deployment, only use remote API as fallback. - Keeps cost model schema and resolvers with postgres and graphQL types: `costModel(deployment)` and `costModels(deployments)`. If deployments is empty, all cost models are returned. - Global cost model fallback used when specific deployments are queried @@ -141,7 +139,7 @@ Simple indexer management client to track NetworkSubgraph and postgres connectio Linked dependency could not be linked directly with git url "https://github.com/graphprotocol/indexer" and path "packages/indexer-native/native" at the same time, and could not access it on crates.io. So copied the folder to the local repo with the version at https://github.com/graphprotocol/indexer/blob/972658b3ce8c512ad7b4dc575d29cd9d5377e3fe/packages/indexer-native/native. -Since indexer-service will be written in Rust and no need for typescript, indexer-native's neon build and util has been removed. +Since indexer-service will be written in Rust and there is no need for TypeScript, indexer-native's neon build and util have been removed. Component `NativeSignatureVerifier` renamed to `SignatureVerifier`.
@@ -194,7 +192,7 @@ Temporarily live inside the indexer-service package under `src/types` - [ ] query timing logs - [x] Deployment health server - [x] query status endpoint and process result -- [ ] Status server +- [ ] Status server - [x] indexing status resolver - to query indexingStatuses - [ ] Filter for unsupported queries - [x] Cost server @@ -211,7 +209,7 @@ Temporarily live inside the indexer-service package under `src/types` - [ ] Metrics - [x] Metrics setup - [x] serve basic indexer service metrics - - [ ] Add cost model metrics + - [ ] Add cost model metrics - [x] CLI args - [ ] App profiling - [ ] No gcloud profiling, can use `perf` to collect performance data. diff --git a/common/Cargo.toml b/common/Cargo.toml index 5eb5e00f..eaabc49a 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -40,13 +40,12 @@ build-info = "0.0.34" autometrics = { version = "1.0.1", features = ["prometheus-exporter"] } tracing = "0.1.40" tower_governor = "0.3.2" -tower-http = { version = "0.5.2", features = ["trace", "cors"] } +tower-http = { version = "0.5.2", features = ["trace", "cors", "normalize-path"] } tokio-util = "0.7.10" bigdecimal = "0.4.2" -serde-inline-default = "0.2.0" -thegraph-core = { version = "0.4.1", features = ["subgraph-client"] } +thegraph-core = { version = "0.5.2", features = ["subgraph-client"] } [dev-dependencies] -env_logger = "0.9.0" +env_logger = "0.11.0" test-log = "0.2.12" wiremock = "0.5.19" diff --git a/common/src/allocations/monitor.rs b/common/src/allocations/monitor.rs index 0ad16e5c..58e59a4f 100644 --- a/common/src/allocations/monitor.rs +++ b/common/src/allocations/monitor.rs @@ -129,4 +129,16 @@ mod test { .await; assert!(result.unwrap().len() > 2000) } + + #[tokio::test] + async fn test_network_query_empty_response() { + let result = get_allocations( + network_subgraph_client(), + Address::from_str("0xdeadbeefcafebabedeadbeefcafebabedeadbeef").unwrap(), + Duration::from_secs(1712448507), + ) + .await + .unwrap(); + assert!(result.is_empty()) + } } diff --git a/common/src/attestations/dispute_manager.rs b/common/src/attestations/dispute_manager.rs index 0197dbf8..b0dc1e41 100644 --- a/common/src/attestations/dispute_manager.rs +++ b/common/src/attestations/dispute_manager.rs @@ -13,7 +13,6 @@ use crate::subgraph_client::{Query, SubgraphClient}; pub fn dispute_manager( network_subgraph: &'static SubgraphClient, - graph_network_id: u64, interval: Duration, ) -> Eventual<Address>
{ #[derive(Deserialize)] @@ -31,15 +30,14 @@ pub fn dispute_manager( timer(interval).map_with_retry( move |_| async move { let response = network_subgraph - .query::<DisputeManagerResponse>(Query::new_with_variables( + .query::<DisputeManagerResponse>(Query::new( r#" - query network($id: ID!) { - graphNetwork(id: $id) { + query network { + graphNetwork(id: 1) { disputeManager } } "#, - [("id", graph_network_id.into())], )) .await .map_err(|e| e.to_string())?; @@ -47,16 +45,11 @@ pub fn dispute_manager( response.map_err(|e| e.to_string()).and_then(|data| { data.graph_network .map(|network| network.dispute_manager) - .ok_or_else(|| { - format!("Network {} not found in network subgraph", graph_network_id) - }) + .ok_or_else(|| "Network 1 not found in network subgraph".to_string()) }) }, move |err: String| { - warn!( - "Failed to query dispute manager for network {}: {}", - graph_network_id, err, - ); + warn!("Failed to query dispute manager for network: {}", err,); // Sleep for a bit before we retry sleep(interval.div_f32(2.0)) @@ -115,7 +108,7 @@ mod test { async fn test_parses_dispute_manager_from_network_subgraph_correctly() { let (network_subgraph, _mock_server) = setup_mock_network_subgraph().await; - let dispute_manager = dispute_manager(network_subgraph, 1, Duration::from_secs(60)); + let dispute_manager = dispute_manager(network_subgraph, Duration::from_secs(60)); assert_eq!( dispute_manager.value().await.unwrap(), diff --git a/common/src/indexer_errors.rs b/common/src/indexer_errors.rs index 08be7fdf..e8d29a66 100644 --- a/common/src/indexer_errors.rs +++ b/common/src/indexer_errors.rs @@ -204,9 +204,9 @@ impl IndexerErrorCode { Self::IE026 => "Failed to deploy subgraph deployment", Self::IE027 => "Failed to remove subgraph deployment", Self::IE028 => "Failed to reassign subgraph deployment", - Self::IE029 => "Invalid Scalar-Receipt header provided", - Self::IE030 => "No Scalar-Receipt header provided", - Self::IE031 => "Invalid Scalar-Receipt value provided", + Self::IE029 => "Invalid Tap-Receipt header provided", + Self::IE030 => "No Tap-Receipt header provided", + Self::IE031 => "Invalid Tap-Receipt value provided", Self::IE032 => "Failed to process paid query", Self::IE033 => "Failed to process free query", Self::IE034 => "Not authorized as an operator for the indexer", diff --git a/common/src/indexer_service/http/config.rs b/common/src/indexer_service/http/config.rs index f437a647..63b7a3e7 100644 --- a/common/src/indexer_service/http/config.rs +++ b/common/src/indexer_service/http/config.rs @@ -4,7 +4,6 @@ use std::net::SocketAddr; use serde::{Deserialize, Serialize}; -use serde_inline_default::serde_inline_default; use thegraph::types::Address; use thegraph::types::DeploymentId; @@ -13,7 +12,6 @@ pub struct DatabaseConfig { pub postgres_url: String, } -#[serde_inline_default] #[derive(Clone, Debug, Deserialize, Serialize)] pub struct SubgraphConfig { #[serde(default)] pub deployment: Option<DeploymentId>, pub query_url: String, - #[serde_inline_default(60)] + pub query_auth_token: Option<String>, pub syncing_interval: u64, - #[serde_inline_default(3600)] pub recently_closed_allocation_buffer_seconds: u64, } -#[serde_inline_default] #[derive(Clone, Debug, Deserialize, Serialize)] pub struct ServerConfig { - #[serde_inline_default("0.0.0.0:7600".parse().unwrap())] pub host_and_port: SocketAddr, - #[serde_inline_default("0.0.0.0:7300".parse().unwrap())] pub metrics_host_and_port: SocketAddr, - #[serde_inline_default("/".to_string())] pub url_prefix: String, pub free_query_auth_token: Option<String>, }
-#[serde_inline_default] #[derive(Clone, Debug, Deserialize, Serialize)] pub struct IndexerServiceConfig { pub indexer: IndexerConfig, - #[serde_inline_default(serde_json::from_str(r#"{}"#).unwrap())] // Allow missing pub server: ServerConfig, pub database: DatabaseConfig, pub graph_node: Option<GraphNodeConfig>, pub network_subgraph: SubgraphConfig, pub escrow_subgraph: SubgraphConfig, pub graph_network: GraphNetworkConfig, - pub scalar: ScalarConfig, + pub tap: TapConfig, } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -60,11 +51,8 @@ pub struct GraphNodeConfig { pub query_base_url: String, } -#[serde_inline_default] #[derive(Clone, Debug, Deserialize, Serialize)] pub struct GraphNetworkConfig { - #[serde_inline_default(1)] - pub id: u64, pub chain_id: u64, } @@ -74,12 +62,10 @@ pub struct IndexerConfig { pub operator_mnemonic: String, } -#[serde_inline_default] #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct ScalarConfig { +pub struct TapConfig { pub chain_id: u64, pub receipts_verifier_address: Address, - #[serde_inline_default(30)] pub timestamp_error_tolerance: u64, - pub receipt_max_value: u64, + pub receipt_max_value: u128, } diff --git a/common/src/indexer_service/http/indexer_service.rs b/common/src/indexer_service/http/indexer_service.rs index e2e79765..8ab7941a 100644 --- a/common/src/indexer_service/http/indexer_service.rs +++ b/common/src/indexer_service/http/indexer_service.rs @@ -10,14 +10,15 @@ use alloy_sol_types::eip712_domain; use anyhow; use autometrics::prometheus_exporter; use axum::extract::MatchedPath; +use axum::extract::Request as ExtractRequest; use axum::http::{Method, Request}; -use axum::serve; use axum::{ async_trait, response::{IntoResponse, Response}, routing::{get, post}, Extension, Json, Router, }; +use axum::{serve, ServiceExt}; use build_info::BuildInfo; use eventuals::Eventual; use reqwest::StatusCode; @@ -30,9 +31,7 @@ use thiserror::Error; use tokio::net::TcpListener; use tokio::signal; use tower_governor::{governor::GovernorConfigBuilder, GovernorLayer}; -use tower_http::cors; -use tower_http::cors::CorsLayer; -use tower_http::trace::TraceLayer; +use tower_http::{cors, cors::CorsLayer, normalize_path::NormalizePath, trace::TraceLayer}; use tracing::{info, info_span}; use crate::{ @@ -212,15 +211,14 @@ impl IndexerService { ) }) .transpose()?, - DeploymentDetails::for_query_url(&options.config.network_subgraph.query_url)?, + DeploymentDetails::for_query_url_with_token( + &options.config.network_subgraph.query_url, + options.config.network_subgraph.query_auth_token.clone(), + )?, ))); // Identify the dispute manager for the configured network - let dispute_manager = dispute_manager( - network_subgraph, - options.config.graph_network.id, - Duration::from_secs(3600), - ); + let dispute_manager = dispute_manager(network_subgraph, Duration::from_secs(3600)); // Monitor the indexer's own allocations let allocations = indexer_allocations( @@ -259,7 +257,10 @@ impl IndexerService { ) }) .transpose()?, - DeploymentDetails::for_query_url(&options.config.escrow_subgraph.query_url)?, + DeploymentDetails::for_query_url_with_token( + &options.config.escrow_subgraph.query_url, + options.config.escrow_subgraph.query_auth_token.clone(), + )?, ))); let escrow_accounts = escrow_accounts( @@ -285,15 +286,15 @@ impl IndexerService { let domain_separator = eip712_domain!
{ name: "TAP", version: "1", - chain_id: options.config.scalar.chain_id, - verifying_contract: options.config.scalar.receipts_verifier_address, + chain_id: options.config.tap.chain_id, + verifying_contract: options.config.tap.receipts_verifier_address, }; let indexer_context = IndexerTapContext::new(database.clone(), domain_separator.clone()).await; let timestamp_error_tolerance = - Duration::from_secs(options.config.scalar.timestamp_error_tolerance); + Duration::from_secs(options.config.tap.timestamp_error_tolerance); - let receipt_max_value = options.config.scalar.receipt_max_value; + let receipt_max_value = options.config.tap.receipt_max_value; let checks = IndexerTapContext::get_checks( database, @@ -301,7 +302,7 @@ impl IndexerService { escrow_accounts, domain_separator.clone(), timestamp_error_tolerance, - receipt_max_value.into(), + receipt_max_value, ) .await; @@ -389,40 +390,42 @@ impl IndexerService { ) .with_state(state.clone()); - let router = misc_routes - .merge(data_routes) - .merge(options.extra_routes) - .layer( - CorsLayer::new() - .allow_origin(cors::Any) - .allow_headers(cors::Any) - .allow_methods([Method::OPTIONS, Method::POST, Method::GET]), - ) - .layer( - TraceLayer::new_for_http() - .make_span_with(|req: &Request<_>| { - let method = req.method(); - let uri = req.uri(); - let matched_path = req - .extensions() - .get::<MatchedPath>() - .map(MatchedPath::as_str); - - info_span!( - "http_request", - %method, - %uri, - matched_path, - ) - }) - // we disable failures here because we doing our own error logging - .on_failure( - |_error: tower_http::classify::ServerErrorsFailureClass, - _latency: Duration, - _span: &tracing::Span| {}, - ), - ) - .with_state(state); + let router = NormalizePath::trim_trailing_slash( + misc_routes + .merge(data_routes) + .merge(options.extra_routes) + .layer( + CorsLayer::new() + .allow_origin(cors::Any) + .allow_headers(cors::Any) + .allow_methods([Method::OPTIONS, Method::POST, Method::GET]), + ) + .layer( + TraceLayer::new_for_http() + .make_span_with(|req: &Request<_>| { + let method = req.method(); + let uri = req.uri(); + let matched_path = req + .extensions() + .get::<MatchedPath>() + .map(MatchedPath::as_str); + + info_span!( + "http_request", + %method, + %uri, + matched_path, + ) + }) + // we disable failures here because we are doing our own error logging + .on_failure( + |_error: tower_http::classify::ServerErrorsFailureClass, + _latency: Duration, + _span: &tracing::Span| {}, + ), + ) + .with_state(state), + ); Self::serve_metrics(options.config.server.metrics_host_and_port); @@ -436,7 +439,7 @@ impl IndexerService { Ok(serve( listener, - router.into_make_service_with_connect_info::<SocketAddr>(), + ServiceExt::<ExtractRequest>::into_make_service_with_connect_info::<SocketAddr>(router), ) .with_graceful_shutdown(shutdown_signal()) .await?)
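Aside on the `NormalizePath` rework in the hunk above: wrapping the `Router` in `NormalizePath` yields a service that is no longer a `Router`, so `Router::into_make_service_with_connect_info` can no longer be called on it, and the make-service has to be built through the `ServiceExt` trait instead. Below is a minimal, self-contained sketch of the same pattern, assuming axum 0.7 and tower-http 0.5 with the `normalize-path` feature; the `/health` route is hypothetical and stands in for the service's real route sets.

```rust
use std::net::SocketAddr;

use axum::{extract::Request, routing::get, Router, ServiceExt};
use tokio::net::TcpListener;
use tower_http::normalize_path::NormalizePath;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Hypothetical route; the real service merges several route sets.
    let router = Router::new().route("/health", get(|| async { "ok" }));

    // Strip trailing slashes before routing: "/health/" now matches "/health".
    let app = NormalizePath::trim_trailing_slash(router);

    let listener = TcpListener::bind("0.0.0.0:7600").await?;
    // `app` is a `NormalizePath<Router>`, not a `Router`, so the make-service
    // is built via the `ServiceExt` trait with explicit type parameters.
    axum::serve(
        listener,
        ServiceExt::<Request>::into_make_service_with_connect_info::<SocketAddr>(app),
    )
    .await
}
```

The design choice is to normalize at the outermost layer, before routing, so both `/foo` and `/foo/` resolve to a single route definition instead of every route being declared twice.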
diff --git a/common/src/indexer_service/http/mod.rs b/common/src/indexer_service/http/mod.rs index 0f6b7b25..97e88a59 100644 --- a/common/src/indexer_service/http/mod.rs +++ b/common/src/indexer_service/http/mod.rs @@ -5,12 +5,12 @@ mod config; mod indexer_service; mod metrics; mod request_handler; -mod scalar_receipt_header; mod static_subgraph; +mod tap_receipt_header; pub use config::{ - DatabaseConfig, GraphNetworkConfig, IndexerConfig, IndexerServiceConfig, ServerConfig, - SubgraphConfig, + DatabaseConfig, GraphNetworkConfig, GraphNodeConfig, IndexerConfig, IndexerServiceConfig, + ServerConfig, SubgraphConfig, TapConfig, }; pub use indexer_service::{ IndexerService, IndexerServiceImpl, IndexerServiceOptions, IndexerServiceRelease, diff --git a/common/src/indexer_service/http/request_handler.rs b/common/src/indexer_service/http/request_handler.rs index eebae0c6..9996b59f 100644 --- a/common/src/indexer_service/http/request_handler.rs +++ b/common/src/indexer_service/http/request_handler.rs @@ -18,14 +18,14 @@ use crate::{indexer_service::http::IndexerServiceResponse, prelude::AttestationS use super::{ indexer_service::{IndexerServiceError, IndexerServiceState}, - scalar_receipt_header::ScalarReceipt, + tap_receipt_header::TapReceipt, IndexerServiceImpl, }; #[autometrics::autometrics] pub async fn request_handler<I>( Path(manifest_id): Path<DeploymentId>, - TypedHeader(receipt): TypedHeader<ScalarReceipt>, + TypedHeader(receipt): TypedHeader<TapReceipt>, State(state): State<Arc<IndexerServiceState<I>>>, headers: HeaderMap, body: Bytes, diff --git a/common/src/indexer_service/http/scalar_receipt_header.rs b/common/src/indexer_service/http/tap_receipt_header.rs similarity index 70% rename from common/src/indexer_service/http/scalar_receipt_header.rs rename to common/src/indexer_service/http/tap_receipt_header.rs index f7350acd..3d2f3f19 100644 --- a/common/src/indexer_service/http/scalar_receipt_header.rs +++ b/common/src/indexer_service/http/tap_receipt_header.rs @@ -8,15 +8,15 @@ use lazy_static::lazy_static; use tap_core::receipt::SignedReceipt; #[derive(Debug, PartialEq)] -pub struct ScalarReceipt(Option<SignedReceipt>); +pub struct TapReceipt(Option<SignedReceipt>); -impl ScalarReceipt { +impl TapReceipt { pub fn into_signed_receipt(self) -> Option<SignedReceipt> { self.0 } } -impl Deref for ScalarReceipt { +impl Deref for TapReceipt { type Target = Option<SignedReceipt>; fn deref(&self) -> &Self::Target { @@ -25,12 +25,12 @@ impl Deref for ScalarReceipt { } lazy_static!
{ - static ref SCALAR_RECEIPT: HeaderName = HeaderName::from_static("scalar-receipt"); + static ref TAP_RECEIPT: HeaderName = HeaderName::from_static("tap-receipt"); } -impl Header for ScalarReceipt { +impl Header for TapReceipt { fn name() -> &'static HeaderName { - &SCALAR_RECEIPT + &TAP_RECEIPT } fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error> @@ -46,7 +46,7 @@ impl Header for ScalarReceipt { .map(serde_json::from_str) .transpose() .map_err(|_| headers::Error::invalid())?; - Ok(ScalarReceipt(parsed_receipt)) + Ok(TapReceipt(parsed_receipt)) } fn encode<E>(&self, _values: &mut E) @@ -67,39 +67,36 @@ mod test { use crate::test_vectors::create_signed_receipt; - use super::ScalarReceipt; + use super::TapReceipt; #[tokio::test] - async fn test_decode_valid_scalar_receipt_header() { + async fn test_decode_valid_tap_receipt_header() { let allocation = Address::from_str("0xdeadbeefcafebabedeadbeefcafebabedeadbeef").unwrap(); let original_receipt = create_signed_receipt(allocation, u64::MAX, u64::MAX, u128::MAX).await; let serialized_receipt = serde_json::to_string(&original_receipt).unwrap(); let header_value = HeaderValue::from_str(&serialized_receipt).unwrap(); let header_values = vec![&header_value]; - let decoded_receipt = ScalarReceipt::decode(&mut header_values.into_iter()) - .expect("scalar receipt header value should be valid"); + let decoded_receipt = TapReceipt::decode(&mut header_values.into_iter()) + .expect("tap receipt header value should be valid"); - assert_eq!( - decoded_receipt, - ScalarReceipt(Some(original_receipt.clone())) - ); + assert_eq!(decoded_receipt, TapReceipt(Some(original_receipt.clone()))); } #[test] - fn test_decode_non_string_scalar_receipt_header() { + fn test_decode_non_string_tap_receipt_header() { let header_value = HeaderValue::from_static("123"); let header_values = vec![&header_value]; - let result = ScalarReceipt::decode(&mut header_values.into_iter()); + let result = TapReceipt::decode(&mut header_values.into_iter()); assert!(result.is_err()); } #[test] - fn test_decode_invalid_scalar_receipt_header() { + fn test_decode_invalid_tap_receipt_header() { let header_value = HeaderValue::from_bytes(b"invalid").unwrap(); let header_values = vec![&header_value]; - let result = ScalarReceipt::decode(&mut header_values.into_iter()); + let result = TapReceipt::decode(&mut header_values.into_iter()); assert!(result.is_err()); } diff --git a/common/src/subgraph_client/client.rs b/common/src/subgraph_client/client.rs index bcaa082e..d957d3f4 100644 --- a/common/src/subgraph_client/client.rs +++ b/common/src/subgraph_client/client.rs @@ -80,6 +80,7 @@ pub struct DeploymentDetails { pub deployment: Option<DeploymentId>, pub status_url: Option<Url>, pub query_url: Url, + pub query_auth_token: Option<String>, } impl DeploymentDetails { @@ -91,7 +92,9 @@ Ok(Self { deployment: Some(deployment), status_url: Some(Url::parse(graph_node_status_url)?), - query_url: Url::parse(&format!("{graph_node_base_url}/subgraphs/id/{deployment}"))?, + query_url: Url::parse(graph_node_base_url)?
+ .join(&format!("subgraphs/id/{deployment}"))?, + query_auth_token: None, }) } @@ -100,6 +103,19 @@ deployment: None, status_url: None, query_url: Url::parse(query_url)?, + query_auth_token: None, }) } + + pub fn for_query_url_with_token( + query_url: &str, + query_auth_token: Option<String>, + ) -> Result<Self, anyhow::Error> { + Ok(Self { + deployment: None, + status_url: None, + query_url: Url::parse(query_url)?, + query_auth_token, + }) + } } @@ -113,10 +129,11 @@ struct DeploymentClient { impl DeploymentClient { pub fn new(http_client: reqwest::Client, details: DeploymentDetails) -> Self { - let subgraph_client = Mutex::new(GraphCoreSubgraphClient::new( - http_client.clone(), - details.query_url.clone(), - )); + let subgraph_client = Mutex::new( + GraphCoreSubgraphClient::builder(http_client.clone(), details.query_url.clone()) + .with_auth_token(details.query_auth_token) + .build(), + ); Self { http_client, subgraph_client, @@ -354,6 +371,7 @@ mod test { } #[tokio::test] + #[ignore = "depends on the defunct hosted-service"] async fn test_network_query() { let _mock_server = mock_graph_node_server().await; diff --git a/common/src/tap/checks/deny_list_check.rs b/common/src/tap/checks/deny_list_check.rs index 3c2445e4..0fe21cca 100644 --- a/common/src/tap/checks/deny_list_check.rs +++ b/common/src/tap/checks/deny_list_check.rs @@ -168,7 +168,7 @@ impl Check for DenyListCheck { { return Err(anyhow::anyhow!( "Received a receipt from a denylisted sender: {}", - receipt_signer + receipt_sender )); } diff --git a/config/Cargo.toml b/config/Cargo.toml new file mode 100644 index 00000000..02514b4c --- /dev/null +++ b/config/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "indexer-config" +version = "1.0.0-rc.4" +edition = "2021" + +[dependencies] +alloy-primitives = "0.6" +bigdecimal = { version = "0.4.3", features = ["serde"] } +bip39 = "2.0.0" +figment = { version = "0.10.19", features = ["env", "toml"] } +serde = "1.0.188" +serde_with = "3.8.1" +serde_repr = "0.1.19" +thegraph = { git = "https://github.com/edgeandnode/toolshed", tag = "thegraph-v0.5.0" } +url = { version = "2.5.0", features = ["serde"] } +tracing = "0.1.34" + +[dev-dependencies] +serde_test = "1.0.176" +toml = "0.8.12" diff --git a/config/default_values.toml b/config/default_values.toml new file mode 100644 index 00000000..36a9cd6b --- /dev/null +++ b/config/default_values.toml @@ -0,0 +1,24 @@ +[metrics] +port = 7300 + +[subgraphs.network] +syncing_interval_secs = 60 +recently_closed_allocation_buffer_secs = 3600 + +[subgraphs.escrow] +syncing_interval_secs = 60 + +[service] +serve_network_subgraph = false +serve_escrow_subgraph = false +host_and_port = "0.0.0.0:7600" +url_prefix = "/" + +[service.tap] +max_receipt_value_grt = "0.001" # We use strings to prevent rounding errors + +[tap.rav_request] +trigger_value_divisor = 10 +timestamp_buffer_secs = 60 +request_timeout_secs = 5 +max_receipts_per_request = 10000 diff --git a/config/maximal-config-example.toml b/config/maximal-config-example.toml new file mode 100644 index 00000000..fdf117f9 --- /dev/null +++ b/config/maximal-config-example.toml @@ -0,0 +1,137 @@ +# WARNING: This shows all the possible configuration options. Make sure you know what +# you are doing. +# Prefer starting with `minimal-config-example.toml`. +# +# All the optional values (missing from the minimal config) are set to the current +# default values. +# You will have to change *most* of the values below to match your setup.
+# +# Some of the config values below are global graph network values, which you can find here: +# https://github.com/graphprotocol/indexer/tree/main/docs/networks +# +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite them with environment variables. For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# +# [database] +# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" + + +[indexer] +indexer_address = "0x1111111111111111111111111111111111111111" +operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" + +[metrics] +# Port to serve metrics. This one should stay private. +port = 7300 + +[database] +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. +postgres_url = "postgres://postgres@postgres:5432/postgres" + +[graph_node] +# URL to your graph-node's query endpoint +query_url = "http://graph-node:8000" +# URL to your graph-node's status endpoint +status_url = "http://graph-node:8000/graphql" + +[subgraphs.network] +# Query URL for the Graph Network subgraph. +query_url = "http://example.com/network-subgraph" +# Optional: the auth token will be used as "bearer auth" +# query_auth_token = "super-secret" + +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# Refresh interval for the Graph contracts information from the Graph Network +# subgraph. +syncing_interval_secs = 60 +# Amount of time to keep treating an allocation as active after it has been closed. +# This is so that we can keep serving queries while the information about the allocation closure +# propagates to all the consumers. +recently_closed_allocation_buffer_secs = 3600 + +[subgraphs.escrow] +# Query URL for the Escrow subgraph. +query_url = "http://example.com/network-subgraph" +# Optional: the auth token will be used as "bearer auth" +# query_auth_token = "super-secret" + +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# Refresh interval for the Escrow contracts information from the Escrow subgraph. +syncing_interval_secs = 60 + +[blockchain] +# The chain ID of the network that the graph network is running on +chain_id = 1337 +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. +receipts_verifier_address = "0x2222222222222222222222222222222222222222" + +############################################## +# Specific configurations to indexer-service # +############################################## +[service] +# Host and port to serve the indexer-service query endpoint. This one should have a +# public ingress. +host_and_port = "0.0.0.0:7600" +# URL prefix for the query endpoint.
+url_prefix = "/"
+# Serve the network subgraph on `service.host_and_port`/network
+serve_network_subgraph = false
+# Serve the escrow subgraph on `service.host_and_port`/escrow
+serve_escrow_subgraph = false
+#### OPTIONAL VALUES ####
+## use this to add an auth layer when serving the network/escrow subgraphs
+# serve_auth_token = "token"
+## allow queries using this token
+# free_query_auth_token = "i-am-authorized-right?"
+
+
+[service.tap]
+# Maximum value of a receipt, in GRT wei.
+# We need this because a large receipt, especially if it's larger than the RAV request trigger,
+# or worse, the unaggregated receipts limit (tap-agent), can cause the indexer to refuse service
+# to the sender for the duration of the RAV request timestamp buffer.
+max_receipt_value_grt = "0.001" # 0.001 GRT. We use strings to prevent rounding errors
+
+########################################
+# Specific configurations to tap-agent #
+########################################
+[tap]
+# This is the amount of fees you are willing to risk at any given time. For example,
+# if the sender stops supplying RAVs for long enough and the fees exceed this
+# amount, the indexer-service will stop accepting queries from the sender
+# until the fees are aggregated.
+# NOTE: Use strings for decimal values to prevent rounding errors
+# e.g.:
+# max_amount_willing_to_lose_grt = "0.1"
+max_amount_willing_to_lose_grt = 20

+[tap.rav_request]
+# The trigger value is the amount of unaggregated fees that triggers a RAV request.
+# The divisor is used to define the trigger value of a RAV request using
+# the following formula:
+#
+# max_amount_willing_to_lose_grt / trigger_value_divisor = trigger_value
+#
+# Must be a value greater than 1, but 10 or more is recommended
+trigger_value_divisor = 10
+# Buffer (in seconds) to add between the current time and the timestamp of the
+# last unaggregated fee when triggering a RAV request.
+timestamp_buffer_secs = 60
+# Timeout (in seconds) for RAV requests.
+request_timeout_secs = 5
+# Maximum number of receipts per aggregation request
+max_receipts_per_request = 10000
+
+[tap.sender_aggregator_endpoints]
+# Key-value map of all senders and their aggregator endpoints
+0xdeadbeefcafebabedeadbeefcafebabedeadbeef = "https://example.com/aggregate-receipts"
+0x0123456789abcdef0123456789abcdef01234567 = "https://other.example.com/aggregate-receipts"
diff --git a/config/minimal-config-example.toml b/config/minimal-config-example.toml
new file mode 100644
index 00000000..1a34270c
--- /dev/null
+++ b/config/minimal-config-example.toml
@@ -0,0 +1,67 @@
+# You will have to change *all* the values below to match your setup.
+#
+# Some of the config values below are global graph network values, which you can find here:
+# https://github.com/graphprotocol/indexer/tree/main/docs/networks
+#
+# Pro tip: if you need to load some values from the environment into this config, you
+# can override them with environment variables. For example, the following can be replaced
+# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`:
+#
+# [database]
+# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0"
+
+[indexer]
+indexer_address = "0x1111111111111111111111111111111111111111"
+operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane"
+
+[database]
+# The URL of the Postgres database used for the indexer components. The same database
+# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create
+# the necessary tables.
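+# For example, with the `INDEXER_SERVICE` prefix, the value below could instead come
+# from the INDEXER_SERVICE_DATABASE_POSTGRESURL environment variable (see the
+# "Pro tip" at the top of this file).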
+postgres_url = "postgres://postgres@postgres:5432/postgres"
+
+[graph_node]
+# URL to your graph-node's query endpoint
+query_url = "http://graph-node:8000"
+# URL to your graph-node's status endpoint
+status_url = "http://graph-node:8000/graphql"
+
+[subgraphs.network]
+# Query URL for the Graph Network subgraph.
+query_url = "http://example.com/network-subgraph"
+# Optional, deployment to look for in the local `graph-node`, if locally indexed.
+# Locally indexing the subgraph is recommended.
+# NOTE: Use `query_url` or `deployment_id` only
+deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+[subgraphs.escrow]
+# Query URL for the Escrow subgraph.
+query_url = "http://example.com/network-subgraph"
+# Optional, deployment to look for in the local `graph-node`, if locally indexed.
+# Locally indexing the subgraph is recommended.
+# NOTE: Use `query_url` or `deployment_id` only
+deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+[blockchain]
+# The chain ID of the network that the graph network is running on
+chain_id = 1337
+# Contract address of TAP's receipt aggregate voucher (RAV) verifier.
+receipts_verifier_address = "0x2222222222222222222222222222222222222222"
+
+########################################
+# Specific configurations to tap-agent #
+########################################
+[tap]
+# This is the amount of fees you are willing to risk at any given time. For example,
+# if the sender stops supplying RAVs for long enough and the fees exceed this
+# amount, the indexer-service will stop accepting queries from the sender
+# until the fees are aggregated.
+# NOTE: Use strings for decimal values to prevent rounding errors
+# e.g.:
+# max_amount_willing_to_lose_grt = "0.1"
+max_amount_willing_to_lose_grt = 20
+
+[tap.sender_aggregator_endpoints]
+# Key-value map of all senders and their aggregator endpoints
+0xdeadbeefcafebabedeadbeefcafebabedeadbeef = "https://example.com/aggregate-receipts"
+0x0123456789abcdef0123456789abcdef01234567 = "https://other.example.com/aggregate-receipts"
diff --git a/config/src/config.rs b/config/src/config.rs
new file mode 100644
index 00000000..03180d55
--- /dev/null
+++ b/config/src/config.rs
@@ -0,0 +1,306 @@
+// Copyright 2023-, GraphOps and Semiotic Labs.
+// SPDX-License-Identifier: Apache-2.0
+
+use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive};
+use figment::{
+ providers::{Env, Format, Toml},
+ Figment,
+};
+use serde_repr::Deserialize_repr;
+use serde_with::DurationSecondsWithFrac;
+use std::{collections::HashMap, net::SocketAddr, path::PathBuf, str::FromStr, time::Duration};
+use tracing::warn;
+
+use alloy_primitives::Address;
+use bip39::Mnemonic;
+use serde::Deserialize;
+use serde_with::serde_as;
+use thegraph::types::DeploymentId;
+use url::Url;
+
+use crate::NonZeroGRT;
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct Config {
+ pub indexer: IndexerConfig,
+ pub database: DatabaseConfig,
+ pub graph_node: GraphNodeConfig,
+ pub metrics: MetricsConfig,
+ pub subgraphs: SubgraphsConfig,
+ pub blockchain: BlockchainConfig,
+ pub service: ServiceConfig,
+ pub tap: TapConfig,
+}
+
+pub enum ConfigPrefix {
+ Tap,
+ Service,
+}
+
+impl ConfigPrefix {
+ fn get_prefix(&self) -> &'static str {
+ match self {
+ Self::Tap => "TAP_AGENT_",
+ Self::Service => "INDEXER_SERVICE_",
+ }
+ }
+}
+
+impl Config {
+ pub fn parse(prefix: ConfigPrefix, filename: &PathBuf) -> Result<Self, String> {
+ let config_defaults = include_str!("../default_values.toml");
+
+ let config: Self = Figment::new()
+ .merge(Toml::string(config_defaults))
+ .merge(Toml::file(filename))
+ .merge(Env::prefixed(prefix.get_prefix()))
+ .extract()
+ .map_err(|e| e.to_string())?;
+ config.validate()?;
+
+ Ok(config)
+ }
+
+ // custom validation of the values
+ fn validate(&self) -> Result<(), String> {
+ match &self.tap.rav_request.trigger_value_divisor {
+ x if *x <= 1.into() => {
+ return Err("trigger_value_divisor must be greater than 1".to_string())
+ }
+ x if *x > 1.into() && *x < 10.into() => warn!(
+ "It's recommended that trigger_value_divisor \
+ be a value greater than 10."
+ ),
+ _ => {}
+ }
+
+ let ten: BigDecimal = 10.into();
+ let usual_grt_price = BigDecimal::from_str("0.0001").unwrap() * ten;
+ if self.tap.max_amount_willing_to_lose_grt.get_value() < usual_grt_price.to_u128().unwrap()
+ {
+ warn!(
+ "Your `max_amount_willing_to_lose_grt` value is too close to zero. \
+ This may deny the sender too often or even break the whole system. \
+ It's recommended to be a value greater than 100x a usual query price."
+ );
+ }
+
+ if self.subgraphs.escrow.config.syncing_interval_secs < Duration::from_secs(10)
+ || self.subgraphs.network.config.syncing_interval_secs < Duration::from_secs(10)
+ {
+ warn!(
+ "Your `syncing_interval_secs` value is too low. \
+ This may overload your graph-node instance; \
+ a recommended value is about 60 seconds."
+ );
+ }
+
+ if self.subgraphs.escrow.config.syncing_interval_secs > Duration::from_secs(600)
+ || self.subgraphs.network.config.syncing_interval_secs > Duration::from_secs(600)
+ {
+ warn!(
+ "Your `syncing_interval_secs` value is too high. \
+ This may cause issues while reacting to updates in the blockchain; \
+ a recommended value is about 60 seconds."
+ );
+ }
+
+ if self.tap.rav_request.timestamp_buffer_secs < Duration::from_secs(10) {
+ warn!(
+ "Your `tap.rav_request.timestamp_buffer_secs` value is too low. \
+ You may discard receipts in case of any synchronization issues; \
+ a recommended value is about 30 seconds."
+ );
+ }
+
+ Ok(())
+ }
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct IndexerConfig {
+ pub indexer_address: Address,
+ pub operator_mnemonic: Mnemonic,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct DatabaseConfig {
+ pub postgres_url: Url,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct GraphNodeConfig {
+ pub query_url: Url,
+ pub status_url: Url,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct MetricsConfig {
+ pub port: u16,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct SubgraphsConfig {
+ pub network: NetworkSubgraphConfig,
+ pub escrow: EscrowSubgraphConfig,
+}
+
+#[serde_as]
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct NetworkSubgraphConfig {
+ #[serde(flatten)]
+ pub config: SubgraphConfig,
+
+ #[serde_as(as = "DurationSecondsWithFrac")]
+ pub recently_closed_allocation_buffer_secs: Duration,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct EscrowSubgraphConfig {
+ #[serde(flatten)]
+ pub config: SubgraphConfig,
+}
+
+#[serde_as]
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct SubgraphConfig {
+ pub query_url: Url,
+ pub query_auth_token: Option<String>,
+ pub deployment_id: Option<DeploymentId>,
+ #[serde_as(as = "DurationSecondsWithFrac")]
+ pub syncing_interval_secs: Duration,
+}
+
+#[derive(Debug, Deserialize_repr, Clone)]
+#[cfg_attr(test, derive(PartialEq))]
+#[repr(u64)]
+pub enum TheGraphChainId {
+ Ethereum = 1,
+ Goerli = 5,
+ Sepolia = 11155111,
+ Arbitrum = 42161,
+ ArbitrumGoerli = 421613,
+ ArbitrumSepolia = 421614,
+ Test = 1337,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct BlockchainConfig {
+ pub chain_id: TheGraphChainId,
+ pub receipts_verifier_address: Address,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct ServiceConfig {
+ pub serve_network_subgraph: bool,
+ pub serve_escrow_subgraph: bool,
+ pub serve_auth_token: Option<String>,
+ pub host_and_port: SocketAddr,
+ pub url_prefix: String,
+ pub tap: ServiceTapConfig,
+ pub free_query_auth_token: Option<String>,
+}
+
+#[serde_as]
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct ServiceTapConfig {
+ /// what's the maximum value we accept in a receipt
+ pub max_receipt_value_grt: NonZeroGRT,
+}
+
+#[derive(Debug, Deserialize)]
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct TapConfig {
+ /// what is the maximum amount the indexer is willing to lose in grt
+ pub max_amount_willing_to_lose_grt: NonZeroGRT,
+ pub rav_request: RavRequestConfig,
+
+ pub sender_aggregator_endpoints: HashMap<Address, Url>,
+}
+
+impl TapConfig {
+ pub fn get_trigger_value(&self) -> u128 {
+ let grt_wei = self.max_amount_willing_to_lose_grt.get_value();
+ let decimal = BigDecimal::from_u128(grt_wei).unwrap();
+ let divisor = &self.rav_request.trigger_value_divisor;
+ (decimal / divisor)
+ .to_u128()
+ .expect("Could not represent the trigger value in u128")
+ }
+}
+
+#[serde_as]
+#[derive(Debug, Deserialize)]
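+// Worked example for `get_trigger_value` above, using the values from the config
+// examples: max_amount_willing_to_lose_grt = 20 GRT = 20 * 10^18 wei and
+// trigger_value_divisor = 10 yield a trigger value of 2 * 10^18 wei, i.e. a RAV
+// request fires once unaggregated fees reach 2 GRT.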
+#[cfg_attr(test, derive(PartialEq))]
+#[serde(deny_unknown_fields)]
+pub struct RavRequestConfig {
+ /// divisor of the max amount willing to lose that defines the rav request trigger value
+ pub trigger_value_divisor: BigDecimal,
+ /// timestamp buffer
+ #[serde_as(as = "DurationSecondsWithFrac")]
+ pub timestamp_buffer_secs: Duration,
+ /// timeout duration while requesting a rav
+ #[serde_as(as = "DurationSecondsWithFrac")]
+ pub request_timeout_secs: Duration,
+ /// how many receipts are sent in a single rav request
+ pub max_receipts_per_request: u64,
+}
+
+#[cfg(test)]
+mod tests {
+ use std::{fs, path::PathBuf};
+
+ use crate::{Config, ConfigPrefix};
+
+ #[test]
+ fn test_minimal_config() {
+ Config::parse(
+ ConfigPrefix::Service,
+ &PathBuf::from("minimal-config-example.toml"),
+ )
+ .unwrap();
+ }
+
+ #[test]
+ fn test_maximal_config() {
+ // Generate the full config by deserializing the minimal config and letting the code
+ // fill in the defaults.
+ let max_config = Config::parse(
+ ConfigPrefix::Service,
+ &PathBuf::from("minimal-config-example.toml"),
+ )
+ .unwrap();
+ let max_config_file: Config = toml::from_str(
+ fs::read_to_string("maximal-config-example.toml")
+ .unwrap()
+ .as_str(),
+ )
+ .unwrap();
+
+ assert_eq!(max_config, max_config_file);
+ }
+}
diff --git a/config/src/grt.rs b/config/src/grt.rs
new file mode 100644
index 00000000..e1dbdf6a
--- /dev/null
+++ b/config/src/grt.rs
@@ -0,0 +1,77 @@
+// Copyright 2023-, GraphOps and Semiotic Labs.
+// SPDX-License-Identifier: Apache-2.0
+
+use bigdecimal::{BigDecimal, ToPrimitive};
+use serde::{de::Error, Deserialize};
+
+#[derive(Debug, PartialEq)]
+pub struct NonZeroGRT(u128);
+
+impl NonZeroGRT {
+ pub fn get_value(&self) -> u128 {
+ self.0
+ }
+}
+
+impl<'de> Deserialize<'de> for NonZeroGRT {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let v = BigDecimal::deserialize(deserializer)?;
+ if v <= 0.into() {
+ return Err(Error::custom("GRT value must be greater than 0"));
+ }
+ // Convert to wei
+ let v = v * BigDecimal::from(10u64.pow(18));
+ // Convert to u128
+ let wei = v.to_u128().ok_or_else(|| {
+ Error::custom("GRT value cannot be represented as a u128 GRT wei value")
+ })?;
+
+ Ok(Self(wei))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use serde_test::{assert_de_tokens, assert_de_tokens_error, Token};
+
+ use super::*;
+
+ #[test]
+ fn test_parse_grt_value_to_u128_deserialize() {
+ assert_de_tokens(&NonZeroGRT(1_000_000_000_000_000_000), &[Token::Str("1")]);
+ assert_de_tokens(&NonZeroGRT(1_100_000_000_000_000_000), &[Token::Str("1.1")]);
+ assert_de_tokens(
+ &NonZeroGRT(1_100_000_000_000_000_000),
+ &[Token::String("1.1")],
+ );
+ // The following doesn't work because of rounding errors
+ // assert_de_tokens(&NonZeroGRT(1_100_000_000_000_000_000), &[Token::F32(1.1)]);
+ // assert_de_tokens(&NonZeroGRT(1_100_000_000_000_000_000), &[Token::F64(1.1)]);
+ assert_de_tokens(
+ &NonZeroGRT(1_000_000_000_000_000_001),
+ &[Token::Str("1.000000000000000001")],
+ );
+
+ assert_de_tokens(&NonZeroGRT(1), &[Token::Str("0.000000000000000001")]);
+ assert_de_tokens_error::<NonZeroGRT>(
+ &[Token::Str("0")],
+ "GRT value must be greater than 0",
+ );
+ assert_de_tokens_error::<NonZeroGRT>(
+ &[Token::Str("-1")],
+ "GRT value must be greater than 0",
+ );
+ assert_de_tokens(
+ &NonZeroGRT(1_000_000_000_000_000_000),
+ &[Token::Str("1.0000000000000000001")],
+ );
+ let v = Box::leak(Box::new(format!("{}0", u128::MAX)));
+ assert_de_tokens_error::<NonZeroGRT>(
+ &[Token::Str(v.as_str())],
+ "GRT value cannot be represented as a u128 GRT wei value",
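+ // `format!("{}0", u128::MAX)` is ten times u128::MAX GRT; after the 10^18 wei
+ // scaling it cannot fit in a u128, so deserialization must fail as asserted here.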
); + } +} diff --git a/config/src/lib.rs b/config/src/lib.rs new file mode 100644 index 00000000..0a4b8ac6 --- /dev/null +++ b/config/src/lib.rs @@ -0,0 +1,8 @@ +// Copyright 2023-, GraphOps and Semiotic Labs. +// SPDX-License-Identifier: Apache-2.0 + +mod config; +mod grt; + +pub use config::*; +pub use grt::*; diff --git a/docs/example-service-config.toml b/docs/example-service-config.toml deleted file mode 100644 index 5ab1fd50..00000000 --- a/docs/example-service-config.toml +++ /dev/null @@ -1,38 +0,0 @@ -[common.indexer] -indexer_address = "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" -operator_mnemonic = "foo bar baz ruux zorp quux quuz corge grault garply waldo fred" - -[common.graph_node] -status_url = "http://localhost:8030/graphql" -query_base_url = "http://localhost:8000" - -[common.database] -postgres_url = "postgresql://indexer@localhost:5432/indexer" - -[common.network_subgraph] -query_url = "http://localhost:8000/subgraphs/id/QmXSS5oeWPxHGtsVNWsXSz3p67zoaaao9FtkxcCMg3A8RL" -syncing_interval = 60 -# optional, default = false: -serve_subgraph = true -serve_auth_token = "i-need-network-subgraph-data" - -[common.escrow_subgraph] -query_url = "http://localhost:8000/subgraphs/id/QmW2mc83Wi6PnxA5X4rzdnBY3sTZH8FVVJiwMYL9Y6Z4UY" -syncing_interval = 60 -# optional, default = false: -serve_subgraph = false - -[common.graph_network] -id = 1 -chain_id = 1 - -[common.server] -url_prefix = "/" -host_and_port = "0.0.0.0:7600" -metrics_host_and_port = "0.0.0.0:7601" -# optional: -free_query_auth_token = "i-am-authorized-right?" - -[common.scalar] -chain_id = 1 -receipts_verifier_address = "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" diff --git a/service/Cargo.toml b/service/Cargo.toml index 58b5ab18..9ca169ad 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "service" -version = "1.0.0-alpha.0" +version = "1.0.0-rc.4" edition = "2021" license = "Apache-2.0" @@ -8,6 +8,7 @@ license = "Apache-2.0" [dependencies] indexer-common = { path = "../common" } +indexer-config = { path = "../config" } anyhow = "1.0.57" reqwest = { version = "0.12", features = ["json"] } tokio = { version = "1", features = ["rt", "macros", "sync", "full"] } @@ -40,9 +41,6 @@ thegraph-graphql-http = { version = "0.2.0", features = [ "http-client-reqwest", ] } build-info = "0.0.34" -figment = { version = "0.10", features = ["toml", "env"] } -toml = "0.8.12" -shellexpand = { version = "3.1.0", default-features = false, features = ["base-0"] } [dev-dependencies] hex-literal = "0.4.1" diff --git a/service/maximal-config-example.toml b/service/maximal-config-example.toml deleted file mode 100644 index 112693a8..00000000 --- a/service/maximal-config-example.toml +++ /dev/null @@ -1,91 +0,0 @@ -# WARNING: This shows all the possible configuration options. Make sure you know what -# you are doing. -# Prefer starting with `minimal-config-example.toml`. -# -# All the optional values (missing from the minimal config) are set to the current -# default values. -# You will have to change *most* the values below to match your setup. -# -# Some of the config below are global graph network values, which you can find here: -# https://github.com/graphprotocol/indexer/tree/main/docs/networks -# -# Pro tip: if you need to load some values from the environment into this config, you -# can use shell expansions with environment variables. 
Example: -# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" - - -[common.indexer] -indexer_address = "0x1111111111111111111111111111111111111111" -operator_mnemonic = "abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon" - -[common.server] -# Host and port to serve the indexer-service query endpoint. This one should have a -# public ingress. -host_and_port = "0.0.0.0:7600" -# Host and port to serve the indexer-service metrics. This one should stay private. -metrics_host_and_port = "0.0.0.0:7300" -# URL prefix for the query endpoint. -url_prefix = "/" - -[common.database] -# The URL of the Postgres database used for the indexer components. The same database -# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create -# the necessary tables. -postgres_url = "postgresql://indexer@postgres:5432/indexer_components_0" - -[common.graph_node] -# URL to your graph-node's status endpoint -status_url = "http://graph-node:8030/graphql" -# URL to your graph-node's query endpoint -query_base_url = "http://graph-node:8000" - -[common.network_subgraph] -# Serve the network subgraph on `common.server.host_and_port`/network -serve_subgraph = false -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -deployment = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" -# Query URL for the Graph Network subgraph. -query_url = "http://example.com/network-subgraph" -# Refreshing interval for the Graph contracts information from the Graph Network -# subgraph. -syncing_interval = 60 -# Amount of time to keep treating an allocation as active after it has been closed. -# So that we can keep serving queries while the information about the allocation closure -# propagates to all the consumers. -recently_closed_allocation_buffer_seconds = 3600 - -[common.escrow_subgraph] -# Serve the escrow subgraph on `common.server.host_and_port`/escrow -serve_subgraph = false -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -deployment = "Qmbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" -# Query URL for the Escrow subgraph. -query_url = "http://example.com/escrow-subgraph" -# Refreshing interval for the Escrow contracts information from the Escrow subgraph. -syncing_interval = 60 -# Amount of time to keep treating an allocation as active after it has been closed. -# So that we can keep serving queries while the information about the allocation closure -# propagates to all the consumers. -recently_closed_allocation_buffer_seconds = 3600 - -[common.graph_network] -id = 1 -# The chain ID of the network that the graph network is running on -chain_id = 99999999999999 - -[common.scalar] -# The chain ID of the network that the graph network is running on -# Should be the same as the chain ID of the network that the graph network is running on -chain_id = 99999999999999 -# Contract address of TAP's receipt aggregate voucher (RAV) verifier. -receipts_verifier_address = "0x2222222222222222222222222222222222222222" -# Timestamp error tolerance for incoming query receipts. If a query receipt timestamp -# is outside now() +/- timestamp_error_tolerance, the query is rejected. -timestamp_error_tolerance = 30 -# Maximum value of a receipt, in GRT wei. 
-# We need this because a large receipt, especially if it's larger than the RAV request trigger, -# or worse, the unaggregated receipts limit (tap-agent), can cause the indexer to refuse service -# to the sender for the duration of RAV request timestamp buffer. -receipt_max_value = 1000000000000000 # 0.001 GRT diff --git a/service/minimal-config-example.toml b/service/minimal-config-example.toml deleted file mode 100644 index 9b4e40dd..00000000 --- a/service/minimal-config-example.toml +++ /dev/null @@ -1,54 +0,0 @@ -# You will have to change *all* the values below to match your setup. -# -# Some of the config below are global graph network values, which you can find here: -# https://github.com/graphprotocol/indexer/tree/main/docs/networks -# -# Pro tip: if you need to load some values from the environment into this config, you -# can use shell expansions with environment variables. Example: -# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" - -[common.indexer] -indexer_address = "0x1111111111111111111111111111111111111111" -operator_mnemonic = "abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon" - -[common.graph_node] -# URL to your graph-node's status endpoint -status_url = "http://graph-node:8030/graphql" -# URL to your graph-node's query endpoint -query_base_url = "http://graph-node:8000" - -[common.database] -# The URL of the Postgres database used for the indexer components. The same database -# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create -# the necessary tables. -postgres_url = "postgresql://indexer@postgres:5432/indexer_components_0" - -[common.network_subgraph] -# Query URL for the Graph Network subgraph. -query_url = "http://example.com/network-subgraph" -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -deployment = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - -[common.escrow_subgraph] -# Query URL for the Escrow subgraph. -query_url = "http://example.com/escrow-subgraph" -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -deployment = "Qmbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" - -[common.graph_network] -# The chain ID of the network that the graph network is running on -chain_id = 99999999999999 - -[common.scalar] -# The chain ID of the network that the graph network is running on -# Should be the same as the chain ID of the network that the graph network is running on -chain_id = 99999999999999 -# Contract address of TAP's receipt aggregate voucher (RAV) verifier. -receipts_verifier_address = "0x2222222222222222222222222222222222222222" -# Maximum value of a receipt, in GRT wei. -# We need this because a large receipt, especially if it's larger than the RAV request trigger, -# or worse, the unaggregated receipts limit (tap-agent), can cause the indexer to refuse service -# to the sender for the duration of RAV request timestamp buffer. -receipt_max_value = 1000000000000000 # 0.001 GRT diff --git a/service/src/config.rs b/service/src/config.rs index d16fa002..152ab60f 100644 --- a/service/src/config.rs +++ b/service/src/config.rs @@ -1,69 +1,82 @@ // Copyright 2023-, GraphOps and Semiotic Labs. 
// SPDX-License-Identifier: Apache-2.0 -use std::path::PathBuf; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; -use anyhow::Result; -use figment::{ - providers::{Format, Toml}, - Figment, +use indexer_common::indexer_service::http::{ + DatabaseConfig, GraphNetworkConfig, GraphNodeConfig, IndexerConfig, IndexerServiceConfig, + ServerConfig, SubgraphConfig, TapConfig, }; -use indexer_common::indexer_service::http::IndexerServiceConfig; +use indexer_config::Config as MainConfig; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Config { - pub common: IndexerServiceConfig, -} - -impl Config { - pub fn load(filename: &PathBuf) -> Result { - let config_str = std::fs::read_to_string(filename)?; - - // Remove TOML comments, so that we can have shell expansion examples in the file. - let config_str = config_str - .lines() - .filter(|line| !line.trim().starts_with('#')) - .collect::>() - .join("\n"); - - let config_str = shellexpand::env(&config_str)?; - Figment::new() - .merge(Toml::string(&config_str)) - .extract() - .map_err(|e| e.into()) - } -} - -#[cfg(test)] -mod test { - use std::fs; - - use super::*; - - /// Test loading the minimal configuration example file. - /// Makes sure that the minimal template is up to date with the code. - /// Note that it doesn't check that the config is actually minimal, but rather that all missing - /// fields have defaults. The burden of making sure the config is minimal is on the developer. - #[test] - fn test_minimal_config() { - Config::load(&PathBuf::from("minimal-config-example.toml")).unwrap(); - } - - /// Test that the maximal configuration file is up to date with the code. - /// Make sure that `test_minimal_config` passes before looking at this. - #[test] - fn test_maximal_config() { - // Generate full config by deserializing the minimal config and let the code fill in the defaults. 
- let max_config = Config::load(&PathBuf::from("minimal-config-example.toml")).unwrap();
- // Deserialize the full config example file
- let max_config_file: toml::Value = toml::from_str(
- fs::read_to_string("maximal-config-example.toml")
- .unwrap()
- .as_str(),
- )
- .unwrap();
+pub struct Config(pub IndexerServiceConfig);
- assert_eq!(toml::Value::try_from(max_config).unwrap(), max_config_file);
+impl From<MainConfig> for Config {
+ fn from(value: MainConfig) -> Self {
+ Self(IndexerServiceConfig {
+ indexer: IndexerConfig {
+ indexer_address: value.indexer.indexer_address,
+ operator_mnemonic: value.indexer.operator_mnemonic.to_string(),
+ },
+ server: ServerConfig {
+ host_and_port: value.service.host_and_port,
+ metrics_host_and_port: SocketAddr::V4(SocketAddrV4::new(
+ Ipv4Addr::new(0, 0, 0, 0),
+ value.metrics.port,
+ )),
+ url_prefix: value.service.url_prefix,
+ free_query_auth_token: value.service.free_query_auth_token,
+ },
+ database: DatabaseConfig {
+ postgres_url: value.database.postgres_url.into(),
+ },
+ graph_node: Some(GraphNodeConfig {
+ status_url: value.graph_node.status_url.into(),
+ query_base_url: value.graph_node.query_url.into(),
+ }),
+ network_subgraph: SubgraphConfig {
+ serve_subgraph: value.service.serve_network_subgraph,
+ serve_auth_token: value.service.serve_auth_token.clone(),
+ deployment: value.subgraphs.network.config.deployment_id,
+ query_url: value.subgraphs.network.config.query_url.into(),
+ query_auth_token: value.subgraphs.network.config.query_auth_token.clone(),
+ syncing_interval: value
+ .subgraphs
+ .network
+ .config
+ .syncing_interval_secs
+ .as_secs(),
+ recently_closed_allocation_buffer_seconds: value
+ .subgraphs
+ .network
+ .recently_closed_allocation_buffer_secs
+ .as_secs(),
+ },
+ escrow_subgraph: SubgraphConfig {
+ serve_subgraph: value.service.serve_escrow_subgraph,
+ serve_auth_token: value.service.serve_auth_token,
+ deployment: value.subgraphs.escrow.config.deployment_id,
+ query_url: value.subgraphs.escrow.config.query_url.into(),
+ query_auth_token: value.subgraphs.escrow.config.query_auth_token,
+ syncing_interval: value
+ .subgraphs
+ .escrow
+ .config
+ .syncing_interval_secs
+ .as_secs(),
+ recently_closed_allocation_buffer_seconds: 0,
+ },
+ graph_network: GraphNetworkConfig {
+ chain_id: value.blockchain.chain_id.clone() as u64,
+ },
+ tap: TapConfig {
+ chain_id: value.blockchain.chain_id as u64,
+ receipts_verifier_address: value.blockchain.receipts_verifier_address,
+ timestamp_error_tolerance: value.tap.rav_request.timestamp_buffer_secs.as_secs(),
+ receipt_max_value: value.service.tap.max_receipt_value_grt.get_value(),
+ },
+ })
}
}
diff --git a/service/src/service.rs b/service/src/service.rs
index cafe24fa..7927fdcf 100644
--- a/service/src/service.rs
+++ b/service/src/service.rs
@@ -5,9 +5,10 @@ use std::sync::Arc;
use std::time::Duration;
use super::{config::Config, error::SubgraphServiceError, routes};
-use anyhow::Error;
+use anyhow::anyhow;
use axum::{async_trait, routing::post, Json, Router};
use indexer_common::indexer_service::http::{IndexerServiceImpl, IndexerServiceResponse};
+use indexer_config::Config as MainConfig;
use reqwest::Url;
use serde_json::{json, Value};
use sqlx::PgPool;
@@ -84,11 +85,10 @@ impl IndexerServiceImpl for SubgraphService {
deployment: DeploymentId,
request: Self::Request,
) -> Result<(Self::Request, Self::Response), Self::Error> {
- let deployment_url = Url::parse(&format!(
- "{}/subgraphs/id/{}",
- &self.state.graph_node_query_base_url, deployment
- ))
- .map_err(|_|
SubgraphServiceError::InvalidDeployment(deployment))?; + let deployment_url = Url::parse(&self.state.graph_node_query_base_url) + .expect("Invalid `graph_node.query_url` in config") + .join(&format!("subgraphs/id/{deployment}")) + .map_err(|_| SubgraphServiceError::InvalidDeployment(deployment))?; let response = self .state @@ -116,21 +116,24 @@ impl IndexerServiceImpl for SubgraphService { } /// Run the subgraph indexer service -pub async fn run() -> Result<(), Error> { +pub async fn run() -> anyhow::Result<()> { // Parse command line and environment arguments let cli = Cli::parse(); // Load the json-rpc service configuration, which is a combination of the // general configuration options for any indexer service and specific // options added for JSON-RPC - let config = Config::load(&cli.config).map_err(|e| { - error!( - "Invalid configuration file `{}`: {}", - cli.config.display(), - e - ); - e - })?; + let config = + MainConfig::parse(indexer_config::ConfigPrefix::Service, &cli.config).map_err(|e| { + error!( + "Invalid configuration file `{}`: {}", + cli.config.display(), + e + ); + anyhow!(e) + })?; + + let config: Config = config.into(); // Parse basic configurations build_info::build_info!(fn build_info); @@ -141,7 +144,7 @@ pub async fn run() -> Result<(), Error> { // that is involved in serving requests let state = Arc::new(SubgraphServiceState { config: config.clone(), - database: database::connect(&config.common.database.postgres_url).await, + database: database::connect(&config.0.database.postgres_url).await, cost_schema: routes::cost::build_schema().await, graph_node_client: reqwest::ClientBuilder::new() .tcp_nodelay(true) @@ -149,14 +152,14 @@ pub async fn run() -> Result<(), Error> { .build() .expect("Failed to init HTTP client for Graph Node"), graph_node_status_url: config - .common + .0 .graph_node .as_ref() .expect("Config must have `common.graph_node.status_url` set") .status_url .clone(), graph_node_query_base_url: config - .common + .0 .graph_node .as_ref() .expect("config must have `common.graph_node.query_url` set") @@ -166,7 +169,7 @@ pub async fn run() -> Result<(), Error> { IndexerService::run(IndexerServiceOptions { release, - config: config.common.clone(), + config: config.0.clone(), url_namespace: "subgraphs", metrics_prefix: "subgraph", service_impl: SubgraphService::new(state.clone()), diff --git a/tap-agent/Cargo.toml b/tap-agent/Cargo.toml index 9d0f807a..297b595d 100644 --- a/tap-agent/Cargo.toml +++ b/tap-agent/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "indexer-tap-agent" -version = "0.1.0" +version = "1.0.0-rc.4" edition = "2021" publish = false @@ -9,7 +9,6 @@ name = "indexer-tap-agent" path = "src/main.rs" [dependencies] -env_logger = "*" alloy-primitives = "0.6" alloy-sol-types = "0.6" anyhow = "1.0.72" @@ -22,7 +21,8 @@ log = "0.4.19" prometheus = "0.13.3" axum = "0.7.5" futures-util = "0.3.28" -indexer-common = { version = "0.1.0", path = "../common" } +indexer-common = { path = "../common" } +indexer-config = { path = "../config" } jsonrpsee = { version = "0.20.2", features = ["http-client", "macros"] } lazy_static = "1.4.0" reqwest = "0.12" @@ -50,13 +50,9 @@ tracing-subscriber = { version = "0.3", features = [ "json", ] } ractor = "0.9" -figment = { version = "0.10.19", features = ["toml"] } -shellexpand = { version = "3.1.0", default-features = false, features = ["base-0"] } [dev-dependencies] ethers-signers = "2.0.8" tempfile = "3.8.0" wiremock = "0.5.19" futures = "0.3.30" -serde_assert = "0.7.1" -toml = "0.8.13" diff --git 
a/tap-agent/maximal-config-example.toml b/tap-agent/maximal-config-example.toml deleted file mode 100644 index 69a22887..00000000 --- a/tap-agent/maximal-config-example.toml +++ /dev/null @@ -1,78 +0,0 @@ -# WARNING: This shows all the possible configuration options. Make sure you know what -# you are doing. -# Prefer starting with `minimal-config-example.toml`. -# -# You will have to change *all* the values below to match your setup. -# -# Some of the config below are global graph network values, which you can find here: -# https://github.com/graphprotocol/indexer/tree/main/docs/networks -# -# Pro tip: if you need to load some values from the environment into this config, you -# can use shell expansions with environment variables. Example: -# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" - -[ethereum] -indexer_address = "0x0000000000000000000000000000000000000000" - -[receipts] -# The chain ID of the network that the graph network is running on -# Should be the same as the chain ID of the network that the graph network is running on -receipts_verifier_chain_id = 123546 -# Contract address of TAP's receipt aggregate voucher (RAV) verifier. -receipts_verifier_address = "0x1111111111111111111111111111111111111111" - -[indexer_infrastructure] -graph_node_query_endpoint = "http://graph-node:8030/graphql" -graph_node_status_endpoint = "http://graph-node:8000" -# Port to serve the tap-agent metrics -metrics_port = 7300 -# Rust log level. Possible values are: trace, debug, info, warn, error -log_level ="info" - -# Credentials for the postgres database used for the indexer components. The same database -# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create -# the necessary tables. -[postgres] -postgres_host = "postgres" -postgres_port = 5432 -postgres_database = "indexer_components_0" -postgres_username = "indexer" -postgres_password = "let-me-in" - -[network_subgraph] -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -network_subgraph_deployment = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" -# Query URL for the Graph Network subgraph. -network_subgraph_endpoint = "http://example.com/network-subgraph" -# Interval (in ms) for syncing indexer allocations from the network -allocation_syncing_interval_ms = 60000 -# Interval (in seconds) that a closed allocation still accepts queries -recently_closed_allocation_buffer_seconds = 3600 - -[escrow_subgraph] -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -escrow_subgraph_deployment = "Qmbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" -# Query URL for the Escrow subgraph. -escrow_subgraph_endpoint = "http://example.com/escrow-subgraph" -# Interval (in ms) for syncing indexer escrow accounts from the escrow subgraph -escrow_syncing_interval_ms = 60000 - -[tap] -# Value of unaggregated fees that triggers a RAV request (in GRT). -rav_request_trigger_value = 10 -# Buffer (in ms) to add between the current time and the timestamp of the -# last unaggregated fee when triggering a RAV request. -rav_request_timestamp_buffer_ms = 60000 -# Timeout (in seconds) for RAV requests. -rav_request_timeout_secs = 5 -# YAML file with a map of sender addresses to aggregator endpoints. 
-sender_aggregator_endpoints_file = "endpoints.yaml" -# Maximum number of receipts per aggregation request -rav_request_receipt_limit = 10000 -# Maximum amount of unaggregated fees in GRT per sender. This is the amount of fees -# you are willing to risk at any given time. For ex. if the sender stops supplying RAVs for -# long enough and the fees exceed this amount, the indexer-service will stop accepting -# queries from the sender until the fees are aggregated. -max_unnaggregated_fees_per_sender = 20 diff --git a/tap-agent/minimal-config-example.toml b/tap-agent/minimal-config-example.toml deleted file mode 100644 index 417c277e..00000000 --- a/tap-agent/minimal-config-example.toml +++ /dev/null @@ -1,49 +0,0 @@ -# You will have to change *all* the values below to match your setup. -# -# Some of the config below are global graph network values, which you can find here: -# https://github.com/graphprotocol/indexer/tree/main/docs/networks -# -# Pro tip: if you need to load some values from the environment into this config, you -# can use shell expansions with environment variables. Example: -# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" - -[ethereum] -indexer_address = "0x0000000000000000000000000000000000000000" - -[receipts] -# The chain ID of the network that the graph network is running on -# Should be the same as the chain ID of the network that the graph network is running on -receipts_verifier_chain_id = 123546 -# Contract address of TAP's receipt aggregate voucher (RAV) verifier. -receipts_verifier_address = "0x1111111111111111111111111111111111111111" - -[indexer_infrastructure] -graph_node_query_endpoint = "http://graph-node:8030/graphql" -graph_node_status_endpoint = "http://graph-node:8000" - -# Credentials for the postgres database used for the indexer components. The same database -# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create -# the necessary tables. -[postgres] -postgres_host = "postgres" -postgres_database = "indexer_components_0" -postgres_username = "indexer" -postgres_password = "let-me-in" - -[network_subgraph] -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -network_subgraph_deployment = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" -# Query URL for the Graph Network subgraph. -network_subgraph_endpoint = "http://example.com/network-subgraph" - -[escrow_subgraph] -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -escrow_subgraph_deployment = "Qmbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" -# Query URL for the Escrow subgraph. -escrow_subgraph_endpoint = "http://example.com/escrow-subgraph" - -[tap] -# YAML file with a map of sender addresses to aggregator endpoints. 
-sender_aggregator_endpoints_file = "endpoints.yaml" diff --git a/tap-agent/src/agent.rs b/tap-agent/src/agent.rs index 20ec2306..b9ee294e 100644 --- a/tap-agent/src/agent.rs +++ b/tap-agent/src/agent.rs @@ -15,7 +15,7 @@ use crate::agent::sender_accounts_manager::{ use crate::config::{ Config, EscrowSubgraph, Ethereum, IndexerInfrastructure, NetworkSubgraph, Tap, }; -use crate::{aggregator_endpoints, database, CONFIG, EIP_712_DOMAIN}; +use crate::{database, CONFIG, EIP_712_DOMAIN}; use sender_accounts_manager::SenderAccountsManager; pub mod sender_account; @@ -38,6 +38,7 @@ pub async fn start_agent() -> (ActorRef, JoinHandl NetworkSubgraph { network_subgraph_deployment, network_subgraph_endpoint, + network_subgraph_auth_token, allocation_syncing_interval_ms, recently_closed_allocation_buffer_seconds, }, @@ -45,12 +46,15 @@ pub async fn start_agent() -> (ActorRef, JoinHandl EscrowSubgraph { escrow_subgraph_deployment, escrow_subgraph_endpoint, + escrow_subgraph_auth_token, escrow_syncing_interval_ms, }, - tap: Tap { - sender_aggregator_endpoints_file, - .. - }, + tap: + Tap { + // TODO: replace with a proper implementation once the gateway registry contract is ready + sender_aggregator_endpoints, + .. + }, .. } = &*CONFIG; let pgpool = database::connect(postgres).await; @@ -69,8 +73,11 @@ pub async fn start_agent() -> (ActorRef, JoinHandl }) .transpose() .expect("Failed to parse graph node query endpoint and network subgraph deployment"), - DeploymentDetails::for_query_url(network_subgraph_endpoint) - .expect("Failed to parse network subgraph endpoint"), + DeploymentDetails::for_query_url_with_token( + network_subgraph_endpoint, + network_subgraph_auth_token.clone(), + ) + .expect("Failed to parse network subgraph endpoint"), ))); let indexer_allocations = indexer_allocations( @@ -92,8 +99,11 @@ pub async fn start_agent() -> (ActorRef, JoinHandl }) .transpose() .expect("Failed to parse graph node query endpoint and escrow subgraph deployment"), - DeploymentDetails::for_query_url(escrow_subgraph_endpoint) - .expect("Failed to parse escrow subgraph endpoint"), + DeploymentDetails::for_query_url_with_token( + escrow_subgraph_endpoint, + escrow_subgraph_auth_token.clone(), + ) + .expect("Failed to parse escrow subgraph endpoint"), ))); let escrow_accounts = escrow_accounts( @@ -103,10 +113,6 @@ pub async fn start_agent() -> (ActorRef, JoinHandl false, ); - // TODO: replace with a proper implementation once the gateway registry contract is ready - let sender_aggregator_endpoints = - aggregator_endpoints::load_aggregator_endpoints(sender_aggregator_endpoints_file.clone()); - let args = SenderAccountsManagerArgs { config: &CONFIG, domain_separator: EIP_712_DOMAIN.clone(), @@ -114,7 +120,7 @@ pub async fn start_agent() -> (ActorRef, JoinHandl indexer_allocations, escrow_accounts, escrow_subgraph, - sender_aggregator_endpoints, + sender_aggregator_endpoints: sender_aggregator_endpoints.clone(), prefix: None, }; diff --git a/tap-agent/src/agent/sender_account.rs b/tap-agent/src/agent/sender_account.rs index 4eaec8db..3b6aa108 100644 --- a/tap-agent/src/agent/sender_account.rs +++ b/tap-agent/src/agent/sender_account.rs @@ -37,18 +37,22 @@ type Balance = U256; pub enum SenderAccountMessage { UpdateBalanceAndLastRavs(Balance, RavMap), UpdateAllocationIds(HashSet
<Address>),
+ NewAllocationId(Address),
UpdateReceiptFees(Address, UnaggregatedReceipts),
+ UpdateInvalidReceiptFees(Address, UnaggregatedReceipts),
UpdateRav(SignedRAV),
#[cfg(test)]
GetSenderFeeTracker(ractor::RpcReplyPort<SenderFeeTracker>),
#[cfg(test)]
GetDeny(ractor::RpcReplyPort<bool>),
+ #[cfg(test)]
+ IsSchedulerEnabled(ractor::RpcReplyPort<bool>),
}
/// A SenderAccount manages the receipts accounting between the indexer and the sender across
/// multiple allocations.
///
-/// Manages the lifecycle of Scalar TAP for the SenderAccount, including:
+/// Manages the lifecycle of TAP for the SenderAccount, including:
/// - Monitoring new receipts and keeping track of the cumulative unaggregated fees across
/// allocations.
/// - Requesting RAVs from the sender's TAP aggregator once the cumulative unaggregated fees reach a
///
@@ -74,6 +78,7 @@ pub struct State {
prefix: Option<String>,
sender_fee_tracker: SenderFeeTracker,
rav_tracker: SenderFeeTracker,
+ invalid_receipts_tracker: SenderFeeTracker,
allocation_ids: HashSet<Address>,
_indexer_allocations_handle: PipeHandle,
_escrow_account_monitor: PipeHandle,
@@ -177,7 +182,9 @@ impl State {
let pending_fees_over_balance =
pending_ravs + unaggregated_fees >= self.sender_balance.as_u128();
let max_unaggregated_fees = self.config.tap.max_unnaggregated_fees_per_sender;
- let total_fee_over_max_value = unaggregated_fees >= max_unaggregated_fees;
+ let invalid_receipt_fees = self.invalid_receipts_tracker.get_total_fee();
+ let total_fee_over_max_value =
+ unaggregated_fees + invalid_receipt_fees >= max_unaggregated_fees;
tracing::trace!(
%pending_fees_over_balance,
@@ -409,6 +416,7 @@ impl Actor for SenderAccount {
let state = State {
sender_fee_tracker: SenderFeeTracker::default(),
rav_tracker: SenderFeeTracker::default(),
+ invalid_receipts_tracker: SenderFeeTracker::default(),
allocation_ids: allocation_ids.clone(),
_indexer_allocations_handle,
_escrow_account_monitor,
@@ -463,6 +471,17 @@ impl Actor for SenderAccount {
state.add_to_denylist().await;
}
}
+ SenderAccountMessage::UpdateInvalidReceiptFees(allocation_id, unaggregated_fees) => {
+ state
+ .invalid_receipts_tracker
+ .update(allocation_id, unaggregated_fees.value);
+
+ // invalid receipts can't go down
+ let should_deny = !state.denied && state.deny_condition_reached();
+ if should_deny {
+ state.add_to_denylist().await;
+ }
+ }
SenderAccountMessage::UpdateReceiptFees(allocation_id, unaggregated_fees) => {
// If we're here because of a new receipt, abort any scheduled UpdateReceiptFees
if let Some(scheduled_rav_request) = state.scheduled_rav_request.take() {
@@ -552,6 +571,19 @@ impl Actor for SenderAccount {
);
state.allocation_ids = allocation_ids;
}
+ SenderAccountMessage::NewAllocationId(allocation_id) => {
+ if let Err(error) = state
+ .create_sender_allocation(myself.clone(), allocation_id)
+ .await
+ {
+ error!(
+ %error,
+ %allocation_id,
+ "There was an error while creating Sender Allocation."
+ );
+ }
+ state.allocation_ids.insert(allocation_id);
+ }
SenderAccountMessage::UpdateBalanceAndLastRavs(new_balance, non_final_last_ravs) => {
state.sender_balance = new_balance;
@@ -594,6 +626,12 @@ impl Actor for SenderAccount {
let _ = reply.send(state.denied);
}
}
+ #[cfg(test)]
+ SenderAccountMessage::IsSchedulerEnabled(reply) => {
+ if !reply.is_closed() {
+ let _ = reply.send(state.scheduled_rav_request.is_some());
+ }
+ }
}
Ok(())
}
@@ -631,12 +669,16 @@ impl Actor for SenderAccount {
return Ok(());
};
- let tracker = &mut state.sender_fee_tracker;
- tracker.update(allocation_id, 0);
// clean up hashset
state
.sender_fee_tracker
.unblock_allocation_id(allocation_id);
+ // update the receipt fees by resetting to 0
+ myself.cast(SenderAccountMessage::UpdateReceiptFees(
+ allocation_id,
+ UnaggregatedReceipts::default(),
+ ))?;
+
// rav tracker is not updated because it's still not redeemed
}
SupervisionEvent::ActorPanicked(cell, error) => {
@@ -713,7 +755,12 @@ pub mod tests {
(Self::UpdateReceiptFees(l0, l1), Self::UpdateReceiptFees(r0, r1)) => {
l0 == r0 && l1 == r1
}
- _ => core::mem::discriminant(self) == core::mem::discriminant(other),
+ (
+ Self::UpdateInvalidReceiptFees(l0, l1),
+ Self::UpdateInvalidReceiptFees(r0, r1),
+ ) => l0 == r0 && l1 == r1,
+ (Self::NewAllocationId(l0), Self::NewAllocationId(r0)) => l0 == r0,
+ (a, b) => unimplemented!("PartialEq not implemented for {a:?} and {b:?}"),
}
}
}
@@ -828,9 +875,57 @@ pub mod tests {
handle.await.unwrap();
}
+ #[sqlx::test(migrations = "../migrations")]
+ async fn test_new_allocation_id(pgpool: PgPool) {
+ let (sender_account, handle, prefix, _) = create_sender_account(
+ pgpool,
+ HashSet::new(),
+ TRIGGER_VALUE,
+ TRIGGER_VALUE,
+ DUMMY_URL,
+ )
+ .await;
+
+ // we expect it to create a sender allocation
+ sender_account
+ .cast(SenderAccountMessage::NewAllocationId(*ALLOCATION_ID_0))
+ .unwrap();
+
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ // verify that the sender allocation was created
+ let sender_allocation_id = format!("{}:{}:{}", prefix.clone(), SENDER.1, *ALLOCATION_ID_0);
+ let actor_ref = ActorRef::<SenderAllocationMessage>::where_is(sender_allocation_id.clone());
+ assert!(actor_ref.is_some());
+
+ // nothing should change because we already created
+ sender_account
+ .cast(SenderAccountMessage::UpdateAllocationIds(
+ vec![*ALLOCATION_ID_0].into_iter().collect(),
+ ))
+ .unwrap();
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ // try to delete sender allocation_id
+ sender_account
+ .cast(SenderAccountMessage::UpdateAllocationIds(HashSet::new()))
+ .unwrap();
+
+ tokio::time::sleep(Duration::from_millis(100)).await;
+
+ let actor_ref = ActorRef::<SenderAllocationMessage>::where_is(sender_allocation_id.clone());
+ assert!(actor_ref.is_none());
+
+ // safely stop the manager
+ sender_account.stop_and_wait(None, None).await.unwrap();
+
+ handle.await.unwrap();
+ }
+
pub struct MockSenderAllocation {
triggered_rav_request: Arc<AtomicU32>,
next_rav_value: Arc<Mutex<u128>>,
+ next_unaggregated_fees_value: Arc<Mutex<u128>>,
receipts: Arc<Mutex<Vec<NewReceiptNotification>>>,
}
(
Self {
triggered_rav_request: triggered_rav_request.clone(),
receipts: Arc::new(Mutex::new(Vec::new())),
next_rav_value: Arc::new(Mutex::new(0)),
+ next_unaggregated_fees_value: Arc::new(Mutex::new(0)),
},
triggered_rav_request,
)
}
+
+ pub fn new_with_next_unaggregated_fees_value() -> (Self, Arc<Mutex<u128>>) {
+ let unaggregated_fees = Arc::new(Mutex::new(0));
+ (
+ Self {
+ triggered_rav_request: Arc::new(AtomicU32::new(0)),
+ receipts: Arc::new(Mutex::new(Vec::new())),
+ next_rav_value: Arc::new(Mutex::new(0)),
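+ // Shared handle: tests keep the returned clone and set the fee value that
+ // the mock reports back on the next TriggerRAVRequest.
+ 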
next_unaggregated_fees_value: unaggregated_fees.clone(), + }, + unaggregated_fees, + ) + } + pub fn new_with_next_rav_value() -> (Self, Arc>) { let next_rav_value = Arc::new(Mutex::new(0)); ( @@ -854,6 +963,7 @@ pub mod tests { triggered_rav_request: Arc::new(AtomicU32::new(0)), receipts: Arc::new(Mutex::new(Vec::new())), next_rav_value: next_rav_value.clone(), + next_unaggregated_fees_value: Arc::new(Mutex::new(0)), }, next_rav_value, ) @@ -866,6 +976,7 @@ pub mod tests { triggered_rav_request: Arc::new(AtomicU32::new(0)), receipts: receipts.clone(), next_rav_value: Arc::new(Mutex::new(0)), + next_unaggregated_fees_value: Arc::new(Mutex::new(0)), }, receipts, ) @@ -902,7 +1013,13 @@ pub mod tests { 4, *self.next_rav_value.lock().unwrap(), ); - reply.send((UnaggregatedReceipts::default(), Some(signed_rav)))?; + reply.send(( + UnaggregatedReceipts { + value: *self.next_unaggregated_fees_value.lock().unwrap(), + last_id: 0, + }, + Some(signed_rav), + ))?; } SenderAllocationMessage::NewReceipt(receipt) => { self.receipts.lock().unwrap().push(receipt); @@ -997,7 +1114,7 @@ pub mod tests { )) .unwrap(); - tokio::time::sleep(Duration::from_millis(10)).await; + tokio::time::sleep(Duration::from_millis(20)).await; assert_eq!( triggered_rav_request.load(std::sync::atomic::Ordering::SeqCst), @@ -1107,7 +1224,7 @@ pub mod tests { }, )) .unwrap(); - tokio::time::sleep(Duration::from_millis(30)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let retry_value = triggered_rav_request.load(std::sync::atomic::Ordering::SeqCst); assert!(retry_value > 1, "It didn't retry more than once"); @@ -1154,7 +1271,23 @@ pub mod tests { )) .unwrap(); - tokio::time::sleep(Duration::from_millis(10)).await; + tokio::time::sleep(Duration::from_millis(20)).await; + }; + } + + macro_rules! 
update_invalid_receipt_fees { + ($value:expr) => { + sender_account + .cast(SenderAccountMessage::UpdateInvalidReceiptFees( + *ALLOCATION_ID_0, + UnaggregatedReceipts { + value: $value, + last_id: 11, + }, + )) + .unwrap(); + + tokio::time::sleep(Duration::from_millis(20)).await; }; } @@ -1178,6 +1311,28 @@ pub mod tests { let deny = get_deny_status(&sender_account).await; assert!(!deny); + update_receipt_fees!(0); + + update_invalid_receipt_fees!(max_unaggregated_fees_per_sender - 1); + let deny = get_deny_status(&sender_account).await; + assert!(!deny); + + update_invalid_receipt_fees!(max_unaggregated_fees_per_sender); + let deny = get_deny_status(&sender_account).await; + assert!(deny); + + // invalid receipts should not go down + update_invalid_receipt_fees!(0); + let deny = get_deny_status(&sender_account).await; + // keep denied + assert!(deny); + + // condition reached using receipts + update_receipt_fees!(0); + let deny = get_deny_status(&sender_account).await; + // allow sender + assert!(!deny); + sender_account.stop_and_wait(None, None).await.unwrap(); handle.await.unwrap(); } @@ -1399,7 +1554,7 @@ pub mod tests { HashMap::from([(SENDER.1, vec![SIGNER.1])]), )); - tokio::time::sleep(Duration::from_millis(10)).await; + tokio::time::sleep(Duration::from_millis(20)).await; let deny = call!(sender_account, SenderAccountMessage::GetDeny).unwrap(); assert!(deny, "should block the sender"); @@ -1418,4 +1573,69 @@ pub mod tests { sender_account.stop_and_wait(None, None).await.unwrap(); handle.await.unwrap(); } + + #[sqlx::test(migrations = "../migrations")] + async fn test_sender_denied_close_allocation_stop_retry(pgpool: PgPool) { + // we set to 1 to block the sender on a really low value + let max_unaggregated_fees_per_sender: u128 = 1; + + let (sender_account, handle, prefix, _) = create_sender_account( + pgpool, + HashSet::new(), + TRIGGER_VALUE, + max_unaggregated_fees_per_sender, + DUMMY_URL, + ) + .await; + + let (mock_sender_allocation, next_unaggregated_fees) = + MockSenderAllocation::new_with_next_unaggregated_fees_value(); + + let name = format!("{}:{}:{}", prefix, SENDER.1, *ALLOCATION_ID_0); + let (allocation, allocation_handle) = MockSenderAllocation::spawn_linked( + Some(name), + mock_sender_allocation, + (), + sender_account.get_cell(), + ) + .await + .unwrap(); + *next_unaggregated_fees.lock().unwrap() = TRIGGER_VALUE; + + // set retry + sender_account + .cast(SenderAccountMessage::UpdateReceiptFees( + *ALLOCATION_ID_0, + UnaggregatedReceipts { + value: TRIGGER_VALUE, + last_id: 11, + }, + )) + .unwrap(); + tokio::time::sleep(Duration::from_millis(100)).await; + + let deny = call!(sender_account, SenderAccountMessage::GetDeny).unwrap(); + assert!(deny, "should be blocked"); + + let scheduler_enabled = + call!(sender_account, SenderAccountMessage::IsSchedulerEnabled).unwrap(); + assert!(scheduler_enabled, "should have an scheduler enabled"); + + // close the allocation and trigger + allocation.stop_and_wait(None, None).await.unwrap(); + allocation_handle.await.unwrap(); + + tokio::time::sleep(Duration::from_millis(100)).await; + + // should remove the block and the retry + let deny = call!(sender_account, SenderAccountMessage::GetDeny).unwrap(); + assert!(!deny, "should be unblocked"); + + let scheuduler_enabled = + call!(sender_account, SenderAccountMessage::IsSchedulerEnabled).unwrap(); + assert!(!scheuduler_enabled, "should have an scheduler disabled"); + + sender_account.stop_and_wait(None, None).await.unwrap(); + handle.await.unwrap(); + } } diff --git 
a/tap-agent/src/agent/sender_accounts_manager.rs b/tap-agent/src/agent/sender_accounts_manager.rs index 3372cb28..eb4582de 100644 --- a/tap-agent/src/agent/sender_accounts_manager.rs +++ b/tap-agent/src/agent/sender_accounts_manager.rs @@ -8,8 +8,8 @@ use std::{collections::HashMap, str::FromStr}; use crate::agent::sender_allocation::SenderAllocationMessage; use crate::lazy_static; use alloy_sol_types::Eip712Domain; -use anyhow::anyhow; use anyhow::Result; +use anyhow::{anyhow, bail}; use eventuals::{Eventual, EventualExt, PipeHandle}; use indexer_common::escrow_accounts::EscrowAccounts; use indexer_common::prelude::{Allocation, SubgraphClient}; @@ -460,58 +460,98 @@ async fn new_receipts_watcher( "should be able to deserialize the Postgres Notify event payload as a \ NewReceiptNotification", ); + if let Err(e) = handle_notification( + new_receipt_notification, + &escrow_accounts, + prefix.as_deref(), + ) + .await + { + error!("{}", e); + } + } +} - tracing::debug!( - notification = ?new_receipt_notification, - "New receipt notification detected!" +async fn handle_notification( + new_receipt_notification: NewReceiptNotification, + escrow_accounts: &Eventual, + prefix: Option<&str>, +) -> Result<()> { + tracing::debug!( + notification = ?new_receipt_notification, + "New receipt notification detected!" + ); + + let Ok(sender_address) = escrow_accounts + .value() + .await + .expect("should be able to get escrow accounts") + .get_sender_for_signer(&new_receipt_notification.signer_address) + else { + // TODO: save the receipt in the failed receipts table? + bail!( + "No sender address found for receipt signer address {}. \ + This should not happen.", + new_receipt_notification.signer_address ); + }; - let Ok(sender_address) = escrow_accounts - .value() - .await - .expect("should be able to get escrow accounts") - .get_sender_for_signer(&new_receipt_notification.signer_address) - else { - error!( - "No sender address found for receipt signer address {}. \ - This should not happen.", - new_receipt_notification.signer_address - ); - // TODO: save the receipt in the failed receipts table? - continue; - }; - - let allocation_id = &new_receipt_notification.allocation_id; - let allocation_str = &allocation_id.to_string(); - - let actor_name = format!( - "{}{sender_address}:{allocation_id}", + let allocation_id = &new_receipt_notification.allocation_id; + let allocation_str = &allocation_id.to_string(); + + let actor_name = format!( + "{}{sender_address}:{allocation_id}", + prefix + .as_ref() + .map_or(String::default(), |prefix| format!("{prefix}:")) + ); + + let Some(sender_allocation) = ActorRef::::where_is(actor_name) else { + warn!( + "No sender_allocation found for sender_address {}, allocation_id {} to process new \ + receipt notification. Starting a new sender_allocation.", + sender_address, allocation_id + ); + let sender_account_name = format!( + "{}{sender_address}", prefix .as_ref() .map_or(String::default(), |prefix| format!("{prefix}:")) ); - if let Some(sender_allocation) = ActorRef::::where_is(actor_name) { - if let Err(e) = sender_allocation.cast(SenderAllocationMessage::NewReceipt( - new_receipt_notification, - )) { - error!( - "Error while forwarding new receipt notification to sender_allocation: {:?}", - e - ); - } - } else { - warn!( - "No sender_allocation found for sender_address {}, allocation_id {} to process new \ - receipt notification. 
This should not happen.", - sender_address, - allocation_id + let Some(sender_account) = ActorRef::::where_is(sender_account_name) + else { + bail!( + "No sender_account was found for address: {}.", + sender_address ); - } - RECEIPTS_CREATED - .with_label_values(&[&sender_address.to_string(), allocation_str]) - .inc(); - } + }; + sender_account + .cast(SenderAccountMessage::NewAllocationId(*allocation_id)) + .map_err(|e| { + anyhow!( + "Error while sendeing new allocation id message to sender_account: {:?}", + e + ) + })?; + return Ok(()); + }; + + sender_allocation + .cast(SenderAllocationMessage::NewReceipt( + new_receipt_notification, + )) + .map_err(|e| { + anyhow::anyhow!( + "Error while forwarding new receipt notification to sender_allocation: {:?}", + e + ) + })?; + + RECEIPTS_CREATED + .with_label_values(&[&sender_address.to_string(), allocation_str]) + .inc(); + Ok(()) } #[cfg(test)] @@ -522,6 +562,8 @@ mod tests { }; use crate::agent::sender_account::tests::{MockSenderAllocation, PREFIX_ID}; use crate::agent::sender_account::SenderAccountMessage; + use crate::agent::sender_accounts_manager::{handle_notification, NewReceiptNotification}; + use crate::agent::sender_allocation::tests::MockSenderAccount; use crate::config; use crate::tap::test_utils::{ create_rav, create_received_receipt, store_rav, store_receipt, ALLOCATION_ID_0, @@ -537,6 +579,7 @@ mod tests { use sqlx::postgres::PgListener; use sqlx::PgPool; use std::collections::{HashMap, HashSet}; + use std::sync::{Arc, Mutex}; use std::time::Duration; const DUMMY_URL: &str = "http://localhost:1234"; @@ -691,7 +734,7 @@ mod tests { )) .unwrap(); - tokio::time::sleep(std::time::Duration::from_millis(10)).await; + tokio::time::sleep(std::time::Duration::from_millis(20)).await; // verify if it gets removed let actor_ref = ActorRef::::where_is(format!("{}:{}", prefix, SENDER.1)); @@ -803,4 +846,49 @@ mod tests { new_receipts_watcher_handle.abort(); } + + #[tokio::test] + async fn test_create_allocation_id() { + let senders_to_signers = vec![(SENDER.1, vec![SIGNER.1])].into_iter().collect(); + let escrow_accounts = EscrowAccounts::new(HashMap::new(), senders_to_signers); + let escrow_accounts = Eventual::from_value(escrow_accounts); + + let prefix = format!( + "test-{}", + PREFIX_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + ); + + let last_message_emitted = Arc::new(Mutex::new(vec![])); + + let (sender_account, join_handle) = MockSenderAccount::spawn( + Some(format!("{}:{}", prefix.clone(), SENDER.1,)), + MockSenderAccount { + last_message_emitted: last_message_emitted.clone(), + }, + (), + ) + .await + .unwrap(); + + let new_receipt_notification = NewReceiptNotification { + id: 1, + allocation_id: *ALLOCATION_ID_0, + signer_address: SIGNER.1, + timestamp_ns: 1, + value: 1, + }; + + handle_notification(new_receipt_notification, &escrow_accounts, Some(&prefix)) + .await + .unwrap(); + + tokio::time::sleep(Duration::from_millis(10)).await; + + assert_eq!( + last_message_emitted.lock().unwrap().last().unwrap(), + &SenderAccountMessage::NewAllocationId(*ALLOCATION_ID_0) + ); + sender_account.stop_and_wait(None, None).await.unwrap(); + join_handle.await.unwrap(); + } } diff --git a/tap-agent/src/agent/sender_allocation.rs b/tap-agent/src/agent/sender_allocation.rs index ae253bca..b6b4c479 100644 --- a/tap-agent/src/agent/sender_allocation.rs +++ b/tap-agent/src/agent/sender_allocation.rs @@ -104,6 +104,7 @@ pub struct SenderAllocation; pub struct SenderAllocationState { unaggregated_fees: UnaggregatedReceipts, + 
invalid_receipts_fees: UnaggregatedReceipts, latest_rav: Option, pgpool: PgPool, tap_manager: TapManager, @@ -152,6 +153,16 @@ impl Actor for SenderAllocation { let allocation_id = args.allocation_id; let mut state = SenderAllocationState::new(args).await; + // update invalid receipts + state.invalid_receipts_fees = state.calculate_invalid_receipts_fee().await?; + if state.invalid_receipts_fees.value > 0 { + sender_account_ref.cast(SenderAccountMessage::UpdateInvalidReceiptFees( + allocation_id, + state.invalid_receipts_fees.clone(), + ))?; + } + + // update unaggregated_fees state.unaggregated_fees = state.calculate_unaggregated_fee().await?; sender_account_ref.cast(SenderAccountMessage::UpdateReceiptFees( allocation_id, @@ -249,12 +260,21 @@ impl Actor for SenderAllocation { unaggreated_fees.clone(), ))?; } + + UNAGGREGATED_FEES + .with_label_values(&[ + &state.sender.to_string(), + &state.allocation_id.to_string(), + ]) + .set(state.unaggregated_fees.value as f64); } // we use a blocking call here to ensure that only one RAV request is running at a time. SenderAllocationMessage::TriggerRAVRequest(reply) => { if state.unaggregated_fees.value > 0 { // auto backoff retry, on error ignore - let _ = state.request_rav().await; + if let Err(err) = state.request_rav().await { + error!(error = %err, "Error while requesting rav."); + } } if !reply.is_closed() { let _ = reply.send((state.unaggregated_fees.clone(), state.latest_rav.clone())); @@ -268,11 +288,6 @@ impl Actor for SenderAllocation { } } - // We expect the value to change for every received receipt, and after every RAV request. - UNAGGREGATED_FEES - .with_label_values(&[&state.sender.to_string(), &state.allocation_id.to_string()]) - .set(state.unaggregated_fees.value as f64); - Ok(()) } } @@ -329,6 +344,7 @@ impl SenderAllocationState { domain_separator, sender_account_ref: sender_account_ref.clone(), unaggregated_fees: UnaggregatedReceipts::default(), + invalid_receipts_fees: UnaggregatedReceipts::default(), latest_rav, } } @@ -395,6 +411,43 @@ impl SenderAllocationState { }) } + async fn calculate_invalid_receipts_fee(&self) -> Result { + tracing::trace!("calculate_invalid_receipts_fee()"); + let signers = signers_trimmed(&self.escrow_accounts, self.sender).await?; + + // TODO: Get `rav.timestamp_ns` from the TAP Manager's RAV storage adapter instead? + let res = sqlx::query!( + r#" + SELECT + MAX(id), + SUM(value) + FROM + scalar_tap_receipts_invalid + WHERE + allocation_id = $1 + AND signer_address IN (SELECT unnest($2::text[])) + "#, + self.allocation_id.encode_hex::(), + &signers + ) + .fetch_one(&self.pgpool) + .await?; + + ensure!( + res.sum.is_none() == res.max.is_none(), + "Exactly one of SUM(value) and MAX(id) is null. This should not happen." + ); + + Ok(UnaggregatedReceipts { + last_id: res.max.unwrap_or(0).try_into()?, + value: res + .sum + .unwrap_or(BigDecimal::from(0)) + .to_string() + .parse::()?, + }) + } + async fn request_rav(&mut self) -> Result<()> { let mut retries = 0; const MAX_RETRIES: u32 = 3; @@ -427,7 +480,7 @@ impl SenderAllocationState { /// Request a RAV from the sender's TAP aggregator. Only one RAV request will be running at a /// time through the use of an internal guard. 
- async fn rav_requester_single(&self) -> Result { + async fn rav_requester_single(&mut self) -> Result { tracing::trace!("rav_requester_single()"); let result = self .tap_manager @@ -520,6 +573,9 @@ RAVS_CREATED .with_label_values(&[&self.sender.to_string(), &self.allocation_id.to_string()]) .inc(); + UNAGGREGATED_FEES + .with_label_values(&[&self.sender.to_string(), &self.allocation_id.to_string()]) + .set(self.unaggregated_fees.value as f64); Ok(response.data) } else { @@ -547,6 +603,7 @@ }, )) } + } pub async fn mark_rav_last(&self) -> Result<()> { @@ -585,7 +642,10 @@ } } - async fn store_invalid_receipts(&self, receipts: &[ReceiptWithState]) -> Result<()> { + async fn store_invalid_receipts( + &mut self, + receipts: &[ReceiptWithState], + ) -> Result<()> { for received_receipt in receipts.iter() { let receipt = received_receipt.signed_receipt(); let allocation_id = receipt.message.allocation_id; @@ -619,8 +679,32 @@ ) .execute(&self.pgpool) .await - .map_err(|e| anyhow!("Failed to store failed receipt: {:?}", e))?; + .map_err(|e| anyhow!("Failed to store invalid receipt: {:?}", e))?; } + let fees = receipts + .iter() + .map(|receipt| receipt.signed_receipt().message.value) + .sum(); + + self.invalid_receipts_fees.value = self + .invalid_receipts_fees + .value + .checked_add(fees) + .unwrap_or_else(|| { + // This should never happen, but if it does, we want to know about it. + error!( + "Overflow when adding receipt value {} to invalid receipts fees {} \ + for allocation {} and sender {}. Setting total invalid receipt fees to \ + u128::MAX.", + fees, self.invalid_receipts_fees.value, self.allocation_id, self.sender + ); + u128::MAX + }); + self.sender_account_ref + .cast(SenderAccountMessage::UpdateInvalidReceiptFees( + self.allocation_id, + self.invalid_receipts_fees.clone(), + ))?; Ok(()) } @@ -657,7 +741,7 @@ } #[cfg(test)] -mod tests { +pub mod tests { use super::{ SenderAllocation, SenderAllocationArgs, SenderAllocationMessage, SenderAllocationState, }; @@ -670,8 +754,9 @@ tap::{ escrow_adapter::EscrowAdapter, test_utils::{ - create_rav, create_received_receipt, store_rav, store_receipt, ALLOCATION_ID_0, - INDEXER, SENDER, SIGNER, TAP_EIP712_DOMAIN_SEPARATOR, + create_rav, create_received_receipt, store_invalid_receipt, store_rav, + store_receipt, ALLOCATION_ID_0, INDEXER, SENDER, SIGNER, + TAP_EIP712_DOMAIN_SEPARATOR, }, }, }; @@ -703,8 +788,8 @@ const DUMMY_URL: &str = "http://localhost:1234"; - struct MockSenderAccount { - last_message_emitted: Arc<Mutex<Vec<SenderAccountMessage>>>, + pub struct MockSenderAccount { + pub last_message_emitted: Arc<Mutex<Vec<SenderAccountMessage>>>, } #[async_trait::async_trait] @@ -868,6 +953,49 @@ assert_eq!(total_unaggregated_fees.value, 55u128); }
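Aside: store_invalid_receipts above folds receipt values into a running total with checked_add and clamps to u128::MAX instead of wrapping on overflow. The same guard in isolation, runnable as-is (helper name illustrative, not from the patch):

/// Add `fees` to a running total, clamping at u128::MAX rather than wrapping.
fn add_invalid_fees(total: u128, fees: u128) -> u128 {
    total.checked_add(fees).unwrap_or_else(|| {
        // mirrors the error! branch above: record the anomaly, then clamp
        eprintln!("overflow adding {fees} to {total}; clamping to u128::MAX");
        u128::MAX
    })
}

fn main() {
    assert_eq!(add_invalid_fees(40, 5), 45);
    assert_eq!(add_invalid_fees(u128::MAX, 1), u128::MAX);
}

+ + #[sqlx::test(migrations = "../migrations")] + async fn should_return_invalid_receipts_on_startup(pgpool: PgPool) { + let (last_message_emitted, sender_account, _join_handle) = + create_mock_sender_account().await; + // Add receipts to the database.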
+ for i in 1..=10 { + let receipt = create_received_receipt(&ALLOCATION_ID_0, &SIGNER.0, i, i, i.into()); + store_invalid_receipt(&pgpool, receipt.signed_receipt()) + .await + .unwrap(); + } + + let sender_allocation = create_sender_allocation( + pgpool.clone(), + DUMMY_URL.to_string(), + DUMMY_URL, + Some(sender_account), + ) + .await; + + // Get total_unaggregated_fees + let total_unaggregated_fees = call!( + sender_allocation, + SenderAllocationMessage::GetUnaggregatedReceipts + ) + .unwrap(); + + // Should emit a message to the sender account with the unaggregated fees. + let expected_message = SenderAccountMessage::UpdateInvalidReceiptFees( + *ALLOCATION_ID_0, + UnaggregatedReceipts { + last_id: 10, + value: 55u128, + }, + ); + let last_message_emitted = last_message_emitted.lock().unwrap(); + assert_eq!(last_message_emitted.len(), 2); + assert_eq!(last_message_emitted.first(), Some(&expected_message)); + + // Check that the unaggregated fees are correct. + assert_eq!(total_unaggregated_fees.value, 0u128); + } + #[sqlx::test(migrations = "../migrations")] async fn test_receive_new_receipt(pgpool: PgPool) { let (last_message_emitted, sender_account, _join_handle) = @@ -957,14 +1085,22 @@ mod tests { store_receipt(&pgpool, receipt.signed_receipt()) .await .unwrap(); + + // store a copy that should fail in the uniqueness test + store_receipt(&pgpool, receipt.signed_receipt()) + .await + .unwrap(); } + let (last_message_emitted, sender_account, _join_handle) = + create_mock_sender_account().await; + // Create a sender_allocation. let sender_allocation = create_sender_allocation( pgpool.clone(), "http://".to_owned() + &aggregator_endpoint.to_string(), &mock_server.uri(), - None, + Some(sender_account), ) .await; @@ -978,6 +1114,19 @@ mod tests { // Check that the unaggregated fees are correct. assert_eq!(total_unaggregated_fees.value, 0u128); + // Check if the sender received invalid receipt fees + let expected_message = SenderAccountMessage::UpdateInvalidReceiptFees( + *ALLOCATION_ID_0, + UnaggregatedReceipts { + last_id: 0, + value: 45u128, + }, + ); + { + let last_message_emitted = last_message_emitted.lock().unwrap(); + assert_eq!(last_message_emitted.last(), Some(&expected_message)); + } + // Stop the TAP aggregator server. handle.stop().unwrap(); handle.stopped().await; @@ -998,7 +1147,7 @@ mod tests { .await; sender_allocation.stop_and_wait(None, None).await.unwrap(); - tokio::time::sleep(std::time::Duration::from_millis(10)).await; + tokio::time::sleep(std::time::Duration::from_millis(20)).await; // check if the actor is actually stopped assert_eq!(sender_allocation.get_status(), ActorStatus::Stopped); @@ -1126,6 +1275,28 @@ mod tests { assert_eq!(total_unaggregated_fees.value, 45u128); } + #[sqlx::test(migrations = "../migrations")] + async fn should_calculate_invalid_receipts_fee(pgpool: PgPool) { + let args = + create_sender_allocation_args(pgpool.clone(), DUMMY_URL.to_string(), DUMMY_URL, None) + .await; + let state = SenderAllocationState::new(args).await; + + // Add receipts to the database. + for i in 1..10 { + let receipt = create_received_receipt(&ALLOCATION_ID_0, &SIGNER.0, i, i, i.into()); + store_invalid_receipt(&pgpool, receipt.signed_receipt()) + .await + .unwrap(); + } + + // calculate invalid unaggregated fee + let total_invalid_receipts = state.calculate_invalid_receipts_fee().await.unwrap(); + + // Check that the unaggregated fees are correct. 
+ assert_eq!(total_invalid_receipts.value, 45u128); + } + /// Test that the sender_allocation correctly updates the unaggregated fees from the /// database when there is a RAV in the database as well as receipts which timestamp are lesser /// and greater than the RAV's timestamp. @@ -1190,7 +1361,7 @@ mod tests { let args = create_sender_allocation_args(pgpool.clone(), DUMMY_URL.to_string(), DUMMY_URL, None) .await; - let state = SenderAllocationState::new(args).await; + let mut state = SenderAllocationState::new(args).await; let checks = Checks::new(vec![Arc::new(FailingCheck)]); diff --git a/tap-agent/src/aggregator_endpoints.rs b/tap-agent/src/aggregator_endpoints.rs deleted file mode 100644 index faad0527..00000000 --- a/tap-agent/src/aggregator_endpoints.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2023-, GraphOps and Semiotic Labs. -// SPDX-License-Identifier: Apache-2.0 - -use std::collections::HashMap; -use std::fs::File; -use std::io::BufReader; -use std::path::PathBuf; - -use thegraph::types::Address; - -/// Load a hashmap of sender addresses and their corresponding aggregator endpoints -/// from a yaml file. We're using serde_yaml. -pub fn load_aggregator_endpoints(file_path: PathBuf) -> HashMap { - let file = File::open(file_path).unwrap(); - let reader = BufReader::new(file); - let endpoints: HashMap = serde_yaml::from_reader(reader).unwrap(); - endpoints -} - -#[cfg(test)] -mod tests { - use std::{io::Write, str::FromStr}; - - use super::*; - - /// Test that we can load the aggregator endpoints from a yaml file. - /// The test is going to create a temporary yaml file using tempfile, load it, and - /// check that the endpoints are loaded correctly. - #[test] - fn test_load_aggregator_endpoints() { - let named_temp_file = tempfile::NamedTempFile::new().unwrap(); - let mut temp_file = named_temp_file.reopen().unwrap(); - let yaml = r#" - 0xdeadbeefcafebabedeadbeefcafebabedeadbeef: https://example.com/aggregate-receipts - 0x0123456789abcdef0123456789abcdef01234567: https://other.example.com/aggregate-receipts - "#; - temp_file.write_all(yaml.as_bytes()).unwrap(); - let endpoints = load_aggregator_endpoints(named_temp_file.path().to_path_buf()); - assert_eq!( - endpoints - .get(&Address::from_str("0xdeadbeefcafebabedeadbeefcafebabedeadbeef").unwrap()), - Some(&"https://example.com/aggregate-receipts".to_string()) - ); - } -} diff --git a/tap-agent/src/config.rs b/tap-agent/src/config.rs index 57c6d8b3..9d0d7c29 100644 --- a/tap-agent/src/config.rs +++ b/tap-agent/src/config.rs @@ -1,15 +1,13 @@ // Copyright 2023-, GraphOps and Semiotic Labs. 
// SPDX-License-Identifier: Apache-2.0 -use std::path::PathBuf; - -use bigdecimal::{BigDecimal, ToPrimitive}; use clap::Parser; +use indexer_config::{Config as IndexerConfig, ConfigPrefix}; +use reqwest::Url; +use std::path::PathBuf; +use std::{collections::HashMap, str::FromStr}; use anyhow::Result; -use figment::providers::{Format, Toml}; -use figment::Figment; -use serde::{de, Deserialize, Deserializer}; use thegraph::types::{Address, DeploymentId}; use tracing::subscriber::{set_global_default, SetGlobalDefaultError}; use tracing_subscriber::{EnvFilter, FmtSubscriber}; @@ -22,7 +20,78 @@ pub struct Cli { pub config: PathBuf, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +impl From<IndexerConfig> for Config { + fn from(value: IndexerConfig) -> Self { + Self { + ethereum: Ethereum { + indexer_address: value.indexer.indexer_address, + }, + receipts: Receipts { + receipts_verifier_chain_id: value.blockchain.chain_id as u64, + receipts_verifier_address: value.blockchain.receipts_verifier_address, + }, + indexer_infrastructure: IndexerInfrastructure { + metrics_port: value.metrics.port, + graph_node_query_endpoint: value.graph_node.query_url.into(), + graph_node_status_endpoint: value.graph_node.status_url.into(), + log_level: None, + }, + postgres: Postgres { + postgres_url: value.database.postgres_url, + }, + network_subgraph: NetworkSubgraph { + network_subgraph_deployment: value.subgraphs.network.config.deployment_id, + network_subgraph_endpoint: value.subgraphs.network.config.query_url.into(), + network_subgraph_auth_token: value.subgraphs.network.config.query_auth_token, + allocation_syncing_interval_ms: value .subgraphs .network .config .syncing_interval_secs .as_millis() as u64, + recently_closed_allocation_buffer_seconds: value .subgraphs .network .recently_closed_allocation_buffer_secs .as_secs(), + }, + escrow_subgraph: EscrowSubgraph { + escrow_subgraph_deployment: value.subgraphs.escrow.config.deployment_id, + escrow_subgraph_endpoint: value.subgraphs.escrow.config.query_url.into(), + escrow_subgraph_auth_token: value.subgraphs.escrow.config.query_auth_token, + escrow_syncing_interval_ms: value .subgraphs .escrow .config .syncing_interval_secs .as_millis() as u64, + }, + tap: Tap { + rav_request_trigger_value: value.tap.get_trigger_value(), + rav_request_timestamp_buffer_ms: value .tap .rav_request .timestamp_buffer_secs .as_millis() as u64, + rav_request_timeout_secs: value.tap.rav_request.request_timeout_secs.as_secs(), + sender_aggregator_endpoints: value .tap .sender_aggregator_endpoints .into_iter() .map(|(addr, url)| (addr, url.into())) .collect(), + rav_request_receipt_limit: value.tap.rav_request.max_receipts_per_request, + max_unnaggregated_fees_per_sender: value .tap .max_amount_willing_to_lose_grt .get_value(), + }, + config: None, + } + } +}
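Aside: the conversion above repeatedly flattens indexer_config's Duration fields into the raw integer units this Config keeps (milliseconds or seconds). That pattern in isolation (helper name illustrative, not from the patch):

use std::time::Duration;

// Duration::as_millis returns u128, while the config stores u64, hence the cast.
fn to_millis_u64(d: Duration) -> u64 {
    d.as_millis() as u64
}

fn main() {
    assert_eq!(to_millis_u64(Duration::from_secs(60)), 60_000);
}

+ +#[derive(Clone, Debug, Default)] pub struct Config { pub ethereum: Ethereum, pub receipts: Receipts, @@ -34,18 +103,18 @@ pub struct Config { pub config: Option, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug, Default)] pub struct Ethereum { pub indexer_address: Address, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug, Default)] pub struct Receipts { pub receipts_verifier_chain_id: u64, pub receipts_verifier_address: Address, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug, Default)] pub struct IndexerInfrastructure { pub metrics_port: u16, pub graph_node_query_endpoint: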
String, @@ -53,57 +122,46 @@ pub struct IndexerInfrastructure { pub log_level: Option<String>, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug)] pub struct Postgres { - pub postgres_host: String, - pub postgres_port: usize, - pub postgres_database: String, - pub postgres_username: String, - pub postgres_password: String, + pub postgres_url: Url, +} + +impl Default for Postgres { + fn default() -> Self { + Self { + postgres_url: Url::from_str("postgres://postgres@postgres/postgres").unwrap(), + } + } } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug, Default)] pub struct NetworkSubgraph { pub network_subgraph_deployment: Option<DeploymentId>, pub network_subgraph_endpoint: String, + pub network_subgraph_auth_token: Option<String>, pub allocation_syncing_interval_ms: u64, pub recently_closed_allocation_buffer_seconds: u64, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug, Default)] pub struct EscrowSubgraph { pub escrow_subgraph_deployment: Option<DeploymentId>, pub escrow_subgraph_endpoint: String, + pub escrow_subgraph_auth_token: Option<String>, pub escrow_syncing_interval_ms: u64, } -#[derive(Clone, Debug, Deserialize, Default, PartialEq)] +#[derive(Clone, Debug, Default)] pub struct Tap { - #[serde(deserialize_with = "parse_grt_value_to_nonzero_u128")] pub rav_request_trigger_value: u128, pub rav_request_timestamp_buffer_ms: u64, pub rav_request_timeout_secs: u64, - pub sender_aggregator_endpoints_file: PathBuf, + pub sender_aggregator_endpoints: HashMap<Address, Url>, pub rav_request_receipt_limit: u64, - #[serde(deserialize_with = "parse_grt_value_to_nonzero_u128")] pub max_unnaggregated_fees_per_sender: u128, } -fn parse_grt_value_to_nonzero_u128<'de, D>(deserializer: D) -> Result<u128, D::Error> -where - D: Deserializer<'de>, -{ - let v = BigDecimal::deserialize(deserializer)?; - if v <= 0.into() { - return Err(de::Error::custom("GRT value must be greater than 0")); - } - // Convert to wei - let v = v * BigDecimal::from(10u64.pow(18)); - // Convert to u128 - v.to_u128() - .ok_or_else(|| de::Error::custom("GRT value cannot be represented as a u128 GRT wei value")) -} - /// Sets up tracing, allows log level to be set from the environment variables fn init_tracing(format: String) -> Result<(), SetGlobalDefaultError> { let filter = EnvFilter::from_default_env(); @@ -123,7 +181,9 @@ fn init_tracing(format: String) -> Result<(), SetGlobalDefaultError> { impl Config { pub fn from_cli() -> Result<Config> { let cli = Cli::parse(); - let config = Config::load(&cli.config)?; + let indexer_config = + IndexerConfig::parse(ConfigPrefix::Tap, &cli.config).map_err(|e| anyhow::anyhow!(e))?; + let config: Config = indexer_config.into(); // Enables tracing under RUST_LOG variable if let Some(log_setting) = &config.indexer_infrastructure.log_level { @@ -133,122 +193,9 @@ impl Config { // add a LogFormat to config init_tracing("pretty".to_string()).expect( "Could not set up global default subscriber for logger, check \ - environmental variable `RUST_LOG` or the CLI input `log-level`", + environment variable `RUST_LOG`", ); Ok(config) } - - pub fn load(filename: &PathBuf) -> Result<Config> { - let config_defaults: &str = r##" - [indexer_infrastructure] - metrics_port = 7300 - log_level = "info" - - [postgres] - postgres_port = 5432 - - [network_subgraph] - allocation_syncing_interval_ms = 60000 - recently_closed_allocation_buffer_seconds = 3600 - - [escrow_subgraph] - escrow_syncing_interval_ms = 60000 - - [tap] - rav_request_trigger_value = 10 - rav_request_timestamp_buffer_ms = 60000 -
rav_request_timeout_secs = 5 - rav_request_receipt_limit = 10000 - max_unnaggregated_fees_per_sender = 20 - "##; - - // Load the user config file - let config_str = std::fs::read_to_string(filename)?; - - // Remove TOML comments, so that we can have shell expansion examples in the file. - let config_str = config_str - .lines() - .filter(|line| !line.trim().starts_with('#')) - .collect::>() - .join("\n"); - - let config_str = shellexpand::env(&config_str)?; - - let config: Config = Figment::new() - .merge(Toml::string(config_defaults)) - .merge(Toml::string(&config_str)) - .extract()?; - - Ok(config) - } -} - -#[cfg(test)] -mod tests { - use std::fs; - - use serde_assert::{Deserializer, Token}; - - use super::*; - - #[test] - fn test_parse_grt_value_to_u128_deserialize() { - macro_rules! parse { - ($input:expr) => {{ - let mut deserializer = - Deserializer::builder([Token::Str($input.to_string())]).build(); - parse_grt_value_to_nonzero_u128(&mut deserializer) - }}; - } - - assert_eq!(parse!("1").unwrap(), 1_000_000_000_000_000_000); - assert_eq!(parse!("1.1").unwrap(), 1_100_000_000_000_000_000); - assert_eq!( - parse!("1.000000000000000001").unwrap(), - 1_000_000_000_000_000_001 - ); - assert_eq!(parse!("0.000000000000000001").unwrap(), 1); - assert_eq!( - parse!("0").unwrap_err().to_string(), - "GRT value must be greater than 0" - ); - assert_eq!( - parse!("-1").unwrap_err().to_string(), - "GRT value must be greater than 0" - ); - assert_eq!( - parse!("1.0000000000000000001").unwrap(), - 1_000_000_000_000_000_000 - ); - assert_eq!( - parse!(format!("{}0", u128::MAX)).unwrap_err().to_string(), - "GRT value cannot be represented as a u128 GRT wei value" - ); - } - - /// Test loading the minimal configuration example file. - /// Makes sure that the minimal template is up to date with the code. - /// Note that it doesn't check that the config is actually minimal, but rather that all missing - /// fields have defaults. The burden of making sure the config is minimal is on the developer. - #[test] - fn test_minimal_config() { - Config::load(&PathBuf::from("minimal-config-example.toml")).unwrap(); - } - - /// Test that the maximal configuration file is up to date with the code. - /// Make sure that `test_minimal_config` passes before looking at this. - #[test] - fn test_maximal_config() { - // Generate full config by deserializing the minimal config and let the code fill in the defaults. 
- let max_config = Config::load(&PathBuf::from("minimal-config-example.toml")).unwrap(); - let max_config_file: Config = toml::from_str( - fs::read_to_string("maximal-config-example.toml") - .unwrap() - .as_str(), - ) - .unwrap(); - - assert_eq!(max_config, max_config_file); - } } diff --git a/tap-agent/src/database.rs b/tap-agent/src/database.rs index 9cd8d90e..b9a2d8ad 100644 --- a/tap-agent/src/database.rs +++ b/tap-agent/src/database.rs @@ -9,24 +9,17 @@ use tracing::debug; use crate::config; pub async fn connect(config: &config::Postgres) -> PgPool { - let url = format!( - "postgresql://{}:{}@{}:{}/{}", - config.postgres_username, - config.postgres_password, - config.postgres_host, - config.postgres_port, - config.postgres_database - ); + let url = &config.postgres_url; debug!( - postgres_host = tracing::field::debug(&config.postgres_host), - postgres_port = tracing::field::debug(&config.postgres_port), - postgres_database = tracing::field::debug(&config.postgres_database), + postgres_host = tracing::field::debug(&url.host()), + postgres_port = tracing::field::debug(&url.port()), + postgres_database = tracing::field::debug(&url.path()), "Connecting to database" ); PgPoolOptions::new() .max_connections(50) .acquire_timeout(Duration::from_secs(3)) - .connect(&url) + .connect(url.as_str()) .await .expect("Could not connect to DATABASE_URL") } diff --git a/tap-agent/src/lib.rs b/tap-agent/src/lib.rs index 3fab301f..3f3e1116 100644 --- a/tap-agent/src/lib.rs +++ b/tap-agent/src/lib.rs @@ -17,7 +17,6 @@ lazy_static! { } pub mod agent; -pub mod aggregator_endpoints; pub mod config; pub mod database; pub mod metrics; diff --git a/tap-agent/src/main.rs b/tap-agent/src/main.rs index b3800301..d72cae58 100644 --- a/tap-agent/src/main.rs +++ b/tap-agent/src/main.rs @@ -12,7 +12,6 @@ use indexer_tap_agent::{agent, metrics, CONFIG}; async fn main() -> Result<()> { // Parse basic configurations, also initializes logging. lazy_static::initialize(&CONFIG); - debug!("Config: {:?}", *CONFIG); let (manager, handler) = agent::start_agent().await; info!("TAP Agent started."); diff --git a/tap-agent/src/tap/test_utils.rs b/tap-agent/src/tap/test_utils.rs index 9b53d1e3..ecb7966d 100644 --- a/tap-agent/src/tap/test_utils.rs +++ b/tap-agent/src/tap/test_utils.rs @@ -105,6 +105,36 @@ pub async fn store_receipt(pgpool: &PgPool, signed_receipt: &SignedReceipt) -> a Ok(id) } +pub async fn store_invalid_receipt( + pgpool: &PgPool, + signed_receipt: &SignedReceipt, +) -> anyhow::Result { + let encoded_signature = signed_receipt.signature.to_vec(); + + let record = sqlx::query!( + r#" + INSERT INTO scalar_tap_receipts_invalid (signer_address, signature, allocation_id, timestamp_ns, nonce, value) + VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id + "#, + signed_receipt + .recover_signer(&TAP_EIP712_DOMAIN_SEPARATOR) + .unwrap() + .encode_hex::(), + encoded_signature, + signed_receipt.message.allocation_id.encode_hex::(), + BigDecimal::from(signed_receipt.message.timestamp_ns), + BigDecimal::from(signed_receipt.message.nonce), + BigDecimal::from(BigInt::from(signed_receipt.message.value)), + ) + .fetch_one(pgpool) + .await?; + + // id is BIGSERIAL, so it should be safe to cast to u64. 
+ let id: u64 = record.id.try_into()?; + Ok(id) +} + /// Fixture to generate a wallet and address pub fn wallet(index: u32) -> (LocalWallet, Address) { let wallet: LocalWallet = MnemonicBuilder::::default() diff --git a/template.toml b/template.toml deleted file mode 100644 index 8f0aca1c..00000000 --- a/template.toml +++ /dev/null @@ -1,28 +0,0 @@ -[ethereum] -ethereum = 'key' -ethereum_polling_interval = 4000 -mnemonic = 'abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon abondon' -indexer_address = '0xAcb05407d78129b5717bB51712D3e23a78A10929' - -[indexer_infrastructure] -port = 7300 -metrics_port = 7500 -graph_node_query_endpoint = 'http://localhost:8000' -graph_node_status_endpoint = 'http://localhost:8030/graphql' -log_level = 'Debug' -gcloud_profiling = false -free_query_auth_token = 'free-query-auth-token' - -[postgres] -postgres_host = '127.0.0.1' -postgres_port = 5432 -postgres_database = 'graph-node' -postgres_username = 'user' -postgres_password = 'pswd' - -[network_subgraph] -network_subgraph_endpoint = 'https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-testnet' -network_subgraph_auth_token = 'network-subgraph-auth-token' -serve_network_subgraph = true -allocation_syncing_interval = 120000 -client_signer_address = '0xe1EC4339019eC9628438F8755f847e3023e4ff9c'
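Aside: the deleted template.toml above configured Postgres through five separate fields; the reworked tap-agent config replaces them with the single postgres_url seen in tap-agent/src/config.rs. A small sketch of that consolidation using the url crate, which reqwest::Url re-exports (values illustrative):

use url::Url;

fn main() {
    // host, port, database, user and password collapse into one URL
    let url = Url::parse("postgres://user:pswd@127.0.0.1:5432/graph-node").unwrap();
    assert_eq!(url.host_str(), Some("127.0.0.1"));
    assert_eq!(url.port(), Some(5432));
    assert_eq!(url.path(), "/graph-node");
}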